Peter Zijlstra's rework of atomics and fallbacks. This solves two problems:

   1) Compilers uninline small atomic_* static inline functions, which can
      expose them to instrumentation.
 
   2) The instrumentation of atomic primitives was done at the architecture
      level while composites or fallbacks were provided at the generic level.
      As a result there are no uninstrumented variants of the fallbacks.
 
 Both issues were in the way of fully isolating fragile entry code paths
 and especially the text poke int3 handler, which is prone to an endless
 recursion problem when anything in that code path is about to be
 instrumented. This was always a problem, but got elevated due to the new
 batch mode updates of tracing.
 
 The solution is to mark the functions __always_inline and to flip the
 fallback and instrumentation so the non-instrumented variants are at the
 architecture level and the instrumentation is done in generic code.
 
 The latter introduces another fallback variant which will go away once all
 architectures have been moved over to arch_atomic_*.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAl7imyETHHRnbHhAbGlu
 dXRyb25peC5kZQAKCRCmGPVMDXSYoT0wEACcI3mDiK/9hNlfnobIJTup1E8erUdY
 /EZX8yFc/FgpSSKAMROu3kswZ+rSWmBEyzTJLEtBAaYU6haAuGx77AugoDHfVkYi
 +CEJvVEpeK7fzsgu9aTb/5B6EDUo/P1fzTFjVTK1I9M9KrGLxbkGRZWYUeX3KRZd
 RskRJMbp9L4oiNJNAuIP6QKoJ7PK/sL16e8oVZSQR6WW9ZH4uDZbyfl5z0xLjI7u
 PIsFCoDu7/ig2wpOhtAYRVsL8C6EQ8mSeEUMKeM7A7UFAkVadYB8PTmEJ/QcixW+
 5R0+cnQE/3I/n0KRwfz/7p2gzILJk/cY6XJWVoAsQb990MD2ahjZJPYI4jdknjz6
 8bL/QjBq+pZwbHWOhy+IdUntIYGkyjfLKoPLdSoh+uK1kl8Jsg+AlB2lN469BV1D
 r0NltiCLggvtqXEDLV4YZqxie6H38gvOzPDbH8I6M34+WkOI2sM0D1P/Naqw/Wgl
 M1Ygx4wYG8X4zDESAYMy9tSXh5lGDIjiF6sjGTOPYWwUIeRlINfWeJkiXKnYNwv/
 qTiC8ciCxhlQcDifdyfQjT3mHNcP7YpVKp317TCtU4+WxMSrW1h2SL6m6j74dNI/
 P7/J6GKONeLRbt0ZQbQGjqHxSuu6kqUEu69aVs5W9+WjNEoJW1EW4vrJ3TeF5jLh
 0Srl4VsyDwzuXw==
 =Jkzv
 -----END PGP SIGNATURE-----

Merge tag 'locking-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull atomics rework from Thomas Gleixner:
 "Peter Zijlstra's rework of atomics and fallbacks. This solves two
  problems:

   1) Compilers uninline small atomic_* static inline functions, which
      can expose them to instrumentation.

   2) The instrumentation of atomic primitives was done at the
      architecture level while composites or fallbacks were provided at
      the generic level. As a result there are no uninstrumented
      variants of the fallbacks.

  Both issues were in the way of fully isolating fragile entry code
  paths and especially the text poke int3 handler, which is prone to an
  endless recursion problem when anything in that code path is about to
  be instrumented. This was always a problem, but got elevated due to
  the new batch mode updates of tracing.

  The solution is to mark the functions __always_inline and to flip the
  fallback and instrumentation so the non-instrumented variants are at
  the architecture level and the instrumentation is done in generic
  code.

  The latter introduces another fallback variant which will go away once
  all architectures have been moved over to arch_atomic_*"

* tag 'locking-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/atomics: Flip fallbacks and instrumentation
  asm-generic/atomic: Use __always_inline for fallback wrappers
Linus Torvalds 2020-06-11 18:27:19 -07:00
commit 9716e57a01
28 changed files with 2594 additions and 269 deletions
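
The merge message above describes flipping the layering: architectures now provide
non-instrumented arch_atomic_*() helpers marked __always_inline, and the generic
layer adds the instrumentation on top. A minimal sketch of the shape of the wrappers
generated into asm-generic/atomic-instrumented.h (illustrative only, assuming the
instrument_atomic_write() hook from <linux/instrumented.h>; the real wrappers are
emitted by scripts/atomic/gen-atomic-instrumented.sh):

	/* Generic, instrumented entry point used by regular kernel code. */
	static __always_inline int
	atomic_add_return(int i, atomic_t *v)
	{
		instrument_atomic_write(v, sizeof(*v));	/* KASAN/KCSAN hook */
		return arch_atomic_add_return(i, v);	/* arch op, never instrumented */
	}

Fragile paths such as the text poke int3 handler can stick to the arch_atomic_*()
variants and stay free of instrumentation.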


@ -101,8 +101,8 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define ATOMIC_INIT(i) { (i) }
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_read(v) __READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
@ -225,6 +225,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#include <asm-generic/atomic-instrumented.h>
#define ARCH_ATOMIC
#endif /* __ASM_ATOMIC_H */


@ -28,7 +28,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
* Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
* it's non-inlined function that increases binary size and stack usage.
*/
return READ_ONCE((v)->counter);
return __READ_ONCE((v)->counter);
}
/**
@ -40,7 +40,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
*/
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
__WRITE_ONCE(v->counter, i);
}
/**
@ -166,6 +166,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return
/**
* arch_atomic_sub_return - subtract integer and return
@ -178,32 +179,37 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
static inline void arch_atomic_and(int i, atomic_t *v)
{
@ -221,6 +227,7 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and
static inline void arch_atomic_or(int i, atomic_t *v)
{
@ -238,6 +245,7 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or
static inline void arch_atomic_xor(int i, atomic_t *v)
{
@ -255,6 +263,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
@ -262,6 +271,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
#include <asm-generic/atomic-instrumented.h>
#define ARCH_ATOMIC
#endif /* _ASM_X86_ATOMIC_H */


@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
return arch_cmpxchg64(&v->counter, o, n);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
/**
* arch_atomic64_xchg - xchg atomic64 variable
@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
: "memory");
return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg
/**
* arch_atomic64_set - set atomic64 variable
@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
/*
* Other variants with different arithmetic operators:
@ -149,6 +152,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
"S" (v) : "memory");
return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
return old;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
return old;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
return old;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
return old;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))


@ -19,7 +19,7 @@
*/
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE((v)->counter);
return __READ_ONCE((v)->counter);
}
/**
@ -31,7 +31,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*/
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
__WRITE_ONCE(v->counter, i);
}
/**
@ -159,37 +159,43 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
@ -207,6 +213,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@ -224,6 +231,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@ -241,5 +249,6 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif /* _ASM_X86_ATOMIC64_64_H */

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -25,6 +25,12 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
@ -71,7 +77,12 @@
__ret; \
})
#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
#else
#include <linux/atomic-fallback.h>
#endif
#include <asm-generic/atomic-long.h>


@ -1,8 +1,8 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}${name}${sfx}_acquire(${params})
static __always_inline ${ret}
${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_acquire_fence();
return ret;
}
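
For a single operation, the reworked acquire template above roughly expands to the
following in the arch_ flavour (a hand-written sketch of what gen-atomic-fallback.sh
emits into linux/atomic-arch-fallback.h for an architecture that only implements
arch_atomic_add_return_relaxed()):

	static __always_inline int
	arch_atomic_add_return_acquire(int i, atomic_t *v)
	{
		int ret = arch_atomic_add_return_relaxed(i, v);
		__atomic_acquire_fence();
		return ret;
	}

The __always_inline marking is what prevents the compiler from uninlining these
small wrappers and thereby exposing them to instrumentation (problem 1 in the
merge message).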


@ -1,6 +1,6 @@
cat <<EOF
/**
* ${atomic}_add_negative - add and test if negative
* ${arch}${atomic}_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type ${atomic}_t
*
@ -8,9 +8,9 @@ cat <<EOF
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline bool
${atomic}_add_negative(${int} i, ${atomic}_t *v)
static __always_inline bool
${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
return ${atomic}_add_return(i, v) < 0;
return ${arch}${atomic}_add_return(i, v) < 0;
}
EOF


@ -1,6 +1,6 @@
cat << EOF
/**
* ${atomic}_add_unless - add unless the number is already a given value
* ${arch}${atomic}_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@ -8,9 +8,9 @@ cat << EOF
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
static inline bool
${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
static __always_inline bool
${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
return ${atomic}_fetch_add_unless(v, a, u) != u;
return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF


@ -1,7 +1,7 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
static __always_inline ${ret}
${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF


@ -1,7 +1,7 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
static __always_inline ${ret}
${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF


@ -1,15 +1,15 @@
cat <<EOF
/**
* ${atomic}_dec_and_test - decrement and test
* ${arch}${atomic}_dec_and_test - decrement and test
* @v: pointer of type ${atomic}_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline bool
${atomic}_dec_and_test(${atomic}_t *v)
static __always_inline bool
${arch}${atomic}_dec_and_test(${atomic}_t *v)
{
return ${atomic}_dec_return(v) == 0;
return ${arch}${atomic}_dec_return(v) == 0;
}
EOF


@ -1,14 +1,14 @@
cat <<EOF
static inline ${ret}
${atomic}_dec_if_positive(${atomic}_t *v)
static __always_inline ${ret}
${arch}${atomic}_dec_if_positive(${atomic}_t *v)
{
${int} dec, c = ${atomic}_read(v);
${int} dec, c = ${arch}${atomic}_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
} while (!${atomic}_try_cmpxchg(v, &c, dec));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));
return dec;
}


@ -1,13 +1,13 @@
cat <<EOF
static inline bool
${atomic}_dec_unless_positive(${atomic}_t *v)
static __always_inline bool
${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
{
${int} c = ${atomic}_read(v);
${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c > 0))
return false;
} while (!${atomic}_try_cmpxchg(v, &c, c - 1));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
return true;
}


@ -1,10 +1,10 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}${name}${sfx}(${params})
static __always_inline ${ret}
${arch}${atomic}_${pfx}${name}${sfx}(${params})
{
${ret} ret;
__atomic_pre_full_fence();
ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_post_full_fence();
return ret;
}


@ -1,6 +1,6 @@
cat << EOF
/**
* ${atomic}_fetch_add_unless - add unless the number is already a given value
* ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@ -8,15 +8,15 @@ cat << EOF
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
static inline ${int}
${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
static __always_inline ${int}
${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
${int} c = ${atomic}_read(v);
${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c == u))
break;
} while (!${atomic}_try_cmpxchg(v, &c, c + a));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
return c;
}


@ -1,7 +1,7 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
static __always_inline ${ret}
${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
}
EOF


@ -1,15 +1,15 @@
cat <<EOF
/**
* ${atomic}_inc_and_test - increment and test
* ${arch}${atomic}_inc_and_test - increment and test
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline bool
${atomic}_inc_and_test(${atomic}_t *v)
static __always_inline bool
${arch}${atomic}_inc_and_test(${atomic}_t *v)
{
return ${atomic}_inc_return(v) == 0;
return ${arch}${atomic}_inc_return(v) == 0;
}
EOF


@ -1,14 +1,14 @@
cat <<EOF
/**
* ${atomic}_inc_not_zero - increment unless the number is zero
* ${arch}${atomic}_inc_not_zero - increment unless the number is zero
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
static inline bool
${atomic}_inc_not_zero(${atomic}_t *v)
static __always_inline bool
${arch}${atomic}_inc_not_zero(${atomic}_t *v)
{
return ${atomic}_add_unless(v, 1, 0);
return ${arch}${atomic}_add_unless(v, 1, 0);
}
EOF


@ -1,13 +1,13 @@
cat <<EOF
static inline bool
${atomic}_inc_unless_negative(${atomic}_t *v)
static __always_inline bool
${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
{
${int} c = ${atomic}_read(v);
${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c < 0))
return false;
} while (!${atomic}_try_cmpxchg(v, &c, c + 1));
} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));
return true;
}


@ -1,6 +1,6 @@
cat <<EOF
static inline ${ret}
${atomic}_read_acquire(const ${atomic}_t *v)
static __always_inline ${ret}
${arch}${atomic}_read_acquire(const ${atomic}_t *v)
{
return smp_load_acquire(&(v)->counter);
}


@ -1,8 +1,8 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}${name}${sfx}_release(${params})
static __always_inline ${ret}
${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
{
__atomic_release_fence();
${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
}
EOF


@ -1,6 +1,6 @@
cat <<EOF
static inline void
${atomic}_set_release(${atomic}_t *v, ${int} i)
static __always_inline void
${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
{
smp_store_release(&(v)->counter, i);
}


@ -1,6 +1,6 @@
cat <<EOF
/**
* ${atomic}_sub_and_test - subtract value from variable and test result
* ${arch}${atomic}_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type ${atomic}_t
*
@ -8,9 +8,9 @@ cat <<EOF
* true if the result is zero, or false for all
* other cases.
*/
static inline bool
${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
static __always_inline bool
${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
return ${atomic}_sub_return(i, v) == 0;
return ${arch}${atomic}_sub_return(i, v) == 0;
}
EOF


@ -1,9 +1,9 @@
cat <<EOF
static inline bool
${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
static __always_inline bool
${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
${int} r, o = *old;
r = ${atomic}_cmpxchg${order}(v, o, new);
r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);


@ -2,10 +2,11 @@
# SPDX-License-Identifier: GPL-2.0
ATOMICDIR=$(dirname $0)
ARCH=$2
. ${ATOMICDIR}/atomic-tbl.sh
#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_template_fallback()
{
local template="$1"; shift
@ -14,10 +15,11 @@ gen_template_fallback()
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local arch="$1"; shift
local atomic="$1"; shift
local int="$1"; shift
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
@ -32,7 +34,7 @@ gen_template_fallback()
fi
}
#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_proto_fallback()
{
local meta="$1"; shift
@ -56,16 +58,17 @@ cat << EOF
EOF
}
#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
gen_proto_order_variants()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local atomic="$1"
local arch="$1"
local atomic="$2"
local basename="${atomic}_${pfx}${name}${sfx}"
local basename="${arch}${atomic}_${pfx}${name}${sfx}"
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
@ -94,7 +97,7 @@ gen_proto_order_variants()
gen_basic_fallbacks "${basename}"
if [ ! -z "${template}" ]; then
printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n"
printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
@ -149,20 +152,19 @@ cat << EOF
#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H
#include <linux/compiler.h>
EOF
for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
gen_xchg_fallbacks "${xchg}"
done
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic" "int" ${args}
gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
done
cat <<EOF
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
@ -170,12 +172,9 @@ cat <<EOF
EOF
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
done
cat <<EOF
#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#endif /* _LINUX_ATOMIC_FALLBACK_H */
EOF


@ -10,10 +10,11 @@ LINUXDIR=${ATOMICDIR}/../..
cat <<EOF |
gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
gen-atomic-long.sh asm-generic/atomic-long.h
gen-atomic-fallback.sh linux/atomic-arch-fallback.h arch_
gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
while read script header; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
while read script header args; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
HASH="$(sha1sum ${LINUXDIR}/include/${header})"
HASH="${HASH%% *}"
printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
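
With the extra ARCH argument plumbed through above, regenerating the headers looks
roughly like this (run from the top of the kernel tree; scripts/atomic/check-atomics.sh
is assumed to be the companion script that compares the recorded sha1sums):

	sh scripts/atomic/gen-atomics.sh	# regenerate the linux/ and asm-generic/ atomic headers
	sh scripts/atomic/check-atomics.sh	# complain if the committed headers are stale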