locking/atomic: atomic: simplify ifdeffery

Now that asm-generic/atomic.h is only used by architectures without any
architecture-specific atomic definitions, we know that there will be no
architecture-specific implementations to override, and can remove the
ifdeffery this has previously required, bringing it into line with
asm-generic/atomic64.h.

At the same time, we can implement atomic_add() and atomic_sub()
directly using ATOMIC_OP(), since we know architectures won't provide
atomic_add_return() or atomic_sub_return().

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-9-mark.rutland@arm.com
Mark Rutland 2021-05-25 15:02:07 +01:00 committed by Peter Zijlstra
parent 89eb78d542
commit d0e03218ca
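For context before the diff: the #ifndef guards existed so that an architecture header could supply its own implementation of an op and the generic header would then skip its version. A rough sketch of that override pattern, using a hypothetical arch header (illustration only; no in-tree architecture relies on this any more, which is what makes the guards removable):

	/* hypothetical arch/example/include/asm/atomic.h */
	static inline int atomic_add_return(int i, atomic_t *v)
	{
		/* architecture-specific implementation would go here */
		return 0;
	}
	#define atomic_add_return atomic_add_return	/* makes the generic #ifndef skip its copy */

	#include <asm-generic/atomic.h>	/* fills in everything the arch did not define */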

include/asm-generic/atomic.h

@@ -93,65 +93,27 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 
 #endif /* CONFIG_SMP */
 
-#ifndef atomic_add_return
 ATOMIC_OP_RETURN(add, +)
-#endif
-
-#ifndef atomic_sub_return
 ATOMIC_OP_RETURN(sub, -)
-#endif
 
-#ifndef atomic_fetch_add
 ATOMIC_FETCH_OP(add, +)
-#endif
-
-#ifndef atomic_fetch_sub
 ATOMIC_FETCH_OP(sub, -)
-#endif
-
-#ifndef atomic_fetch_and
 ATOMIC_FETCH_OP(and, &)
-#endif
-
-#ifndef atomic_fetch_or
 ATOMIC_FETCH_OP(or, |)
-#endif
-
-#ifndef atomic_fetch_xor
 ATOMIC_FETCH_OP(xor, ^)
-#endif
 
-#ifndef atomic_and
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
 ATOMIC_OP(and, &)
-#endif
-
-#ifndef atomic_or
 ATOMIC_OP(or, |)
-#endif
-
-#ifndef atomic_xor
 ATOMIC_OP(xor, ^)
-#endif
 
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#ifndef atomic_read
-#define atomic_read(v)	READ_ONCE((v)->counter)
-#endif
-
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-static inline void atomic_add(int i, atomic_t *v)
-{
-	atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	atomic_sub_return(i, v);
-}
+#define atomic_read(v)			READ_ONCE((v)->counter)
+#define atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
 
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
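
With the guards gone, atomic_add() and atomic_sub() now come straight out of ATOMIC_OP(). Roughly, in the !CONFIG_SMP flavour of this header, ATOMIC_OP(add, +) expands to something like the following (paraphrased sketch, not the verbatim macro body):

	static inline void atomic_add(int i, atomic_t *v)
	{
		unsigned long flags;

		raw_local_irq_save(flags);
		v->counter = v->counter + i;	/* c_op is '+' for the add op */
		raw_local_irq_restore(flags);
	}

Previously, atomic_add() was an open-coded wrapper that called atomic_add_return(i, v) and discarded the result, kept only so that an architecture-provided atomic_add_return() would be picked up; with no such overrides left, generating it directly from ATOMIC_OP() is equivalent and simpler.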