linux-stable/arch/h8300/include/asm/atomic.h
Mark Rutland 9837559d8e atomics/treewide: Make unconditional inc/dec ops optional
Many of the inc/dec ops are mandatory, but for most architectures inc/dec are
simply trivial wrappers around their corresponding add/sub ops.

Let's make all the inc/dec ops optional, so that we can get rid of these
boilerplate wrappers.
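
For illustration (a rough sketch of the fallback pattern, not necessarily the
exact hunk this patch adds), an architecture that omits atomic_inc()/atomic_dec()
can then pick up generic wrappers along the lines of:

  #ifndef atomic_inc
  #define atomic_inc(v)		atomic_add(1, (v))
  #endif

  #ifndef atomic_dec
  #define atomic_dec(v)		atomic_sub(1, (v))
  #endif

instead of open-coding the same wrapper in every architecture.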

The instrumented atomics are updated accordingly.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-17-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2018-06-21 14:25:24 +02:00


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/irqflags.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
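
/*
 * The H8/300 port is uniprocessor-only, so these read-modify-write
 * sequences only need to be safe against local interrupts: each op
 * below simply disables IRQs around a plain C update of the counter.
 */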

#define ATOMIC_OP_RETURN(op, c_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	h8300flags flags;					\
	int ret;						\
								\
	flags = arch_local_irq_save();				\
	ret = v->counter c_op i;				\
	arch_local_irq_restore(flags);				\
	return ret;						\
}
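
/*
 * Unlike ATOMIC_OP_RETURN() above, which hands back the new value of
 * the counter, the fetch_ variant below hands back the value the
 * counter held before the update.
 */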
#define ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	h8300flags flags;					\
	int ret;						\
								\
	flags = arch_local_irq_save();				\
	ret = v->counter;					\
	v->counter c_op i;					\
	arch_local_irq_restore(flags);				\
	return ret;						\
}

#define ATOMIC_OP(op, c_op)					\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	h8300flags flags;					\
								\
	flags = arch_local_irq_save();				\
	v->counter c_op i;					\
	arch_local_irq_restore(flags);				\
}
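
/*
 * ATOMIC_OP_RETURN(add, +=) expands to atomic_add_return() and
 * ATOMIC_OP_RETURN(sub, -=) to atomic_sub_return(); ATOMIC_OPS()
 * then generates the void and fetch variants (e.g. atomic_and(),
 * atomic_fetch_and()) for the bitwise and arithmetic ops below.
 */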

ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP_RETURN(sub, -=)

#define ATOMIC_OPS(op, c_op)					\
	ATOMIC_OP(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or,  |=)
ATOMIC_OPS(xor, ^=)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
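
/*
 * atomic_cmpxchg(): if @v currently holds @old, store @new; the value
 * read from @v is returned either way, so callers can tell whether the
 * exchange took place.
 */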
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	h8300flags flags;

	flags = arch_local_irq_save();
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	arch_local_irq_restore(flags);
	return ret;
}
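
/*
 * atomic_fetch_add_unless(): add @a to @v unless @v equals @u, and
 * return the old value.  The self-referencing #define after the body
 * tells the generic atomic code that this arch provides its own
 * implementation, so no fallback is generated.
 */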
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	h8300flags flags;

	flags = arch_local_irq_save();
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	arch_local_irq_restore(flags);
	return ret;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless

#endif /* __ARCH_H8300_ATOMIC__ */