mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-30 08:02:30 +00:00)
6d2779ecae
Since commit 9257959a6e ("locking/atomic: scripts: restructure fallback
ifdeffery"), the ordering fallbacks for atomic*_read_acquire() and
atomic*_set_release() erroneously fall back to the implicitly relaxed
atomic*_read() and atomic*_set() variants respectively, without any
additional barriers. This loses the ACQUIRE and RELEASE ordering semantics,
which can result in a wide variety of problems, even on strongly-ordered
architectures where the implementation of atomic*_read() and/or
atomic*_set() allows the compiler to reorder those relative to other
accesses.

In practice this has been observed to break bit spinlocks on arm64,
resulting in dentry cache corruption.

The fallback logic was intended to allow ACQUIRE/RELEASE/RELAXED ops to be
defined in terms of FULL ops, but where an op had RELAXED ordering by
default, this unintentionally permitted the ACQUIRE/RELEASE ops to be
defined in terms of the implicitly RELAXED default.

This patch corrects the logic to avoid falling back to implicitly RELAXED
ops, resulting in the same behaviour as prior to commit 9257959a6e.

I've verified the resulting assembly on arm64 by generating outlined
wrappers of the atomics. Prior to this patch the compiler generates
sequences using relaxed load (LDR) and store (STR) instructions, e.g.

| <outlined_atomic64_read_acquire>:
|   ldr x0, [x0]
|   ret
|
| <outlined_atomic64_set_release>:
|   str x1, [x0]
|   ret

With this patch applied the compiler generates sequences using the intended
load-acquire (LDAR) and store-release (STLR) instructions, e.g.

| <outlined_atomic64_read_acquire>:
|   ldar x0, [x0]
|   ret
|
| <outlined_atomic64_set_release>:
|   stlr x1, [x0]
|   ret

To make sure that there were no other victims of the ifdeffery rewrite, I
generated outlined copies of all of the {atomic,atomic64,atomic_long}
atomic operations before and after commit 9257959a6e. A diff of the
generated assembly on arm64 shows that only the read_acquire() and
set_release() operations were changed, and only lost their intended
ordering:

| [mark@lakrids:~/src/linux]% diff -u \
|   <(aarch64-linux-gnu-objdump -d before-9257959a6e5b4fca.o)
|   <(aarch64-linux-gnu-objdump -d after-9257959a6e5b4fca.o)
| --- /proc/self/fd/11 2023-09-19 16:51:51.114779415 +0100
| +++ /proc/self/fd/16 2023-09-19 16:51:51.114779415 +0100
| @@ -1,5 +1,5 @@
|
| -before-9257959a6e5b4fca.o: file format elf64-littleaarch64
| +after-9257959a6e5b4fca.o: file format elf64-littleaarch64
|
|
| Disassembly of section .text:
|
| @@ -9,7 +9,7 @@
|   4: d65f03c0 ret
|
| 0000000000000008 <outlined_atomic_read_acquire>:
| -   8: 88dffc00 ldar w0, [x0]
| +   8: b9400000 ldr w0, [x0]
|   c: d65f03c0 ret
|
| 0000000000000010 <outlined_atomic_set>:
| @@ -17,7 +17,7 @@
|   14: d65f03c0 ret
|
| 0000000000000018 <outlined_atomic_set_release>:
| -   18: 889ffc01 stlr w1, [x0]
| +   18: b9000001 str w1, [x0]
|   1c: d65f03c0 ret
|
| 0000000000000020 <outlined_atomic_add>:
| @@ -1230,7 +1230,7 @@
|   1070: d65f03c0 ret
|
| 0000000000001074 <outlined_atomic64_read_acquire>:
| -   1074: c8dffc00 ldar x0, [x0]
| +   1074: f9400000 ldr x0, [x0]
|   1078: d65f03c0 ret
|
| 000000000000107c <outlined_atomic64_set>:
| @@ -1238,7 +1238,7 @@
|   1080: d65f03c0 ret
|
| 0000000000001084 <outlined_atomic64_set_release>:
| -   1084: c89ffc01 stlr x1, [x0]
| +   1084: f9000001 str x1, [x0]
|   1088: d65f03c0 ret
|
| 000000000000108c <outlined_atomic64_add>:
| @@ -2427,7 +2427,7 @@
|   207c: d65f03c0 ret
|
| 0000000000002080 <outlined_atomic_long_read_acquire>:
| -   2080: c8dffc00 ldar x0, [x0]
| +   2080: f9400000 ldr x0, [x0]
|   2084: d65f03c0 ret
|
| 0000000000002088 <outlined_atomic_long_set>:
| @@ -2435,7 +2435,7 @@
|   208c: d65f03c0 ret
|
| 0000000000002090 <outlined_atomic_long_set_release>:
| -   2090: c89ffc01 stlr x1, [x0]
| +   2090: f9000001 str x1, [x0]
|   2094: d65f03c0 ret
|
| 0000000000002098 <outlined_atomic_long_add>:

I've build tested this with a variety of configs for alpha, arm, arm64,
csky, i386, m68k, microblaze, mips, nios2, openrisc, powerpc, riscv, s390,
sh, sparc, x86_64, and xtensa, for which I've seen no issues. I was unable
to build test for ia64 and parisc due to existing build breakage in
v6.6-rc2.

Fixes: 9257959a6e ("locking/atomic: scripts: restructure fallback ifdeffery")
Reported-by: Ming Lei <ming.lei@redhat.com>
Reported-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Baokun Li <libaokun1@huawei.com>
Link: https://lkml.kernel.org/r/20230919171430.2697727-1-mark.rutland@arm.com
4652 lines · 124 KiB · C
// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif
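/*
 * Illustrative note (added commentary, not emitted by the generator): every
 * ordering variant below is selected with the same cascade. Use the arch op
 * of the requested ordering if it exists; otherwise build it from the arch
 * _relaxed op plus the appropriate acquire/release/full fence; otherwise
 * fall back to the arch's fully ordered op, which is stronger than needed
 * but never weaker. What the cascade must never do is satisfy an _acquire
 * or _release request with an implicitly relaxed default, which is exactly
 * the read_acquire()/set_release() bug fixed by this commit.
 */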
#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif
#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
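/*
 * Illustrative usage sketch (hypothetical helper, not part of the generated
 * header): try_cmpxchg() returns true on success and, on failure, writes the
 * value it observed back through @_oldp, which lets CAS loops restart without
 * an extra re-read.
 */
static __always_inline bool example_set_flag_once(unsigned long *word,
						  unsigned long flag)
{
	unsigned long old = *word;

	do {
		if (old & flag)
			return false;	/* somebody else already set it */
	} while (!raw_try_cmpxchg(word, &old, old | flag));

	return true;
}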
#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg
/**
|
|
* raw_atomic_read() - atomic load with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically loads the value of @v with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_read() elsewhere.
|
|
*
|
|
* Return: The value loaded from @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_read(const atomic_t *v)
|
|
{
|
|
return arch_atomic_read(v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_read_acquire() - atomic load with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically loads the value of @v with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
|
|
*
|
|
* Return: The value loaded from @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_read_acquire(const atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_read_acquire)
|
|
return arch_atomic_read_acquire(v);
|
|
#else
|
|
int ret;
|
|
|
|
if (__native_word(atomic_t)) {
|
|
ret = smp_load_acquire(&(v)->counter);
|
|
} else {
|
|
ret = raw_atomic_read(v);
|
|
__atomic_acquire_fence();
|
|
}
|
|
|
|
return ret;
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_set() - atomic set with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
* @i: int value to assign
|
|
*
|
|
* Atomically sets @v to @i with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_set() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_set(atomic_t *v, int i)
|
|
{
|
|
arch_atomic_set(v, i);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_set_release() - atomic set with release ordering
|
|
* @v: pointer to atomic_t
|
|
* @i: int value to assign
|
|
*
|
|
* Atomically sets @v to @i with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_set_release(atomic_t *v, int i)
|
|
{
|
|
#if defined(arch_atomic_set_release)
|
|
arch_atomic_set_release(v, i);
|
|
#else
|
|
if (__native_word(atomic_t)) {
|
|
smp_store_release(&(v)->counter, i);
|
|
} else {
|
|
__atomic_release_fence();
|
|
raw_atomic_set(v, i);
|
|
}
|
|
#endif
|
|
}
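/*
 * Illustrative sketch (hypothetical helpers, not generator output): a typical
 * producer/consumer pairing of the two operations above. The release store
 * publishes @data before @ready becomes 1; the acquire load guarantees that a
 * consumer observing @ready == 1 also sees the published @data. This is the
 * ordering that the broken fallbacks silently dropped.
 */
static __always_inline void example_publish(atomic_t *ready, int *data, int val)
{
	*data = val;
	raw_atomic_set_release(ready, 1);
}

static __always_inline bool example_consume(atomic_t *ready, int *data, int *val)
{
	if (!raw_atomic_read_acquire(ready))
		return false;
	*val = *data;
	return true;
}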
|
|
|
|
/**
|
|
* raw_atomic_add() - atomic add with relaxed ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_add(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_add(i, v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_add_return() - atomic add with full ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_add_return(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_return)
|
|
return arch_atomic_add_return(i, v);
|
|
#elif defined(arch_atomic_add_return_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_add_return_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_add_return"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_add_return_acquire() - atomic add with acquire ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_add_return_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_return_acquire)
|
|
return arch_atomic_add_return_acquire(i, v);
|
|
#elif defined(arch_atomic_add_return_relaxed)
|
|
int ret = arch_atomic_add_return_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_add_return)
|
|
return arch_atomic_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_add_return_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_add_return_release() - atomic add with release ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_add_return_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_return_release)
|
|
return arch_atomic_add_return_release(i, v);
|
|
#elif defined(arch_atomic_add_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_add_return_relaxed(i, v);
|
|
#elif defined(arch_atomic_add_return)
|
|
return arch_atomic_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_add_return_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_add_return_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_return_relaxed)
|
|
return arch_atomic_add_return_relaxed(i, v);
|
|
#elif defined(arch_atomic_add_return)
|
|
return arch_atomic_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_add_return_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_add() - atomic add with full ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_add(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_add)
|
|
return arch_atomic_fetch_add(i, v);
|
|
#elif defined(arch_atomic_fetch_add_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_add_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_add"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_add_acquire)
|
|
return arch_atomic_fetch_add_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_add_relaxed)
|
|
int ret = arch_atomic_fetch_add_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_add)
|
|
return arch_atomic_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_add_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_add_release() - atomic add with release ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_add_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_add_release)
|
|
return arch_atomic_fetch_add_release(i, v);
|
|
#elif defined(arch_atomic_fetch_add_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_add_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_add)
|
|
return arch_atomic_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_add_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_add_relaxed)
|
|
return arch_atomic_fetch_add_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_add)
|
|
return arch_atomic_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_add_relaxed"
|
|
#endif
|
|
}
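/*
 * Illustrative sketch (hypothetical helper, not generator output): the
 * fetch_add() forms return the value @v held before the addition, while the
 * add_return() forms above return the updated value. With _relaxed ordering
 * only the atomicity of the RMW itself is guaranteed.
 */
static __always_inline int example_claim_slot(atomic_t *next_slot)
{
	/* The old value is the index of the slot this caller just claimed. */
	return raw_atomic_fetch_add_relaxed(1, next_slot);
}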
|
|
|
|
/**
|
|
* raw_atomic_sub() - atomic subtract with relaxed ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_sub() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_sub(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_sub(i, v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_sub_return() - atomic subtract with full ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_sub_return(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#elif defined(arch_atomic_sub_return_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_sub_return_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_sub_return_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return_acquire)
|
|
return arch_atomic_sub_return_acquire(i, v);
|
|
#elif defined(arch_atomic_sub_return_relaxed)
|
|
int ret = arch_atomic_sub_return_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_sub_return_release() - atomic subtract with release ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_sub_return_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return_release)
|
|
return arch_atomic_sub_return_release(i, v);
|
|
#elif defined(arch_atomic_sub_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_sub_return_relaxed(i, v);
|
|
#elif defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_return_relaxed)
|
|
return arch_atomic_sub_return_relaxed(i, v);
|
|
#elif defined(arch_atomic_sub_return)
|
|
return arch_atomic_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_sub_return_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_sub() - atomic subtract with full ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#elif defined(arch_atomic_fetch_sub_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_sub_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub_acquire)
|
|
return arch_atomic_fetch_sub_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_sub_relaxed)
|
|
int ret = arch_atomic_fetch_sub_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_sub_release() - atomic subtract with release ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub_release)
|
|
return arch_atomic_fetch_sub_release(i, v);
|
|
#elif defined(arch_atomic_fetch_sub_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_sub_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_sub_relaxed)
|
|
return arch_atomic_fetch_sub_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_sub)
|
|
return arch_atomic_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_sub_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_inc() - atomic increment with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_inc(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc)
|
|
arch_atomic_inc(v);
|
|
#else
|
|
raw_atomic_add(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_inc_return() - atomic increment with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_inc_return(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#elif defined(arch_atomic_inc_return_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_inc_return_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_add_return(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_inc_return_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return_acquire)
|
|
return arch_atomic_inc_return_acquire(v);
|
|
#elif defined(arch_atomic_inc_return_relaxed)
|
|
int ret = arch_atomic_inc_return_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#else
|
|
return raw_atomic_add_return_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_inc_return_release() - atomic increment with release ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_inc_return_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return_release)
|
|
return arch_atomic_inc_return_release(v);
|
|
#elif defined(arch_atomic_inc_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_inc_return_relaxed(v);
|
|
#elif defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#else
|
|
return raw_atomic_add_return_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_inc_return_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_return_relaxed)
|
|
return arch_atomic_inc_return_relaxed(v);
|
|
#elif defined(arch_atomic_inc_return)
|
|
return arch_atomic_inc_return(v);
|
|
#else
|
|
return raw_atomic_add_return_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_inc() - atomic increment with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#elif defined(arch_atomic_fetch_inc_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_inc_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_fetch_add(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc_acquire)
|
|
return arch_atomic_fetch_inc_acquire(v);
|
|
#elif defined(arch_atomic_fetch_inc_relaxed)
|
|
int ret = arch_atomic_fetch_inc_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#else
|
|
return raw_atomic_fetch_add_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_inc_release() - atomic increment with release ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc_release)
|
|
return arch_atomic_fetch_inc_release(v);
|
|
#elif defined(arch_atomic_fetch_inc_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_inc_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#else
|
|
return raw_atomic_fetch_add_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_inc_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_inc_relaxed)
|
|
return arch_atomic_fetch_inc_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_inc)
|
|
return arch_atomic_fetch_inc(v);
|
|
#else
|
|
return raw_atomic_fetch_add_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_dec() - atomic decrement with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_dec() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_dec(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec)
|
|
arch_atomic_dec(v);
|
|
#else
|
|
raw_atomic_sub(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_dec_return() - atomic decrement with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_dec_return(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#elif defined(arch_atomic_dec_return_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_dec_return_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_sub_return(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_dec_return_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return_acquire)
|
|
return arch_atomic_dec_return_acquire(v);
|
|
#elif defined(arch_atomic_dec_return_relaxed)
|
|
int ret = arch_atomic_dec_return_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#else
|
|
return raw_atomic_sub_return_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_dec_return_release() - atomic decrement with release ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_dec_return_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return_release)
|
|
return arch_atomic_dec_return_release(v);
|
|
#elif defined(arch_atomic_dec_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_dec_return_relaxed(v);
|
|
#elif defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#else
|
|
return raw_atomic_sub_return_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_dec_return_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_return_relaxed)
|
|
return arch_atomic_dec_return_relaxed(v);
|
|
#elif defined(arch_atomic_dec_return)
|
|
return arch_atomic_dec_return(v);
|
|
#else
|
|
return raw_atomic_sub_return_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_dec() - atomic decrement with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#elif defined(arch_atomic_fetch_dec_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_dec_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_fetch_sub(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec_acquire(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec_acquire)
|
|
return arch_atomic_fetch_dec_acquire(v);
|
|
#elif defined(arch_atomic_fetch_dec_relaxed)
|
|
int ret = arch_atomic_fetch_dec_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#else
|
|
return raw_atomic_fetch_sub_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_dec_release() - atomic decrement with release ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec_release(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec_release)
|
|
return arch_atomic_fetch_dec_release(v);
|
|
#elif defined(arch_atomic_fetch_dec_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_dec_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#else
|
|
return raw_atomic_fetch_sub_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_dec_relaxed(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_dec_relaxed)
|
|
return arch_atomic_fetch_dec_relaxed(v);
|
|
#elif defined(arch_atomic_fetch_dec)
|
|
return arch_atomic_fetch_dec(v);
|
|
#else
|
|
return raw_atomic_fetch_sub_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_and() - atomic bitwise AND with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_and() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_and(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_and(i, v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_and() - atomic bitwise AND with full ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_and(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#elif defined(arch_atomic_fetch_and_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_and_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and_acquire)
|
|
return arch_atomic_fetch_and_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_and_relaxed)
|
|
int ret = arch_atomic_fetch_and_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_and_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and_release)
|
|
return arch_atomic_fetch_and_release(i, v);
|
|
#elif defined(arch_atomic_fetch_and_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_and_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_and_relaxed)
|
|
return arch_atomic_fetch_and_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_and)
|
|
return arch_atomic_fetch_and(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_and_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_andnot(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_andnot)
|
|
arch_atomic_andnot(i, v);
|
|
#else
|
|
raw_atomic_and(~i, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & ~@i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_andnot_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_fetch_and(~i, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & ~@i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot_acquire)
|
|
return arch_atomic_fetch_andnot_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot_relaxed)
|
|
int ret = arch_atomic_fetch_andnot_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#else
|
|
return raw_atomic_fetch_and_acquire(~i, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & ~@i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot_release)
|
|
return arch_atomic_fetch_andnot_release(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_andnot_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#else
|
|
return raw_atomic_fetch_and_release(~i, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v & ~@i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_andnot_relaxed)
|
|
return arch_atomic_fetch_andnot_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_andnot)
|
|
return arch_atomic_fetch_andnot(i, v);
|
|
#else
|
|
return raw_atomic_fetch_and_relaxed(~i, v);
|
|
#endif
|
|
}
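/*
 * Illustrative sketch (hypothetical flag and helper, not generator output):
 * the fetch_andnot() forms make "clear a bit and report whether it was
 * previously set" a single atomic step.
 */
#define EXAMPLE_FLAG_PENDING	0x1

static __always_inline bool example_test_and_clear_pending(atomic_t *flags)
{
	return raw_atomic_fetch_andnot(EXAMPLE_FLAG_PENDING, flags) &
	       EXAMPLE_FLAG_PENDING;
}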
|
|
|
|
/**
|
|
* raw_atomic_or() - atomic bitwise OR with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v | @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_or() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_or(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_or(i, v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_or() - atomic bitwise OR with full ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v | @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_or(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#elif defined(arch_atomic_fetch_or_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_or_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v | @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or_acquire)
|
|
return arch_atomic_fetch_or_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_or_relaxed)
|
|
int ret = arch_atomic_fetch_or_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v | @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_or_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or_release)
|
|
return arch_atomic_fetch_or_release(i, v);
|
|
#elif defined(arch_atomic_fetch_or_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_or_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v | @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_or_relaxed)
|
|
return arch_atomic_fetch_or_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_or)
|
|
return arch_atomic_fetch_or(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_or_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_xor() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic_xor(int i, atomic_t *v)
|
|
{
|
|
arch_atomic_xor(i, v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v ^ @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#elif defined(arch_atomic_fetch_xor_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_fetch_xor_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v ^ @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor_acquire)
|
|
return arch_atomic_fetch_xor_acquire(i, v);
|
|
#elif defined(arch_atomic_fetch_xor_relaxed)
|
|
int ret = arch_atomic_fetch_xor_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v ^ @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor_release)
|
|
return arch_atomic_fetch_xor_release(i, v);
|
|
#elif defined(arch_atomic_fetch_xor_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_fetch_xor_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
|
|
* @i: int value
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v ^ @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_fetch_xor_relaxed)
|
|
return arch_atomic_fetch_xor_relaxed(i, v);
|
|
#elif defined(arch_atomic_fetch_xor)
|
|
return arch_atomic_fetch_xor(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic_fetch_xor_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_xchg() - atomic exchange with full ordering
|
|
* @v: pointer to atomic_t
|
|
* @new: int value to assign
|
|
*
|
|
* Atomically updates @v to @new with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_xchg(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#elif defined(arch_atomic_xchg_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_xchg_relaxed(v, new);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_xchg(&v->counter, new);
|
|
#endif
|
|
}
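/*
 * Illustrative usage sketch (not part of the generated header): a common use
 * of a fully ordered xchg() is to atomically take a pending value and reset
 * the slot, so that exactly one consumer observes each posted value. The
 * helper and variable names below are hypothetical, for this example only.
 *
 *	static int example_take_pending(atomic_t *pending)
 *	{
 *		// returns the posted value and leaves 0 behind
 *		return raw_atomic_xchg(pending, 0);
 *	}
 */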
|
|
|
|
/**
|
|
* raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
* @new: int value to assign
|
|
*
|
|
* Atomically updates @v to @new with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_xchg_acquire(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg_acquire)
|
|
return arch_atomic_xchg_acquire(v, new);
|
|
#elif defined(arch_atomic_xchg_relaxed)
|
|
int ret = arch_atomic_xchg_relaxed(v, new);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#else
|
|
return raw_xchg_acquire(&v->counter, new);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_xchg_release() - atomic exchange with release ordering
|
|
* @v: pointer to atomic_t
|
|
* @new: int value to assign
|
|
*
|
|
* Atomically updates @v to @new with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_xchg_release(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg_release)
|
|
return arch_atomic_xchg_release(v, new);
|
|
#elif defined(arch_atomic_xchg_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_xchg_relaxed(v, new);
|
|
#elif defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#else
|
|
return raw_xchg_release(&v->counter, new);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
* @new: int value to assign
|
|
*
|
|
* Atomically updates @v to @new with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_xchg_relaxed(atomic_t *v, int new)
|
|
{
|
|
#if defined(arch_atomic_xchg_relaxed)
|
|
return arch_atomic_xchg_relaxed(v, new);
|
|
#elif defined(arch_atomic_xchg)
|
|
return arch_atomic_xchg(v, new);
|
|
#else
|
|
return raw_xchg_relaxed(&v->counter, new);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
|
|
* @v: pointer to atomic_t
|
|
* @old: int value to compare with
|
|
* @new: int value to assign
|
|
*
|
|
* If (@v == @old), atomically updates @v to @new with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg_relaxed)
|
|
int ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_cmpxchg(&v->counter, old, new);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
* @old: int value to compare with
|
|
* @new: int value to assign
|
|
*
|
|
* If (@v == @old), atomically updates @v to @new with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg_acquire)
|
|
return arch_atomic_cmpxchg_acquire(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg_relaxed)
|
|
int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#else
|
|
return raw_cmpxchg_acquire(&v->counter, old, new);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
|
|
* @v: pointer to atomic_t
|
|
* @old: int value to compare with
|
|
* @new: int value to assign
|
|
*
|
|
* If (@v == @old), atomically updates @v to @new with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg_release)
|
|
return arch_atomic_cmpxchg_release(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#else
|
|
return raw_cmpxchg_release(&v->counter, old, new);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
* @old: int value to compare with
|
|
* @new: int value to assign
|
|
*
|
|
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline int
|
|
raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
|
|
{
|
|
#if defined(arch_atomic_cmpxchg_relaxed)
|
|
return arch_atomic_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_cmpxchg)
|
|
return arch_atomic_cmpxchg(v, old, new);
|
|
#else
|
|
return raw_cmpxchg_relaxed(&v->counter, old, new);
|
|
#endif
|
|
}
/**
 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}
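/*
 * Illustrative usage sketch (not part of the generated header): try_cmpxchg()
 * is written for retry loops; on failure it updates *@old with the current
 * value, so the loop does not need to re-read @v by hand. The saturation
 * limit and helper name below are hypothetical, used only for this example.
 *
 *	static bool example_inc_below(atomic_t *v, int limit)
 *	{
 *		int cur = raw_atomic_read(v);
 *
 *		do {
 *			if (cur >= limit)
 *				return false;
 *		} while (!raw_atomic_try_cmpxchg(v, &cur, cur + 1));
 *
 *		return true;
 *	}
 */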
/**
|
|
* raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
|
|
* @v: pointer to atomic_t
|
|
* @old: pointer to int value to compare with
|
|
* @new: int value to assign
|
|
*
|
|
* If (@v == @old), atomically updates @v to @new with acquire ordering.
|
|
* Otherwise, updates @old to the current value of @v.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
|
|
*
|
|
* Return: @true if the exchange occurred, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
|
|
{
|
|
#if defined(arch_atomic_try_cmpxchg_acquire)
|
|
return arch_atomic_try_cmpxchg_acquire(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg_relaxed)
|
|
bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_try_cmpxchg)
|
|
return arch_atomic_try_cmpxchg(v, old, new);
|
|
#else
|
|
int r, o = *old;
|
|
r = raw_atomic_cmpxchg_acquire(v, o, new);
|
|
if (unlikely(r != o))
|
|
*old = r;
|
|
return likely(r == o);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
|
|
* @v: pointer to atomic_t
|
|
* @old: pointer to int value to compare with
|
|
* @new: int value to assign
|
|
*
|
|
* If (@v == @old), atomically updates @v to @new with release ordering.
|
|
* Otherwise, updates @old to the current value of @v.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
|
|
*
|
|
* Return: @true if the exchange occurred, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
|
|
{
|
|
#if defined(arch_atomic_try_cmpxchg_release)
|
|
return arch_atomic_try_cmpxchg_release(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg)
|
|
return arch_atomic_try_cmpxchg(v, old, new);
|
|
#else
|
|
int r, o = *old;
|
|
r = raw_atomic_cmpxchg_release(v, o, new);
|
|
if (unlikely(r != o))
|
|
*old = r;
|
|
return likely(r == o);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
|
|
* @v: pointer to atomic_t
|
|
* @old: pointer to int value to compare with
|
|
* @new: int value to assign
|
|
*
|
|
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
|
|
* Otherwise, updates @old to the current value of @v.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
|
|
*
|
|
* Return: @true if the exchange occurred, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
|
|
{
|
|
#if defined(arch_atomic_try_cmpxchg_relaxed)
|
|
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
|
|
#elif defined(arch_atomic_try_cmpxchg)
|
|
return arch_atomic_try_cmpxchg(v, old, new);
|
|
#else
|
|
int r, o = *old;
|
|
r = raw_atomic_cmpxchg_relaxed(v, o, new);
|
|
if (unlikely(r != o))
|
|
*old = r;
|
|
return likely(r == o);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
|
|
* @i: int value to subtract
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
|
|
*
|
|
* Return: @true if the resulting value of @v is zero, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_sub_and_test(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_sub_and_test)
|
|
return arch_atomic_sub_and_test(i, v);
|
|
#else
|
|
return raw_atomic_sub_return(i, v) == 0;
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
|
|
*
|
|
* Return: @true if the resulting value of @v is zero, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_dec_and_test(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_and_test)
|
|
return arch_atomic_dec_and_test(v);
|
|
#else
|
|
return raw_atomic_dec_return(v) == 0;
|
|
#endif
|
|
}
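/*
 * Illustrative usage sketch (not part of the generated header): dec_and_test()
 * is the classic "last put frees the object" primitive; only the caller that
 * drops the count to zero sees a true return. The structure and helper below
 * are hypothetical names for this example only.
 *
 *	struct example_obj {
 *		atomic_t refs;
 *	};
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (raw_atomic_dec_and_test(&obj->refs))
 *			kfree(obj);
 *	}
 */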
|
|
|
|
/**
|
|
* raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
|
|
*
|
|
* Return: @true if the resulting value of @v is zero, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_inc_and_test(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_and_test)
|
|
return arch_atomic_inc_and_test(v);
|
|
#else
|
|
return raw_atomic_inc_return(v) == 0;
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_add_negative() - atomic add and test if negative with full ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
|
|
*
|
|
* Return: @true if the resulting value of @v is negative, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_add_negative(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#elif defined(arch_atomic_add_negative_relaxed)
|
|
bool ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic_add_negative_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic_add_return(i, v) < 0;
|
|
#endif
|
|
}
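/*
 * Illustrative usage sketch (not part of the generated header): add_negative()
 * folds the update and the sign test into one fully ordered operation, e.g.
 * for a signed "credits remaining" counter where a negative result means the
 * caller overshot and should back out. The helper below is hypothetical, used
 * only for this example.
 *
 *	static bool example_charge(atomic_t *credits, int amount)
 *	{
 *		if (raw_atomic_add_negative(amount, credits)) {
 *			raw_atomic_sub(amount, credits);	// undo the overshoot
 *			return false;
 *		}
 *		return true;
 *	}
 */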
|
|
|
|
/**
|
|
* raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
|
|
*
|
|
* Return: @true if the resulting value of @v is negative, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_add_negative_acquire(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative_acquire)
|
|
return arch_atomic_add_negative_acquire(i, v);
|
|
#elif defined(arch_atomic_add_negative_relaxed)
|
|
bool ret = arch_atomic_add_negative_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#else
|
|
return raw_atomic_add_return_acquire(i, v) < 0;
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
|
|
*
|
|
* Return: @true if the resulting value of @v is negative, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_add_negative_release(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative_release)
|
|
return arch_atomic_add_negative_release(i, v);
|
|
#elif defined(arch_atomic_add_negative_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic_add_negative_relaxed(i, v);
|
|
#elif defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#else
|
|
return raw_atomic_add_return_release(i, v) < 0;
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
|
|
* @i: int value to add
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
|
|
*
|
|
* Return: @true if the resulting value of @v is negative, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_add_negative_relaxed(int i, atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_add_negative_relaxed)
|
|
return arch_atomic_add_negative_relaxed(i, v);
|
|
#elif defined(arch_atomic_add_negative)
|
|
return arch_atomic_add_negative(i, v);
|
|
#else
|
|
return raw_atomic_add_return_relaxed(i, v) < 0;
|
|
#endif
|
|
}
/**
 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic_t
 * @a: int value to add
 * @u: int value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
#if defined(arch_atomic_fetch_add_unless)
	return arch_atomic_fetch_add_unless(v, a, u);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}
/**
|
|
* raw_atomic_add_unless() - atomic add unless value with full ordering
|
|
* @v: pointer to atomic_t
|
|
* @a: int value to add
|
|
* @u: int value to compare with
|
|
*
|
|
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
|
|
*
|
|
* Return: @true if @v was updated, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_add_unless(atomic_t *v, int a, int u)
|
|
{
|
|
#if defined(arch_atomic_add_unless)
|
|
return arch_atomic_add_unless(v, a, u);
|
|
#else
|
|
return raw_atomic_fetch_add_unless(v, a, u) != u;
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
|
|
*
|
|
* Return: @true if @v was updated, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_inc_not_zero(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_not_zero)
|
|
return arch_atomic_inc_not_zero(v);
|
|
#else
|
|
return raw_atomic_add_unless(v, 1, 0);
|
|
#endif
|
|
}
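/*
 * Illustrative usage sketch (not part of the generated header):
 * inc_not_zero() is the lookup-side half of a reference count; a reader may
 * only take a new reference while the count is still non-zero, i.e. before
 * the final put. The structure and helper below are hypothetical names for
 * this example.
 *
 *	struct example_obj {
 *		atomic_t refs;
 *	};
 *
 *	static bool example_tryget(struct example_obj *obj)
 *	{
 *		return raw_atomic_inc_not_zero(&obj->refs);
 *	}
 */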
|
|
|
|
/**
|
|
* raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
|
|
*
|
|
* Return: @true if @v was updated, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_inc_unless_negative(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_inc_unless_negative)
|
|
return arch_atomic_inc_unless_negative(v);
|
|
#else
|
|
int c = raw_atomic_read(v);
|
|
|
|
do {
|
|
if (unlikely(c < 0))
|
|
return false;
|
|
} while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
|
|
|
|
return true;
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
|
|
* @v: pointer to atomic_t
|
|
*
|
|
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
|
|
*
|
|
* Return: @true if @v was updated, @false otherwise.
|
|
*/
|
|
static __always_inline bool
|
|
raw_atomic_dec_unless_positive(atomic_t *v)
|
|
{
|
|
#if defined(arch_atomic_dec_unless_positive)
|
|
return arch_atomic_dec_unless_positive(v);
|
|
#else
|
|
int c = raw_atomic_read(v);
|
|
|
|
do {
|
|
if (unlikely(c > 0))
|
|
return false;
|
|
} while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
|
|
|
|
return true;
|
|
#endif
|
|
}
/**
 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline int
raw_atomic_dec_if_positive(atomic_t *v)
{
#if defined(arch_atomic_dec_if_positive)
	return arch_atomic_dec_if_positive(v);
#else
	int dec, c = raw_atomic_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}
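/*
 * Illustrative usage sketch (not part of the generated header):
 * dec_if_positive() only performs the decrement while the counter is still
 * positive, which makes it a natural "take one token if any are left"
 * operation; the return value is the old value minus one, so it is negative
 * exactly when no token was taken. The helper below is a hypothetical name
 * for this example.
 *
 *	static bool example_take_token(atomic_t *tokens)
 *	{
 *		return raw_atomic_dec_if_positive(tokens) >= 0;
 *	}
 */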
#ifdef CONFIG_GENERIC_ATOMIC64
|
|
#include <asm-generic/atomic64.h>
|
|
#endif
|
|
|
|
/**
|
|
* raw_atomic64_read() - atomic load with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically loads the value of @v with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_read() elsewhere.
|
|
*
|
|
* Return: The value loaded from @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_read(const atomic64_t *v)
|
|
{
|
|
return arch_atomic64_read(v);
|
|
}
/**
 * raw_atomic64_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline s64
raw_atomic64_read_acquire(const atomic64_t *v)
{
#if defined(arch_atomic64_read_acquire)
	return arch_atomic64_read_acquire(v);
#else
	s64 ret;

	if (__native_word(atomic64_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic64_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}
/**
|
|
* raw_atomic64_set() - atomic set with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
* @i: s64 value to assign
|
|
*
|
|
* Atomically sets @v to @i with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_set() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic64_set(atomic64_t *v, s64 i)
|
|
{
|
|
arch_atomic64_set(v, i);
|
|
}
/**
 * raw_atomic64_set_release() - atomic set with release ordering
 * @v: pointer to atomic64_t
 * @i: s64 value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
	arch_atomic64_set_release(v, i);
#else
	if (__native_word(atomic64_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic64_set(v, i);
	}
#endif
}
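/*
 * Illustrative usage sketch (not part of the generated header): set_release()
 * pairs with read_acquire() above for simple message passing; the release
 * store publishes data written beforehand, and a reader that observes the
 * flag via the acquire load is guaranteed to also observe that data. The
 * 'payload' and 'ready' variables below are hypothetical, for this example
 * only.
 *
 *	static s64 payload;
 *	static atomic64_t ready;
 *
 *	static void example_publish(s64 value)
 *	{
 *		payload = value;			// plain store
 *		raw_atomic64_set_release(&ready, 1);	// publishes 'payload'
 *	}
 *
 *	static bool example_consume(s64 *out)
 *	{
 *		if (!raw_atomic64_read_acquire(&ready))
 *			return false;
 *		*out = payload;				// ordered after the acquire load
 *		return true;
 *	}
 */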
/**
|
|
* raw_atomic64_add() - atomic add with relaxed ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_add() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic64_add(s64 i, atomic64_t *v)
|
|
{
|
|
arch_atomic64_add(i, v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_add_return() - atomic add with full ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_add_return(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#elif defined(arch_atomic64_add_return_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_add_return_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_add_return_acquire() - atomic add with acquire ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return_acquire)
|
|
return arch_atomic64_add_return_acquire(i, v);
|
|
#elif defined(arch_atomic64_add_return_relaxed)
|
|
s64 ret = arch_atomic64_add_return_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_add_return_release() - atomic add with release ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_add_return_release(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return_release)
|
|
return arch_atomic64_add_return_release(i, v);
|
|
#elif defined(arch_atomic64_add_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_add_return_relaxed(i, v);
|
|
#elif defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_add_return_relaxed)
|
|
return arch_atomic64_add_return_relaxed(i, v);
|
|
#elif defined(arch_atomic64_add_return)
|
|
return arch_atomic64_add_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_add_return_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_add() - atomic add with full ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#elif defined(arch_atomic64_fetch_add_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_fetch_add_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add_acquire)
|
|
return arch_atomic64_fetch_add_acquire(i, v);
|
|
#elif defined(arch_atomic64_fetch_add_relaxed)
|
|
s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_add_release() - atomic add with release ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add_release)
|
|
return arch_atomic64_fetch_add_release(i, v);
|
|
#elif defined(arch_atomic64_fetch_add_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_fetch_add_relaxed(i, v);
|
|
#elif defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
|
|
* @i: s64 value to add
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_add_relaxed)
|
|
return arch_atomic64_fetch_add_relaxed(i, v);
|
|
#elif defined(arch_atomic64_fetch_add)
|
|
return arch_atomic64_fetch_add(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_add_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_sub() - atomic subtract with relaxed ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic64_sub(s64 i, atomic64_t *v)
|
|
{
|
|
arch_atomic64_sub(i, v);
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_sub_return() - atomic subtract with full ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_sub_return(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_sub_return)
|
|
return arch_atomic64_sub_return(i, v);
|
|
#elif defined(arch_atomic64_sub_return_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_sub_return_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic64_sub_return"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_sub_return_acquire)
|
|
return arch_atomic64_sub_return_acquire(i, v);
|
|
#elif defined(arch_atomic64_sub_return_relaxed)
|
|
s64 ret = arch_atomic64_sub_return_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_sub_return)
|
|
return arch_atomic64_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_sub_return_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_sub_return_release() - atomic subtract with release ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_sub_return_release)
|
|
return arch_atomic64_sub_return_release(i, v);
|
|
#elif defined(arch_atomic64_sub_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_sub_return_relaxed(i, v);
|
|
#elif defined(arch_atomic64_sub_return)
|
|
return arch_atomic64_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_sub_return_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_sub_return_relaxed)
|
|
return arch_atomic64_sub_return_relaxed(i, v);
|
|
#elif defined(arch_atomic64_sub_return)
|
|
return arch_atomic64_sub_return(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_sub_return_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_sub() - atomic subtract with full ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_sub)
|
|
return arch_atomic64_fetch_sub(i, v);
|
|
#elif defined(arch_atomic64_fetch_sub_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_fetch_sub_relaxed(i, v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_sub"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_sub_acquire)
|
|
return arch_atomic64_fetch_sub_acquire(i, v);
|
|
#elif defined(arch_atomic64_fetch_sub_relaxed)
|
|
s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_fetch_sub)
|
|
return arch_atomic64_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_sub_acquire"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_sub_release)
|
|
return arch_atomic64_fetch_sub_release(i, v);
|
|
#elif defined(arch_atomic64_fetch_sub_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_fetch_sub_relaxed(i, v);
|
|
#elif defined(arch_atomic64_fetch_sub)
|
|
return arch_atomic64_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_sub_release"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
|
|
* @i: s64 value to subtract
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - @i) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_sub_relaxed)
|
|
return arch_atomic64_fetch_sub_relaxed(i, v);
|
|
#elif defined(arch_atomic64_fetch_sub)
|
|
return arch_atomic64_fetch_sub(i, v);
|
|
#else
|
|
#error "Unable to define raw_atomic64_fetch_sub_relaxed"
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_inc() - atomic increment with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic64_inc(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_inc)
|
|
arch_atomic64_inc(v);
|
|
#else
|
|
raw_atomic64_add(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_inc_return() - atomic increment with full ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_inc_return(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_inc_return)
|
|
return arch_atomic64_inc_return(v);
|
|
#elif defined(arch_atomic64_inc_return_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_inc_return_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic64_add_return(1, v);
|
|
#endif
|
|
}
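/*
 * Illustrative usage sketch (not part of the generated header): inc_return()
 * on a 64-bit counter is a common way to hand out unique, monotonically
 * increasing IDs without worrying about wrap-around in practice. The counter
 * and helper names below are hypothetical, for this example only.
 *
 *	static atomic64_t example_next_id;
 *
 *	static u64 example_alloc_id(void)
 *	{
 *		return raw_atomic64_inc_return(&example_next_id);
 *	}
 */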
|
|
|
|
/**
|
|
* raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_inc_return_acquire(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_inc_return_acquire)
|
|
return arch_atomic64_inc_return_acquire(v);
|
|
#elif defined(arch_atomic64_inc_return_relaxed)
|
|
s64 ret = arch_atomic64_inc_return_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_inc_return)
|
|
return arch_atomic64_inc_return(v);
|
|
#else
|
|
return raw_atomic64_add_return_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_inc_return_release() - atomic increment with release ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_inc_return_release(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_inc_return_release)
|
|
return arch_atomic64_inc_return_release(v);
|
|
#elif defined(arch_atomic64_inc_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_inc_return_relaxed(v);
|
|
#elif defined(arch_atomic64_inc_return)
|
|
return arch_atomic64_inc_return(v);
|
|
#else
|
|
return raw_atomic64_add_return_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_inc_return_relaxed(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_inc_return_relaxed)
|
|
return arch_atomic64_inc_return_relaxed(v);
|
|
#elif defined(arch_atomic64_inc_return)
|
|
return arch_atomic64_inc_return(v);
|
|
#else
|
|
return raw_atomic64_add_return_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_inc() - atomic increment with full ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_inc(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_inc)
|
|
return arch_atomic64_fetch_inc(v);
|
|
#elif defined(arch_atomic64_fetch_inc_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_fetch_inc_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic64_fetch_add(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_inc_acquire(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_inc_acquire)
|
|
return arch_atomic64_fetch_inc_acquire(v);
|
|
#elif defined(arch_atomic64_fetch_inc_relaxed)
|
|
s64 ret = arch_atomic64_fetch_inc_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_fetch_inc)
|
|
return arch_atomic64_fetch_inc(v);
|
|
#else
|
|
return raw_atomic64_fetch_add_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_inc_release() - atomic increment with release ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_inc_release(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_inc_release)
|
|
return arch_atomic64_fetch_inc_release(v);
|
|
#elif defined(arch_atomic64_fetch_inc_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_fetch_inc_relaxed(v);
|
|
#elif defined(arch_atomic64_fetch_inc)
|
|
return arch_atomic64_fetch_inc(v);
|
|
#else
|
|
return raw_atomic64_fetch_add_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v + 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_inc_relaxed)
|
|
return arch_atomic64_fetch_inc_relaxed(v);
|
|
#elif defined(arch_atomic64_fetch_inc)
|
|
return arch_atomic64_fetch_inc(v);
|
|
#else
|
|
return raw_atomic64_fetch_add_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_dec() - atomic decrement with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
|
|
*
|
|
* Return: Nothing.
|
|
*/
|
|
static __always_inline void
|
|
raw_atomic64_dec(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_dec)
|
|
arch_atomic64_dec(v);
|
|
#else
|
|
raw_atomic64_sub(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_dec_return() - atomic decrement with full ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_dec_return(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_dec_return)
|
|
return arch_atomic64_dec_return(v);
|
|
#elif defined(arch_atomic64_dec_return_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_dec_return_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic64_sub_return(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_dec_return_acquire(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_dec_return_acquire)
|
|
return arch_atomic64_dec_return_acquire(v);
|
|
#elif defined(arch_atomic64_dec_return_relaxed)
|
|
s64 ret = arch_atomic64_dec_return_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_dec_return)
|
|
return arch_atomic64_dec_return(v);
|
|
#else
|
|
return raw_atomic64_sub_return_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_dec_return_release() - atomic decrement with release ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_dec_return_release(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_dec_return_release)
|
|
return arch_atomic64_dec_return_release(v);
|
|
#elif defined(arch_atomic64_dec_return_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_dec_return_relaxed(v);
|
|
#elif defined(arch_atomic64_dec_return)
|
|
return arch_atomic64_dec_return(v);
|
|
#else
|
|
return raw_atomic64_sub_return_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
|
|
*
|
|
* Return: The updated value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_dec_return_relaxed(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_dec_return_relaxed)
|
|
return arch_atomic64_dec_return_relaxed(v);
|
|
#elif defined(arch_atomic64_dec_return)
|
|
return arch_atomic64_dec_return(v);
|
|
#else
|
|
return raw_atomic64_sub_return_relaxed(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_dec() - atomic decrement with full ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with full ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_dec(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_dec)
|
|
return arch_atomic64_fetch_dec(v);
|
|
#elif defined(arch_atomic64_fetch_dec_relaxed)
|
|
s64 ret;
|
|
__atomic_pre_full_fence();
|
|
ret = arch_atomic64_fetch_dec_relaxed(v);
|
|
__atomic_post_full_fence();
|
|
return ret;
|
|
#else
|
|
return raw_atomic64_fetch_sub(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with acquire ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_dec_acquire(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_dec_acquire)
|
|
return arch_atomic64_fetch_dec_acquire(v);
|
|
#elif defined(arch_atomic64_fetch_dec_relaxed)
|
|
s64 ret = arch_atomic64_fetch_dec_relaxed(v);
|
|
__atomic_acquire_fence();
|
|
return ret;
|
|
#elif defined(arch_atomic64_fetch_dec)
|
|
return arch_atomic64_fetch_dec(v);
|
|
#else
|
|
return raw_atomic64_fetch_sub_acquire(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with release ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_dec_release(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_dec_release)
|
|
return arch_atomic64_fetch_dec_release(v);
|
|
#elif defined(arch_atomic64_fetch_dec_relaxed)
|
|
__atomic_release_fence();
|
|
return arch_atomic64_fetch_dec_relaxed(v);
|
|
#elif defined(arch_atomic64_fetch_dec)
|
|
return arch_atomic64_fetch_dec(v);
|
|
#else
|
|
return raw_atomic64_fetch_sub_release(1, v);
|
|
#endif
|
|
}
|
|
|
|
/**
|
|
* raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
|
|
* @v: pointer to atomic64_t
|
|
*
|
|
* Atomically updates @v to (@v - 1) with relaxed ordering.
|
|
*
|
|
* Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
|
|
*
|
|
* Return: The original value of @v.
|
|
*/
|
|
static __always_inline s64
|
|
raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
|
|
{
|
|
#if defined(arch_atomic64_fetch_dec_relaxed)
|
|
return arch_atomic64_fetch_dec_relaxed(v);
|
|
#elif defined(arch_atomic64_fetch_dec)
|
|
return arch_atomic64_fetch_dec(v);
|
|
#else
|
|
return raw_atomic64_fetch_sub_relaxed(1, v);
|
|
#endif
|
|
}

/**
 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_and(s64 i, atomic64_t *v)
{
	arch_atomic64_and(i, v);
}

/**
 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_and"
#endif
}

/**
 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_acquire)
	return arch_atomic64_fetch_and_acquire(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_acquire"
#endif
}

/**
 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_release)
	return arch_atomic64_fetch_and_release(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_release"
#endif
}

/**
 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_relaxed)
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_relaxed"
#endif
}
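
/*
 * Editor's note, illustrative only: a typical use of the fetch_and() form
 * is clearing flag bits while observing which of them were previously set.
 * The helper and flag names below are hypothetical.
 *
 *	#define EXAMPLE_FLAG_PENDING	(1ULL << 0)
 *
 *	static inline bool example_clear_pending(atomic64_t *flags)
 *	{
 *		s64 old = raw_atomic64_fetch_and(~EXAMPLE_FLAG_PENDING, flags);
 *
 *		return old & EXAMPLE_FLAG_PENDING;
 *	}
 */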

/**
 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_andnot)
	arch_atomic64_andnot(i, v);
#else
	raw_atomic64_and(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_acquire)
	return arch_atomic64_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_release)
	return arch_atomic64_fetch_andnot_release(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
}
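
/*
 * Editor's note, illustrative only: as the fallbacks above show,
 * raw_atomic64_fetch_andnot(i, v) is interchangeable with
 * raw_atomic64_fetch_and(~i, v); an architecture only overrides andnot
 * when it has something better (e.g. a dedicated bit-clear instruction).
 * Hypothetical example clearing a whole mask in one atomic step:
 *
 *	static inline s64 example_clear_mask(atomic64_t *v, s64 mask)
 *	{
 *		return raw_atomic64_fetch_andnot(mask, v);
 *	}
 */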

/**
 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
	arch_atomic64_or(i, v);
}

/**
 * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_or"
#endif
}

/**
 * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_acquire)
	return arch_atomic64_fetch_or_acquire(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_acquire"
#endif
}

/**
 * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_release)
	return arch_atomic64_fetch_or_release(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_release"
#endif
}

/**
 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_relaxed)
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
}
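
/*
 * Editor's note, illustrative only: fetch_or() gives a cheap
 * "test-and-set" on a flag word; the returned value tells the caller
 * whether the bit was already set. Names below are hypothetical.
 *
 *	static inline bool example_mark_queued(atomic64_t *flags, s64 bit)
 *	{
 *		return raw_atomic64_fetch_or(bit, flags) & bit;
 *	}
 */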

/**
 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
	arch_atomic64_xor(i, v);
}

/**
 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_xor"
#endif
}

/**
 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_acquire)
	return arch_atomic64_fetch_xor_acquire(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_release)
	return arch_atomic64_fetch_xor_release(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_release"
#endif
}

/**
 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_relaxed)
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
}
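
/*
 * Editor's note, illustrative only: fetch_xor() can toggle a bit and
 * report the state it ended up in. Hypothetical helper:
 *
 *	static inline bool example_toggle(atomic64_t *flags, s64 bit)
 *	{
 *		s64 old = raw_atomic64_fetch_xor(bit, flags);
 *
 *		return !(old & bit);	// true if the bit is now set
 *	}
 */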

/**
 * raw_atomic64_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_xchg(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_acquire)
	return arch_atomic64_xchg_acquire(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_acquire(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_release)
	return arch_atomic64_xchg_release(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_relaxed)
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}
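
/*
 * Editor's note, illustrative only: xchg() atomically takes over a value,
 * e.g. draining an accumulated counter while resetting it. Hypothetical:
 *
 *	static inline s64 example_drain(atomic64_t *pending)
 *	{
 *		return raw_atomic64_xchg(pending, 0);
 *	}
 */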

/**
 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_cmpxchg(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_acquire)
	return arch_atomic64_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_release)
	return arch_atomic64_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}
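
/*
 * Editor's note, illustrative only: cmpxchg() is typically wrapped in a
 * retry loop. The hypothetical helper below increments @v but never past
 * @max, returning the last value it observed.
 *
 *	static inline s64 example_saturating_inc(atomic64_t *v, s64 max)
 *	{
 *		s64 old = raw_atomic64_read(v);
 *
 *		while (old < max) {
 *			s64 prev = raw_atomic64_cmpxchg(v, old, old + 1);
 *
 *			if (prev == old)
 *				break;
 *			old = prev;
 *		}
 *		return old;
 *	}
 */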

/**
 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_acquire)
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_release)
	return arch_atomic64_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}
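
/*
 * Editor's note, illustrative only: try_cmpxchg() updates *@old on
 * failure, so retry loops need no separate re-read; this is the same
 * idiom the fallbacks later in this header use. Hypothetical bounded
 * counter:
 *
 *	static inline bool example_inc_below(atomic64_t *v, s64 max)
 *	{
 *		s64 old = raw_atomic64_read(v);
 *
 *		do {
 *			if (old >= max)
 *				return false;
 *		} while (!raw_atomic64_try_cmpxchg(v, &old, old + 1));
 *
 *		return true;
 *	}
 */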

/**
 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_and_test)
	return arch_atomic64_sub_and_test(i, v);
#else
	return raw_atomic64_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_dec_and_test)
	return arch_atomic64_dec_and_test(v);
#else
	return raw_atomic64_dec_return(v) == 0;
#endif
}

/**
 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_inc_and_test)
	return arch_atomic64_inc_and_test(v);
#else
	return raw_atomic64_inc_return(v) == 0;
#endif
}
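
/*
 * Editor's note, illustrative only: dec_and_test() is the usual building
 * block for reference counts, where the last put frees the object. The
 * struct and destructor below are hypothetical.
 *
 *	struct example_obj {
 *		atomic64_t refcnt;
 *	};
 *
 *	static inline void example_put(struct example_obj *obj)
 *	{
 *		if (raw_atomic64_dec_and_test(&obj->refcnt))
 *			example_free(obj);
 *	}
 */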

/**
 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_acquire)
	return arch_atomic64_add_negative_acquire(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_release)
	return arch_atomic64_add_negative_release(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
}
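
/*
 * Editor's note, illustrative only: add_negative() reports whether the
 * counter dropped below zero as a side effect of the addition, e.g. when
 * charging against a signed budget. Names below are hypothetical.
 *
 *	static inline bool example_charge(atomic64_t *budget, s64 amount)
 *	{
 *		return raw_atomic64_add_negative(-amount, budget);
 *	}
 */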

/**
 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_fetch_add_unless)
	return arch_atomic64_fetch_add_unless(v, a, u);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

/**
 * raw_atomic64_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_add_unless)
	return arch_atomic64_add_unless(v, a, u);
#else
	return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
#if defined(arch_atomic64_inc_not_zero)
	return arch_atomic64_inc_not_zero(v);
#else
	return raw_atomic64_add_unless(v, 1, 0);
#endif
}
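
/*
 * Editor's note, illustrative only: inc_not_zero() is the classic
 * "take a reference unless the object is already dying" operation; as
 * the fallbacks show, it is add_unless(@v, 1, 0), which in turn is
 * fetch_add_unless() compared against @u. Hypothetical lookup path,
 * reusing the hypothetical struct example_obj from the note above:
 *
 *	static inline bool example_tryget(struct example_obj *obj)
 *	{
 *		return raw_atomic64_inc_not_zero(&obj->refcnt);
 *	}
 */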

/**
 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
#if defined(arch_atomic64_inc_unless_negative)
	return arch_atomic64_inc_unless_negative(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_unless_positive)
	return arch_atomic64_dec_unless_positive(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_if_positive)
	return arch_atomic64_dec_if_positive(v);
#else
	s64 dec, c = raw_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}
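
/*
 * Editor's note, illustrative only: dec_if_positive() returns the
 * decremented value when it succeeds and a negative value (without
 * modifying @v) when the counter was already <= 0, which makes it a
 * natural "try to take a token" primitive. Hypothetical helper:
 *
 *	static inline bool example_try_take_token(atomic64_t *tokens)
 *	{
 *		return raw_atomic64_dec_if_positive(tokens) >= 0;
 *	}
 */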

#endif /* _LINUX_ATOMIC_FALLBACK_H */
// 2fdd6702823fa842f9cea57a002e6e4476ae780c