mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-30 16:07:39 +00:00
9eaa82935d
We'd like all architectures to convert to ARCH_ATOMIC, as once all architectures are converted it will be possible to make significant cleanups to the atomics headers, and this will make it much easier to generically enable atomic functionality (e.g. debug logic in the instrumented wrappers).

As a step towards that, this patch migrates powerpc to ARCH_ATOMIC. The arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common code wraps these with optional instrumentation to provide the regular functions.

While atomic_try_cmpxchg_lock() is not part of the common atomic API, it is given an `arch_` prefix for consistency.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-28-mark.rutland@arm.com
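As a rough illustration of the wrapping described above (a simplified sketch, not the generated kernel header; the real wrappers are emitted by the scripts under scripts/atomic/), the architecture supplies arch_atomic_*() and common code layers optional instrumentation on top before exposing the unprefixed atomic_*():

/*
 * Simplified sketch of an instrumented wrapper around an arch-provided
 * atomic; the generated headers emit one such wrapper per operation.
 */
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN hooks */
	return arch_atomic_add_return(i, v);		/* arch_ implementation */
}

Converting an architecture to ARCH_ATOMIC therefore mostly amounts to renaming its atomic_*() implementations to arch_atomic_*() and letting the common wrappers provide the regular names.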
84 lines
2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

#define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */
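/*
 * With CONFIG_PARAVIRT_SPINLOCKS the slow operations are chosen at runtime:
 * dedicated-processor partitions use the native qspinlock slowpath, while
 * shared-processor LPARs use the paravirt variants, which can yield the
 * vCPU to the hypervisor instead of spinning.
 */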
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}

#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif
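/*
 * Fast path: when the lock word is 0 (unlocked, no pending waiters), take
 * the lock by atomically installing _Q_LOCKED_VAL. On failure the
 * arch_-prefixed try_cmpxchg updates 'val' with the observed lock word,
 * which is then passed to the queued slowpath.
 */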
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */
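/*
 * Hooks used by the generic paravirt qspinlock slowpath: pv_wait() yields
 * this vCPU to the hypervisor if *ptr still holds val, and pv_kick() prods
 * the target CPU so it can resume from pv_wait().
 */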
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * We could pass in a CPU here if waiting in the queue and yield to
	 * the previous CPU in the queue.
	 */
}

static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}

extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif

/*
 * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
 * which was found to have performance problems if implemented with
 * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
 * generic version instead.
 */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */