mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 12:57:53 +00:00
459e39538e
This patch breaks a header loop involving qspinlock_types.h. The issue is that qspinlock_types.h includes atomic.h, which then eventually includes kernel.h which could lead back to the original file via spinlock_types.h. As ATOMIC_INIT is now defined by linux/types.h, there is no longer any need to include atomic.h from qspinlock_types.h. This also allows the CONFIG_PARAVIRT hack to be removed since it was trying to prevent exactly this loop. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Waiman Long <longman@redhat.com> Link: https://lkml.kernel.org/r/20200729123316.GC7047@gondor.apana.org.au
95 lines
2.1 KiB
C
95 lines
2.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

#include <linux/types.h>
typedef struct qspinlock {
|
|
union {
|
|
atomic_t val;
|
|
|
|
/*
|
|
* By using the whole 2nd least significant byte for the
|
|
* pending bit, we can allow better optimization of the lock
|
|
* acquisition for the pending bit holder.
|
|
*/
|
|
#ifdef __LITTLE_ENDIAN
|
|
struct {
|
|
u8 locked;
|
|
u8 pending;
|
|
};
|
|
struct {
|
|
u16 locked_pending;
|
|
u16 tail;
|
|
};
|
|
#else
|
|
struct {
|
|
u16 tail;
|
|
u16 locked_pending;
|
|
};
|
|
struct {
|
|
u8 reserved[2];
|
|
u8 pending;
|
|
u8 locked;
|
|
};
|
|
#endif
|
|
};
|
|
} arch_spinlock_t;
|
|
|
|
/*
 * Initializer
 */
#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }

/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */
/* Build a contiguous mask of _Q_<type>_BITS bits at _Q_<type>_OFFSET. */
#define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)

#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
/*
 * With < 16K CPUs the pending bit gets a whole byte so it can be set
 * and cleared with byte stores; beyond that, the tail cpu field needs
 * the space and pending shrinks to a single bit.
 */
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS		8
#else
#define _Q_PENDING_BITS		1
#endif
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)

#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

/* The complete tail: MCS node index plus (cpu number + 1). */
#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */