linux-stable/include/linux/rwlock_rt.h
Thomas Gleixner 8282947f67 locking/rwlock: Provide RT variant
Similar to rw_semaphores, on RT the rwlock substitution is not writer fair,
because it is not feasible for a writer to propagate its priority to
multiple readers. Readers blocked on a writer follow the normal rules of
priority inheritance. Like RT spinlocks, RT rwlocks are state preserving
across the slow lock operations (contended case).

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.882793524@linutronix.de
2021-08-17 17:50:51 +02:00
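
The rwlock_t API is unchanged by this substitution; only the behaviour under
contention differs, since the lock may now sleep. The following is a minimal,
illustrative sketch of a reader/writer pair under these semantics. All
identifiers in it (stats_lock, stats, stats_add, stats_sum) are hypothetical
and not part of the patch.

#include <linux/kernel.h>
#include <linux/spinlock.h>	/* the rwlock_t API, RT and !RT alike */

static DEFINE_RWLOCK(stats_lock);	/* hypothetical lock */
static unsigned long stats[16];		/* hypothetical shared data */

/*
 * Writer: may sleep on RT and cannot boost multiple readers holding the
 * lock, which is why the substitution is not writer fair.
 */
static void stats_add(unsigned int idx, unsigned long val)
{
	write_lock(&stats_lock);
	stats[idx] += val;
	write_unlock(&stats_lock);
}

/*
 * Readers: readers blocked on a writer boost that writer according to the
 * normal priority inheritance rules.
 */
static unsigned long stats_sum(void)
{
	unsigned long sum = 0;
	unsigned int i;

	read_lock(&stats_lock);
	for (i = 0; i < ARRAY_SIZE(stats); i++)
		sum += stats[i];
	read_unlock(&stats_lock);

	return sum;
}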

// SPDX-License-Identifier: GPL-2.0-only
#ifndef __LINUX_RWLOCK_RT_H
#define __LINUX_RWLOCK_RT_H
#ifndef __LINUX_SPINLOCK_RT_H
#error Do not #include directly. Use <linux/spinlock.h>.
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
			     struct lock_class_key *key);
#else
static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name,
				    struct lock_class_key *key)
{
}
#endif
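
/*
 * rwlock_init() sets up the rtmutex based rwbase_rt substitution and, with
 * CONFIG_DEBUG_LOCK_ALLOC, registers a lockdep class key which is static
 * per call site.
 */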
#define rwlock_init(rwl)				\
do {							\
	static struct lock_class_key __key;		\
							\
	init_rwbase_rt(&(rwl)->rwbase);			\
	__rt_rwlock_init(rwl, #rwl, &__key);		\
} while (0)
extern void rt_read_lock(rwlock_t *rwlock);
extern int rt_read_trylock(rwlock_t *rwlock);
extern void rt_read_unlock(rwlock_t *rwlock);
extern void rt_write_lock(rwlock_t *rwlock);
extern int rt_write_trylock(rwlock_t *rwlock);
extern void rt_write_unlock(rwlock_t *rwlock);
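
/*
 * The generic rwlock API below maps 1:1 onto the rt_read_*() and
 * rt_write_*() substitutions declared above. As these are sleeping locks
 * on PREEMPT_RT, the _irq/_irqsave variants do not actually disable
 * interrupts; the _bh variants still disable bottom halves.
 */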
static __always_inline void read_lock(rwlock_t *rwlock)
{
	rt_read_lock(rwlock);
}

static __always_inline void read_lock_bh(rwlock_t *rwlock)
{
	local_bh_disable();
	rt_read_lock(rwlock);
}

static __always_inline void read_lock_irq(rwlock_t *rwlock)
{
	rt_read_lock(rwlock);
}
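
/*
 * No interrupt state is saved; flags is zeroed only so that callers which
 * pass it on to read_unlock_irqrestore() keep working unchanged.
 */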
#define read_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		rt_read_lock(lock);			\
		flags = 0;				\
	} while (0)
#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
static __always_inline void read_unlock(rwlock_t *rwlock)
{
	rt_read_unlock(rwlock);
}

static __always_inline void read_unlock_bh(rwlock_t *rwlock)
{
	rt_read_unlock(rwlock);
	local_bh_enable();
}

static __always_inline void read_unlock_irq(rwlock_t *rwlock)
{
	rt_read_unlock(rwlock);
}

static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
						   unsigned long flags)
{
	rt_read_unlock(rwlock);
}
static __always_inline void write_lock(rwlock_t *rwlock)
{
	rt_write_lock(rwlock);
}

static __always_inline void write_lock_bh(rwlock_t *rwlock)
{
	local_bh_disable();
	rt_write_lock(rwlock);
}

static __always_inline void write_lock_irq(rwlock_t *rwlock)
{
	rt_write_lock(rwlock);
}
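
/*
 * As on the read side, interrupts stay enabled; flags is zeroed purely for
 * API compatibility with the non-RT rwlock implementation.
 */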
#define write_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		rt_write_lock(lock);			\
		flags = 0;				\
	} while (0)
#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
#define write_trylock_irqsave(lock, flags)		\
({							\
	int __locked;					\
							\
	typecheck(unsigned long, flags);		\
	flags = 0;					\
	__locked = write_trylock(lock);			\
	__locked;					\
})
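
/*
 * Illustrative trylock pattern (hypothetical lock "mylock"):
 *
 *	unsigned long flags;
 *
 *	if (write_trylock_irqsave(&mylock, flags)) {
 *		... update the protected data ...
 *		write_unlock_irqrestore(&mylock, flags);
 *	}
 */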
static __always_inline void write_unlock(rwlock_t *rwlock)
{
	rt_write_unlock(rwlock);
}

static __always_inline void write_unlock_bh(rwlock_t *rwlock)
{
	rt_write_unlock(rwlock);
	local_bh_enable();
}

static __always_inline void write_unlock_irq(rwlock_t *rwlock)
{
	rt_write_unlock(rwlock);
}

static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
						    unsigned long flags)
{
	rt_write_unlock(rwlock);
}
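
/*
 * The RT substitution always reports the lock as uncontended; the
 * (void)(lock) evaluation only avoids unused-argument warnings.
 */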
#define rwlock_is_contended(lock) (((void)(lock), 0))
#endif /* __LINUX_RWLOCK_RT_H */