locking/rwsem: Introduce rwsem_write_trylock()

One copy of this logic is better than three.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20201207090243.GE3040@hirez.programming.kicks-ass.net
Author: Peter Zijlstra
Date:   2020-12-08 10:25:06 +01:00
Commit: 285c61aedf (parent 3379116a0c)

@@ -285,6 +285,18 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
 	return false;
 }
 
+static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
+{
+	long tmp = RWSEM_UNLOCKED_VALUE;
+
+	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
+		rwsem_set_owner(sem);
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * Return just the real task structure pointer of the owner
  */
@@ -1395,42 +1407,24 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write(struct rw_semaphore *sem)
 {
-	long tmp = RWSEM_UNLOCKED_VALUE;
-
-	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-						      RWSEM_WRITER_LOCKED)))
+	if (unlikely(!rwsem_write_trylock(sem)))
 		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
-	else
-		rwsem_set_owner(sem);
 }
 
 static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-	long tmp = RWSEM_UNLOCKED_VALUE;
-
-	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-						      RWSEM_WRITER_LOCKED))) {
+	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
 			return -EINTR;
-	} else {
-		rwsem_set_owner(sem);
 	}
 
 	return 0;
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	long tmp;
-
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	tmp = RWSEM_UNLOCKED_VALUE;
-	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-					    RWSEM_WRITER_LOCKED)) {
-		rwsem_set_owner(sem);
-		return true;
-	}
-	return false;
+	return rwsem_write_trylock(sem);
 }
 
 /*
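
For readers unfamiliar with the idiom, the new helper is a single try-cmpxchg with acquire ordering: if sem->count still holds RWSEM_UNLOCKED_VALUE, atomically replace it with RWSEM_WRITER_LOCKED, record the owner, and report success; otherwise report failure so the caller can fall back to the slowpath. Below is a minimal user-space sketch of the same pattern using C11 atomics; the demo_* names and constant values are illustrative only, not the kernel's types or values.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define DEMO_UNLOCKED      0L
	#define DEMO_WRITER_LOCKED 1L

	struct demo_rwsem {
		atomic_long count;
		void *owner;		/* written only by the write-lock winner */
	};

	/* Same shape as rwsem_write_trylock(): one acquire cmpxchg, then bookkeeping. */
	static bool demo_write_trylock(struct demo_rwsem *sem, void *me)
	{
		long expected = DEMO_UNLOCKED;

		if (atomic_compare_exchange_strong_explicit(&sem->count, &expected,
							    DEMO_WRITER_LOCKED,
							    memory_order_acquire,
							    memory_order_relaxed)) {
			sem->owner = me;	/* stands in for rwsem_set_owner() */
			return true;
		}
		return false;			/* caller would fall back to a slowpath */
	}

	int main(void)
	{
		struct demo_rwsem sem = { .count = DEMO_UNLOCKED, .owner = NULL };
		int me;

		printf("first  trylock: %d\n", demo_write_trylock(&sem, &me)); /* prints 1 */
		printf("second trylock: %d\n", demo_write_trylock(&sem, &me)); /* prints 0 */
		return 0;
	}

The acquire ordering on the successful exchange is what orders the new writer's critical section after the previous holder's release; the failure path takes no lock and therefore needs no ordering.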