locking: Add rwsem_assert_held() and rwsem_assert_held_write()
Modelled after lockdep_assert_held() and lockdep_assert_held_write(), but these are always active, even when lockdep is disabled. Of course, they don't test that _this_ thread is the owner, but that is sufficient to catch many bugs without incurring the same performance penalty as lockdep.

Acked-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Acked-by: "Darrick J. Wong" <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
commit f70405afc9
parent 49c379d3a7
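The commit touches include/linux/rwbase_rt.h (the RT rwsem base) and include/linux/rwsem.h (both the regular and the PREEMPT_RT definitions). The intended use is a lock-held sanity check that stays active on production kernels where lockdep is too costly. A minimal caller sketch (this function is hypothetical, not part of the commit):

#include <linux/rwsem.h>

/* Hypothetical caller: mutates state that must only change under @sem. */
static void example_update_state(struct rw_semaphore *sem)
{
	/*
	 * With CONFIG_LOCKDEP=y this checks that the current task holds
	 * @sem for write; without lockdep it only WARNs if no writer
	 * holds the lock at all (see the hunks below).
	 */
	rwsem_assert_held_write(sem);

	/* ... modify data protected by sem ... */
}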
@@ -26,12 +26,17 @@ struct rwbase_rt {
 } while (0)
 
-static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
+static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
 {
 	return atomic_read(&rwb->readers) != READER_BIAS;
 }
 
-static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
+static inline void rw_base_assert_held_write(const struct rwbase_rt *rwb)
+{
+	WARN_ON(atomic_read(&rwb->readers) != WRITER_BIAS);
+}
+
+static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
 {
 	return atomic_read(&rwb->readers) > 0;
 }
 
@@ -66,14 +66,24 @@ struct rw_semaphore {
 #endif
 };
 
-/* In all implementations count != 0 means locked */
+#define RWSEM_UNLOCKED_VALUE		0UL
+#define RWSEM_WRITER_LOCKED		(1UL << 0)
+#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-	return atomic_long_read(&sem->count) != 0;
+	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
 }
 
-#define RWSEM_UNLOCKED_VALUE		0L
-#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
+}
+
+static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
+}
 
 /* Common initializer macros and functions */
 
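For intuition about the hunk above, here is a stand-alone user-space sketch of the non-RT count encoding, using C11 atomics in place of the kernel's atomic_long_t (illustration only; the reader count kept in the upper bits is a kernel-internal detail not modelled here):

#include <stdatomic.h>
#include <stdio.h>

/* Same encoding as the hunk above: bit 0 of the count marks a writer. */
#define RWSEM_UNLOCKED_VALUE	0UL
#define RWSEM_WRITER_LOCKED	(1UL << 0)

static atomic_ulong count = RWSEM_UNLOCKED_VALUE;

int main(void)
{
	/* Pretend a writer took the lock. */
	atomic_store(&count, RWSEM_WRITER_LOCKED);

	/* rwsem_assert_held_nolockdep(): any non-zero count means "held". */
	printf("held: %d\n", atomic_load(&count) != RWSEM_UNLOCKED_VALUE);

	/* rwsem_assert_held_write_nolockdep(): bit 0 identifies a writer. */
	printf("write-held: %d\n",
	       (atomic_load(&count) & RWSEM_WRITER_LOCKED) != 0);
	return 0;
}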
@@ -152,11 +162,21 @@ do { \
 	__init_rwsem((sem), #sem, &__key);	\
 } while (0)
 
-static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
+static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
 {
 	return rw_base_is_locked(&sem->rwbase);
 }
 
+static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+	WARN_ON(!rwsem_is_locked(sem));
+}
+
+static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+	rw_base_assert_held_write(&sem->rwbase);
+}
+
 static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
 {
 	return rw_base_is_contended(&sem->rwbase);
@@ -169,6 +189,22 @@ static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
  * the RT specific variant.
  */
 
+static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+{
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		lockdep_assert_held(sem);
+	else
+		rwsem_assert_held_nolockdep(sem);
+}
+
+static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+{
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		lockdep_assert_held_write(sem);
+	else
+		rwsem_assert_held_write_nolockdep(sem);
+}
+
 /*
  * lock for reading
  */
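A note on the dispatch above: IS_ENABLED(CONFIG_LOCKDEP) folds to a compile-time constant, so each configuration compiles to exactly one branch — lockdep kernels get the owner-tracking lockdep_assert_held*() checks, while non-lockdep kernels pay only an atomic read and a compare. Judging by the sign-off chain, the commit went upstream through the XFS tree, whose code wants exactly this kind of always-on assertion.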