mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-30 14:19:16 +00:00
locking/rwsem: Add __always_inline annotation to __down_read_common() and inlined callers
commit 92cc5d00a4
upstream. Apparently despite it being marked inline, the compiler may not inline __down_read_common() which makes it difficult to identify the cause of lock contention, as the blocked function in traceevents will always be listed as __down_read_common(). So this patch adds __always_inline annotation to the common function (as well as the inlined helper callers) to force it to be inlined so the blocking function will be listed (via Wchan) in traceevents. Fixes: c995e638cc
("locking/rwsem: Fold __down_{read,write}*()") Reported-by: Tim Murray <timmurray@google.com> Signed-off-by: John Stultz <jstultz@google.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Waiman Long <longman@redhat.com> Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20230503023351.2832796-1-jstultz@google.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
ed76d3a891
commit
c5c385baee
1 changed file with 4 additions and 4 deletions
|
@ -1221,7 +1221,7 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
|
||||||
/*
|
/*
|
||||||
* lock for reading
|
* lock for reading
|
||||||
*/
|
*/
|
||||||
static inline int __down_read_common(struct rw_semaphore *sem, int state)
|
static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
|
||||||
{
|
{
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
long count;
|
long count;
|
||||||
|
@ -1239,17 +1239,17 @@ static inline int __down_read_common(struct rw_semaphore *sem, int state)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __down_read(struct rw_semaphore *sem)
|
static __always_inline void __down_read(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
__down_read_common(sem, TASK_UNINTERRUPTIBLE);
|
__down_read_common(sem, TASK_UNINTERRUPTIBLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __down_read_interruptible(struct rw_semaphore *sem)
|
static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
return __down_read_common(sem, TASK_INTERRUPTIBLE);
|
return __down_read_common(sem, TASK_INTERRUPTIBLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __down_read_killable(struct rw_semaphore *sem)
|
static __always_inline int __down_read_killable(struct rw_semaphore *sem)
|
||||||
{
|
{
|
||||||
return __down_read_common(sem, TASK_KILLABLE);
|
return __down_read_common(sem, TASK_KILLABLE);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue