From c5c385baee9bdf3218fc3c37f3cbc4b52621aefb Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Wed, 3 May 2023 02:33:51 +0000
Subject: [PATCH] locking/rwsem: Add __always_inline annotation to __down_read_common() and inlined callers

commit 92cc5d00a431e96e5a49c0b97e5ad4fa7536bd4b upstream.

Apparently despite it being marked inline, the compiler
may not inline __down_read_common() which makes it difficult
to identify the cause of lock contention, as the blocked
function in traceevents will always be listed as
__down_read_common().

So this patch adds __always_inline annotation to the common
function (as well as the inlined helper callers) to force it
to be inlined so the blocking function will be listed (via
wchan) in traceevents.

Fixes: c995e638ccbb ("locking/rwsem: Fold __down_{read,write}*()")
Reported-by: Tim Murray
Signed-off-by: John Stultz
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Waiman Long
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20230503023351.2832796-1-jstultz@google.com
Signed-off-by: Greg Kroah-Hartman
---
 kernel/locking/rwsem.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index de375feada51..f0287a16b4ec 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1221,7 +1221,7 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 /*
  * lock for reading
  */
-static inline int __down_read_common(struct rw_semaphore *sem, int state)
+static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
 	int ret = 0;
 	long count;
@@ -1239,17 +1239,17 @@ static inline int __down_read_common(struct rw_semaphore *sem, int state)
 	return ret;
 }
 
-static inline void __down_read(struct rw_semaphore *sem)
+static __always_inline void __down_read(struct rw_semaphore *sem)
 {
 	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
 }
 
-static inline int __down_read_interruptible(struct rw_semaphore *sem)
+static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
 {
 	return __down_read_common(sem, TASK_INTERRUPTIBLE);
 }
 
-static inline int __down_read_killable(struct rw_semaphore *sem)
+static __always_inline int __down_read_killable(struct rw_semaphore *sem)
 {
 	return __down_read_common(sem, TASK_KILLABLE);
 }
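
Not part of the patch: below is a minimal userspace sketch of the mechanism the
changelog describes. Plain "inline" is only a hint, so the compiler may keep an
out-of-line copy of the common helper and wchan/traceevents then report that
helper for every blocked reader; __attribute__((__always_inline__)) forces the
body into each caller so the real blocking site is what shows up. The rwsem
struct, state enum, and function names here are stand-ins, not the kernel's
code, and __always_inline is defined locally for the example.

/* build with: gcc -O2 -Wall sketch.c */
#include <stdio.h>

/* Local stand-in for the kernel's __always_inline attribute macro. */
#define __always_inline inline __attribute__((__always_inline__))

struct rw_semaphore { long count; };              /* stand-in, not the kernel type */

enum task_state { TASK_UNINTERRUPTIBLE, TASK_INTERRUPTIBLE, TASK_KILLABLE };

/* Hint only: the compiler is free to emit this as a real function. */
static inline int down_read_common_hint(struct rw_semaphore *sem, int state)
{
	(void)state;
	return sem->count >= 0 ? 0 : -1;
}

/* Forced: the body is folded into every caller at compile time. */
static __always_inline int down_read_common_forced(struct rw_semaphore *sem, int state)
{
	(void)state;
	return sem->count >= 0 ? 0 : -1;
}

/* Caller wrapper, also forced inline, mirroring the patch's helpers. */
static __always_inline int down_read_killable_sketch(struct rw_semaphore *sem)
{
	return down_read_common_forced(sem, TASK_KILLABLE);
}

int main(void)
{
	struct rw_semaphore sem = { .count = 1 };

	printf("hint:   %d\n", down_read_common_hint(&sem, TASK_UNINTERRUPTIBLE));
	printf("forced: %d\n", down_read_killable_sketch(&sem));
	return 0;
}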