locking/qspinlock: Use smp_cond_load_relaxed() to wait for next node
When a locker reaches the head of the queue and takes the lock, a
concurrent locker may enqueue and force the lock holder to spin
whilst its node->next field is initialised. Rather than open-code
a READ_ONCE/cpu_relax() loop, this can be implemented using
smp_cond_load_relaxed() instead.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1524738868-31318-10-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7f56b58a92
commit c131a198c4
1 changed file with 2 additions and 4 deletions
@@ -483,10 +483,8 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * contended path; wait for next if not observed yet, release.
 	 */
-	if (!next) {
-		while (!(next = READ_ONCE(node->next)))
-			cpu_relax();
-	}
+	if (!next)
+		next = smp_cond_load_relaxed(&node->next, (VAL));
 
 	arch_mcs_spin_unlock_contended(&next->locked);
 	pv_kick_node(lock, next);
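
For context, smp_cond_load_relaxed(ptr, cond_expr) busy-waits until cond_expr
becomes true, re-loading *ptr into VAL on each iteration, and returns the last
value loaded. The sketch below is a paraphrase of the generic fallback in
include/asm-generic/barrier.h, not the exact kernel macro (architectures such
as arm64 may override it, e.g. to wait on an event with WFE); the _sketch
suffix marks it as illustrative only.

/*
 * Illustrative paraphrase of the generic smp_cond_load_relaxed() fallback:
 * spin with cpu_relax() until cond_expr, evaluated with VAL bound to the
 * latest relaxed load of *ptr, is true, then return that value. No memory
 * ordering beyond the plain loads is implied.
 */
#define smp_cond_load_relaxed_sketch(ptr, cond_expr) ({	\
	typeof(ptr) __ptr = (ptr);			\
	typeof(*ptr) VAL;				\
	for (;;) {					\
		VAL = READ_ONCE(*__ptr);		\
		if (cond_expr)				\
			break;				\
		cpu_relax();				\
	}						\
	VAL;						\
})

Under that reading, the new line
	next = smp_cond_load_relaxed(&node->next, (VAL));
spins until node->next is observed non-NULL and returns the pointer, which is
what the removed open-coded READ_ONCE()/cpu_relax() loop did by hand.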