From d0c006402e7941558e5283ae434e2847c7999378 Mon Sep 17 00:00:00 2001
From: Uros Bizjak
Date: Wed, 19 Oct 2022 16:08:50 +0200
Subject: [PATCH 1/2] jump_label: Use atomic_try_cmpxchg() in
 static_key_slow_inc_cpuslocked()

Use atomic_try_cmpxchg() instead of atomic_cmpxchg(*ptr, old, new) == old
in static_key_slow_inc_cpuslocked(). On x86, the CMPXCHG instruction
returns success in the ZF flag, so this change saves a compare after
cmpxchg (and the related move instruction in front of cmpxchg).

Also, atomic_try_cmpxchg() implicitly assigns the old *ptr value to "old"
when cmpxchg fails, enabling further code simplifications.

No functional change intended.

Signed-off-by: Uros Bizjak
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20221019140850.3395-1-ubizjak@gmail.com
---
 kernel/jump_label.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 714ac4c3b556..4d6c6f5f60db 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -115,8 +115,6 @@ EXPORT_SYMBOL_GPL(static_key_count);
 
 void static_key_slow_inc_cpuslocked(struct static_key *key)
 {
-	int v, v1;
-
 	STATIC_KEY_CHECK_USE(key);
 	lockdep_assert_cpus_held();
 
@@ -132,11 +130,9 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
 	 * so it counts as "enabled" in jump_label_update(). Note that
 	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
-	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
-		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
-		if (likely(v1 == v))
+	for (int v = atomic_read(&key->enabled); v > 0; )
+		if (likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)))
 			return;
-	}
 
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
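
The try_cmpxchg idiom used above can be illustrated outside the kernel. The
sketch below is a standalone userspace analogue, not the kernel API: it uses
C11 atomics, where atomic_compare_exchange_strong() mirrors
atomic_try_cmpxchg() by returning a success flag and writing the observed
value back into the expected-value argument on failure, so the loop needs no
explicit re-read.

/* Minimal userspace sketch of the try-cmpxchg loop shape; illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *v only while its current value is strictly positive. */
static bool inc_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0) {
		/* On failure, "old" is updated to the value the CAS observed. */
		if (atomic_compare_exchange_strong(v, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int enabled = 1;

	printf("incremented: %d, value: %d\n",
	       inc_if_positive(&enabled), atomic_load(&enabled));
	return 0;
}
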
From 90d758896787048fa3d4209309d4800f3920e66f Mon Sep 17 00:00:00 2001
From: Alexey Izbyshev
Date: Sat, 12 Nov 2022 00:54:39 +0300
Subject: [PATCH 2/2] futex: Resend potentially swallowed owner death
 notification

Commit ca16d5bee598 ("futex: Prevent robust futex exit race") addressed
two cases when tasks waiting on a robust non-PI futex remained blocked
despite the futex not being owned anymore:

* if the owner died after writing zero to the futex word, but before
  waking up a waiter

* if a task waiting on the futex was woken up, but died before updating
  the futex word (effectively swallowing the notification without acting
  on it)

In the second case, the task could be woken up either by the previous
owner (after the futex word was reset to zero) or by the kernel (after
the OWNER_DIED bit was set and the TID part of the futex word was reset
to zero) if the previous owner died without resetting the futex.

Because the referenced commit wakes up a potential waiter only if the
whole futex word is zero, the latter subcase remains unaddressed.

Fix this by looking only at the TID part of the futex word when deciding
whether a wake up is needed.

Fixes: ca16d5bee598 ("futex: Prevent robust futex exit race")
Signed-off-by: Alexey Izbyshev
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20221111215439.248185-1-izbyshev@ispras.ru
---
 kernel/futex/core.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index b22ef1efe751..514e4582b863 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -638,6 +638,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
 			      bool pi, bool pending_op)
 {
 	u32 uval, nval, mval;
+	pid_t owner;
 	int err;
 
 	/* Futex address must be 32bit aligned */
@@ -659,6 +660,10 @@ retry:
 	 * 2. A woken up waiter is killed before it can acquire the
 	 *    futex in user space.
 	 *
+	 * In the second case, the wake up notification could be generated
+	 * by the unlock path in user space after setting the futex value
+	 * to zero or by the kernel after setting the OWNER_DIED bit below.
+	 *
 	 * In both cases the TID validation below prevents a wakeup of
 	 * potential waiters which can cause these waiters to block
 	 * forever.
@@ -667,24 +672,27 @@
 	 *
 	 *	1) task->robust_list->list_op_pending != NULL
 	 *	   @pending_op == true
-	 *	2) User space futex value == 0
+	 *	2) The owner part of user space futex value == 0
 	 *	3) Regular futex: @pi == false
 	 *
 	 * If these conditions are met, it is safe to attempt waking up a
 	 * potential waiter without touching the user space futex value and
-	 * trying to set the OWNER_DIED bit. The user space futex value is
-	 * uncontended and the rest of the user space mutex state is
-	 * consistent, so a woken waiter will just take over the
-	 * uncontended futex. Setting the OWNER_DIED bit would create
-	 * inconsistent state and malfunction of the user space owner died
-	 * handling.
+	 * trying to set the OWNER_DIED bit. If the futex value is zero,
+	 * the rest of the user space mutex state is consistent, so a woken
+	 * waiter will just take over the uncontended futex. Setting the
+	 * OWNER_DIED bit would create inconsistent state and malfunction
+	 * of the user space owner died handling. Otherwise, the OWNER_DIED
+	 * bit is already set, and the woken waiter is expected to deal with
+	 * this.
 	 */
-	if (pending_op && !pi && !uval) {
+	owner = uval & FUTEX_TID_MASK;
+
+	if (pending_op && !pi && !owner) {
 		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
 		return 0;
 	}
 
-	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+	if (owner != task_pid_vnr(curr))
 		return 0;
 
 	/*
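
For context on the check above: the futex word packs the owner TID into the
bits covered by FUTEX_TID_MASK, with FUTEX_OWNER_DIED and FUTEX_WAITERS as
flag bits above it, so a word whose TID part is zero has no owner even when
OWNER_DIED is already set. A minimal userspace sketch of that layout follows
(illustrative only; it reuses the constants from <linux/futex.h>, and the
helper name is made up):

#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a robust futex word into its owner TID and state bits. */
static void describe_futex_word(uint32_t uval)
{
	uint32_t owner = uval & FUTEX_TID_MASK;

	printf("word=%#010x owner_tid=%u owner_died=%d waiters=%d\n",
	       (unsigned)uval, (unsigned)owner,
	       !!(uval & FUTEX_OWNER_DIED),
	       !!(uval & FUTEX_WAITERS));
}

int main(void)
{
	describe_futex_word(0);                       /* reset by the dying owner in user space */
	describe_futex_word(FUTEX_OWNER_DIED);        /* TID cleared by the kernel, flag set */
	describe_futex_word(FUTEX_OWNER_DIED | 1234); /* owner dead, but futex still owned */
	return 0;
}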