futex: Restructure futex_requeue()

No point in taking two more 'requeue_pi' conditionals just to get to the
requeue. The same applies to the requeue_pi case, just the other way round.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211305.468835790@linutronix.de
This commit is contained in:
Thomas Gleixner 2021-08-15 23:29:12 +02:00 committed by Ingo Molnar
parent 59c7ecf154
commit 64b7b715f7

View file

@ -2104,20 +2104,17 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
break; break;
} }
/* /* Plain futexes just wake or requeue and are done */
* Wake nr_wake waiters. For requeue_pi, if we acquired the if (!requeue_pi) {
* lock, we already woke the top_waiter. If not, it will be if (++task_count <= nr_wake)
* woken by futex_unlock_pi(). mark_wake_futex(&wake_q, this);
*/ else
if (++task_count <= nr_wake && !requeue_pi) { requeue_futex(this, hb1, hb2, &key2);
mark_wake_futex(&wake_q, this);
continue; continue;
} }
/* Ensure we requeue to the expected futex for requeue_pi. */ /* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { if (!match_futex(this->requeue_pi_key, &key2)) {
/* Don't account for it */
task_count--;
ret = -EINVAL; ret = -EINVAL;
break; break;
} }
@ -2125,50 +2122,45 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
/* /*
* Requeue nr_requeue waiters and possibly one more in the case * Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically. * of requeue_pi if we couldn't acquire the lock atomically.
*
* Prepare the waiter to take the rt_mutex. Take a refcount
* on the pi_state and store the pointer in the futex_q
* object of the waiter.
*/ */
if (requeue_pi) { get_pi_state(pi_state);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter, this->task);
if (ret == 1) {
/* /*
* Prepare the waiter to take the rt_mutex. Take a * We got the lock. We do neither drop the refcount
* refcount on the pi_state and store the pointer in * on pi_state nor clear this->pi_state because the
* the futex_q object of the waiter. * waiter needs the pi_state for cleaning up the
* user space value. It will drop the refcount
* after doing so.
*/ */
get_pi_state(pi_state); requeue_pi_wake_futex(this, &key2, hb2);
this->pi_state = pi_state; task_count++;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, continue;
this->rt_waiter, } else if (ret) {
this->task); /*
if (ret == 1) { * rt_mutex_start_proxy_lock() detected a potential
/* * deadlock when we tried to queue that waiter.
* We got the lock. We do neither drop the * Drop the pi_state reference which we took above
* refcount on pi_state nor clear * and remove the pointer to the state from the
* this->pi_state because the waiter needs the * waiters futex_q object.
* pi_state for cleaning up the user space */
* value. It will drop the refcount after this->pi_state = NULL;
* doing so. put_pi_state(pi_state);
*/ /*
requeue_pi_wake_futex(this, &key2, hb2); * We stop queueing more waiters and let user space
continue; * deal with the mess.
} else if (ret) { */
/* break;
* rt_mutex_start_proxy_lock() detected a
* potential deadlock when we tried to queue
* that waiter. Drop the pi_state reference
* which we took above and remove the pointer
* to the state from the waiters futex_q
* object.
*/
this->pi_state = NULL;
put_pi_state(pi_state);
/* Don't account for it */
task_count--;
/*
* We stop queueing more waiters and let user
* space deal with the mess.
*/
break;
}
} }
/* Waiter is queued, move it to hb2 */
requeue_futex(this, hb1, hb2, &key2); requeue_futex(this, hb1, hb2, &key2);
task_count++;
} }
/* /*