introduce for_other_threads(p, t)

Cosmetic, but IMHO it makes the usage look clearer and simpler; the new
helper doesn't require initializing "t".

After this change, while_each_thread() has only 3 users, and it is only
used in do/while loops.
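
For illustration only (not part of the patch), the difference in the calling
pattern looks roughly like this; do_something() is a hypothetical callback:

	/* Hypothetical caller, sketched for illustration. */
	static void visit_other_threads(struct task_struct *p)
	{
		struct task_struct *t;

		/* Old pattern: "t" must be primed with "p" before the loop. */
		t = p;
		while_each_thread(p, t)
			do_something(t);

		/* New pattern: the helper initializes "t" itself. */
		for_other_threads(p, t)
			do_something(t);
	}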

Link: https://lkml.kernel.org/r/20231030155710.GA9095@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Christian Brauner <brauner@kernel.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 3 files changed, 8 insertions(+), 9 deletions(-)

fs/exec.c

@@ -1578,11 +1578,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
 	 * will be able to manipulate the current directory, etc.
 	 * It would be nice to force an unshare instead...
 	 */
-	t = p;
 	n_fs = 1;
 	spin_lock(&p->fs->lock);
 	rcu_read_lock();
-	while_each_thread(p, t) {
+	for_other_threads(p, t) {
 		if (t->fs == p->fs)
 			n_fs++;
 	}

include/linux/sched/signal.h

@@ -646,6 +646,9 @@ extern bool current_is_single_threaded(void);
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
+#define for_other_threads(p, t) \
+	for (t = p; (t = next_thread(t)) != p; )
+
 #define __for_each_thread(signal, t) \
 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
 				lockdep_is_held(&tasklist_lock))

kernel/signal.c

@@ -1376,12 +1376,12 @@ int force_sig_info(struct kernel_siginfo *info)
  */
 int zap_other_threads(struct task_struct *p)
 {
-	struct task_struct *t = p;
+	struct task_struct *t;
 	int count = 0;
 
 	p->signal->group_stop_count = 0;
 
-	while_each_thread(p, t) {
+	for_other_threads(p, t) {
 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 		/* Don't require de_thread to wait for the vhost_worker */
 		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
@@ -2465,12 +2465,10 @@ static bool do_signal_stop(int signr)
 		sig->group_exit_code = signr;
 
 		sig->group_stop_count = 0;
-
 		if (task_set_jobctl_pending(current, signr | gstop))
 			sig->group_stop_count++;
 
-		t = current;
-		while_each_thread(current, t) {
+		for_other_threads(current, t) {
 			/*
 			 * Setting state to TASK_STOPPED for a group
 			 * stop is always done with the siglock held,
@@ -2966,8 +2964,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
 	if (sigisemptyset(&retarget))
 		return;
 
-	t = tsk;
-	while_each_thread(tsk, t) {
+	for_other_threads(tsk, t) {
 		if (t->flags & PF_EXITING)
 			continue;