[PATCH] fix send_sigqueue() vs thread exit race

posix_timer_event() first checks that the target thread (in the SIGEV_THREAD_ID
case) does not have the PF_EXITING flag set, and only then calls
send_sigqueue(), which takes tasklist_lock.  But if the thread exits in
between, the kernel will oops (->sighand == NULL after __exit_sighand()).
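
Roughly, the window looks like this (an illustration only, not part of the
patch; the exit-side call chain is abbreviated):

	/* timer expiry: posix_timer_event()      target thread exiting
	 *
	 * it_process->flags & PF_EXITING
	 *   -> not yet set, take the
	 *      send_sigqueue() path
	 *                                        __exit_sighand():
	 *                                          tsk->sighand = NULL
	 * send_sigqueue(sig, q, p)
	 *   read_lock(&tasklist_lock);
	 *   spin_lock_irqsave(&p->sighand->siglock, ...);   <-- NULL dereference
	 */

Since the exit path clears ->sighand with tasklist_lock held for writing,
re-checking PF_EXITING only after send_sigqueue() has taken the read lock
closes this window.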

This patch moves the PF_EXITING check into send_sigqueue() itself, where it
can be done atomically under tasklist_lock.  When send_sigqueue() detects an
exiting thread it returns -1; in that case posix_timer_event() falls back to
sending the signal to the whole thread group.

Also, this patch fixes a task_struct use-after-free in posix_timer_event().

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit e752dd6cc6 (parent a97c9bf33f)
Author: Oleg Nesterov, 2005-09-06 15:17:42 -07:00
Committed by: Linus Torvalds
2 changed files with 27 additions and 23 deletions

--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c

@@ -427,21 +427,23 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
 	timr->sigq->info.si_code = SI_TIMER;
 	timr->sigq->info.si_tid = timr->it_id;
 	timr->sigq->info.si_value = timr->it_sigev_value;
 	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-		if (unlikely(timr->it_process->flags & PF_EXITING)) {
-			timr->it_sigev_notify = SIGEV_SIGNAL;
-			put_task_struct(timr->it_process);
-			timr->it_process = timr->it_process->group_leader;
-			goto group;
-		}
-		return send_sigqueue(timr->it_sigev_signo, timr->sigq,
-			timr->it_process);
-	}
-	else {
-group:
-		return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
-			timr->it_process);
+		struct task_struct *leader;
+		int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
+					timr->it_process);
+
+		if (likely(ret >= 0))
+			return ret;
+
+		timr->it_sigev_notify = SIGEV_SIGNAL;
+		leader = timr->it_process->group_leader;
+		put_task_struct(timr->it_process);
+		timr->it_process = leader;
 	}
+
+	return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
+				   timr->it_process);
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
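
Reassembled for readability (not an additional change, just the
SIGEV_THREAD_ID path as it reads after the hunk above; the comments here are
editorial):

	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
		struct task_struct *leader;
		int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
					timr->it_process);

		/* ret >= 0: the signal was queued (or ignored) for the thread */
		if (likely(ret >= 0))
			return ret;

		/*
		 * ret < 0: the thread is exiting.  Fall back to the thread
		 * group.  group_leader is read before put_task_struct() drops
		 * the reference, which is the use-after-free fix mentioned
		 * above.
		 */
		timr->it_sigev_notify = SIGEV_SIGNAL;
		leader = timr->it_process->group_leader;
		put_task_struct(timr->it_process);
		timr->it_process = leader;
	}

	return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
				   timr->it_process);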

--- a/kernel/signal.c
+++ b/kernel/signal.c

@@ -1366,16 +1366,16 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	unsigned long flags;
 	int ret = 0;
-	/*
-	 * We need the tasklist lock even for the specific
-	 * thread case (when we don't need to follow the group
-	 * lists) in order to avoid races with "p->sighand"
-	 * going away or changing from under us.
-	 */
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-	read_lock(&tasklist_lock);
+	read_lock(&tasklist_lock);
+
+	if (unlikely(p->flags & PF_EXITING)) {
+		ret = -1;
+		goto out_err;
+	}
+
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
@@ -1385,7 +1385,7 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 			BUG();
 		q->info.si_overrun++;
 		goto out;
-	}
+	}
 	/* Short-circuit ignored signals. */
 	if (sig_ignored(p, sig)) {
 		ret = 1;
@@ -1400,8 +1400,10 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 out:
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+out_err:
 	read_unlock(&tasklist_lock);
-	return(ret);
+
+	return ret;
 }
 
 int
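
Taken together, the error path of send_sigqueue() after this patch reads
roughly as follows (reassembled from the three hunks above; the queueing body
in the middle is elided and the comments are editorial):

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);

	/*
	 * Checked under tasklist_lock: the exit path clears ->sighand with
	 * the same lock held for writing, so the thread cannot lose its
	 * sighand between this check and taking the siglock below.
	 */
	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;	/* caller falls back to the thread group */
		goto out_err;
	}

	spin_lock_irqsave(&p->sighand->siglock, flags);
	/* ... queue the preallocated sigqueue, handle overrun ... */
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
out_err:
	read_unlock(&tasklist_lock);

	return ret;
}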