rcu-tasks: Improve comments explaining tasks_rcu_exit_srcu purpose

[ Upstream commit e4e1e8089c ]

Make sure we don't have to dig through the depths of git blame again
in order to understand the subtle way rcu-tasks deals with exiting
tasks.

Suggested-by: Boqun Feng <boqun.feng@gmail.com>
Suggested-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Suggested-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Stable-dep-of: 28319d6dc5 ("rcu-tasks: Fix synchronize_rcu_tasks() VS zap_pid_ns_processes()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit a2b0cda452
parent e9ab0e81bc
Author: Frederic Weisbecker <frederic@kernel.org>
Date:   2022-11-25 14:54:58 +01:00
Committed-by: Greg Kroah-Hartman

@@ -827,11 +827,21 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 static void rcu_tasks_postscan(struct list_head *hop)
 {
 	/*
-	 * Wait for tasks that are in the process of exiting. This
-	 * does only part of the job, ensuring that all tasks that were
-	 * previously exiting reach the point where they have disabled
-	 * preemption, allowing the later synchronize_rcu() to finish
-	 * the job.
+	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
+	 * until their final schedule() with TASK_DEAD state. To cope with
+	 * this, divide the fragile exit path part in two intersecting
+	 * read side critical sections:
+	 *
+	 * 1) An _SRCU_ read side starting before calling exit_notify(),
+	 *    which may remove the task from the tasklist, and ending after
+	 *    the final preempt_disable() call in do_exit().
+	 *
+	 * 2) An _RCU_ read side starting with the final preempt_disable()
+	 *    call in do_exit() and ending with the final call to schedule()
+	 *    with TASK_DEAD state.
+	 *
+	 * This handles the part 1). And postgp will handle part 2) with a
+	 * call to synchronize_rcu().
 	 */
 	synchronize_srcu(&tasks_rcu_exit_srcu);
 }
@@ -898,7 +908,10 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp)
 	 *
 	 * In addition, this synchronize_rcu() waits for exiting tasks
 	 * to complete their final preempt_disable() region of execution,
-	 * cleaning up after the synchronize_srcu() above.
+	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
+	 * enforcing the whole region before tasklist removal until
+	 * the final schedule() with TASK_DEAD state to be an RCU TASKS
+	 * read side critical section.
 	 */
 	synchronize_rcu();
 }
@@ -988,7 +1001,11 @@ void show_rcu_tasks_classic_gp_kthread(void)
 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
 #endif // !defined(CONFIG_TINY_RCU)
 
-/* Do the srcu_read_lock() for the above synchronize_srcu(). */
+/*
+ * Contribute to protect against tasklist scan blind spot while the
+ * task is exiting and may be removed from the tasklist. See
+ * corresponding synchronize_srcu() for further details.
+ */
 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
 {
 	preempt_disable();
@@ -996,7 +1013,11 @@ void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
 	preempt_enable();
 }
 
-/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
+/*
+ * Contribute to protect against tasklist scan blind spot while the
+ * task is exiting and may be removed from the tasklist. See
+ * corresponding synchronize_srcu() for further details.
+ */
 void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
 {
 	struct task_struct *t = current;
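
For quick orientation, the coverage described by the new comments can be
laid out as a timeline of a single exiting task. This is only a sketch
reconstructed from the comment text above, not the actual kernel/exit.c
code, so the exact call sites and their ordering are approximate:

    /* Within do_exit(), as described by the comments added here: */
    exit_tasks_rcu_start();    /* srcu_read_lock(&tasks_rcu_exit_srcu):
                                  SRCU read side, part 1), begins */
    exit_notify();             /* task may leave the tasklist and be
                                  missed by rcu_tasks_pertask() */
    ...
    preempt_disable();         /* final preempt_disable() in do_exit():
                                  RCU read side, part 2), begins */
    exit_tasks_rcu_finish();   /* srcu_read_unlock(): part 1) ends;
                                  rcu_tasks_postscan()'s synchronize_srcu()
                                  waits for this */
    ...
    schedule();                /* final schedule() with TASK_DEAD state:
                                  part 2) ends; rcu_tasks_postgp()'s
                                  synchronize_rcu() waits for this */

In other words, rcu_tasks_postscan() and rcu_tasks_postgp() each wait for
one of the two read sides, and together they cover the whole window from
tasklist removal until the task's final schedule().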