rcu: Don't flag non-starting GPs before GP kthread is running
Currently rcu_check_gp_start_stall() complains if a grace period takes too long to start, where "too long" is roughly one RCU CPU stall-warning interval. This has worked well, but there are some debugging Kconfig options (such as CONFIG_EFI_PGT_DUMP=y) that can make booting take a very long time, so much so that the stall-warning interval has expired before RCU's grace-period kthread has even been spawned.

This commit therefore resets the rcu_state.gp_req_activity and rcu_state.gp_activity timestamps just before the grace-period kthread is spawned, and modifies the checks and adds ordering to ensure that if rcu_check_gp_start_stall() sees that the grace-period kthread has been spawned, it will also see the resets applied to the rcu_state.gp_req_activity and rcu_state.gp_activity timestamps.

Reported-by: Qian Cai <cai@lca.pw>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
[ paulmck: Fix whitespace issues reported by Qian Cai. ]
Tested-by: Qian Cai <cai@lca.pw>
[ paulmck: Simplify grace-period wakeup check per Steve Rostedt feedback. ]
This commit is contained in:
parent 4dfd5cd83d
commit 5648d65912
2 changed files with 20 additions and 15 deletions
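Before the diff itself, here is a minimal userspace sketch (not kernel code) of the publish/consume ordering the commit message describes, with C11 atomics standing in for the kernel's smp_store_release()/smp_load_acquire(). All names in it (struct task, demo_state, spawn_side(), check_side()) are invented for illustration and do not appear in the kernel.

/* Userspace sketch: reset the timestamps, then publish a pointer with
 * release ordering, so that a reader observing the pointer with acquire
 * ordering is guaranteed to also observe the timestamp resets. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task { int id; };			/* stand-in for struct task_struct */

struct demo_state {
	unsigned long gp_activity;		/* stand-in for rcu_state.gp_activity */
	unsigned long gp_req_activity;		/* stand-in for rcu_state.gp_req_activity */
	_Atomic(struct task *) gp_kthread;	/* published kthread pointer */
};

static struct demo_state st;
static struct task gp_task = { .id = 1 };

/* Spawn side: reset the timestamps, then publish the kthread pointer.
 * The release store plays the role of smp_store_release() in the patch. */
static void spawn_side(unsigned long now)
{
	st.gp_activity = now;
	st.gp_req_activity = now;
	atomic_store_explicit(&st.gp_kthread, &gp_task, memory_order_release);
}

/* Stall-check side: a NULL pointer means the kthread has not been spawned
 * yet, so it is too early to complain; a non-NULL pointer read with acquire
 * ordering guarantees the timestamp resets above are visible, so the age
 * check below cannot act on a stale, pre-boot timestamp. */
static bool check_side(unsigned long now, unsigned long stall_delay)
{
	struct task *t = atomic_load_explicit(&st.gp_kthread, memory_order_acquire);

	if (!t)
		return false;
	return now - st.gp_req_activity > stall_delay;
}

int main(void)
{
	printf("before spawn: %d\n", check_side(1000, 10));	/* 0: no kthread yet */
	spawn_side(1000);
	printf("just after:   %d\n", check_side(1005, 10));	/* 0: activity is recent */
	printf("much later:   %d\n", check_side(2000, 10));	/* 1: would now complain */
	return 0;
}

The same pairing is what lets the patched rcu_check_gp_start_stall() simply bail out when smp_load_acquire(&rcu_state.gp_kthread) returns NULL, instead of flagging a bogus grace-period-start stall during a slow boot.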
kernel/rcu/tree.c

@@ -1202,7 +1202,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
-	if (!rcu_state.gp_kthread) {
+	if (!READ_ONCE(rcu_state.gp_kthread)) {
 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
 		goto unlock_out;
 	}
@@ -1237,12 +1237,13 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
 }
 
 /*
- * Awaken the grace-period kthread. Don't do a self-awaken (unless in
- * an interrupt or softirq handler), and don't bother awakening when there
- * is nothing for the grace-period kthread to do (as in several CPUs raced
- * to awaken, and we lost), and finally don't try to awaken a kthread that
- * has not yet been created. If all those checks are passed, track some
- * debug information and awaken.
+ * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
+ * interrupt or softirq handler, in which case we just might immediately
+ * sleep upon return, resulting in a grace-period hang), and don't bother
+ * awakening when there is nothing for the grace-period kthread to do
+ * (as in several CPUs raced to awaken, we lost), and finally don't try
+ * to awaken a kthread that has not yet been created. If all those checks
+ * are passed, track some debug information and awaken.
  *
  * So why do the self-wakeup when in an interrupt or softirq handler
  * in the grace-period kthread's context? Because the kthread might have
@@ -1252,10 +1253,10 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
  */
 static void rcu_gp_kthread_wake(void)
 {
-	if ((current == rcu_state.gp_kthread &&
-	     !in_irq() && !in_serving_softirq()) ||
-	    !READ_ONCE(rcu_state.gp_flags) ||
-	    !rcu_state.gp_kthread)
+	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
+
+	if ((current == t && !in_irq() && !in_serving_softirq()) ||
+	    !READ_ONCE(rcu_state.gp_flags) || !t)
 		return;
 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
@@ -3554,7 +3555,10 @@ static int __init rcu_spawn_gp_kthread(void)
 	}
 	rnp = rcu_get_root();
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	rcu_state.gp_kthread = t;
+	WRITE_ONCE(rcu_state.gp_activity, jiffies);
+	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
+	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
+	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	wake_up_process(t);
 	rcu_spawn_nocb_kthreads();
kernel/rcu/tree_stall.h

@@ -578,6 +578,7 @@ void show_rcu_gp_kthreads(void)
 	unsigned long jw;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
+	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 
 	j = jiffies;
 	ja = j - READ_ONCE(rcu_state.gp_activity);
@@ -585,8 +586,7 @@ void show_rcu_gp_kthreads(void)
 	jw = j - READ_ONCE(rcu_state.gp_wake_time);
 	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
 		rcu_state.name, gp_state_getname(rcu_state.gp_state),
-		rcu_state.gp_state,
-		rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
+		rcu_state.gp_state, t ? t->state : 0x1ffffL,
 		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
 		(long)READ_ONCE(rcu_state.gp_seq),
 		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
@@ -633,7 +633,8 @@ static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 
 	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
-			 READ_ONCE(rnp_root->gp_seq_needed)))
+			 READ_ONCE(rnp_root->gp_seq_needed)) ||
+	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
 		return;
 	j = jiffies; /* Expensive access, and in common case don't get here. */
 	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||