rcu: Add *_ONCE() for grace-period progress indicators

The various RCU structures' ->gp_seq, ->gp_seq_needed, ->gp_req_activity,
and ->gp_activity fields are read locklessly, so they must be updated with
WRITE_ONCE() and, when read locklessly, with READ_ONCE().  This commit makes
these changes.
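For illustration only (not part of this commit, and not kernel code), a minimal userspace sketch of the pattern being applied: the field is still updated under a lock, but the store is marked with WRITE_ONCE() so that a diagnostic path sampling the field without holding the lock can use READ_ONCE() rather than a plain, racy load. The READ_ONCE()/WRITE_ONCE() definitions, the pthread mutex (standing in for the rcu_node lock), and the request_gp()/show_progress() helpers below are simplified stand-ins invented for this sketch.

/*
 * Minimal userspace sketch of marking a locked update so that lockless
 * readers can legitimately sample the field.  The macros below are
 * simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE().
 */
#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static pthread_mutex_t gp_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long gp_seq_needed;	/* Written under gp_lock, read locklessly. */

/* Updater: holds the lock, but marks the store for lockless readers. */
static void request_gp(unsigned long gp_seq_req)
{
	pthread_mutex_lock(&gp_lock);
	if (gp_seq_needed < gp_seq_req)
		WRITE_ONCE(gp_seq_needed, gp_seq_req);
	pthread_mutex_unlock(&gp_lock);
}

/* Diagnostic reader: samples the field without holding the lock. */
static void *show_progress(void *arg)
{
	(void)arg;
	printf("gp_seq_needed: %lu\n", READ_ONCE(gp_seq_needed));
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, show_progress, NULL);
	request_gp(4);
	pthread_join(tid, NULL);
	return 0;
}

The wrap-safe ULONG_CMP_LT() comparisons and the rcu_node locking discipline are elided here; the point is only the WRITE_ONCE()/READ_ONCE() pairing that KCSAN expects for intentional lockless accesses.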

This data race was reported by KCSAN.  Not appropriate for backporting
due to failure being unlikely.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Author: Paul E. McKenney <paulmck@kernel.org>
Date:   2020-01-04 11:33:17 -08:00
Commit: 8ff37290d6
Parent: bfeebe2421

3 changed files with 23 additions and 19 deletions

kernel/rcu/tree.c

@@ -1175,7 +1175,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 				  TPS("Prestarted"));
 		goto unlock_out;
 	}
-	rnp->gp_seq_needed = gp_seq_req;
+	WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
 	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
 		/*
 		 * We just marked the leaf or internal node, and a
@@ -1210,8 +1210,8 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 unlock_out:
 	/* Push furthest requested GP to leaf node and rcu_data structure. */
 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
-		rnp_start->gp_seq_needed = rnp->gp_seq_needed;
-		rdp->gp_seq_needed = rnp->gp_seq_needed;
+		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
+		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
 	}
 	if (rnp != rnp_start)
 		raw_spin_unlock_rcu_node(rnp);
@@ -1423,7 +1423,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 	}
 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
-		rdp->gp_seq_needed = rnp->gp_seq_needed;
+		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
 	WRITE_ONCE(rdp->gpwrap, false);
 	rcu_gpnum_ovf(rnp, rdp);
 	return ret;
@@ -3276,12 +3276,12 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	rnp = rdp->mynode;
 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
 	rdp->beenonline = true;	 /* We have now been online. */
-	rdp->gp_seq = rnp->gp_seq;
-	rdp->gp_seq_needed = rnp->gp_seq;
+	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
+	rdp->gp_seq_needed = rdp->gp_seq;
 	rdp->cpu_no_qs.b.norm = true;
 	rdp->core_needs_qs = false;
 	rdp->rcu_iw_pending = false;
-	rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
+	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	rcu_prepare_kthreads(cpu);

kernel/rcu/tree_plugin.h

@@ -753,7 +753,7 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 	raw_lockdep_assert_held_rcu_node(rnp);
 	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
 		__func__, rnp->grplo, rnp->grphi, rnp->level,
-		(long)rnp->gp_seq, (long)rnp->completedqs);
+		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
 	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
 		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
 			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);

kernel/rcu/tree_stall.h

@@ -592,21 +592,22 @@ void show_rcu_gp_kthreads(void)
 		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
 		READ_ONCE(rcu_state.gp_flags));
 	rcu_for_each_node_breadth_first(rnp) {
-		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
+		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
+				 READ_ONCE(rnp->gp_seq_needed)))
 			continue;
 		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
-			rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
-			(long)rnp->gp_seq_needed);
+			rnp->grplo, rnp->grphi, (long)READ_ONCE(rnp->gp_seq),
+			(long)READ_ONCE(rnp->gp_seq_needed));
 		if (!rcu_is_leaf_node(rnp))
 			continue;
 		for_each_leaf_node_possible_cpu(rnp, cpu) {
 			rdp = per_cpu_ptr(&rcu_data, cpu);
 			if (rdp->gpwrap ||
-			    ULONG_CMP_GE(rcu_state.gp_seq,
-					 rdp->gp_seq_needed))
+			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
+					 READ_ONCE(rdp->gp_seq_needed)))
 				continue;
 			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-				cpu, (long)rdp->gp_seq_needed);
+				cpu, (long)READ_ONCE(rdp->gp_seq_needed));
 		}
 	}
 	for_each_possible_cpu(cpu) {
@@ -631,7 +632,8 @@ static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 	static atomic_t warned = ATOMIC_INIT(0);
 
 	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
-	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
+			 READ_ONCE(rnp_root->gp_seq_needed)))
 		return;
 	j = jiffies; /* Expensive access, and in common case don't get here. */
 	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
@@ -642,7 +644,8 @@ static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	j = jiffies;
 	if (rcu_gp_in_progress() ||
-	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
+			 READ_ONCE(rnp_root->gp_seq_needed)) ||
 	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
 	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 	    atomic_read(&warned)) {
@@ -655,9 +658,10 @@ static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 	j = jiffies;
 	if (rcu_gp_in_progress() ||
-	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
-	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
+	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
+			 READ_ONCE(rnp_root->gp_seq_needed)) ||
+	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 	    atomic_xchg(&warned, 1)) {
 		if (rnp_root != rnp)
 			/* irqs remain disabled. */