rcu: Add *_ONCE() to rcu_data ->rcu_forced_tick

The rcu_data structure's ->rcu_forced_tick field is read locklessly, so
this commit adds WRITE_ONCE() to all updates and READ_ONCE() to all
lockless reads.

This data race was reported by KCSAN.  This commit is not appropriate
for backporting because the failure is unlikely.
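
For reference, the sketch below illustrates the general pattern this commit
follows; it is a standalone userspace approximation, not the kernel code.
The READ_ONCE()/WRITE_ONCE() definitions are simplified stand-ins for the
kernel macros, and the forced_tick flag, node_lock, and helper names are
hypothetical.  The shape matches the change to rcu_nmi_enter_common(): a
lockless fast-path check with READ_ONCE(), a recheck under the lock using a
plain read, and updates published with WRITE_ONCE().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() macros. */
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static bool forced_tick;	/* updated under node_lock, read locklessly */

/* Lockless fast path: the marked read tells the compiler (and KCSAN)
 * that this concurrent access is intentional. */
static bool tick_not_yet_forced(void)
{
	return !READ_ONCE(forced_tick);
}

/* Slow path: recheck under the lock (a plain read is fine while holding
 * it), then publish the update with a marked write. */
static void force_tick(void)
{
	pthread_mutex_lock(&node_lock);
	if (!forced_tick)
		WRITE_ONCE(forced_tick, true);
	pthread_mutex_unlock(&node_lock);
}

int main(void)
{
	if (tick_not_yet_forced())
		force_tick();
	printf("forced_tick = %d\n", forced_tick);
	return 0;
}

Keeping the plain read inside the locked region mirrors the kernel change:
accesses fully serialized by the lock do not need marking, only the
lockless readers and the writes they can race with.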

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit 2a2ae872ef
parent a5b8950180
Author: Paul E. McKenney <paulmck@kernel.org>
Date:   2020-01-08 20:06:25 -08:00

@@ -818,11 +818,12 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
 		incby = 1;
 	} else if (tick_nohz_full_cpu(rdp->cpu) &&
 		   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-		   READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
+		   READ_ONCE(rdp->rcu_urgent_qs) &&
+		   !READ_ONCE(rdp->rcu_forced_tick)) {
 		raw_spin_lock_rcu_node(rdp->mynode);
 		// Recheck under lock.
 		if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-			rdp->rcu_forced_tick = true;
+			WRITE_ONCE(rdp->rcu_forced_tick, true);
 			tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
 		}
 		raw_spin_unlock_rcu_node(rdp->mynode);
@@ -899,7 +900,7 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
-		rdp->rcu_forced_tick = false;
+		WRITE_ONCE(rdp->rcu_forced_tick, false);
 	}
 }