rcu: Weaken ->dynticks accesses and updates
Accesses to the rcu_data structure's ->dynticks field have always been fully ordered because it was not possible to prove that weaker ordering was safe. However, with the removal of the rcu_eqs_special_set() function and the advent of the Linux-kernel memory model, it is now easy to show that two of the four original full memory barriers can be weakened to acquire and release operations. The remaining pair must remain full memory barriers. This change makes the memory ordering requirements more evident, and it might well also speed up the to-idle and from-idle fastpaths on some architectures.

The following litmus test, adapted from one supplied off-list by Frederic Weisbecker, models the RCU grace-period kthread detecting an idle CPU that is concurrently transitioning to non-idle:

	C dynticks-from-idle

	{
		DYNTICKS=0; (* Initially idle. *)
	}

	P0(int *X, int *DYNTICKS)
	{
		int dynticks;
		int x;

		// Idle.
		dynticks = READ_ONCE(*DYNTICKS);
		smp_store_release(DYNTICKS, dynticks + 1);
		smp_mb();
		// Now non-idle.
		x = READ_ONCE(*X);
	}

	P1(int *X, int *DYNTICKS)
	{
		int dynticks;

		WRITE_ONCE(*X, 1);
		smp_mb();
		dynticks = smp_load_acquire(DYNTICKS);
	}

	exists (1:dynticks=0 /\ 0:x=0)

Running "herd7 -conf linux-kernel.cfg dynticks-from-idle.litmus" verifies this transition, namely, showing that if the RCU grace-period kthread (P1) sees another CPU as idle (P0), then any memory access prior to the start of the grace period (P1's write to X) will be seen by any RCU read-side critical section following the to-non-idle transition (P0's read from X). This is a straightforward use of full memory barriers to force ordering in a store-buffering (SB) litmus test.

The following litmus test, also adapted from the one supplied off-list by Frederic Weisbecker, models the RCU grace-period kthread detecting a non-idle CPU that is concurrently transitioning to idle:

	C dynticks-into-idle

	{
		DYNTICKS=1; (* Initially non-idle. *)
	}

	P0(int *X, int *DYNTICKS)
	{
		int dynticks;

		// Non-idle.
		WRITE_ONCE(*X, 1);
		dynticks = READ_ONCE(*DYNTICKS);
		smp_store_release(DYNTICKS, dynticks + 1);
		smp_mb();
		// Now idle.
	}

	P1(int *X, int *DYNTICKS)
	{
		int x;
		int dynticks;

		smp_mb();
		dynticks = smp_load_acquire(DYNTICKS);
		x = READ_ONCE(*X);
	}

	exists (1:dynticks=2 /\ 1:x=0)

Running "herd7 -conf linux-kernel.cfg dynticks-into-idle.litmus" verifies this transition, namely, showing that if the RCU grace-period kthread (P1) sees another CPU as newly idle (P0), then any pre-idle memory access (P0's write to X) will be seen by any code following the grace period (P1's read from X). This is a simple release-acquire pair forcing ordering in a message-passing (MP) litmus test.

Of course, if the grace-period kthread detects the CPU as non-idle, it will refrain from reporting a quiescent state on behalf of that CPU, so there are no ordering requirements from the grace-period kthread in that case. However, other subsystems call rcu_is_idle_cpu() to check for CPUs being non-idle from an RCU perspective. That case is also verified by the above litmus tests with the proviso that the sense of the low-order bit of the DYNTICKS counter be inverted.

Unfortunately, on x86 smp_mb() is as expensive as a cache-local atomic increment, so there is nothing to be gained by weakening the ->dynticks updates themselves. This commit therefore weakens only the read from ->dynticks. However, the updates are abstracted into a rcu_dynticks_inc() function to ease any future changes that might be needed.

[ paulmck: Apply Linus Torvalds feedback. ]
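To make the weakening concrete, here is a minimal before-and-after sketch of the snapshot read, based on the two forms of rcu_dynticks_snap() shown in the diff below (the _old/_new suffixes are illustrative only, not kernel names):

	/* Before: a value-preserving read-modify-write.  This is fully
	 * ordered, but it also writes to (and thus dirties) the target
	 * CPU's ->dynticks cacheline just to take a snapshot of it. */
	static int rcu_dynticks_snap_old(struct rcu_data *rdp)
	{
		return atomic_add_return(0, &rdp->dynticks);
	}

	/* After: an explicit full barrier followed by an acquire read.
	 * This provides the ordering exercised by the litmus tests above
	 * while avoiding the write to the remote cacheline. */
	static int rcu_dynticks_snap_new(struct rcu_data *rdp)
	{
		smp_mb();  // Fundamental RCU ordering guarantee.
		return atomic_read_acquire(&rdp->dynticks);
	}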
Link: https://lore.kernel.org/lkml/20210721202127.2129660-4-paulmck@kernel.org/
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
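As a reading aid for the diff below, here is a hedged sketch of the ->dynticks parity convention that the WARN_ON_ONCE() checks rely on (the helper name is hypothetical, not part of this commit):

	/* ->dynticks is even when the CPU is in an extended quiescent
	 * state (idle from RCU's perspective) and odd when RCU is
	 * watching. */
	static inline bool dynticks_value_is_eqs(unsigned long seq)	/* hypothetical */
	{
		return !(seq & 0x1);	/* Even => extended quiescent state. */
	}

Under this convention, rcu_dynticks_inc(1) toggles the CPU between idle and non-idle, while rcu_dynticks_inc(2) in rcu_momentary_dyntick_idle() advances the counter without changing its parity, so that concurrent grace-period snapshots see a changed value without the CPU ever appearing idle.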
This commit is contained in:
parent
a86baa69c2
commit
2be57f7328
1 changed file with 18 additions and 8 deletions
kernel/rcu/tree.c

@@ -251,6 +251,15 @@ void rcu_softirq_qs(void)
 	rcu_tasks_qs(current, false);
 }
 
+/*
+ * Increment the current CPU's rcu_data structure's ->dynticks field
+ * with ordering.  Return the new value.
+ */
+static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
+{
+	return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
+}
+
 /*
  * Record entry into an extended quiescent state.  This is only to be
  * called when not already in an extended quiescent state, that is,
@@ -267,7 +276,7 @@ static noinstr void rcu_dynticks_eqs_enter(void)
 	 * next idle sojourn.
 	 */
 	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-	seq = arch_atomic_inc_return(&this_cpu_ptr(&rcu_data)->dynticks);
+	seq = rcu_dynticks_inc(1);
 	// RCU is no longer watching.  Better be in extended quiescent state!
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
 }
@@ -286,7 +295,7 @@ static noinstr void rcu_dynticks_eqs_exit(void)
 	 * and we also must force ordering with the next RCU read-side
 	 * critical section.
 	 */
-	seq = arch_atomic_inc_return(&this_cpu_ptr(&rcu_data)->dynticks);
+	seq = rcu_dynticks_inc(1);
 	// RCU is now watching.  Better not be in an extended quiescent state!
 	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
@@ -308,7 +317,7 @@ static void rcu_dynticks_eqs_online(void)
 
 	if (atomic_read(&rdp->dynticks) & 0x1)
 		return;
-	atomic_inc(&rdp->dynticks);
+	rcu_dynticks_inc(1);
 }
 
 /*
@@ -318,7 +327,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	return !(arch_atomic_read(&this_cpu_ptr(&rcu_data)->dynticks) & 0x1);
+	return !(atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
 }
 
 /*
@@ -327,7 +336,8 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static int rcu_dynticks_snap(struct rcu_data *rdp)
 {
-	return atomic_add_return(0, &rdp->dynticks);
+	smp_mb();  // Fundamental RCU ordering guarantee.
+	return atomic_read_acquire(&rdp->dynticks);
 }
 
 /*
@@ -391,12 +401,12 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
  */
 notrace void rcu_momentary_dyntick_idle(void)
 {
-	int special;
+	int seq;
 
 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-	special = atomic_add_return(2, &this_cpu_ptr(&rcu_data)->dynticks);
+	seq = rcu_dynticks_inc(2);
 	/* It is illegal to call this from idle state. */
-	WARN_ON_ONCE(!(special & 0x1));
+	WARN_ON_ONCE(!(seq & 0x1));
 	rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
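For context, a hedged sketch (hypothetical helper, not part of this commit) of how grace-period-side code might consume the weakened snapshot:

	/* Sample a remote CPU's ->dynticks.  The smp_mb() inside
	 * rcu_dynticks_snap() pairs with the fully ordered updates
	 * performed by rcu_dynticks_inc(), matching the store-buffering
	 * litmus test in the commit message above. */
	static bool cpu_snap_says_idle(struct rcu_data *rdp, int *snap)
	{
		*snap = rcu_dynticks_snap(rdp);	/* smp_mb() + acquire read. */
		return !(*snap & 0x1);		/* Even => idle, QS reportable. */
	}

If the snapshot is odd, the CPU was non-idle, and the grace-period kthread instead waits and later compares a fresh snapshot against the saved one: any change in ->dynticks implies that the CPU passed through a quiescent state in the meantime.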