mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00
rcu: Make synchronize_rcu() fast path update ->gp_seq counters
This commit causes the early boot single-CPU synchronize_rcu() fastpath to update the rcu_state and rcu_node structures' ->gp_seq and ->gp_seq_needed counters. This will allow the full-state polled grace-period APIs to detect all normal grace periods without the need to track the special combined polling-only counter, which is a step towards removing the ->rgos_polled field from the rcu_gp_oldstate structure, thereby reducing its size by one third. Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent
5f11bad6b7
commit
910e12092e
1 changed file with 26 additions and 13 deletions
|
@ -3480,24 +3480,37 @@ static int rcu_blocking_is_gp(void)
|
||||||
*/
|
*/
|
||||||
void synchronize_rcu(void)
|
void synchronize_rcu(void)
|
||||||
{
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
struct rcu_node *rnp;
|
||||||
|
|
||||||
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
|
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
|
||||||
lock_is_held(&rcu_lock_map) ||
|
lock_is_held(&rcu_lock_map) ||
|
||||||
lock_is_held(&rcu_sched_lock_map),
|
lock_is_held(&rcu_sched_lock_map),
|
||||||
"Illegal synchronize_rcu() in RCU read-side critical section");
|
"Illegal synchronize_rcu() in RCU read-side critical section");
|
||||||
if (rcu_blocking_is_gp()) {
|
if (!rcu_blocking_is_gp()) {
|
||||||
// Note well that this code runs with !PREEMPT && !SMP.
|
|
||||||
// In addition, all code that advances grace periods runs at
|
|
||||||
// process level. Therefore, this normal GP overlaps with
|
|
||||||
// other normal GPs only by being fully nested within them,
|
|
||||||
// which allows reuse of ->gp_seq_polled_snap.
|
|
||||||
rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
|
|
||||||
rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
|
|
||||||
return; // Context allows vacuous grace periods.
|
|
||||||
}
|
|
||||||
if (rcu_gp_is_expedited())
|
if (rcu_gp_is_expedited())
|
||||||
synchronize_rcu_expedited();
|
synchronize_rcu_expedited();
|
||||||
else
|
else
|
||||||
wait_rcu_gp(call_rcu);
|
wait_rcu_gp(call_rcu);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Context allows vacuous grace periods.
|
||||||
|
// Note well that this code runs with !PREEMPT && !SMP.
|
||||||
|
// In addition, all code that advances grace periods runs at
|
||||||
|
// process level. Therefore, this normal GP overlaps with other
|
||||||
|
// normal GPs only by being fully nested within them, which allows
|
||||||
|
// reuse of ->gp_seq_polled_snap.
|
||||||
|
rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
|
||||||
|
rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
|
||||||
|
|
||||||
|
// Update normal grace-period counters to record grace period.
|
||||||
|
local_irq_save(flags);
|
||||||
|
WARN_ON_ONCE(num_online_cpus() > 1);
|
||||||
|
rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
|
||||||
|
rcu_for_each_node_breadth_first(rnp)
|
||||||
|
rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
|
||||||
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(synchronize_rcu);
|
EXPORT_SYMBOL_GPL(synchronize_rcu);
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue