rcu: Rename sync_rcu_preempt_exp_done() to sync_rcu_exp_done()

Now that the RCU flavors have been consolidated, there is one common
function for checking whether an expedited RCU grace period has
completed, namely sync_rcu_preempt_exp_done().  Because this function is
no longer specific to RCU-preempt, this commit removes the "_preempt" from
its name.  This commit also renames sync_rcu_preempt_exp_done_unlocked()
to sync_rcu_exp_done_unlocked() for the same reason.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Author: Paul E. McKenney, 2019-11-27 13:59:37 -08:00
commit 6c7d7dbf5b (parent 4bc6b745e5)
2 changed files with 11 additions and 12 deletions
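
For context, sync_rcu_exp_done() answers a single question: have all CPUs
and all blocked tasks covered by the given rcu_node structure reported
their quiescent states for the current expedited grace period?  The hunks
below show only the function's opening lines, so here is a minimal sketch
of the whole helper, reconstructed from the kernel source of this period
rather than taken from the commit itself:

static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	/* Caller must hold rnp->lock; see the comment in the first hunk. */
	raw_lockdep_assert_held_rcu_node(rnp);

	/*
	 * Done when no tasks are blocking the expedited grace period
	 * (->exp_tasks) and no CPUs still owe a quiescent state
	 * (->expmask).
	 */
	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

The sync_rcu_exp_done_unlocked() variant in the first file simply wraps
this check in raw_spin_lock_irqsave_rcu_node() and
raw_spin_unlock_irqrestore_rcu_node() for callers that do not already
hold the rcu_node's ->lock.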

--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h

@@ -148,7 +148,7 @@ static void __maybe_unused sync_exp_reset_tree(void)
  *
  * Caller must hold the specified rcu_node structure's ->lock
  */
-static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+static bool sync_rcu_exp_done(struct rcu_node *rnp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -157,17 +157,16 @@ static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 }
 
 /*
- * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
- * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
- * itself
+ * Like sync_rcu_exp_done(), but this function assumes the caller doesn't
+ * hold the rcu_node's ->lock, and will acquire and release the lock itself
  */
-static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
+static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
 {
 	unsigned long flags;
 	bool ret;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	ret = sync_rcu_preempt_exp_done(rnp);
+	ret = sync_rcu_exp_done(rnp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	return ret;
@@ -191,7 +190,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 	unsigned long mask;
 
 	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp)) {
+		if (!sync_rcu_exp_done(rnp)) {
 			if (!rnp->expmask)
 				rcu_initiate_boost(rnp, flags);
 			else
@@ -471,9 +470,9 @@ static void synchronize_sched_expedited_wait(void)
 	for (;;) {
 		ret = swait_event_timeout_exclusive(
 				rcu_state.expedited_wq,
-				sync_rcu_preempt_exp_done_unlocked(rnp_root),
+				sync_rcu_exp_done_unlocked(rnp_root),
 				jiffies_stall);
-		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
+		if (ret > 0 || sync_rcu_exp_done_unlocked(rnp_root))
 			return;
 		WARN_ON(ret < 0); /* workqueues should not be signaled. */
 		if (rcu_cpu_stall_suppress)
@@ -507,7 +506,7 @@ static void synchronize_sched_expedited_wait(void)
 		rcu_for_each_node_breadth_first(rnp) {
 			if (rnp == rnp_root)
 				continue; /* printed unconditionally */
-			if (sync_rcu_preempt_exp_done_unlocked(rnp))
+			if (sync_rcu_exp_done_unlocked(rnp))
 				continue;
 			pr_cont(" l=%u:%d-%d:%#lx/%c",
 				rnp->level, rnp->grplo, rnp->grphi,

--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h

@@ -485,7 +485,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
 			     (!empty_norm || rnp->qsmask));
-		empty_exp = sync_rcu_preempt_exp_done(rnp);
+		empty_exp = sync_rcu_exp_done(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 		np = rcu_next_node_entry(t, rnp);
 		list_del_init(&t->rcu_node_entry);
@@ -509,7 +509,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
 		 * so we must take a snapshot of the expedited state.
 		 */
-		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
+		empty_exp_now = sync_rcu_exp_done(rnp);
 		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
 			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
 							 rnp->gp_seq,