mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
rcu: Split RCU core processing out of __call_rcu()
The __call_rcu() function is a bit overweight, so this commit splits it into
actual enqueuing of and accounting for the callback (__call_rcu()) and
associated RCU-core processing (__call_rcu_core()).

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
parent a16b7a6934
commit 29154c57e3
1 changed file with 52 additions and 44 deletions
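
For readers skimming the diff below, the shape of the change is easier to see in a toy standalone model: __call_rcu() keeps only the enqueue/accounting step and delegates the "should we poke the grace-period machinery?" decision to __call_rcu_core(). This is a minimal userspace sketch, not kernel code; the struct fields, signatures, and the qhimark value here are simplified stand-ins, and the authoritative code is the diff itself.

/* Toy userspace model of the split -- not kernel code. */
#include <stdio.h>

struct rcu_data {
	long qlen;                /* callbacks queued on this CPU */
	long qlen_last_fqs_check; /* qlen at the last forced-QS check */
};

static const long qhimark = 10000; /* stand-in flood threshold */

/* Policy half: decide whether to kick the grace-period machinery. */
static void __call_rcu_core(struct rcu_data *rdp)
{
	if (rdp->qlen > rdp->qlen_last_fqs_check + qhimark) {
		printf("callback flood at qlen=%ld: force the grace period\n",
		       rdp->qlen);
		rdp->qlen_last_fqs_check = rdp->qlen; /* hysteresis */
	}
}

/* Mechanism half: enqueue and account, then delegate. */
static void __call_rcu(struct rcu_data *rdp)
{
	rdp->qlen++;          /* the enqueue/accounting that stays behind */
	__call_rcu_core(rdp); /* the processing the patch splits out */
}

int main(void)
{
	struct rcu_data rdp = { 0, 0 };

	for (int i = 0; i < 25000; i++)
		__call_rcu(&rdp);
	return 0;
}

Because qlen_last_fqs_check is advanced each time the threshold fires, the model triggers once around qlen 10001 and again around 20002 rather than on every subsequent call, which is the hysteresis the code comments describe.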
@@ -1861,6 +1861,56 @@ static void invoke_rcu_core(void)
 	raise_softirq(RCU_SOFTIRQ);
 }
 
+/*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
+static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
+			    struct rcu_head *head, unsigned long flags)
+{
+	/*
+	 * If called from an extended quiescent state, invoke the RCU
+	 * core in order to force a re-evaluation of RCU's idleness.
+	 */
+	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+		invoke_rcu_core();
+
+	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
+	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
+		return;
+
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so.  Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+
+		/* Are we ignoring a completed grace period? */
+		rcu_process_gp_end(rsp, rdp);
+		check_for_new_grace_period(rsp, rdp);
+
+		/* Start a new grace period if one not already started. */
+		if (!rcu_gp_in_progress(rsp)) {
+			unsigned long nestflag;
+			struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+			raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
+			rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock */
+		} else {
+			/* Give the grace period a kick. */
+			rdp->blimit = LONG_MAX;
+			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+			    *rdp->nxttail[RCU_DONE_TAIL] != head)
+				force_quiescent_state(rsp, 0);
+			rdp->n_force_qs_snap = rsp->n_force_qs;
+			rdp->qlen_last_fqs_check = rdp->qlen;
+		}
+	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
+		force_quiescent_state(rsp, 1);
+}
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp, bool lazy)
@@ -1900,50 +1950,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	else
 		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
 
-	/*
-	 * If called from an extended quiescent state, invoke the RCU
-	 * core in order to force a re-evaluation of RCU's idleness.
-	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
-		invoke_rcu_core();
-
-	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
-	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) {
-		local_irq_restore(flags);
-		return;
-	}
-
-	/*
-	 * Force the grace period if too many callbacks or too long waiting.
-	 * Enforce hysteresis, and don't invoke force_quiescent_state()
-	 * if some other CPU has recently done so.  Also, don't bother
-	 * invoking force_quiescent_state() if the newly enqueued callback
-	 * is the only one waiting for a grace period to complete.
-	 */
-	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
-
-		/* Are we ignoring a completed grace period? */
-		rcu_process_gp_end(rsp, rdp);
-		check_for_new_grace_period(rsp, rdp);
-
-		/* Start a new grace period if one not already started. */
-		if (!rcu_gp_in_progress(rsp)) {
-			unsigned long nestflag;
-			struct rcu_node *rnp_root = rcu_get_root(rsp);
-
-			raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
-			rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock */
-		} else {
-			/* Give the grace period a kick. */
-			rdp->blimit = LONG_MAX;
-			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
-			    *rdp->nxttail[RCU_DONE_TAIL] != head)
-				force_quiescent_state(rsp, 0);
-			rdp->n_force_qs_snap = rsp->n_force_qs;
-			rdp->qlen_last_fqs_check = rdp->qlen;
-		}
-	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
-		force_quiescent_state(rsp, 1);
-
+	/* Go handle any RCU core processing required. */
+	__call_rcu_core(rsp, rdp, head, flags);
 	local_irq_restore(flags);
 }
 
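
A side note on the "} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))" test that moves into __call_rcu_core(): it compares jiffies values with a wraparound-safe macro rather than plain "<", since jiffies is a free-running counter that eventually overflows. The standalone illustration below uses a macro body that mirrors the kernel's definition to the best of my knowledge; the sample values are invented.

/* Wrap-safe "a < b" for free-running counters such as jiffies. */
#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long deadline = ULONG_MAX - 5; /* jiffies_force_qs, pre-wrap */
	unsigned long now = 10;                 /* jiffies, just after wrap */

	/* Plain '<' thinks the deadline is still ~ULONG_MAX ticks away. */
	printf("naive deadline passed?     %d\n", deadline < now);
	/* The wrap-safe compare sees that 'now' is 16 ticks past it. */
	printf("wrap-safe deadline passed? %d\n", ULONG_CMP_LT(deadline, now));
	return 0;
}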