rcu: Consolidate initialization and CPU-hotplug code

This commit consolidates the initialization and CPU-hotplug code at
the end of kernel/rcu/tree.c.  This is strictly a code-motion commit.
No functionality has changed.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent c004d231ca
commit 5a04848d00
@@ -144,14 +144,16 @@ static int rcu_scheduler_fully_active __read_mostly;
 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
                              unsigned long gps, unsigned long flags);
-static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
-static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
+static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
+static bool rcu_init_invoked(void);
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
+static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 
 /*
  * rcuc/rcub/rcuop kthread realtime priority. The "rcuop"
@@ -214,27 +216,6 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
  */
 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
 
-/*
- * Compute the mask of online CPUs for the specified rcu_node structure.
- * This will not be stable unless the rcu_node structure's ->lock is
- * held, but the bit corresponding to the current CPU will be stable
- * in most contexts.
- */
-static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
-{
-        return READ_ONCE(rnp->qsmaskinitnext);
-}
-
-/*
- * Is the CPU corresponding to the specified rcu_data structure online
- * from RCU's perspective? This perspective is given by that structure's
- * ->qsmaskinitnext field rather than by the global cpu_online_mask.
- */
-static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
-{
-        return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
-}
-
 /*
  * Return true if an RCU grace period is in progress. The READ_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
@@ -734,46 +715,6 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
         smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 }
 
-#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
-
-/*
- * Is the current CPU online as far as RCU is concerned?
- *
- * Disable preemption to avoid false positives that could otherwise
- * happen due to the current CPU number being sampled, this task being
- * preempted, its old CPU being taken offline, resuming on some other CPU,
- * then determining that its old CPU is now offline.
- *
- * Disable checking if in an NMI handler because we cannot safely
- * report errors from NMI handlers anyway. In addition, it is OK to use
- * RCU on an offline processor during initial boot, hence the check for
- * rcu_scheduler_fully_active.
- */
-bool rcu_lockdep_current_cpu_online(void)
-{
-        struct rcu_data *rdp;
-        bool ret = false;
-
-        if (in_nmi() || !rcu_scheduler_fully_active)
-                return true;
-        preempt_disable_notrace();
-        rdp = this_cpu_ptr(&rcu_data);
-        /*
-         * Strictly, we care here about the case where the current CPU is
-         * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
-         * not being up to date. So arch_spin_is_locked() might have a
-         * false positive if it's held by some *other* CPU, but that's
-         * OK because that just means a false *negative* on the warning.
-         */
-        if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
-                ret = true;
-        preempt_enable_notrace();
-        return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
-
-#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
-
 /*
  * When trying to report a quiescent state on behalf of some other CPU,
  * it is our responsibility to check for and handle potential overflow
@@ -1350,13 +1291,6 @@ static void rcu_strict_gp_boundary(void *unused)
         invoke_rcu_core();
 }
 
-// Has rcu_init() been invoked? This is used (for example) to determine
-// whether spinlocks may be acquired safely.
-static bool rcu_init_invoked(void)
-{
-        return !!rcu_state.n_online_cpus;
-}
-
 // Make the polled API aware of the beginning of a grace period.
 static void rcu_poll_gp_seq_start(unsigned long *snap)
 {
@@ -2091,92 +2025,6 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
         rcu_report_qs_rdp(rdp);
 }
 
-/*
- * Near the end of the offline process. Trace the fact that this CPU
- * is going offline.
- */
-int rcutree_dying_cpu(unsigned int cpu)
-{
-        bool blkd;
-        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-        struct rcu_node *rnp = rdp->mynode;
-
-        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-                return 0;
-
-        blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
-        trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
-                               blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
-        return 0;
-}
-
-/*
- * All CPUs for the specified rcu_node structure have gone offline,
- * and all tasks that were preempted within an RCU read-side critical
- * section while running on one of those CPUs have since exited their RCU
- * read-side critical section. Some other CPU is reporting this fact with
- * the specified rcu_node structure's ->lock held and interrupts disabled.
- * This function therefore goes up the tree of rcu_node structures,
- * clearing the corresponding bits in the ->qsmaskinit fields. Note that
- * the leaf rcu_node structure's ->qsmaskinit field has already been
- * updated.
- *
- * This function does check that the specified rcu_node structure has
- * all CPUs offline and no blocked tasks, so it is OK to invoke it
- * prematurely. That said, invoking it after the fact will cost you
- * a needless lock acquisition. So once it has done its work, don't
- * invoke it again.
- */
-static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
-{
-        long mask;
-        struct rcu_node *rnp = rnp_leaf;
-
-        raw_lockdep_assert_held_rcu_node(rnp_leaf);
-        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
-            WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
-            WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
-                return;
-        for (;;) {
-                mask = rnp->grpmask;
-                rnp = rnp->parent;
-                if (!rnp)
-                        break;
-                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-                rnp->qsmaskinit &= ~mask;
-                /* Between grace periods, so better already be zero! */
-                WARN_ON_ONCE(rnp->qsmask);
-                if (rnp->qsmaskinit) {
-                        raw_spin_unlock_rcu_node(rnp);
-                        /* irqs remain disabled. */
-                        return;
-                }
-                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
-        }
-}
-
-/*
- * The CPU has been completely removed, and some other CPU is reporting
- * this fact from process context. Do the remainder of the cleanup.
- * There can only be one CPU hotplug operation at a time, so no need for
- * explicit locking.
- */
-int rcutree_dead_cpu(unsigned int cpu)
-{
-        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
-
-        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-                return 0;
-
-        WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
-        /* Adjust any no-longer-needed kthreads. */
-        rcu_boost_kthread_setaffinity(rnp, -1);
-        // Stop-machine done, so allow nohz_full to disable tick.
-        tick_dep_clear(TICK_DEP_BIT_RCU);
-        return 0;
-}
-
 /*
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period. Throttle as specified by rdp->blimit.
@@ -4079,6 +3927,160 @@ retry:
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
+/*
+ * Compute the mask of online CPUs for the specified rcu_node structure.
+ * This will not be stable unless the rcu_node structure's ->lock is
+ * held, but the bit corresponding to the current CPU will be stable
+ * in most contexts.
+ */
+static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
+{
+        return READ_ONCE(rnp->qsmaskinitnext);
+}
+
+/*
+ * Is the CPU corresponding to the specified rcu_data structure online
+ * from RCU's perspective? This perspective is given by that structure's
+ * ->qsmaskinitnext field rather than by the global cpu_online_mask.
+ */
+static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
+{
+        return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
+}
+
+#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
+
+/*
+ * Is the current CPU online as far as RCU is concerned?
+ *
+ * Disable preemption to avoid false positives that could otherwise
+ * happen due to the current CPU number being sampled, this task being
+ * preempted, its old CPU being taken offline, resuming on some other CPU,
+ * then determining that its old CPU is now offline.
+ *
+ * Disable checking if in an NMI handler because we cannot safely
+ * report errors from NMI handlers anyway. In addition, it is OK to use
+ * RCU on an offline processor during initial boot, hence the check for
+ * rcu_scheduler_fully_active.
+ */
+bool rcu_lockdep_current_cpu_online(void)
+{
+        struct rcu_data *rdp;
+        bool ret = false;
+
+        if (in_nmi() || !rcu_scheduler_fully_active)
+                return true;
+        preempt_disable_notrace();
+        rdp = this_cpu_ptr(&rcu_data);
+        /*
+         * Strictly, we care here about the case where the current CPU is
+         * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
+         * not being up to date. So arch_spin_is_locked() might have a
+         * false positive if it's held by some *other* CPU, but that's
+         * OK because that just means a false *negative* on the warning.
+         */
+        if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
+                ret = true;
+        preempt_enable_notrace();
+        return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
+
+#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
+
+// Has rcu_init() been invoked? This is used (for example) to determine
+// whether spinlocks may be acquired safely.
+static bool rcu_init_invoked(void)
+{
+        return !!rcu_state.n_online_cpus;
+}
+
+/*
+ * Near the end of the offline process. Trace the fact that this CPU
+ * is going offline.
+ */
+int rcutree_dying_cpu(unsigned int cpu)
+{
+        bool blkd;
+        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+        struct rcu_node *rnp = rdp->mynode;
+
+        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+                return 0;
+
+        blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
+        trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
+                               blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
+        return 0;
+}
+
+/*
+ * All CPUs for the specified rcu_node structure have gone offline,
+ * and all tasks that were preempted within an RCU read-side critical
+ * section while running on one of those CPUs have since exited their RCU
+ * read-side critical section. Some other CPU is reporting this fact with
+ * the specified rcu_node structure's ->lock held and interrupts disabled.
+ * This function therefore goes up the tree of rcu_node structures,
+ * clearing the corresponding bits in the ->qsmaskinit fields. Note that
+ * the leaf rcu_node structure's ->qsmaskinit field has already been
+ * updated.
+ *
+ * This function does check that the specified rcu_node structure has
+ * all CPUs offline and no blocked tasks, so it is OK to invoke it
+ * prematurely. That said, invoking it after the fact will cost you
+ * a needless lock acquisition. So once it has done its work, don't
+ * invoke it again.
+ */
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+        long mask;
+        struct rcu_node *rnp = rnp_leaf;
+
+        raw_lockdep_assert_held_rcu_node(rnp_leaf);
+        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
+            WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
+            WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
+                return;
+        for (;;) {
+                mask = rnp->grpmask;
+                rnp = rnp->parent;
+                if (!rnp)
+                        break;
+                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+                rnp->qsmaskinit &= ~mask;
+                /* Between grace periods, so better already be zero! */
+                WARN_ON_ONCE(rnp->qsmask);
+                if (rnp->qsmaskinit) {
+                        raw_spin_unlock_rcu_node(rnp);
+                        /* irqs remain disabled. */
+                        return;
+                }
+                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+        }
+}
+
+/*
+ * The CPU has been completely removed, and some other CPU is reporting
+ * this fact from process context. Do the remainder of the cleanup.
+ * There can only be one CPU hotplug operation at a time, so no need for
+ * explicit locking.
+ */
+int rcutree_dead_cpu(unsigned int cpu)
+{
+        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
+
+        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+                return 0;
+
+        WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
+        /* Adjust any no-longer-needed kthreads. */
+        rcu_boost_kthread_setaffinity(rnp, -1);
+        // Stop-machine done, so allow nohz_full to disable tick.
+        tick_dep_clear(TICK_DEP_BIT_RCU);
+        return 0;
+}
+
 /*
  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
  * first CPU in a given leaf rcu_node structure coming online. The caller
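The moved rcu_rdp_cpu_online()/rcu_rnp_online_cpus() pair boils down to a per-CPU bit test against the leaf node's mask of online CPUs. A minimal standalone sketch of that idiom follows; it uses hypothetical node/data stand-in types in plain C and omits the kernel's READ_ONCE() and locking concerns, so it illustrates the bitmask logic only, not the actual kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's rcu_node/rcu_data pair. */
    struct node {
            unsigned long qsmaskinitnext;   /* one bit per online CPU in this leaf */
    };

    struct data {
            unsigned long grpmask;          /* this CPU's bit within its leaf node */
            struct node *mynode;
    };

    /* Same shape as the rcu_rdp_cpu_online() test: a CPU is online from
     * this perspective iff its bit is set in its leaf node's mask. */
    static bool cpu_online_in_node(const struct data *d)
    {
            return !!(d->grpmask & d->mynode->qsmaskinitnext);
    }

    int main(void)
    {
            struct node leaf = { .qsmaskinitnext = 0x5 };   /* CPUs 0 and 2 online */
            struct data cpu1 = { .grpmask = 1UL << 1, .mynode = &leaf };
            struct data cpu2 = { .grpmask = 1UL << 2, .mynode = &leaf };

            printf("cpu1 online? %d\n", cpu_online_in_node(&cpu1));   /* prints 0 */
            printf("cpu2 online? %d\n", cpu_online_in_node(&cpu2));   /* prints 1 */
            return 0;
    }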
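Similarly, rcu_cleanup_dead_rnp() walks from a leaf toward the root, clearing the departing subtree's bit in each ancestor and stopping at the first ancestor that still has other live children. A hedged sketch of just that walk (hypothetical tnode type; the kernel's locking, WARN_ON_ONCE() checks, and CONFIG_HOTPLUG_CPU gating are deliberately omitted):

    #include <stdio.h>

    /* Hypothetical tree node; stands in for the kernel's rcu_node. */
    struct tnode {
            struct tnode *parent;
            unsigned long grpmask;          /* this node's bit in parent->qsmaskinit */
            unsigned long qsmaskinit;       /* bits of children with online CPUs */
    };

    /* Mirrors the loop in rcu_cleanup_dead_rnp(): clear our bit in each
     * ancestor, stopping once an ancestor still has other bits set. */
    static void clear_upward(struct tnode *leaf)
    {
            unsigned long mask;
            struct tnode *rnp = leaf;

            for (;;) {
                    mask = rnp->grpmask;
                    rnp = rnp->parent;
                    if (!rnp)
                            break;                  /* cleared up to the root */
                    rnp->qsmaskinit &= ~mask;
                    if (rnp->qsmaskinit)
                            return;                 /* another subtree still alive */
            }
    }

    int main(void)
    {
            struct tnode root  = { .parent = NULL,  .grpmask = 0,   .qsmaskinit = 0x3 };
            struct tnode leaf0 = { .parent = &root, .grpmask = 0x1, .qsmaskinit = 0 };
            struct tnode leaf1 = { .parent = &root, .grpmask = 0x2, .qsmaskinit = 0 };

            clear_upward(&leaf0);   /* root keeps leaf1's bit; the walk stops there */
            printf("root mask after leaf0 gone: %#lx\n", root.qsmaskinit);  /* 0x2 */
            clear_upward(&leaf1);   /* now the root's mask empties too */
            printf("root mask after leaf1 gone: %#lx\n", root.qsmaskinit);  /* 0 */
            return 0;
    }

The early-return on a nonzero ancestor mask is the point of the design: the walk touches only as many levels as actually emptied, so offlining one CPU among many does a single mask update rather than a full root-ward traversal.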