mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-30 08:02:30 +00:00
rcu: Abstract the dynticks snapshot operation
This commit is the second step towards full abstraction of all accesses to the ->dynticks counter, implementing the previously open-coded atomic add of zero in a new rcu_dynticks_snap() function. This abstraction will ease changes to the ->dynticks counter operation. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Reviewed-by: Josh Triplett <josh@joshtriplett.org>
This commit is contained in:
parent
6563de9d6f
commit
8b2f63ab05
2 changed files with 18 additions and 7 deletions
|
@ -281,6 +281,17 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
|
||||||
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
|
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Snapshot the ->dynticks counter with full ordering so as to allow
|
||||||
|
* stable comparison of this counter with past and future snapshots.
|
||||||
|
*/
|
||||||
|
static int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
|
||||||
|
{
|
||||||
|
int snap = atomic_add_return(0, &rdtp->dynticks);
|
||||||
|
|
||||||
|
return snap;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Do a double-increment of the ->dynticks counter to emulate a
|
* Do a double-increment of the ->dynticks counter to emulate a
|
||||||
* momentary idle-CPU quiescent state.
|
* momentary idle-CPU quiescent state.
|
||||||
|
@ -1049,7 +1060,9 @@ void rcu_nmi_exit(void)
|
||||||
*/
|
*/
|
||||||
bool notrace __rcu_is_watching(void)
|
bool notrace __rcu_is_watching(void)
|
||||||
{
|
{
|
||||||
return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
|
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
|
||||||
|
|
||||||
|
return atomic_read(&rdtp->dynticks) & 0x1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -1132,7 +1145,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
|
||||||
static int dyntick_save_progress_counter(struct rcu_data *rdp,
|
static int dyntick_save_progress_counter(struct rcu_data *rdp,
|
||||||
bool *isidle, unsigned long *maxj)
|
bool *isidle, unsigned long *maxj)
|
||||||
{
|
{
|
||||||
rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
|
rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
|
||||||
rcu_sysidle_check_cpu(rdp, isidle, maxj);
|
rcu_sysidle_check_cpu(rdp, isidle, maxj);
|
||||||
if ((rdp->dynticks_snap & 0x1) == 0) {
|
if ((rdp->dynticks_snap & 0x1) == 0) {
|
||||||
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
|
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
|
||||||
|
@ -1157,7 +1170,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
|
||||||
int *rcrmp;
|
int *rcrmp;
|
||||||
unsigned int snap;
|
unsigned int snap;
|
||||||
|
|
||||||
curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
|
curr = (unsigned int)rcu_dynticks_snap(rdp->dynticks);
|
||||||
snap = (unsigned int)rdp->dynticks_snap;
|
snap = (unsigned int)rdp->dynticks_snap;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -356,10 +356,9 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
|
||||||
mask_ofl_test = 0;
|
mask_ofl_test = 0;
|
||||||
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
||||||
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
||||||
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
|
|
||||||
|
|
||||||
rdp->exp_dynticks_snap =
|
rdp->exp_dynticks_snap =
|
||||||
atomic_add_return(0, &rdtp->dynticks);
|
rcu_dynticks_snap(rdp->dynticks);
|
||||||
if (raw_smp_processor_id() == cpu ||
|
if (raw_smp_processor_id() == cpu ||
|
||||||
!(rdp->exp_dynticks_snap & 0x1) ||
|
!(rdp->exp_dynticks_snap & 0x1) ||
|
||||||
!(rnp->qsmaskinitnext & rdp->grpmask))
|
!(rnp->qsmaskinitnext & rdp->grpmask))
|
||||||
|
@ -380,12 +379,11 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
|
||||||
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
||||||
unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
|
unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
|
||||||
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
|
||||||
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
|
|
||||||
|
|
||||||
if (!(mask_ofl_ipi & mask))
|
if (!(mask_ofl_ipi & mask))
|
||||||
continue;
|
continue;
|
||||||
retry_ipi:
|
retry_ipi:
|
||||||
if (atomic_add_return(0, &rdtp->dynticks) !=
|
if (rcu_dynticks_snap(rdp->dynticks) !=
|
||||||
rdp->exp_dynticks_snap) {
|
rdp->exp_dynticks_snap) {
|
||||||
mask_ofl_test |= mask;
|
mask_ofl_test |= mask;
|
||||||
continue;
|
continue;
|
||||||
|
|
Loading…
Reference in a new issue