Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
rcu: Switch urgent quiescent-state requests to rcu_data structure
This commit removes ->rcu_need_heavy_qs and ->rcu_urgent_qs from the
rcu_dynticks structure and updates the code to access them from the
rcu_data structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent c458a89e96
commit 2dba13f0b6

4 changed files with 14 additions and 16 deletions
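Before the per-file hunks below, the shape of the change in miniature: the two urgency flags leave the per-CPU rcu_dynticks structure and move into the per-CPU rcu_data structure, so every accessor is rewritten to name the new home. The following user-space sketch (not part of the diff) models per-CPU variables as plain arrays; all names here are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for per-CPU rcu_data, which now carries both urgency flags. */
struct data_like {
	unsigned long ticks_this_gp;
	bool rcu_need_heavy_qs;	/* "need a heavy quiescent state" */
	bool rcu_urgent_qs;	/* "need any quiescent state, soon" */
};

/* Per-CPU variables modeled as an array indexed by CPU number. */
static struct data_like data_model[NR_CPUS];

/* After the change, urgency requests target the rcu_data-like structure,
 * much as rcu_request_urgent_qs_task() now writes rcu_data.rcu_urgent_qs. */
static void request_urgent_qs(int cpu)
{
	data_model[cpu].rcu_urgent_qs = true; /* kernel uses smp_store_release() */
}

int main(void)
{
	request_urgent_qs(1);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: urgent_qs=%d\n", cpu, data_model[cpu].rcu_urgent_qs);
	return 0;
}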
kernel/rcu/tree.c

@@ -362,7 +362,7 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void)
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	int special;
 
-	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
+	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
 	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
 	/* It is illegal to call this from idle state. */
 	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
@@ -928,7 +928,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
 	cpu = task_cpu(t);
 	if (!task_curr(t))
 		return; /* This task is not running on that CPU. */
-	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 }
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
@@ -1081,8 +1081,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * is set way high.
 	 */
 	jtsq = READ_ONCE(jiffies_to_sched_qs);
-	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
+	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
+	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
 	if (!READ_ONCE(*rnhqp) &&
 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
 	     time_after(jiffies, rcu_state.jiffies_resched))) {
@@ -2499,13 +2499,13 @@ void rcu_check_callbacks(int user)
 	trace_rcu_utilization(TPS("Start scheduler-tick"));
 	raw_cpu_inc(rcu_data.ticks_this_gp);
 	/* The load-acquire pairs with the store-release setting to true. */
-	if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
 		/* Idle and userspace execution already are quiescent states. */
 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
 			set_tsk_need_resched(current);
 			set_preempt_need_resched();
 		}
-		__this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	}
 	rcu_flavor_check_callbacks(user);
 	if (rcu_pending())
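The comment in the hunk above names the invariant the rename must preserve: the store-release in rcu_request_urgent_qs_task() (and in sync_sched_exp_handler() below) pairs with this load-acquire, so everything written before the flag was set is visible once the flag is seen. A self-contained C11 sketch of that pairing follows, with illustrative names and plain atomics standing in for smp_store_release()/smp_load_acquire(); it assumes a libc providing C11 <threads.h>, e.g. glibc 2.28+.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

static _Atomic bool urgent_qs;	/* models one CPU's rcu_data.rcu_urgent_qs */
static int payload;		/* stands in for state published before the flag */

/* Requester side: publish state, then release-store the flag. */
static int requester(void *arg)
{
	(void)arg;
	payload = 42;
	atomic_store_explicit(&urgent_qs, true,
			      memory_order_release);	/* ~smp_store_release() */
	return 0;
}

/* Tick side: acquire-load the flag, then read the published state. */
static int tick(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&urgent_qs,
				     memory_order_acquire))	/* ~smp_load_acquire() */
		;
	printf("saw urgent_qs, payload=%d\n", payload);	/* guaranteed to be 42 */
	return 0;
}

int main(void)
{
	thrd_t a, b;
	thrd_create(&a, requester, NULL);
	thrd_create(&b, tick, NULL);
	thrd_join(a, NULL);
	thrd_join(b, NULL);
	return 0;
}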
kernel/rcu/tree.h

@@ -41,8 +41,6 @@ struct rcu_dynticks {
 	long dynticks_nesting;      /* Track process nesting level. */
 	long dynticks_nmi_nesting;  /* Track irq/NMI nesting level. */
 	atomic_t dynticks;          /* Even value for idle, else odd. */
-	bool rcu_need_heavy_qs;     /* GP old, need heavy quiescent state. */
-	bool rcu_urgent_qs;         /* GP old need light quiescent state. */
 };
 
 /* Communicate arguments to a workqueue handler. */
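This rendering shows only the removal side of the tree.h change; the counterpart lines that add the two flags to struct rcu_data are not visible here (they account for part of the 14 additions). Presumably they look like the sketch below, which is a reconstruction from the commit message, not the verbatim hunk.

#include <stdbool.h>

/* Reconstruction (not verbatim) of the flags' presumed new home. */
struct rcu_data_sketch {
	/* ... existing per-CPU fields ... */
	bool rcu_need_heavy_qs;	/* GP old, need heavy quiescent state. */
	bool rcu_urgent_qs;	/* GP old need light quiescent state. */
	/* ... */
};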
kernel/rcu/tree_exp.h

@@ -780,7 +780,7 @@ static void sync_sched_exp_handler(void *unused)
 	}
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 	/* Store .exp before .rcu_urgent_qs. */
-	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
+	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
 	set_tsk_need_resched(current);
 	set_preempt_need_resched();
 }
kernel/rcu/tree_plugin.h

@@ -967,17 +967,17 @@ void rcu_all_qs(void)
 {
 	unsigned long flags;
 
-	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
+	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
 		return;
 	preempt_disable();
 	/* Load rcu_urgent_qs before other flags. */
-	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
 		preempt_enable();
 		return;
 	}
-	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
+	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
 		local_irq_save(flags);
 		rcu_momentary_dyntick_idle();
 		local_irq_restore(flags);
@@ -997,10 +997,10 @@ void rcu_note_context_switch(bool preempt)
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_qs();
 	/* Load rcu_urgent_qs before other flags. */
-	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
+	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
 		goto out;
-	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
-	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
+	this_cpu_write(rcu_data.rcu_urgent_qs, false);
+	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
 	if (!preempt)
 		rcu_tasks_qs(current);
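rcu_all_qs() above shows the full idiom the rename must leave intact: a cheap racy read filters the common case, a load-acquire confirms the request, the flag is cleared, and only then is the heavyweight quiescent state considered. Below is a user-space C11 sketch of that control flow, with illustrative names and plain atomics standing in for raw_cpu_read(), smp_load_acquire(), and this_cpu_write().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool urgent_qs;		/* models rcu_data.rcu_urgent_qs */
static _Atomic bool need_heavy_qs;	/* models rcu_data.rcu_need_heavy_qs */

/* Stand-in for rcu_momentary_dyntick_idle(). */
static void momentary_heavy_qs(void)
{
	printf("heavy quiescent state reported\n");
}

static void all_qs_sketch(void)
{
	/* Fast path: racy read; missing one request is fine (~raw_cpu_read()). */
	if (!atomic_load_explicit(&urgent_qs, memory_order_relaxed))
		return;
	/* Confirm with acquire so later reads see the requester's stores. */
	if (!atomic_load_explicit(&urgent_qs, memory_order_acquire))
		return;
	/* Clear the request before acting on it (~this_cpu_write()). */
	atomic_store_explicit(&urgent_qs, false, memory_order_relaxed);
	if (atomic_load_explicit(&need_heavy_qs, memory_order_relaxed))
		momentary_heavy_qs();
}

int main(void)
{
	atomic_store(&urgent_qs, true);
	atomic_store(&need_heavy_qs, true);
	all_qs_sketch();
	return 0;
}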