mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-15 15:15:47 +00:00
sched/rt: Keep period timer ticking when rt throttling is active
When a runqueue is throttled we cannot disable the period timer because that timer is the only way to undo the throttling.

We got stale throttling entries when a rq was throttled and then the global sysctl was disabled, which stopped the timer.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
[ Added changelog ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-nuj34q52p6ro7szapuz84i0v@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
7e4d960993
commit
42c62a589f
1 changed file with 8 additions and 5 deletions
|
@@ -778,12 +778,9 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
-	int i, idle = 1;
+	int i, idle = 1, throttled = 0;
 	const struct cpumask *span;
 
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
 	span = sched_rt_period_mask();
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 
@@ -818,12 +815,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (!rt_rq_throttled(rt_rq))
 				enqueue = 1;
 		}
+		if (rt_rq->rt_throttled)
+			throttled = 1;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
+		return 1;
+
 	return idle;
 }
 
@@ -884,7 +886,8 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
-	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+	schedstat_set(curr->se.statistics.exec_max,
+		      max(curr->se.statistics.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
Loading…
Reference in a new issue