Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two fixes:

   - a SCHED_DEADLINE task selection fix

   - a sched/numa related lockdep splat fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Check for stop task appearance when balancing happens
  sched/numa: Fix task_numa_free() lockdep splat
commit 8f98f6f5d6

4 changed files with 32 additions and 11 deletions
kernel/sched/deadline.c

@@ -1021,8 +1021,17 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
 	dl_rq = &rq->dl;
 
-	if (need_pull_dl_task(rq, prev))
+	if (need_pull_dl_task(rq, prev)) {
 		pull_dl_task(rq);
+		/*
+		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
+		 * means a stop task can slip in, in which case we need to
+		 * re-start task selection.
+		 */
+		if (rq->stop && rq->stop->on_rq)
+			return RETRY_TASK;
+	}
+
 	/*
 	 * When prev is DL, we may throttle it in put_prev_task().
 	 * So, we update time before we check for dl_nr_running.
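RETRY_TASK is not handled inside the deadline class itself; it is consumed by the class-agnostic pick_next_task() loop in kernel/sched/core.c, which restarts the priority-ordered walk over scheduling classes so a stop task that slipped in while rq->lock was dropped gets picked first. A simplified, paraphrased sketch of that loop (the fair-class fast path is omitted):

/*
 * Simplified sketch (not verbatim) of how kernel/sched/core.c consumes
 * RETRY_TASK around this time: if a class's ->pick_next_task() reports
 * RETRY_TASK, the whole priority-ordered class walk is restarted.
 */
static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

again:
	for_each_class(class) {		/* stop, dl, rt, fair, idle */
		p = class->pick_next_task(rq, prev);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again;	/* start over from the stop class */
			return p;
		}
	}

	BUG();	/* the idle class always has a runnable task */
}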
kernel/sched/fair.c

@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct task_struct *p)
 	/* If the task is part of a group prevent parallel updates to group stats */
 	if (p->numa_group) {
 		group_lock = &p->numa_group->lock;
-		spin_lock(group_lock);
+		spin_lock_irq(group_lock);
 	}
 
 	/* Find the node with the highest number of faults */
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct task_struct *p)
 			}
 		}
 
-		spin_unlock(group_lock);
+		spin_unlock_irq(group_lock);
 	}
 
 	/* Preferred node as the node with the most faults */
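The switch to the _irq variants is the lockdep fix itself: once any path acquires numa_group->lock with interrupts disabled, every acquisition must do so, or lockdep reports an inconsistent (IRQ-safe vs. IRQ-unsafe) lock state. A minimal illustration of that rule, not taken from the kernel tree (the lock and function names here are made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(group_lock_example);	/* stands in for numa_group->lock */

/* Ordinary process context: disable interrupts explicitly for the section. */
static void stats_update_process_context(void)
{
	spin_lock_irq(&group_lock_example);
	/* ... update shared fault statistics ... */
	spin_unlock_irq(&group_lock_example);
}

/* A path that may run with interrupts already off: preserve the IRQ state. */
static void stats_update_irqs_maybe_off(void)
{
	unsigned long flags;

	spin_lock_irqsave(&group_lock_example, flags);
	/* ... */
	spin_unlock_irqrestore(&group_lock_example, flags);
}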
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	if (!join)
 		return;
 
-	double_lock(&my_grp->lock, &grp->lock);
+	BUG_ON(irqs_disabled());
+	double_lock_irq(&my_grp->lock, &grp->lock);
 
 	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
 		my_grp->faults[i] -= p->numa_faults_memory[i];
@@ -1691,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	grp->nr_tasks++;
 
 	spin_unlock(&my_grp->lock);
-	spin_unlock(&grp->lock);
+	spin_unlock_irq(&grp->lock);
 
 	rcu_assign_pointer(p->numa_group, grp);
 
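Read together with the previous hunk, task_numa_group() now holds a single interrupt-off bracket across both group locks; condensed from the two hunks above:

	BUG_ON(irqs_disabled());			/* precondition: IRQs are on */
	double_lock_irq(&my_grp->lock, &grp->lock);	/* one irq-disable, both locks */

	/* ... move this task's fault counts from my_grp to grp ... */

	spin_unlock(&my_grp->lock);		/* drop first lock, keep IRQs off */
	spin_unlock_irq(&grp->lock);		/* drop second lock, re-enable IRQs */

The BUG_ON(irqs_disabled()) guards the precondition: the closing spin_unlock_irq() re-enables interrupts unconditionally, which would be wrong if the caller had entered with them already disabled.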
@@ -1710,14 +1711,14 @@ void task_numa_free(struct task_struct *p)
 	void *numa_faults = p->numa_faults_memory;
 
 	if (grp) {
-		spin_lock(&grp->lock);
+		spin_lock_irq(&grp->lock);
 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] -= p->numa_faults_memory[i];
 		grp->total_faults -= p->total_numa_faults;
 
 		list_del(&p->numa_entry);
 		grp->nr_tasks--;
-		spin_unlock(&grp->lock);
+		spin_unlock_irq(&grp->lock);
 		rcu_assign_pointer(p->numa_group, NULL);
 		put_numa_group(grp);
 	}
@@ -6727,7 +6728,8 @@ static int idle_balance(struct rq *this_rq)
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-	    (this_rq->dl.dl_nr_running ||
+	    ((this_rq->stop && this_rq->stop->on_rq) ||
+	     this_rq->dl.dl_nr_running ||
 	     (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 		pulled_task = -1;
 
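Setting pulled_task to -1 is how idle_balance() signals its caller that a task from a higher-priority class appeared while rq->lock was dropped; pick_next_task_fair() then translates that into RETRY_TASK. Roughly, paraphrasing the tail of the fair-class pick path of this era:

idle:
	new_tasks = idle_balance(rq);

	/*
	 * idle_balance() releases (and re-acquires) rq->lock, so a task of a
	 * higher-priority class may have appeared; restart the class walk.
	 */
	if (new_tasks < 0)
		return RETRY_TASK;

	if (new_tasks > 0)
		goto again;

	return NULL;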
kernel/sched/rt.c

@@ -1362,10 +1362,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-		 * means a dl task can slip in, in which case we need to
-		 * re-start task selection.
+		 * means a dl or stop task can slip in, in which case we need
+		 * to re-start task selection.
 		 */
-		if (unlikely(rq->dl.dl_nr_running))
+		if (unlikely((rq->stop && rq->stop->on_rq) ||
+			     rq->dl.dl_nr_running))
 			return RETRY_TASK;
 	}
 
kernel/sched/sched.h

@@ -1385,6 +1385,15 @@ static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
+static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+{
+	if (l1 > l2)
+		swap(l1, l2);
+
+	spin_lock_irq(l1);
+	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 {
 	if (l1 > l2)
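Like double_lock(), the new double_lock_irq() sorts the two locks by address before taking them, so any two callers locking the same pair of groups nest them identically and cannot ABBA-deadlock. A hypothetical caller (merge_groups, grp_a and grp_b are illustrative names, not from the tree):

/* Hypothetical example: grp_a/grp_b stand in for two numa_group objects. */
static void merge_groups(struct numa_group *grp_a, struct numa_group *grp_b)
{
	/*
	 * Whether this is called as (a, b) or (b, a), the lower-addressed
	 * lock is taken first, so concurrent callers agree on the nesting
	 * order.
	 */
	double_lock_irq(&grp_a->lock, &grp_b->lock);

	/* ... merge per-node fault statistics ... */

	spin_unlock(&grp_b->lock);
	spin_unlock_irq(&grp_a->lock);	/* last unlock re-enables interrupts */
}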