mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 08:58:07 +00:00
Merge branch 'tip/sched/core' into for-6.12
- Resolve trivial context conflicts from dl_server clearing being moved
  around.

- Add @next to put_prev_task_scx() and @prev to pick_next_task_scx() to
  match sched/core.

- Merge sched_class->switch_class() addition from sched_ext with
  tip/sched/core changes in __pick_next_task().

- Make pick_next_task_scx() call put_prev_task_scx() to emulate the
  previous behavior where sched_class->put_prev_task() was called before
  sched_class->pick_next_task().

While this makes sched_ext build and function, the behavior is not in line
with other sched classes. The follow-up patches will address the
discrepancies and remove sched_class->switch_class().

Signed-off-by: Tejun Heo <tj@kernel.org>
commit d7b01aef9d
9 changed files with 180 additions and 218 deletions
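
The resolution converges on the protocol spelled out in the kernel/sched/sched.h hunk further down: every class exposes a side-effect-free ->pick_task(), the core performs the put_prev/set_next handoff through put_prev_set_next_task(), and ->pick_next_task() survives only as an optional combined fast path. Below is a minimal userspace sketch of that ordering, not kernel code: struct rq/struct task and the two toy classes are invented scaffolding, and only the call sequence and the put_prev_set_next_task() logic follow the diff.

#include <stdio.h>
#include <stddef.h>

struct rq;
struct task;

struct sched_class {
	/* Side-effect free: report the class's best candidate, if any. */
	struct task *(*pick_task)(struct rq *rq);
	/*
	 * Optional combined fast path; when present it must behave like
	 * pick_task() + put_prev_task(prev) + set_next_task(next, true).
	 */
	struct task *(*pick_next_task)(struct rq *rq, struct task *prev);
	void (*put_prev_task)(struct rq *rq, struct task *prev, struct task *next);
	void (*set_next_task)(struct rq *rq, struct task *next, int first);
};

struct task {
	const char *name;
	const struct sched_class *sched_class;
};

struct rq {
	struct task *curr;
	struct task *queued;	/* at most one runnable "fair" task */
	struct task *idle;
};

static struct task *fair_pick_task(struct rq *rq)
{
	return rq->queued;	/* may be NULL: fall through to idle */
}

static struct task *idle_pick_task(struct rq *rq)
{
	return rq->idle;	/* the idle class always has a runnable task */
}

static void put_prev(struct rq *rq, struct task *prev, struct task *next)
{
	(void)rq;
	printf("put_prev_task(%s), next=%s\n", prev->name, next ? next->name : "NULL");
}

static void set_next(struct rq *rq, struct task *next, int first)
{
	rq->curr = next;
	printf("set_next_task(%s, first=%d)\n", next->name, first);
}

static const struct sched_class fair_class = {
	.pick_task = fair_pick_task, .put_prev_task = put_prev, .set_next_task = set_next,
};
static const struct sched_class idle_class = {
	.pick_task = idle_pick_task, .put_prev_task = put_prev, .set_next_task = set_next,
};

/* Mirrors put_prev_set_next_task() from the kernel/sched/sched.h hunk. */
static void put_prev_set_next_task(struct rq *rq, struct task *prev, struct task *next)
{
	if (next == prev)
		return;
	prev->sched_class->put_prev_task(rq, prev, next);	/* @next visible here */
	next->sched_class->set_next_task(rq, next, 1);
}

/* The slow-path loop of __pick_next_task() after the merge, reduced. */
static struct task *pick_next(struct rq *rq, struct task *prev)
{
	static const struct sched_class *classes[] = { &fair_class, &idle_class };
	size_t i;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct task *p = classes[i]->pick_task(rq);

		if (p) {
			put_prev_set_next_task(rq, prev, p);
			return p;
		}
	}
	return NULL;	/* unreachable: idle always picks */
}

int main(void)
{
	struct task idle = { "idle", &idle_class };
	struct task a = { "A", &fair_class }, b = { "B", &fair_class };
	struct rq rq = { .curr = &a, .queued = &b, .idle = &idle };

	pick_next(&rq, rq.curr);	/* A -> B within the fair class */
	rq.queued = NULL;
	pick_next(&rq, rq.curr);	/* B -> idle across classes */
	return 0;
}

The point of the design is visible in the second pick: the previous task's class sees the incoming @next when it is put, which is what the @next/@prev parameter additions in this merge enable.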
include/linux/sched.h

@@ -694,7 +694,6 @@ struct sched_dl_entity {
 	 */
 	struct rq			*rq;
 	dl_server_has_tasks_f		server_has_tasks;
-	dl_server_pick_f		server_pick_next;
 	dl_server_pick_f		server_pick_task;
 
 #ifdef CONFIG_RT_MUTEXES
kernel/sched/core.c

@@ -3690,8 +3690,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 		rq->idle_stamp = 0;
 	}
 #endif
-
-	p->dl_server = NULL;
 }
 
 /*
@@ -5895,7 +5893,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 	schedstat_inc(this_rq()->sched_count);
 }
 
-static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
+static void prev_balance(struct rq *rq, struct task_struct *prev,
 				  struct rq_flags *rf)
 {
 	const struct sched_class *start_class = prev->sched_class;
@@ -5923,16 +5921,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 		if (class->balance && class->balance(rq, prev, rf))
 			break;
 	}
-
-	put_prev_task(rq, prev);
-
-	/*
-	 * We've updated @prev and no longer need the server link, clear it.
-	 * Must be done before ->pick_next_task() because that can (re)set
-	 * ->dl_server.
-	 */
-	if (prev->dl_server)
-		prev->dl_server = NULL;
 }
 
 /*
@@ -5944,6 +5932,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	const struct sched_class *class;
 	struct task_struct *p;
 
+	rq->dl_server = NULL;
+
 	if (scx_enabled())
 		goto restart;
@@ -5962,32 +5952,19 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		/* Assume the next prioritized class is idle_sched_class */
 		if (!p) {
-			put_prev_task(rq, prev);
-			p = pick_next_task_idle(rq);
+			p = pick_task_idle(rq);
+			put_prev_set_next_task(rq, prev, p);
 		}
 
-		/*
-		 * This is a normal CFS pick, but the previous could be a DL pick.
-		 * Clear it as previous is no longer picked.
-		 */
-		if (prev->dl_server)
-			prev->dl_server = NULL;
-
-		/*
-		 * This is the fast path; it cannot be a DL server pick;
-		 * therefore even if @p == @prev, ->dl_server must be NULL.
-		 */
-		if (p->dl_server)
-			p->dl_server = NULL;
-
 		return p;
 	}
 
 restart:
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
 
 	for_each_active_class(class) {
-		p = class->pick_next_task(rq);
+		if (class->pick_next_task) {
+			p = class->pick_next_task(rq, prev);
 			if (p) {
 				const struct sched_class *prev_class = prev->sched_class;
@@ -5995,6 +5972,18 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 					prev_class->switch_class(rq, p);
 				return p;
 			}
+		} else {
+			p = class->pick_task(rq);
+			if (p) {
+				const struct sched_class *prev_class = prev->sched_class;
+
+				put_prev_set_next_task(rq, prev, p);
+
+				if (class != prev_class && prev_class->switch_class)
+					prev_class->switch_class(rq, p);
+				return p;
+			}
+		}
 	}
 
 	BUG(); /* The idle class should always have a runnable task. */
@@ -6024,6 +6013,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 	const struct sched_class *class;
 	struct task_struct *p;
 
+	rq->dl_server = NULL;
+
 	for_each_active_class(class) {
 		p = class->pick_task(rq);
 		if (p)
@@ -6062,6 +6053,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * another cpu during offline.
 	 */
 	rq->core_pick = NULL;
+	rq->core_dl_server = NULL;
 	return __pick_next_task(rq, prev, rf);
 }
 
@@ -6080,16 +6072,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
 		next = rq->core_pick;
-		if (next != prev) {
-			put_prev_task(rq, prev);
-			set_next_task(rq, next);
-		}
-
+		rq->dl_server = rq->core_dl_server;
 		rq->core_pick = NULL;
-		goto out;
+		rq->core_dl_server = NULL;
+		goto out_set_next;
 	}
 
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
 
 	smt_mask = cpu_smt_mask(cpu);
 	need_sync = !!rq->core->core_cookie;
@@ -6130,6 +6119,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		next = pick_task(rq);
 		if (!next->core_cookie) {
 			rq->core_pick = NULL;
+			rq->core_dl_server = NULL;
 			/*
 			 * For robustness, update the min_vruntime_fi for
 			 * unconstrained picks as well.
@@ -6157,7 +6147,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
 			update_rq_clock(rq_i);
 
-		p = rq_i->core_pick = pick_task(rq_i);
+		rq_i->core_pick = p = pick_task(rq_i);
+		rq_i->core_dl_server = rq_i->dl_server;
+
 		if (!max || prio_less(max, p, fi_before))
 			max = p;
 	}
@@ -6181,6 +6173,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		}
 
 		rq_i->core_pick = p;
+		rq_i->core_dl_server = NULL;
 
 		if (p == rq_i->idle) {
 			if (rq_i->nr_running) {
@@ -6241,6 +6234,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		if (i == cpu) {
 			rq_i->core_pick = NULL;
+			rq_i->core_dl_server = NULL;
 			continue;
 		}
 
@@ -6249,6 +6243,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		if (rq_i->curr == rq_i->core_pick) {
 			rq_i->core_pick = NULL;
+			rq_i->core_dl_server = NULL;
 			continue;
 		}
 
@@ -6256,8 +6251,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	set_next_task(rq, next);
-out:
+	put_prev_set_next_task(rq, prev, next);
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
 
@@ -8487,6 +8481,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SCHED_CORE
 		rq->core = rq;
 		rq->core_pick = NULL;
+		rq->core_dl_server = NULL;
 		rq->core_enabled = 0;
 		rq->core_tree = RB_ROOT;
 		rq->core_forceidle_count = 0;
kernel/sched/deadline.c

@@ -1665,12 +1665,10 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
 
 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
 		    dl_server_has_tasks_f has_tasks,
-		    dl_server_pick_f pick_next,
 		    dl_server_pick_f pick_task)
 {
 	dl_se->rq = rq;
 	dl_se->server_has_tasks = has_tasks;
-	dl_se->server_pick_next = pick_next;
 	dl_se->server_pick_task = pick_task;
 }
 
@@ -1896,45 +1894,39 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
 	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
 }
 
-static inline struct sched_statistics *
+static __always_inline struct sched_statistics *
 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
 {
+	if (!schedstat_enabled())
+		return NULL;
+
+	if (dl_server(dl_se))
+		return NULL;
+
 	return &dl_task_of(dl_se)->stats;
 }
 
 static inline void
 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
 {
-	struct sched_statistics *stats;
+	struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
 
-	if (!schedstat_enabled())
-		return;
-
-	stats = __schedstats_from_dl_se(dl_se);
-	__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+	if (stats)
+		__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
 }
 
 static inline void
 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
 {
-	struct sched_statistics *stats;
+	struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
 
-	if (!schedstat_enabled())
-		return;
-
-	stats = __schedstats_from_dl_se(dl_se);
-	__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+	if (stats)
+		__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
 }
 
 static inline void
 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
 {
-	struct sched_statistics *stats;
+	struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
 
-	if (!schedstat_enabled())
-		return;
-
-	stats = __schedstats_from_dl_se(dl_se);
-	__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+	if (stats)
+		__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
 }
 
@@ -2392,6 +2384,9 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
 
 	deadline_queue_push_tasks(rq);
+
+	if (hrtick_enabled(rq))
+		start_hrtick_dl(rq, &p->dl);
 }
 
 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
@@ -2407,9 +2402,8 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
 /*
  * __pick_next_task_dl - Helper to pick the next -deadline task to run.
  * @rq: The runqueue to pick the next task from.
- * @peek: If true, just peek at the next task. Only relevant for dlserver.
  */
-static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
+static struct task_struct *__pick_task_dl(struct rq *rq)
 {
 	struct sched_dl_entity *dl_se;
 	struct dl_rq *dl_rq = &rq->dl;
@@ -2423,16 +2417,13 @@ static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
 	WARN_ON_ONCE(!dl_se);
 
 	if (dl_server(dl_se)) {
-		if (IS_ENABLED(CONFIG_SMP) && peek)
-			p = dl_se->server_pick_task(dl_se);
-		else
-			p = dl_se->server_pick_next(dl_se);
+		p = dl_se->server_pick_task(dl_se);
 		if (!p) {
 			dl_se->dl_yielded = 1;
 			update_curr_dl_se(rq, dl_se, 0);
 			goto again;
 		}
-		p->dl_server = dl_se;
+		rq->dl_server = dl_se;
 	} else {
 		p = dl_task_of(dl_se);
 	}
@@ -2440,31 +2431,12 @@ static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
 	return p;
 }
 
-#ifdef CONFIG_SMP
 static struct task_struct *pick_task_dl(struct rq *rq)
 {
-	return __pick_next_task_dl(rq, true);
-}
-#endif
-
-static struct task_struct *pick_next_task_dl(struct rq *rq)
-{
-	struct task_struct *p;
-
-	p = __pick_next_task_dl(rq, false);
-	if (!p)
-		return p;
-
-	if (!p->dl_server)
-		set_next_task_dl(rq, p, true);
-
-	if (hrtick_enabled(rq))
-		start_hrtick_dl(rq, &p->dl);
-
-	return p;
+	return __pick_task_dl(rq);
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 	struct dl_rq *dl_rq = &rq->dl;
@@ -3156,13 +3128,12 @@ DEFINE_SCHED_CLASS(dl) = {
 
 	.wakeup_preempt		= wakeup_preempt_dl,
 
-	.pick_next_task		= pick_next_task_dl,
+	.pick_task		= pick_task_dl,
 	.put_prev_task		= put_prev_task_dl,
 	.set_next_task		= set_next_task_dl,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_dl,
-	.pick_task		= pick_task_dl,
 	.select_task_rq		= select_task_rq_dl,
 	.migrate_task_rq	= migrate_task_rq_dl,
 	.set_cpus_allowed	= set_cpus_allowed_dl,
kernel/sched/ext.c

@@ -2719,7 +2719,8 @@ static void process_ddsp_deferred_locals(struct rq *rq)
 	}
 }
 
-static void put_prev_task_scx(struct rq *rq, struct task_struct *p)
+static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
+			      struct task_struct *next)
 {
 	update_curr_scx(rq);
 
@@ -2774,14 +2775,21 @@ static struct task_struct *first_local_task(struct rq *rq)
 			struct task_struct, scx.dsq_list.node);
 }
 
-static struct task_struct *pick_next_task_scx(struct rq *rq)
+static struct task_struct *pick_next_task_scx(struct rq *rq,
+					      struct task_struct *prev)
 {
 	struct task_struct *p;
 
+	if (prev->sched_class == &ext_sched_class)
+		put_prev_task_scx(rq, prev, NULL);
+
 	p = first_local_task(rq);
 	if (!p)
 		return NULL;
 
+	if (prev->sched_class != &ext_sched_class)
+		prev->sched_class->put_prev_task(rq, prev, p);
+
 	set_next_task_scx(rq, p, true);
 
 	if (unlikely(!p->scx.slice)) {
kernel/sched/fair.c

@@ -5457,6 +5457,13 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	se->sched_delayed = 0;
+	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
+		se->vlag = 0;
+}
+
 static bool
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -5532,11 +5539,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 		update_min_vruntime(cfs_rq);
 
-	if (flags & DEQUEUE_DELAYED) {
-		se->sched_delayed = 0;
-		if (sched_feat(DELAY_ZERO) && se->vlag > 0)
-			se->vlag = 0;
-	}
+	if (flags & DEQUEUE_DELAYED)
+		finish_delayed_dequeue_entity(se);
 
 	if (cfs_rq->nr_running == 0)
 		update_idle_cfs_rq_clock_pelt(cfs_rq);
@@ -8746,17 +8750,12 @@ static struct task_struct *pick_task_fair(struct rq *rq)
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
 
-	/*
-	 * This can be called from directly from CFS's ->pick_task() or indirectly
-	 * from DL's ->pick_task when fair server is enabled. In the indirect case,
-	 * DL will set ->dl_server just after this function is called, so its Ok to
-	 * clear. In the direct case, we are picking directly so we must clear it.
-	 */
-	task_of(se)->dl_server = NULL;
-
 	return task_of(se);
 }
 
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+
 struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -8771,9 +8770,11 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	se = &p->se;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (!prev || prev->sched_class != &fair_sched_class)
+	if (prev->sched_class != &fair_sched_class)
 		goto simple;
 
+	__put_prev_set_next_dl_server(rq, prev, p);
+
 	/*
 	 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
 	 * likely that a next task is from the same cgroup as the current.
@@ -8805,33 +8806,15 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		put_prev_entity(cfs_rq, pse);
 		set_next_entity(cfs_rq, se);
+
+		__set_next_task_fair(rq, p, true);
 	}
 
-	goto done;
+	return p;
 
 simple:
 #endif
-	if (prev)
-		put_prev_task(rq, prev);
-
-	for_each_sched_entity(se)
-		set_next_entity(cfs_rq_of(se), se);
-
-done: __maybe_unused;
-#ifdef CONFIG_SMP
-	/*
-	 * Move the next running task to the front of
-	 * the list, so our cfs_tasks list becomes MRU
-	 * one.
-	 */
-	list_move(&p->se.group_node, &rq->cfs_tasks);
-#endif
-
-	if (hrtick_enabled_fair(rq))
-		hrtick_start_fair(rq, p);
-
-	update_misfit_status(p, rq);
-	sched_fair_update_stop_tick(rq, p);
-
+	put_prev_set_next_task(rq, prev, p);
 	return p;
 
 idle:
@@ -8860,9 +8843,9 @@ done: __maybe_unused;
 	return NULL;
 }
 
-static struct task_struct *__pick_next_task_fair(struct rq *rq)
+static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 {
-	return pick_next_task_fair(rq, NULL, NULL);
+	return pick_next_task_fair(rq, prev, NULL);
 }
 
 static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
@@ -8872,16 +8855,7 @@ static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
 
 static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
 {
-#ifdef CONFIG_SMP
 	return pick_task_fair(dl_se->rq);
-#else
-	return NULL;
-#endif
-}
-
-static struct task_struct *fair_server_pick_next(struct sched_dl_entity *dl_se)
-{
-	return pick_next_task_fair(dl_se->rq, NULL, NULL);
 }
 
 void fair_server_init(struct rq *rq)
@@ -8890,15 +8864,13 @@ void fair_server_init(struct rq *rq)
 
 	init_dl_entity(dl_se);
 
-	dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_next,
-		       fair_server_pick_task);
+	dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_task);
 }
 
 /*
  * Account for a descheduled task:
 */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
@@ -13098,11 +13070,16 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	 * and we cannot use DEQUEUE_DELAYED.
 	 */
 	if (p->se.sched_delayed) {
+		/* First, dequeue it from its new class' structures */
 		dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP);
-		p->se.sched_delayed = 0;
+		/*
+		 * Now, clean up the fair_sched_class side of things
+		 * related to sched_delayed being true and that wasn't done
+		 * due to the generic dequeue not using DEQUEUE_DELAYED.
+		 */
+		finish_delayed_dequeue_entity(&p->se);
 		p->se.rel_deadline = 0;
-		if (sched_feat(DELAY_ZERO) && p->se.vlag > 0)
-			p->se.vlag = 0;
+		__block_task(rq, p);
 	}
 }
@@ -13127,12 +13104,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 	}
 }
 
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 {
 	struct sched_entity *se = &p->se;
 
@@ -13145,6 +13117,27 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 		list_move(&se->group_node, &rq->cfs_tasks);
 	}
 #endif
+
+	if (!first)
+		return;
+
+	SCHED_WARN_ON(se->sched_delayed);
+
+	if (hrtick_enabled_fair(rq))
+		hrtick_start_fair(rq, p);
+
+	update_misfit_status(p, rq);
+	sched_fair_update_stop_tick(rq, p);
+}
+
+/*
+ * Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+{
+	struct sched_entity *se = &p->se;
+
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -13154,10 +13147,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 		account_cfs_rq_runtime(cfs_rq, 0);
 	}
 
-	if (!first)
-		return;
-
-	SCHED_WARN_ON(se->sched_delayed);
+	__set_next_task_fair(rq, p, first);
 }
 
 void init_cfs_rq(struct cfs_rq *cfs_rq)
@@ -13483,13 +13473,13 @@ DEFINE_SCHED_CLASS(fair) = {
 
 	.wakeup_preempt		= check_preempt_wakeup_fair,
 
+	.pick_task		= pick_task_fair,
 	.pick_next_task		= __pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 	.set_next_task		= set_next_task_fair,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_fair,
-	.pick_task		= pick_task_fair,
 	.select_task_rq		= select_task_rq_fair,
 	.migrate_task_rq	= migrate_task_rq_fair,
kernel/sched/idle.c

@@ -450,7 +450,7 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
 		resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	dl_server_update_idle_time(rq, prev);
 	scx_update_idle(rq, false);
@@ -464,21 +464,10 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
 	next->se.exec_start = rq_clock_task(rq);
 }
 
-#ifdef CONFIG_SMP
-static struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq)
 {
 	return rq->idle;
 }
-#endif
-
-struct task_struct *pick_next_task_idle(struct rq *rq)
-{
-	struct task_struct *next = rq->idle;
-
-	set_next_task_idle(rq, next, true);
-
-	return next;
-}
 
 /*
  * It is not legal to sleep in the idle task - print a warning
@@ -533,13 +522,12 @@ DEFINE_SCHED_CLASS(idle) = {
 
 	.wakeup_preempt		= wakeup_preempt_idle,
 
-	.pick_next_task		= pick_next_task_idle,
+	.pick_task		= pick_task_idle,
 	.put_prev_task		= put_prev_task_idle,
 	.set_next_task		= set_next_task_idle,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_idle,
-	.pick_task		= pick_task_idle,
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
kernel/sched/rt.c

@@ -1748,17 +1748,7 @@ static struct task_struct *pick_task_rt(struct rq *rq)
 	return p;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
-{
-	struct task_struct *p = pick_task_rt(rq);
-
-	if (p)
-		set_next_task_rt(rq, p, true);
-
-	return p;
-}
-
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -2645,13 +2635,12 @@ DEFINE_SCHED_CLASS(rt) = {
 
 	.wakeup_preempt		= wakeup_preempt_rt,
 
-	.pick_next_task		= pick_next_task_rt,
+	.pick_task		= pick_task_rt,
 	.put_prev_task		= put_prev_task_rt,
 	.set_next_task		= set_next_task_rt,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_rt,
-	.pick_task		= pick_task_rt,
 	.select_task_rq		= select_task_rq_rt,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 	.rq_online		= rq_online_rt,
kernel/sched/sched.h

@@ -389,7 +389,6 @@ extern void dl_server_start(struct sched_dl_entity *dl_se);
 extern void dl_server_stop(struct sched_dl_entity *dl_se);
 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
 		    dl_server_has_tasks_f has_tasks,
-		    dl_server_pick_f pick_next,
 		    dl_server_pick_f pick_task);
 
 extern void dl_server_update_idle_time(struct rq *rq,
@@ -1133,6 +1132,7 @@ struct rq {
 	unsigned int		nr_uninterruptible;
 
 	struct task_struct __rcu	*curr;
+	struct sched_dl_entity	*dl_server;
 	struct task_struct	*idle;
 	struct task_struct	*stop;
 	unsigned long		next_balance;
@@ -1260,6 +1260,7 @@ struct rq {
 	/* per rq */
 	struct rq		*core;
 	struct task_struct	*core_pick;
+	struct sched_dl_entity	*core_dl_server;
 	unsigned int		core_enabled;
 	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
@@ -2368,9 +2369,19 @@ struct sched_class {
 	void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
 
 	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-	struct task_struct *(*pick_next_task)(struct rq *rq);
+	struct task_struct *(*pick_task)(struct rq *rq);
+	/*
+	 * Optional! When implemented pick_next_task() should be equivalent to:
+	 *
+	 *   next = pick_task();
+	 *   if (next) {
+	 *       put_prev_task(prev);
+	 *       set_next_task_first(next);
+	 *   }
+	 */
+	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
 
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
 	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
 
 	void (*switch_class)(struct rq *rq, struct task_struct *next);
@@ -2378,8 +2389,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
 
-	struct task_struct * (*pick_task)(struct rq *rq);
-
 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -2426,7 +2435,7 @@ struct sched_class {
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	WARN_ON_ONCE(rq->curr != prev);
-	prev->sched_class->put_prev_task(rq, prev);
+	prev->sched_class->put_prev_task(rq, prev, NULL);
 }
 
 static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -2434,6 +2443,30 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
+static inline void
+__put_prev_set_next_dl_server(struct rq *rq,
+			      struct task_struct *prev,
+			      struct task_struct *next)
+{
+	prev->dl_server = NULL;
+	next->dl_server = rq->dl_server;
+	rq->dl_server = NULL;
+}
+
+static inline void put_prev_set_next_task(struct rq *rq,
+					  struct task_struct *prev,
+					  struct task_struct *next)
+{
+	WARN_ON_ONCE(rq->curr != prev);
+
+	__put_prev_set_next_dl_server(rq, prev, next);
+
+	if (next == prev)
+		return;
+
+	prev->sched_class->put_prev_task(rq, prev, next);
+	next->sched_class->set_next_task(rq, next, true);
+}
+
 /*
  * Helper to define a sched_class instance; each one is placed in a separate
@@ -2524,7 +2557,7 @@ static inline bool sched_fair_runnable(struct rq *rq)
 }
 
 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_next_task_idle(struct rq *rq);
+extern struct task_struct *pick_task_idle(struct rq *rq);
 
 #define SCA_CHECK		0x01
 #define SCA_MIGRATE_DISABLE	0x02
kernel/sched/stop_task.c

@@ -41,16 +41,6 @@ static struct task_struct *pick_task_stop(struct rq *rq)
 	return rq->stop;
 }
 
-static struct task_struct *pick_next_task_stop(struct rq *rq)
-{
-	struct task_struct *p = pick_task_stop(rq);
-
-	if (p)
-		set_next_task_stop(rq, p, true);
-
-	return p;
-}
-
 static void
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -69,7 +59,7 @@ static void yield_task_stop(struct rq *rq)
 	BUG(); /* the stop task should never yield, its pointless. */
 }
 
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	update_curr_common(rq);
 }
@@ -112,13 +102,12 @@ DEFINE_SCHED_CLASS(stop) = {
 
 	.wakeup_preempt		= wakeup_preempt_stop,
 
-	.pick_next_task		= pick_next_task_stop,
+	.pick_task		= pick_task_stop,
 	.put_prev_task		= put_prev_task_stop,
 	.set_next_task		= set_next_task_stop,
 
 #ifdef CONFIG_SMP
 	.balance		= balance_stop,
-	.pick_task		= pick_task_stop,
 	.select_task_rq		= select_task_rq_stop,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif