- Fix a NULL-ptr dereference when recalculating a sched entity's weight

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmII+xoACgkQEsHwGGHe
 VUqiEQ/+JI1pSIk5+arj5v7B1GaiqUg2UE0MNgFErPfMsMCWMT1/3R0P+ULzy/Dl
 Tv/mQnHznfSdntUVoJ2iYCayLFAJC7AnDVq00inHQI/vIvQnwjUuoE6AYDBJx5FG
 oLu9W1eUjXt6RVlhVA3+OY4PJlxoEXCNbai7cDYoHbOYwQDGfPPuNkdcvLsnPC8Z
 xIICP4+ncTgMw4unI0edqCVYYtuKv8GUklaFlNyPv/PNXYnf1mFvGQ8w4Zq2f3og
 ndVFDj3HMijYhpiBlcLQCvq1aT11ubmCaEcKufzwXWzQfEnMhHw4he/vobuuycSG
 i9vGUO8Qo+7sRCFL0CGI3UBchTWbzUe57Aj/rWijmKl4zFWkYcc7PBDEZetfU/rS
 CrYD83WzfS+DV7NRThIpdqG8fpqGcIp40ot724XZN05NEmX3oxPNT/UabYyBx/0h
 IGb63PeQndsfzFxVx5fl6GeiuMvqLHtgVbJV3hWE5Ea8zrOBWANAyTdRet486IL/
 JKkFb8VGtCjRzjfpAoWt81U6JL6bh0yR+tfLu4QNOK3ELhDxTVsS3JnsEUJjgZvk
 4tOJhuIR4ApGYiwKGTNo9J9GfppMUcwHOxwmtjFgG17MS5Iqam7XrHdnpInB8CCF
 WrcFlEg2EiufSOrEp4Ub9Dc/uqmxmYO/NNWZxXCvNgDOlO0sKHs=
 =a9P6
 -----END PGP SIGNATURE-----

Merge tag 'sched_urgent_for_v5.17_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Borislav Petkov:
 "Fix a NULL-ptr dereference when recalculating a sched entity's weight"

* tag 'sched_urgent_for_v5.17_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix fault in reweight_entity
Linus Torvalds 2022-02-13 09:27:26 -08:00
commit 6f35736723
1 changed file with 6 additions and 5 deletions
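
For context, set_load_weight() in kernel/sched/core.c reads roughly as below after this merge. This is a condensed, lightly commented sketch rather than the verbatim source: the point of the fix is that the update_load flag is now derived inside the helper from the task's own state, so a task still marked TASK_NEW (forked, but not yet pushed through wake_up_new_task()) can never be fed to reweight_task() while its scheduler linkage is still incomplete:

  static void set_load_weight(struct task_struct *p)
  {
  	/* A TASK_NEW task has not been through wake_up_new_task() yet,
  	 * so skip reweight_task() until it is fully set up. */
  	bool update_load = !(READ_ONCE(p->__state) & TASK_NEW);
  	int prio = p->static_prio - MAX_RT_PRIO;
  	struct load_weight *load = &p->se.load;
  
  	/* SCHED_IDLE tasks get minimal weight: */
  	if (task_has_idle_policy(p)) {
  		load->weight = scale_load(WEIGHT_IDLEPRIO);
  		load->inv_weight = WMULT_IDLEPRIO;
  		return;
  	}
  
  	if (update_load && p->sched_class == &fair_sched_class) {
  		/* Safe now: the task is fully visible to the scheduler. */
  		reweight_task(p, prio);
  	} else {
  		load->weight = scale_load(sched_prio_to_weight[prio]);
  		load->inv_weight = sched_prio_to_wmult[prio];
  	}
  }

All five hunks below follow from that change: the bool parameter disappears from the signature and from every call site.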

@@ -1214,8 +1214,9 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-static void set_load_weight(struct task_struct *p, bool update_load)
+static void set_load_weight(struct task_struct *p)
 {
+	bool update_load = !(READ_ONCE(p->__state) & TASK_NEW);
 	int prio = p->static_prio - MAX_RT_PRIO;
 	struct load_weight *load = &p->se.load;
 
@@ -4406,7 +4407,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 			p->static_prio = NICE_TO_PRIO(0);
 
 		p->prio = p->normal_prio = p->static_prio;
-		set_load_weight(p, false);
+		set_load_weight(p);
 
 		/*
 		 * We don't need the reset flag anymore after the fork. It has
@@ -6921,7 +6922,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		put_prev_task(rq, p);
 
 	p->static_prio = NICE_TO_PRIO(nice);
-	set_load_weight(p, true);
+	set_load_weight(p);
 	old_prio = p->prio;
 	p->prio = effective_prio(p);
 
@@ -7212,7 +7213,7 @@ static void __setscheduler_params(struct task_struct *p,
 	 */
 	p->rt_priority = attr->sched_priority;
 	p->normal_prio = normal_prio(p);
-	set_load_weight(p, true);
+	set_load_weight(p);
 }
 
 /*
@@ -9445,7 +9446,7 @@ void __init sched_init(void)
 #endif
 	}
 
-	set_load_weight(&init_task, false);
+	set_load_weight(&init_task);
 
 	/*
 	 * The boot idle thread does lazy MMU switching as well:
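
The one functional line in the first hunk is the derived flag. p->__state is sampled exactly once with READ_ONCE() because a concurrent wake_up_new_task() can clear TASK_NEW at any moment, and a repeated or torn read could let the old race back in. Below is a minimal stand-alone illustration of that test; read_state_once() and the TASK_NEW value here are toy stand-ins for the kernel's READ_ONCE() and task-state bit (the real definitions live in include/asm-generic/rwonce.h and include/linux/sched.h):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>
  
  #define TASK_NEW 0x0800	/* toy stand-in for the kernel constant */
  
  /* Toy stand-in for READ_ONCE(): sample the state word exactly once,
   * even though another thread (think wake_up_new_task()) may be
   * storing to it concurrently. */
  static unsigned int read_state_once(_Atomic unsigned int *state)
  {
  	return atomic_load_explicit(state, memory_order_relaxed);
  }
  
  int main(void)
  {
  	_Atomic unsigned int state = TASK_NEW;	/* freshly forked task */
  
  	/* What the fixed set_load_weight() computes: */
  	bool update_load = !(read_state_once(&state) & TASK_NEW);
  	printf("before wakeup: update_load = %d\n", update_load);	/* 0 */
  
  	atomic_store(&state, 0);	/* wake_up_new_task() clears TASK_NEW */
  	update_load = !(read_state_once(&state) & TASK_NEW);
  	printf("after wakeup:  update_load = %d\n", update_load);	/* 1 */
  	return 0;
  }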