mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00
Add check of order < 0 before calling free_pages()
The function addresses that are traced by ftrace are stored in pages, and the size is held in a variable. If there's some error in creating them, the allocated ones will be freed. In this case, it is possible that the order of pages to be freed may end up being negative due to a size of zero passed to get_count_order(), and then that negative number will cause free_pages() to free a very large section. Make sure that does not happen. -----BEGIN PGP SIGNATURE----- iIoEABYIADIWIQRRSw7ePDh/lE+zeZMp5XQQmuv6qgUCYGR30BQccm9zdGVkdEBn b29kbWlzLm9yZwAKCRAp5XQQmuv6qnbDAP9yEhTLcDRUi3VLWnEq19Dt4Lsg86Bf QRpbWG6Ze9EbZQEAgYAOe1fsNCNEIMXXh/4nlKVpKKH+vviS0ux9Z6uhpQQ= =Veyq -----END PGP SIGNATURE----- Merge tag 'trace-v5.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace Pull ftrace fix from Steven Rostedt: "Add check of order < 0 before calling free_pages() The function addresses that are traced by ftrace are stored in pages, and the size is held in a variable. If there's some error in creating them, the allocated ones will be freed. In this case, it is possible that the order of pages to be freed may end up being negative due to a size of zero passed to get_count_order(), and then that negative number will cause free_pages() to free a very large section. Make sure that does not happen" * tag 'trace-v5.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: ftrace: Check if pages were allocated before calling free_pages()
This commit is contained in:
commit
d19cc4bfbf
1 changed file with 6 additions and 3 deletions
|
@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
|
||||||
pg = start_pg;
|
pg = start_pg;
|
||||||
while (pg) {
|
while (pg) {
|
||||||
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
|
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
|
||||||
free_pages((unsigned long)pg->records, order);
|
if (order >= 0)
|
||||||
|
free_pages((unsigned long)pg->records, order);
|
||||||
start_pg = pg->next;
|
start_pg = pg->next;
|
||||||
kfree(pg);
|
kfree(pg);
|
||||||
pg = start_pg;
|
pg = start_pg;
|
||||||
|
@ -6451,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
|
||||||
clear_mod_from_hashes(pg);
|
clear_mod_from_hashes(pg);
|
||||||
|
|
||||||
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
|
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
|
||||||
free_pages((unsigned long)pg->records, order);
|
if (order >= 0)
|
||||||
|
free_pages((unsigned long)pg->records, order);
|
||||||
tmp_page = pg->next;
|
tmp_page = pg->next;
|
||||||
kfree(pg);
|
kfree(pg);
|
||||||
ftrace_number_of_pages -= 1 << order;
|
ftrace_number_of_pages -= 1 << order;
|
||||||
|
@ -6811,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
|
||||||
if (!pg->index) {
|
if (!pg->index) {
|
||||||
*last_pg = pg->next;
|
*last_pg = pg->next;
|
||||||
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
|
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
|
||||||
free_pages((unsigned long)pg->records, order);
|
if (order >= 0)
|
||||||
|
free_pages((unsigned long)pg->records, order);
|
||||||
ftrace_number_of_pages -= 1 << order;
|
ftrace_number_of_pages -= 1 << order;
|
||||||
ftrace_number_of_groups--;
|
ftrace_number_of_groups--;
|
||||||
kfree(pg);
|
kfree(pg);
|
||||||
|
|
Loading…
Reference in a new issue