kasan: clean up __kasan_mempool_poison_object

Reorganize the code and reword the comment in
__kasan_mempool_poison_object to improve the code readability.

Link: https://lkml.kernel.org/r/4f6fc8840512286c1a96e16e86901082c671677d.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Andrey Konovalov 2023-12-19 23:28:52 +01:00 committed by Andrew Morton
parent 9f41c59ae3
commit cf0da2afe3
1 changed file with 7 additions and 12 deletions

View File

@ -457,27 +457,22 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
/*
 * Poison a mempool element on its way back into the pool.
 *
 * Returns true if the object was successfully poisoned (and must not be
 * touched until unpoisoned again); false if a bug was detected for this
 * free (e.g. an invalid pointer), in which case the caller should not
 * reuse the memory.
 *
 * NOTE(review): the original span interleaved pre- and post-patch diff
 * lines (duplicate 'folio' declarations, two copies of the comment, and
 * both the old else-branch and the new tail). This is the reconstructed
 * post-commit state of the function.
 */
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocation that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		/* Page-allocator-backed object: validate, then poison whole folio. */
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	/* Slab-backed object: delegate to the common slab-free poisoning path. */
	slab = folio_slab(folio);
	return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
}
void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)