kasan: rename pagealloc tests

Rename "pagealloc" KASAN tests:

1. Use "kmalloc_large" for tests that use large kmalloc allocations.

2. Use "page_alloc" for tests that use page_alloc.

Also clean up the comments.
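
To make the distinction concrete, here is a rough sketch of the three
allocation paths the new names refer to (illustrative only, not part of
this patch; the sizes mirror the existing tests):

	char *ptr;
	struct page *pages;

	/* kmalloc_large_*: too big for the largest slab cache, so SLUB
	 * falls back to page_alloc (SLAB has no such fallback). */
	ptr = kmalloc(KMALLOC_MAX_CACHE_SIZE + 10, GFP_KERNEL);

	/* kmalloc_big_*: big, but still served by a slab cache. */
	ptr = kmalloc(KMALLOC_MAX_CACHE_SIZE - 256, GFP_KERNEL);

	/* page_alloc_*: pages taken directly from the page allocator. */
	pages = alloc_pages(GFP_KERNEL, 4);
	ptr = page_address(pages);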

Link: https://lkml.kernel.org/r/f3eef6ddb87176c40958a3e5a0bd2386b52af4c6.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -214,12 +214,13 @@ static void kmalloc_node_oob_right(struct kunit *test)
 }
 
 /*
- * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
- * fit into a slab cache and therefore is allocated via the page allocator
- * fallback. Since this kind of fallback is only implemented for SLUB, these
- * tests are limited to that allocator.
+ * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
+ * that does not fit into the largest slab cache and therefore is allocated via
+ * the page_alloc fallback for SLUB. SLAB has no such fallback, and thus these
+ * tests are not supported for it.
  */
-static void kmalloc_pagealloc_oob_right(struct kunit *test)
+static void kmalloc_large_oob_right(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
@@ -235,7 +236,7 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
 	kfree(ptr);
 }
 
-static void kmalloc_pagealloc_uaf(struct kunit *test)
+static void kmalloc_large_uaf(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
@@ -249,7 +250,7 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
 }
 
-static void kmalloc_pagealloc_invalid_free(struct kunit *test)
+static void kmalloc_large_invalid_free(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
@@ -262,7 +263,7 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
 }
 
-static void pagealloc_oob_right(struct kunit *test)
+static void page_alloc_oob_right(struct kunit *test)
 {
 	char *ptr;
 	struct page *pages;
@@ -284,7 +285,7 @@ static void pagealloc_oob_right(struct kunit *test)
 	free_pages((unsigned long)ptr, order);
 }
 
-static void pagealloc_uaf(struct kunit *test)
+static void page_alloc_uaf(struct kunit *test)
 {
 	char *ptr;
 	struct page *pages;
@@ -298,15 +299,15 @@ static void pagealloc_uaf(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
 }
 
-static void kmalloc_large_oob_right(struct kunit *test)
+/*
+ * Check that KASAN detects an out-of-bounds access for a big object allocated
+ * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB.
+ */
+static void kmalloc_big_oob_right(struct kunit *test)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
 
-	/*
-	 * Allocate a chunk that is large enough, but still fits into a slab
-	 * and does not trigger the page allocator fallback in SLUB.
-	 */
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -404,18 +405,18 @@ static void krealloc_less_oob(struct kunit *test)
 	krealloc_less_oob_helper(test, 235, 201);
 }
 
-static void krealloc_pagealloc_more_oob(struct kunit *test)
+static void krealloc_large_more_oob(struct kunit *test)
 {
-	/* page_alloc fallback in only implemented for SLUB. */
+	/* page_alloc fallback is only implemented for SLUB. */
 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
 
 	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
 					KMALLOC_MAX_CACHE_SIZE + 235);
 }
 
-static void krealloc_pagealloc_less_oob(struct kunit *test)
+static void krealloc_large_less_oob(struct kunit *test)
 {
-	/* page_alloc fallback in only implemented for SLUB. */
+	/* page_alloc fallback is only implemented for SLUB. */
 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
 
 	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
@@ -1828,16 +1829,16 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmalloc_oob_right),
 	KUNIT_CASE(kmalloc_oob_left),
 	KUNIT_CASE(kmalloc_node_oob_right),
-	KUNIT_CASE(kmalloc_pagealloc_oob_right),
-	KUNIT_CASE(kmalloc_pagealloc_uaf),
-	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
-	KUNIT_CASE(pagealloc_oob_right),
-	KUNIT_CASE(pagealloc_uaf),
 	KUNIT_CASE(kmalloc_large_oob_right),
+	KUNIT_CASE(kmalloc_large_uaf),
+	KUNIT_CASE(kmalloc_large_invalid_free),
+	KUNIT_CASE(page_alloc_oob_right),
+	KUNIT_CASE(page_alloc_uaf),
+	KUNIT_CASE(kmalloc_big_oob_right),
 	KUNIT_CASE(krealloc_more_oob),
 	KUNIT_CASE(krealloc_less_oob),
-	KUNIT_CASE(krealloc_pagealloc_more_oob),
-	KUNIT_CASE(krealloc_pagealloc_less_oob),
+	KUNIT_CASE(krealloc_large_more_oob),
+	KUNIT_CASE(krealloc_large_less_oob),
 	KUNIT_CASE(krealloc_uaf),
 	KUNIT_CASE(kmalloc_oob_16),
 	KUNIT_CASE(kmalloc_uaf_16),