kasan: only define kasan_metadata_size for Generic mode

KASAN provides a helper for calculating the size of per-object metadata
stored in the redzone.

As only the Generic mode now uses per-object metadata, define
kasan_metadata_size() for this mode only.

Link: https://lkml.kernel.org/r/8f81d4938b80446bc72538a08217009f328a3e23.1662411799.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
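
[Editor's note] For orientation: the value returned by this helper tells slab
debugging code how many bytes of an object's redzone are occupied by KASAN
metadata, so that, e.g., padding checks and debug dumps can skip that region.
A minimal userspace sketch of that bookkeeping, using an invented stub and
made-up sizes rather than the kernel's types:

#include <stdio.h>
#include <stddef.h>

/* Invented stand-in; the real helper takes a struct kmem_cache *. */
static size_t metadata_size_stub(void)
{
	return 32;	/* pretend: alloc + free metadata live in the redzone */
}

int main(void)
{
	size_t object_size = 64;
	size_t meta = metadata_size_stub();

	/* Debug dumps and padding checks must skip the metadata region. */
	printf("object bytes: [0, %zu)\n", object_size);
	printf("KASAN metadata: [%zu, %zu)\n", object_size, object_size + meta);
	return 0;
}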
Author: Andrey Konovalov, 2022-09-05 23:05:29 +02:00
Committed by: Andrew Morton
commit f372bde922 (parent 02856beb2d)
3 changed files with 19 additions and 20 deletions

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h

@@ -150,14 +150,6 @@ static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
 	__kasan_cache_create_kmalloc(cache);
 }
 
-size_t __kasan_metadata_size(struct kmem_cache *cache);
-static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
-{
-	if (kasan_enabled())
-		return __kasan_metadata_size(cache);
-	return 0;
-}
-
 void __kasan_poison_slab(struct slab *slab);
 static __always_inline void kasan_poison_slab(struct slab *slab)
 {
@@ -282,7 +274,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					      void *object) {}
@@ -333,6 +324,8 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
 
 #ifdef CONFIG_KASAN_GENERIC
 
+size_t kasan_metadata_size(struct kmem_cache *cache);
+
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
 void kasan_record_aux_stack(void *ptr);
@@ -340,6 +333,12 @@ void kasan_record_aux_stack_noalloc(void *ptr);
 
 #else /* CONFIG_KASAN_GENERIC */
 
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	return 0;
+}
+
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
 static inline void kasan_record_aux_stack(void *ptr) {}
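
[Editor's note] The net effect of the header changes above: callers spell
kasan_metadata_size() unconditionally, and the configuration selects either
the real Generic-mode implementation or a zero-returning stub. A standalone
mock of the pattern (names suffixed _mock are invented; build with
-DCONFIG_KASAN_GENERIC to exercise the first branch):

#include <stdio.h>
#include <stddef.h>

struct cache_mock { size_t redzone_meta; };

#ifdef CONFIG_KASAN_GENERIC
/* Generic mode: report the real per-object metadata size. */
static size_t metadata_size_mock(const struct cache_mock *c)
{
	return c->redzone_meta;
}
#else
/* Tag-based modes: no per-object metadata, compiles down to a constant 0. */
static inline size_t metadata_size_mock(const struct cache_mock *c)
{
	(void)c;
	return 0;
}
#endif

int main(void)
{
	struct cache_mock c = { .redzone_meta = 32 };

	printf("%zu\n", metadata_size_mock(&c));	/* 32 or 0 by configuration */
	return 0;
}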

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c

@@ -139,17 +139,6 @@ void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
 	cache->kasan_info.is_kmalloc = true;
 }
 
-size_t __kasan_metadata_size(struct kmem_cache *cache)
-{
-	if (!kasan_requires_meta())
-		return 0;
-	return (cache->kasan_info.alloc_meta_offset ?
-		sizeof(struct kasan_alloc_meta) : 0) +
-		((cache->kasan_info.free_meta_offset &&
-		cache->kasan_info.free_meta_offset != KASAN_NO_FREE_META) ?
-		sizeof(struct kasan_free_meta) : 0);
-}
-
 void __kasan_poison_slab(struct slab *slab)
 {
 	struct page *page = slab_page(slab);

View file

@@ -427,6 +427,17 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
 }
 
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	if (!kasan_requires_meta())
+		return 0;
+	return (cache->kasan_info.alloc_meta_offset ?
+		sizeof(struct kasan_alloc_meta) : 0) +
+		((cache->kasan_info.free_meta_offset &&
+		cache->kasan_info.free_meta_offset != KASAN_NO_FREE_META) ?
+		sizeof(struct kasan_free_meta) : 0);
+}
+
 static void __kasan_record_aux_stack(void *addr, bool can_alloc)
 {
 	struct slab *slab = kasan_addr_to_slab(addr);
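
[Editor's note] To make the arithmetic above concrete: alloc metadata counts
whenever its offset is set, while free metadata counts only when it occupies
dedicated redzone space (offset nonzero and not the no-free-meta sentinel).
A standalone sketch with invented sizes and a hypothetical sentinel value
(the kernel's real sizeof values and KASAN_NO_FREE_META definition differ):

#include <stdio.h>
#include <stddef.h>

#define ALLOC_META_SIZE	16u		/* invented; real size is config-dependent */
#define FREE_META_SIZE	16u		/* invented */
#define NO_FREE_META	0x7fffffffu	/* hypothetical sentinel for this sketch */

struct info_mock { unsigned int alloc_meta_offset, free_meta_offset; };

/* Mirrors the two-ternary computation in kasan_metadata_size(). */
static size_t metadata_size(const struct info_mock *i)
{
	return (i->alloc_meta_offset ? ALLOC_META_SIZE : 0) +
	       ((i->free_meta_offset && i->free_meta_offset != NO_FREE_META) ?
		FREE_META_SIZE : 0);
}

int main(void)
{
	struct info_mock both    = { .alloc_meta_offset = 8, .free_meta_offset = 24 };
	struct info_mock no_free = { .alloc_meta_offset = 8,
				     .free_meta_offset = NO_FREE_META };

	printf("both in redzone: %zu\n", metadata_size(&both));	/* 32 */
	printf("free meta elsewhere: %zu\n", metadata_size(&no_free));	/* 16 */
	return 0;
}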