Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-11-01 17:08:10 +00:00)
bbc61844b4
struct 'kasan_cache' has a member 'is_kmalloc' indicating whether its host
kmem_cache is a kmalloc cache. With the newly introduced is_kmalloc_cache()
helper, 'is_kmalloc' and its related function can be replaced and removed.

Also, 'kasan_cache' is only needed by the KASAN generic mode, not by the
SW/HW tag modes, so refine its protection macro accordingly, as suggested
by Andrey Konovalov.

Link: https://lkml.kernel.org/r/20230104060605.930910-2-feng.tang@intel.com
Signed-off-by: Feng Tang <feng.tang@intel.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
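The is_kmalloc_cache() helper referred to above is defined elsewhere in the
slab code, not in this header. As a rough sketch of the idea (assuming
kmalloc caches are created with the SLAB_KMALLOC flag set; the upstream
helper may differ in guards and placement), it boils down to a flag test on
the cache itself, which is what makes a separate 'is_kmalloc' member in
'kasan_cache' unnecessary:

/* Sketch only; not the verbatim upstream definition. */
static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	/* Assumption: kmalloc caches carry the SLAB_KMALLOC creation flag. */
	return s->flags & SLAB_KMALLOC;
}

KASAN can then ask the slab layer directly, e.g. is_kmalloc_cache(cache),
instead of mirroring that information in kasan_info.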
124 lines · 3 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
	int obj_offset;
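	/*
	 * Illustrative layout implied by the comment above (assuming the
	 * debug fields take the form of red zones around the user object):
	 *
	 *   0 ...... obj_offset ...... obj_offset + object_size ...... size
	 *   |  debug  |     user-visible object     |  debug / padding  |
	 */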
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

/*
 * nearest_obj() maps an arbitrary address within a slab to the start of
 * the object that contains it, clamping to the start of the last object.
 */
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab->s_mem) % cache->size;
	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
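
/*
 * Worked example for the comment above (assuming reciprocal_buffer_size
 * was initialised with reciprocal_value(cache->size) at cache creation):
 * with cache->size == 256, an object at byte offset 1024 from slab->s_mem
 * yields index 1024 / 256 == 4, computed via multiply-and-shift rather
 * than a hardware divide.
 */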

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	if (is_kfence_address(slab_address(slab)))
		return 1;
	return cache->num;
}

#endif	/* _LINUX_SLAB_DEF_H */