Merge branch 'slab/for-5.18/trivial' into slab/for-linus

Trivial slab code changes:
- deleting unused parameters and flags
- using helper macros and functions
- making structures static
commit acbfab16cc
Vlastimil Babka, 2022-03-21 19:46:05 +01:00
4 changed files with 12 additions and 20 deletions

include/linux/slab.h

@@ -117,9 +117,6 @@
 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
-/* Slab deactivation flag */
-#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)
-
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *

mm/slab_common.c

@@ -807,7 +807,7 @@ void __init setup_kmalloc_cache_index_table(void)
 	unsigned int i;
 
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+		!is_power_of_2(KMALLOC_MIN_SIZE));
 
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
 		unsigned int elem = size_index_elem(i);
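
Note: is_power_of_2() from include/linux/log2.h encodes the same n & (n - 1) bit trick as the open-coded check, so the BUILD_BUG_ON condition is unchanged for any nonzero KMALLOC_MIN_SIZE. A standalone sketch of the equivalence, using a stand-in definition of the helper:

#include <assert.h>
#include <stdbool.h>

/* Stand-in for the kernel's is_power_of_2() from include/linux/log2.h. */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	/* For every nonzero n, the open-coded test being replaced,
	 * (n & (n - 1)) != 0, equals !is_power_of_2(n). */
	for (unsigned long n = 1; n < (1UL << 20); n++)
		assert(((n & (n - 1)) != 0) == !is_power_of_2(n));
	return 0;
}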

mm/slob.c

@@ -708,7 +708,7 @@ int __kmem_cache_shrink(struct kmem_cache *d)
 	return 0;
 }
 
-struct kmem_cache kmem_cache_boot = {
+static struct kmem_cache kmem_cache_boot = {
 	.name = "kmem_cache",
 	.size = sizeof(struct kmem_cache),
 	.flags = SLAB_PANIC,

mm/slub.c

@@ -1788,8 +1788,8 @@ static void *setup_object(struct kmem_cache *s, struct slab *slab,
 /*
  * Slab allocation and freeing
  */
-static inline struct slab *alloc_slab_page(struct kmem_cache *s,
-		gfp_t flags, int node, struct kmem_cache_order_objects oo)
+static inline struct slab *alloc_slab_page(gfp_t flags, int node,
+		struct kmem_cache_order_objects oo)
 {
 	struct folio *folio;
 	struct slab *slab;
@@ -1941,7 +1941,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	slab = alloc_slab_page(s, alloc_gfp, node, oo);
+	slab = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!slab)) {
 		oo = s->min;
 		alloc_gfp = flags;
@@ -1949,7 +1949,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		slab = alloc_slab_page(s, alloc_gfp, node, oo);
+		slab = alloc_slab_page(alloc_gfp, node, oo);
 		if (unlikely(!slab))
 			goto out;
 		stat(s, ORDER_FALLBACK);
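
Note: the surrounding allocate_slab() logic (unchanged context above) first tries the higher order with __GFP_NOMEMALLOC added and __GFP_RECLAIM/__GFP_NOFAIL masked out, so the speculative large attempt neither reclaims nor dips into reserves; only the minimum-order retry runs with the caller's original flags. A userspace sketch of the try-large-then-fall-back pattern (names hypothetical, malloc() standing in for the page allocator):

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical illustration of allocate_slab()'s two-step strategy:
 * try the preferred (higher-order) size opportunistically, then fall
 * back to the minimum size before reporting failure. */
static void *alloc_with_fallback(size_t pref_size, size_t min_size,
				 size_t *got_size)
{
	void *p = malloc(pref_size);	/* optimistic large attempt */

	if (p) {
		*got_size = pref_size;
		return p;
	}
	p = malloc(min_size);		/* cf. the ORDER_FALLBACK stat above */
	if (!p)
		return NULL;		/* both attempts failed */
	*got_size = min_size;
	return p;
}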
@@ -4046,7 +4046,7 @@ static void set_cpu_partial(struct kmem_cache *s)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s, int forced_order)
+static int calculate_sizes(struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
@@ -4150,10 +4150,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	size = ALIGN(size, s->align);
 	s->size = size;
 	s->reciprocal_size = reciprocal_value(size);
-	if (forced_order >= 0)
-		order = forced_order;
-	else
-		order = calculate_order(size);
+	order = calculate_order(size);
 
 	if ((int)order < 0)
 		return 0;
@@ -4189,7 +4186,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	s->random = get_random_long();
 #endif
 
-	if (!calculate_sizes(s, -1))
+	if (!calculate_sizes(s))
 		goto error;
 	if (disable_higher_order_debug) {
 		/*
@@ -4199,7 +4196,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 		if (get_order(s->size) > get_order(s->object_size)) {
 			s->flags &= ~DEBUG_METADATA_FLAGS;
 			s->offset = 0;
-			if (!calculate_sizes(s, -1))
+			if (!calculate_sizes(s))
 				goto error;
 		}
 	}
@@ -5344,12 +5341,10 @@ struct slab_attribute {
 };
 
 #define SLAB_ATTR_RO(_name) \
-	static struct slab_attribute _name##_attr = \
-			__ATTR(_name, 0400, _name##_show, NULL)
+	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
 
 #define SLAB_ATTR(_name) \
-	static struct slab_attribute _name##_attr = \
-			__ATTR(_name, 0600, _name##_show, _name##_store)
+	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
 
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {
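
Note: __ATTR_RO_MODE() and __ATTR_RW_MODE() are the generic sysfs initializer macros from include/linux/sysfs.h; they derive the ->show (and, for RW, ->store) callbacks from the attribute name by token pasting, which is what lets the open-coded __ATTR() continuation lines go away. Roughly (paraphrased from that header, not verbatim):

/* Paraphrase of the sysfs helpers: the _name##_show/_name##_store
 * naming convention supplies the callbacks, so each SLAB_ATTR* use
 * only names the attribute and its mode. */
#define __ATTR_RO_MODE(_name, _mode) {					\
	.attr	= { .name = __stringify(_name),				\
		    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },		\
	.show	= _name##_show,						\
}

#define __ATTR_RW_MODE(_name, _mode) {					\
	.attr	= { .name = __stringify(_name),				\
		    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },		\
	.show	= _name##_show,						\
	.store	= _name##_store,					\
}

The net effect is purely cosmetic: one initializer line per attribute instead of three, with the same fields populated.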