slub: make ->cpu_partial unsigned int

	/*
	 * cpu_partial determined the maximum number of objects
	 * kept in the per cpu partial lists of a processor.
	 */

Can't be negative.

Link: http://lkml.kernel.org/r/20180305200730.15812-15-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e5d9998f3e (parent 52ee6d74aa)
Authored by Alexey Dobriyan on 2018-04-05 16:21:10 -07:00, committed by Linus Torvalds
2 changed files with 5 additions and 4 deletions
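Before the hunks, a brief aside on the signedness issue the patch is about: when a signed counter meets an unsigned operand in a comparison, the usual arithmetic conversions turn a negative count into a very large unsigned one, which is one reason counts that can never be negative (such as ->cpu_partial and the local "available" counter below) are kept unsigned end to end. The following standalone C program is only an illustration of that pitfall; the variable names merely echo the kernel ones and nothing in it is taken from the patch.

/*
 * Standalone illustration only -- not kernel code and not part of the
 * patch.  A negative signed value that meets an unsigned operand in a
 * comparison is converted to unsigned, so -1 compares as a huge number.
 */
#include <stdio.h>

int main(void)
{
	int available = -1;		/* a counter that "can't be negative" */
	unsigned int cpu_partial = 30;	/* an unsigned cap, like ->cpu_partial */

	/* -1 is converted to UINT_MAX before the comparison, so this is true. */
	if (available > cpu_partial / 2)
		printf("-1 compares as greater than %u\n", cpu_partial / 2);

	return 0;
}

For what it's worth, gcc flags such mixed comparisons with -Wsign-compare when building with -Wextra, which is how mismatches like this usually get noticed.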

include/linux/slub_def.h

@@ -88,7 +88,8 @@ struct kmem_cache {
 	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-	int cpu_partial;	/* Number of per cpu partial objects to keep around */
+	/* Number of per cpu partial objects to keep around */
+	unsigned int cpu_partial;
 #endif
 	struct kmem_cache_order_objects oo;
 

mm/slub.c

@@ -1812,7 +1812,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
 	struct page *page, *page2;
 	void *object = NULL;
-	int available = 0;
+	unsigned int available = 0;
 	int objects;
 
 	/*
@@ -4962,10 +4962,10 @@ static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 				 size_t length)
 {
-	unsigned long objects;
+	unsigned int objects;
 	int err;
 
-	err = kstrtoul(buf, 10, &objects);
+	err = kstrtouint(buf, 10, &objects);
 	if (err)
 		return err;
 	if (objects && !kmem_cache_has_cpu_partial(s))
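A note on the helper used in the last hunk: kstrtouint() parses a string into an unsigned int and returns 0 on success or a negative errno, rejecting out-of-range values and strings with a leading minus sign, so the handler no longer needs an unsigned long temporary. The sketch below shows the same parse-and-store pattern in a generic sysfs attribute; the attribute and handler names are made up for the example, and only kstrtouint() itself is the real kernel API.

#include <linux/kernel.h>	/* kstrtouint() */
#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Hypothetical tunable; only the parse pattern mirrors cpu_partial_store(). */
static unsigned int example_cap;

static ssize_t example_cap_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count)
{
	unsigned int val;
	int err;

	/* 0 on success, -EINVAL/-ERANGE on malformed or out-of-range input. */
	err = kstrtouint(buf, 10, &val);
	if (err)
		return err;

	example_cap = val;
	return count;
}

Wiring the attribute up (a kobj_attribute plus sysfs_create_file()) is omitted to keep the sketch short.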