diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 89c67c2403fc..aefd68cec2e9 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5891,11 +5891,42 @@
 			simeth=		[IA-64]
 			simscsi=
 
-	slram=		[HW,MTD]
+	slab_debug[=options[,slabs][;[options[,slabs]]...]]	[MM]
+			Enabling slab_debug allows one to determine the
+			culprit if slab objects become corrupted. Enabling
+			slab_debug can create guard zones around objects and
+			may poison objects when not in use. Also tracks the
+			last alloc / free. For more information see
+			Documentation/mm/slub.rst.
+			(slub_debug legacy name also accepted for now)
+
+	slab_max_order=	[MM]
+			Determines the maximum allowed order for slabs.
+			A high setting may cause OOMs due to memory
+			fragmentation. For more information see
+			Documentation/mm/slub.rst.
+			(slub_max_order legacy name also accepted for now)
 
 	slab_merge	[MM]
 			Enable merging of slabs with similar size when the
 			kernel is built without CONFIG_SLAB_MERGE_DEFAULT.
+			(slub_merge legacy name also accepted for now)
+
+	slab_min_objects=	[MM]
+			The minimum number of objects per slab. SLUB will
+			increase the slab order up to slab_max_order to
+			generate a sufficiently large slab able to contain
+			the number of objects indicated. The higher the number
+			of objects the smaller the overhead of tracking slabs
+			and the less frequently locks need to be acquired.
+			For more information see Documentation/mm/slub.rst.
+			(slub_min_objects legacy name also accepted for now)
+
+	slab_min_order=	[MM]
+			Determines the minimum page order for slabs. Must be
+			lower than or equal to slab_max_order. For more
+			information see Documentation/mm/slub.rst.
+			(slub_min_order legacy name also accepted for now)
 
 	slab_nomerge	[MM]
 			Disable merging of slabs with similar size. May be
@@ -5909,47 +5940,9 @@
 			unchanged). Debug options disable merging on their own.
 			For more information see Documentation/mm/slub.rst.
+			(slub_nomerge legacy name also accepted for now)
 
-	slab_max_order=	[MM, SLAB]
-			Determines the maximum allowed order for slabs.
-			A high setting may cause OOMs due to memory
-			fragmentation. Defaults to 1 for systems with
-			more than 32MB of RAM, 0 otherwise.
-
-	slub_debug[=options[,slabs][;[options[,slabs]]...]	[MM, SLUB]
-			Enabling slub_debug allows one to determine the
-			culprit if slab objects become corrupted. Enabling
-			slub_debug can create guard zones around objects and
-			may poison objects when not in use. Also tracks the
-			last alloc / free. For more information see
-			Documentation/mm/slub.rst.
-
-	slub_max_order=	[MM, SLUB]
-			Determines the maximum allowed order for slabs.
-			A high setting may cause OOMs due to memory
-			fragmentation. For more information see
-			Documentation/mm/slub.rst.
-
-	slub_min_objects=	[MM, SLUB]
-			The minimum number of objects per slab. SLUB will
-			increase the slab order up to slub_max_order to
-			generate a sufficiently large slab able to contain
-			the number of objects indicated. The higher the number
-			of objects the smaller the overhead of tracking slabs
-			and the less frequently locks need to be acquired.
-			For more information see Documentation/mm/slub.rst.
-
-	slub_min_order=	[MM, SLUB]
-			Determines the minimum page order for slabs. Must be
-			lower than slub_max_order.
-			For more information see Documentation/mm/slub.rst.
-
-	slub_merge	[MM, SLUB]
-			Same with slab_merge.
-
-	slub_nomerge	[MM, SLUB]
-			Same with slab_nomerge. This is supported for legacy.
-			See slab_nomerge for more information.
+	slram=		[HW,MTD]
 
 	smart2=		[HW]
 			Format: <io1>[,<io2>[,...,<io8>]]
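For illustration of the combined syntax documented above (the flag letters are those described in Documentation/mm/slub.rst, which remains authoritative; the cache names are examples only), a command line such as

	slab_debug=ZF;P,kmalloc-64,dentry

enables red zoning (Z) and sanity checks (F) globally, plus poisoning (P) for just the kmalloc-64 and dentry caches. With this patch applied, the legacy spelling slub_debug= should behave identically.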
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 4f467d3972a6..b1b316f99703 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -48,7 +48,7 @@ static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
  * correctly.
  *
  * This should get caught by either memory tagging, KASan, or by using
- * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
+ * CONFIG_SLUB_DEBUG=y and slab_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
  */
 static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
 {
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 321ab379994f..afc72fde0f03 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -64,11 +64,11 @@ config SLUB_DEBUG_ON
 	help
 	  Boot with debugging on by default. SLUB boots by default with
 	  the runtime debug capabilities switched off. Enabling this is
-	  equivalent to specifying the "slub_debug" parameter on boot.
+	  equivalent to specifying the "slab_debug" parameter on boot.
 	  There is no support for more fine grained debug control like
-	  possible with slub_debug=xxx. SLUB debugging may be switched
+	  possible with slab_debug=xxx. SLUB debugging may be switched
 	  off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
-	  "slub_debug=-".
+	  "slab_debug=-".
 
 config PAGE_OWNER
 	bool "Track page owner"
diff --git a/mm/slab.h b/mm/slab.h
index 54deeb0428c6..f7df6d701c5b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -528,7 +528,7 @@ static inline bool __slub_debug_enabled(void)
 #endif
 
 /*
- * Returns true if any of the specified slub_debug flags is enabled for the
+ * Returns true if any of the specified slab_debug flags is enabled for the
  * cache. Use only for flags parsed by setup_slub_debug() as it also enables
  * the static key.
  */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 238293b1dbe1..230ef7cc3467 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -282,7 +282,7 @@ kmem_cache_create_usercopy(const char *name,
 
 #ifdef CONFIG_SLUB_DEBUG
 	/*
-	 * If no slub_debug was enabled globally, the static key is not yet
+	 * If no slab_debug was enabled globally, the static key is not yet
 	 * enabled by setup_slub_debug(). Enable it if the cache is being
 	 * created with any of the debugging flags passed explicitly.
 	 * It's also possible that this is the first cache created with
@@ -766,7 +766,7 @@ EXPORT_SYMBOL(kmalloc_size_roundup);
 }
 
 /*
- * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
+ * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..e66bc888d23b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -295,7 +295,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 
 /*
  * Debugging flags that require metadata to be stored in the slab. These get
- * disabled when slub_debug=O is used and a cache's min order increases with
+ * disabled when slab_debug=O is used and a cache's min order increases with
  * metadata.
  */
 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -1616,7 +1616,7 @@ static inline int free_consistency_checks(struct kmem_cache *s,
 }
 
 /*
- * Parse a block of slub_debug options. Blocks are delimited by ';'
+ * Parse a block of slab_debug options. Blocks are delimited by ';'
 *
 * @str: start of block
 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
@@ -1677,7 +1677,7 @@ parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
 			break;
 		default:
 			if (init)
-				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
+				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
 		}
 	}
 check_slabs:
@@ -1736,7 +1736,7 @@ static int __init setup_slub_debug(char *str)
 	/*
 	 * For backwards compatibility, a single list of flags with list of
 	 * slabs means debugging is only changed for those slabs, so the global
-	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
 	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
 	 * long as there is no option specifying flags without a slab list.
 	 */
@@ -1760,7 +1760,8 @@ out:
 	return 1;
 }
 
-__setup("slub_debug", setup_slub_debug);
+__setup("slab_debug", setup_slub_debug);
+__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);
 
 /*
  * kmem_cache_flags - apply debugging options to the cache
@@ -1770,7 +1771,7 @@ __setup("slub_debug", setup_slub_debug);
  *
  * Debug option(s) are applied to @flags. In addition to the debug
  * option(s), if a slab name (or multiple) is specified i.e.
- * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
+ * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
  * then only the select slabs will receive the debug option(s).
  */
 slab_flags_t kmem_cache_flags(unsigned int object_size,
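One registration detail worth spelling out: __setup() derives the symbol names of the objects it generates from the handler name, so registering setup_slub_debug() a second time for the legacy string has to go through __setup_param() with a distinct unique id (slub_debug above) to avoid duplicate definitions. A sketch of the two macros, paraphrased from include/linux/init.h (exact attributes vary by kernel version):

	/*
	 * Each invocation emits an obs_kernel_param entry into the
	 * .init.setup table; unique_id only disambiguates the symbol
	 * names of the generated objects.
	 */
	#define __setup_param(str, unique_id, fn, early)			\
		static const char __setup_str_##unique_id[] __initconst	\
			__aligned(1) = str;					\
		static struct obs_kernel_param __setup_##unique_id	\
			__used __section(".init.setup")			\
			__aligned(__alignof__(struct obs_kernel_param))	\
			= { __setup_str_##unique_id, fn, early }

	#define __setup(str, fn)					\
		__setup_param(str, fn, fn, 0)

Both spellings therefore land in the same table and dispatch to the same handler during early boot.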
@@ -3263,7 +3264,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 			oo_order(s->min));
 
 		if (oo_order(s->min) > get_order(s->object_size))
-			pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
+			pr_warn("  %s debugging increased min order, use slab_debug=O to disable.\n",
 				s->name);
 
 		for_each_kmem_cache_node(s, node, n) {
@@ -3792,11 +3793,11 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
 		zero_size = orig_size;
 
 	/*
-	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * When slab_debug is enabled, avoid memory initialization integrated
 	 * into KASAN and instead zero out the memory via the memset below with
 	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
 	 * cause false-positive reports. This does not lead to a performance
-	 * penalty on production builds, as slub_debug is not intended to be
+	 * penalty on production builds, as slab_debug is not intended to be
 	 * enabled there.
 	 */
 	if (__slub_debug_enabled())
@@ -4702,8 +4703,8 @@ static unsigned int slub_min_objects;
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
- * slub_max_order specifies the order where we begin to stop considering the
- * number of objects in a slab as critical. If we reach slub_max_order then
+ * slab_max_order specifies the order where we begin to stop considering the
+ * number of objects in a slab as critical. If we reach slab_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
@@ -4770,14 +4771,14 @@ static inline int calculate_order(unsigned int size)
 	 * and backing off gradually.
 	 *
 	 * We start with accepting at most 1/16 waste and try to find the
-	 * smallest order from min_objects-derived/slub_min_order up to
-	 * slub_max_order that will satisfy the constraint. Note that increasing
+	 * smallest order from min_objects-derived/slab_min_order up to
+	 * slab_max_order that will satisfy the constraint. Note that increasing
 	 * the order can only result in same or less fractional waste, not more.
 	 *
 	 * If that fails, we increase the acceptable fraction of waste and try
 	 * again. The last iteration with fraction of 1/2 would effectively
 	 * accept any waste and give us the order determined by min_objects, as
-	 * long as at least single object fits within slub_max_order.
+	 * long as at least single object fits within slab_max_order.
 	 */
 	for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
 		order = calc_slab_order(size, min_order, slub_max_order,
@@ -4787,7 +4788,7 @@ static inline int calculate_order(unsigned int size)
 	}
 
 	/*
-	 * Doh this slab cannot be placed using slub_max_order.
+	 * Doh this slab cannot be placed using slab_max_order.
 	 */
 	order = get_order(size);
 	if (order <= MAX_PAGE_ORDER)
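The waste-backoff behavior described in the comment above can be checked with a small standalone model. This is not the kernel's calc_slab_order() (which also weighs min_objects and CPU count); it is a simplified sketch assuming 4 KiB pages, compilable as ordinary user-space C:

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/* Smallest order in [min_order, max_order] whose leftover space,
	 * after packing whole objects, is at most 1/fract of the slab. */
	static int slab_order(unsigned int size, unsigned int min_order,
			      unsigned int max_order, unsigned int fract)
	{
		for (unsigned int order = min_order; order <= max_order; order++) {
			unsigned int slab_size = PAGE_SIZE << order;

			if (slab_size / size == 0)
				continue;	/* object does not even fit */
			if (slab_size % size <= slab_size / fract)
				return (int)order;
		}
		return -1;
	}

	int main(void)
	{
		/* Accept 1/16 waste first, then back off toward 1/2,
		 * as the comment in calculate_order() describes. */
		for (unsigned int fract = 16; fract > 1; fract /= 2) {
			int order = slab_order(700, 0, 3, fract);

			if (order >= 0) {
				printf("size 700: fraction 1/%u -> order %d\n",
				       fract, order);
				return 0;
			}
		}
		puts("size 700: no order satisfied the waste bound");
		return 0;
	}

For a 700-byte object this prints order 1 at the first (1/16) fraction: an 8 KiB slab holds 11 objects with 492 bytes left over, within the 512-byte bound, while a 4 KiB slab wastes 596 bytes.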
@@ -5313,7 +5314,9 @@ static int __init setup_slub_min_order(char *str)
 	return 1;
 }
 
-__setup("slub_min_order=", setup_slub_min_order);
+__setup("slab_min_order=", setup_slub_min_order);
+__setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
+
 
 static int __init setup_slub_max_order(char *str)
 {
@@ -5326,7 +5329,8 @@ static int __init setup_slub_max_order(char *str)
 	return 1;
 }
 
-__setup("slub_max_order=", setup_slub_max_order);
+__setup("slab_max_order=", setup_slub_max_order);
+__setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
 
 static int __init setup_slub_min_objects(char *str)
 {
@@ -5335,7 +5339,8 @@ static int __init setup_slub_min_objects(char *str)
 	return 1;
 }
 
-__setup("slub_min_objects=", setup_slub_min_objects);
+__setup("slab_min_objects=", setup_slub_min_objects);
+__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
 
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
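A closing usage note: after the three renames above, both spellings reach the same handlers, so slab_min_order=1 slab_max_order=3 and the legacy slub_min_order=1 slub_max_order=3 should behave identically on the command line. Assuming SLUB's sysfs interface is available (the cache name is chosen purely as an example), the order actually selected for a cache can then be inspected at runtime:

	# cat /sys/kernel/slab/kmalloc-8k/order

which reports the page order SLUB settled on for that cache.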