mm, treewide: redefine MAX_ORDER sanely

MAX_ORDER is currently defined as the number of orders the page allocator
supports: users can ask the buddy allocator for page orders between 0 and
MAX_ORDER-1.

This definition is counter-intuitive and has led to a number of bugs all
over the kernel.

Change the definition of MAX_ORDER to be inclusive: the range of orders
users can ask the buddy allocator for is now 0..MAX_ORDER.

[kirill@shutemov.name: fix min() warning]
  Link: https://lkml.kernel.org/r/20230315153800.32wib3n5rickolvh@box
[akpm@linux-foundation.org: fix another min_t warning]
[kirill@shutemov.name: fixups per Zi Yan]
  Link: https://lkml.kernel.org/r/20230316232144.b7ic4cif4kjiabws@box.shutemov.name
[akpm@linux-foundation.org: fix underlining in docs]
  Link: https://lore.kernel.org/oe-kbuild-all/202303191025.VRCTk6mP-lkp@intel.com/
Link: https://lkml.kernel.org/r/20230315113133.11326-11-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Michael Ellerman <mpe@ellerman.id.au>	[powerpc]
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Kirill A. Shutemov 2023-03-15 14:31:33 +03:00 committed by Andrew Morton
parent 61883d3c32
commit 23baf831a3
84 changed files with 223 additions and 253 deletions
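
To make the semantic shift concrete, here is a minimal sketch of the before/after convention (not taken from the patch itself; the helper name order_is_valid and the hard-coded value are illustrative only):

#include <stdbool.h>

#define MAX_ORDER 10	/* new inclusive definition; previously 11 and exclusive */

/* Before this patch MAX_ORDER counted the number of supported orders, so the
 * largest valid order was MAX_ORDER - 1 and callers rejected a request with
 * "order >= MAX_ORDER".  With the inclusive definition, MAX_ORDER itself is
 * a valid order: */
static bool order_is_valid(unsigned int order)
{
	return order <= MAX_ORDER;	/* valid orders are 0..MAX_ORDER */
}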


@@ -172,7 +172,7 @@ variables.
Offset of the free_list's member. This value is used to compute the number
of free pages.
-Each zone has a free_area structure array called free_area[MAX_ORDER].
+Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
The free_list represents a linked list of free page blocks.
(list_head, next|prev)
@@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
information. Makedumpfile gets the start address of the vmalloc region
from this.
-(zone.free_area, MAX_ORDER)
----------------------------
+(zone.free_area, MAX_ORDER + 1)
+-------------------------------
Free areas descriptor. User-space tools use this value to iterate the
free_area ranges. MAX_ORDER is used by the zone buddy allocator.


@@ -3969,7 +3969,7 @@
[KNL] Minimal page reporting order
Format: <integer>
Adjust the minimal page reporting order. The page
-reporting is disabled when it exceeds (MAX_ORDER-1).
+reporting is disabled when it exceeds MAX_ORDER.
panic= [KNL] Kernel behaviour on panic: delay <timeout>
timeout > 0: seconds before rebooting


@@ -556,7 +556,7 @@ endmenu # "ARC Architecture Configuration"
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-default "12" if ARC_HUGEPAGE_16M
-default "11"
+default "11" if ARC_HUGEPAGE_16M
+default "10"
source "kernel/power/Kconfig"


@@ -1355,9 +1355,9 @@ config ARM_MODULE_PLTS
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-default "12" if SOC_AM33XX
-default "9" if SA1111
-default "11"
+default "11" if SOC_AM33XX
+default "8" if SA1111
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -1366,9 +1366,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
config ALIGNMENT_TRAP
def_bool CPU_CP15_MMU
select HAVE_PROC_CPU if PROC_FS


@@ -31,7 +31,7 @@ CONFIG_SOC_VF610=y
CONFIG_SMP=y
CONFIG_ARM_PSCI=y
CONFIG_HIGHMEM=y
-CONFIG_ARCH_FORCE_MAX_ORDER=14
+CONFIG_ARCH_FORCE_MAX_ORDER=13
CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y


@@ -26,7 +26,7 @@ CONFIG_THUMB2_KERNEL=y
# CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11 is not set
# CONFIG_ARM_PATCH_IDIV is not set
CONFIG_HIGHMEM=y
-CONFIG_ARCH_FORCE_MAX_ORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=11
CONFIG_SECCOMP=y
CONFIG_KEXEC=y
CONFIG_EFI=y


@@ -12,7 +12,7 @@ CONFIG_ARCH_OXNAS=y
CONFIG_MACH_OX820=y
CONFIG_SMP=y
CONFIG_NR_CPUS=16
-CONFIG_ARCH_FORCE_MAX_ORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=11
CONFIG_SECCOMP=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y


@@ -20,7 +20,7 @@ CONFIG_PXA_SHARPSL=y
CONFIG_MACH_AKITA=y
CONFIG_MACH_BORZOI=y
CONFIG_AEABI=y
-CONFIG_ARCH_FORCE_MAX_ORDER=9
+CONFIG_ARCH_FORCE_MAX_ORDER=8
CONFIG_CMDLINE="root=/dev/ram0 ro"
CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y


@@ -19,7 +19,7 @@ CONFIG_ATMEL_CLOCKSOURCE_TCB=y
# CONFIG_CACHE_L2X0 is not set
# CONFIG_ARM_PATCH_IDIV is not set
# CONFIG_CPU_SW_DOMAIN_PAN is not set
-CONFIG_ARCH_FORCE_MAX_ORDER=15
+CONFIG_ARCH_FORCE_MAX_ORDER=14
CONFIG_UACCESS_WITH_MEMCPY=y
# CONFIG_ATAGS is not set
CONFIG_CMDLINE="console=ttyS0,115200 earlyprintk ignore_loglevel"


@@ -17,7 +17,7 @@ CONFIG_ARCH_SUNPLUS=y
# CONFIG_VDSO is not set
CONFIG_SMP=y
CONFIG_THUMB2_KERNEL=y
-CONFIG_ARCH_FORCE_MAX_ORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=11
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_MODULES=y


@@ -1476,22 +1476,22 @@ config XEN
# include/linux/mmzone.h requires the following to be true:
#
-# MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS
+# MAX_ORDER + PAGE_SHIFT <= SECTION_SIZE_BITS
#
-# so the maximum value of MAX_ORDER is SECTION_SIZE_BITS + 1 - PAGE_SHIFT:
+# so the maximum value of MAX_ORDER is SECTION_SIZE_BITS - PAGE_SHIFT:
#
# | SECTION_SIZE_BITS | PAGE_SHIFT | max MAX_ORDER | default MAX_ORDER |
# ----+-------------------+--------------+-----------------+--------------------+
-# 4K | 27 | 12 | 16 | 11 |
-# 16K | 27 | 14 | 14 | 12 |
-# 64K | 29 | 16 | 14 | 14 |
+# 4K | 27 | 12 | 15 | 10 |
+# 16K | 27 | 14 | 13 | 11 |
+# 64K | 29 | 16 | 13 | 13 |
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order" if ARM64_4K_PAGES || ARM64_16K_PAGES
-default "14" if ARM64_64K_PAGES
-range 12 14 if ARM64_16K_PAGES
-default "12" if ARM64_16K_PAGES
-range 11 16 if ARM64_4K_PAGES
-default "11"
+default "13" if ARM64_64K_PAGES
+range 11 13 if ARM64_16K_PAGES
+default "11" if ARM64_16K_PAGES
+range 10 15 if ARM64_4K_PAGES
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -1500,14 +1500,11 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
We make sure that we can allocate up to a HugePage size for each configuration.
Hence we have :
-MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2
+MAX_ORDER = PMD_SHIFT - PAGE_SHIFT => PAGE_SHIFT - 3
-However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
+However for 4K, we choose a higher default value, 10 as opposed to 9, giving us
4M allocations matching the default size used by generic code.
config UNMAP_KERNEL_AT_EL0


@@ -10,7 +10,7 @@
/*
* Section size must be at least 512MB for 64K base
* page size config. Otherwise it will be less than
-* (MAX_ORDER - 1) and the build process will fail.
+* MAX_ORDER and the build process will fail.
*/
#ifdef CONFIG_ARM64_64K_PAGES
#define SECTION_SIZE_BITS 29


@@ -16,7 +16,7 @@ struct hyp_pool {
* API at EL2.
*/
hyp_spinlock_t lock;
-struct list_head free_area[MAX_ORDER];
+struct list_head free_area[MAX_ORDER + 1];
phys_addr_t range_start;
phys_addr_t range_end;
unsigned short max_order;


@@ -110,7 +110,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
* after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
*/
p->order = HYP_NO_ORDER;
-for (; (order + 1) < pool->max_order; order++) {
+for (; (order + 1) <= pool->max_order; order++) {
buddy = __find_buddy_avail(pool, p, order);
if (!buddy)
break;
@@ -203,9 +203,9 @@ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
hyp_spin_lock(&pool->lock);
/* Look for a high-enough-order page */
-while (i < pool->max_order && list_empty(&pool->free_area[i]))
+while (i <= pool->max_order && list_empty(&pool->free_area[i]))
i++;
-if (i >= pool->max_order) {
+if (i > pool->max_order) {
hyp_spin_unlock(&pool->lock);
return NULL;
}
@@ -228,8 +228,8 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
int i;
hyp_spin_lock_init(&pool->lock);
-pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
-for (i = 0; i < pool->max_order; i++)
+pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
+for (i = 0; i <= pool->max_order; i++)
INIT_LIST_HEAD(&pool->free_area[i]);
pool->range_start = phys;
pool->range_end = phys + (nr_pages << PAGE_SHIFT);


@@ -334,7 +334,7 @@ config HIGHMEM
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-default "11"
+default "10"
config DRAM_BASE
hex "DRAM start addr (the same with memory-section in dts)"


@@ -202,10 +202,10 @@ config IA64_CYCLONE
If you're unsure, answer N.
config ARCH_FORCE_MAX_ORDER
-int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
-range 11 17 if !HUGETLB_PAGE
-default "17" if HUGETLB_PAGE
-default "11"
+int "MAX_ORDER (10 - 16)" if !HUGETLB_PAGE
+range 10 16 if !HUGETLB_PAGE
+default "16" if HUGETLB_PAGE
+default "10"
config SMP
bool "Symmetric multi-processing support"


@@ -12,9 +12,9 @@
#define SECTION_SIZE_BITS (30)
#define MAX_PHYSMEM_BITS (50)
#ifdef CONFIG_ARCH_FORCE_MAX_ORDER
-#if ((CONFIG_ARCH_FORCE_MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
+#if (CONFIG_ARCH_FORCE_MAX_ORDER + PAGE_SHIFT > SECTION_SIZE_BITS)
#undef SECTION_SIZE_BITS
-#define SECTION_SIZE_BITS (CONFIG_ARCH_FORCE_MAX_ORDER - 1 + PAGE_SHIFT)
+#define SECTION_SIZE_BITS (CONFIG_ARCH_FORCE_MAX_ORDER + PAGE_SHIFT)
#endif
#endif


@@ -170,7 +170,7 @@ static int __init hugetlb_setup_sz(char *str)
size = memparse(str, &str);
if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
size <= PAGE_SIZE ||
-size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
+size > (1UL << PAGE_SHIFT << MAX_ORDER)) {
printk(KERN_WARNING "Invalid huge page size specified\n");
return 1;
}


@@ -420,12 +420,12 @@ config NODES_SHIFT
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-range 14 64 if PAGE_SIZE_64KB
-default "14" if PAGE_SIZE_64KB
-range 12 64 if PAGE_SIZE_16KB
-default "12" if PAGE_SIZE_16KB
-range 11 64
-default "11"
+range 13 63 if PAGE_SIZE_64KB
+default "13" if PAGE_SIZE_64KB
+range 11 63 if PAGE_SIZE_16KB
+default "11" if PAGE_SIZE_16KB
+range 10 63
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -434,9 +434,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
The page size is not necessarily 4KB. Keep this in mind
when choosing a value for this option.


@@ -400,7 +400,7 @@ config SINGLE_MEMORY_CHUNK
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order" if ADVANCED
depends on !SINGLE_MEMORY_CHUNK
-default "11"
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -413,9 +413,6 @@ config ARCH_FORCE_MAX_ORDER
value also defines the minimal size of the hole that allows
freeing unused memory map.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
config 060_WRITETHROUGH
bool "Use write-through caching for 68060 supervisor accesses"
depends on ADVANCED && M68060


@@ -2137,14 +2137,14 @@ endchoice
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
-default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
-range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
-default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
-range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
-default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
-range 0 64
-default "11"
+range 13 63 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+range 12 63 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+range 11 63 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+default "11" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+range 0 63
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -2153,9 +2153,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
The page size is not necessarily 4KB. Keep this in mind
when choosing a value for this option.


@@ -46,8 +46,8 @@ source "kernel/Kconfig.hz"
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-range 9 20
-default "11"
+range 8 19
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -56,9 +56,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
endmenu
source "arch/nios2/platform/Kconfig.platform"


@@ -897,18 +897,18 @@ config DATA_SHIFT
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-range 8 9 if PPC64 && PPC_64K_PAGES
-default "9" if PPC64 && PPC_64K_PAGES
-range 13 13 if PPC64 && !PPC_64K_PAGES
-default "13" if PPC64 && !PPC_64K_PAGES
-range 9 64 if PPC32 && PPC_16K_PAGES
-default "9" if PPC32 && PPC_16K_PAGES
-range 7 64 if PPC32 && PPC_64K_PAGES
-default "7" if PPC32 && PPC_64K_PAGES
-range 5 64 if PPC32 && PPC_256K_PAGES
-default "5" if PPC32 && PPC_256K_PAGES
-range 11 64
-default "11"
+range 7 8 if PPC64 && PPC_64K_PAGES
+default "8" if PPC64 && PPC_64K_PAGES
+range 12 12 if PPC64 && !PPC_64K_PAGES
+default "12" if PPC64 && !PPC_64K_PAGES
+range 8 63 if PPC32 && PPC_16K_PAGES
+default "8" if PPC32 && PPC_16K_PAGES
+range 6 63 if PPC32 && PPC_64K_PAGES
+default "6" if PPC32 && PPC_64K_PAGES
+range 4 63 if PPC32 && PPC_256K_PAGES
+default "4" if PPC32 && PPC_256K_PAGES
+range 10 63
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -917,9 +917,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
The page size is not necessarily 4KB. For example, on 64-bit
systems, 64KB pages can be enabled via CONFIG_PPC_64K_PAGES. Keep
this in mind when choosing a value for this option.


@@ -30,7 +30,7 @@ CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
-CONFIG_ARCH_FORCE_MAX_ORDER=17
+CONFIG_ARCH_FORCE_MAX_ORDER=16
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y


@@ -41,7 +41,7 @@ CONFIG_FIXED_PHY=y
CONFIG_FONT_8x16=y
CONFIG_FONT_8x8=y
CONFIG_FONTS=y
-CONFIG_ARCH_FORCE_MAX_ORDER=13
+CONFIG_ARCH_FORCE_MAX_ORDER=12
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAME_WARN=1024
CONFIG_FTL=y


@@ -97,7 +97,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
}
mmap_read_lock(mm);
-chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+chunk = (1UL << (PAGE_SHIFT + MAX_ORDER)) /
sizeof(struct vm_area_struct *);
chunk = min(chunk, entries);
for (entry = 0; entry < entries; entry += chunk) {


@@ -615,7 +615,7 @@ void __init gigantic_hugetlb_cma_reserve(void)
order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
if (order) {
-VM_WARN_ON(order < MAX_ORDER);
+VM_WARN_ON(order <= MAX_ORDER);
hugetlb_cma_reserve(order);
}
}


@@ -1740,7 +1740,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
* DMA window can be larger than available memory, which will
* cause errors later.
*/
-const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);
+const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER);
/*
* We create the default window as big as we can. The constraint is


@@ -8,7 +8,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7724=y
-CONFIG_ARCH_FORCE_MAX_ORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=11
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_SH_ECOVEC=y


@@ -20,13 +20,13 @@ config PAGE_OFFSET
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-range 9 64 if PAGE_SIZE_16KB
-default "9" if PAGE_SIZE_16KB
-range 7 64 if PAGE_SIZE_64KB
-default "7" if PAGE_SIZE_64KB
-range 11 64
-default "14" if !MMU
-default "11"
+range 8 63 if PAGE_SIZE_16KB
+default "8" if PAGE_SIZE_16KB
+range 6 63 if PAGE_SIZE_64KB
+default "6" if PAGE_SIZE_64KB
+range 10 63
+default "13" if !MMU
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -35,9 +35,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
The page size is not necessarily 4KB. Keep this in mind when
choosing a value for this option.


@@ -271,7 +271,7 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-default "13"
+default "12"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -280,9 +280,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 13 means that the largest free memory block is 2^12 pages.
if SPARC64 || COMPILE_TEST
source "kernel/power/Kconfig"
endif


@@ -193,7 +193,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
size = IO_PAGE_ALIGN(size);
order = get_order(size);
-if (unlikely(order >= MAX_ORDER))
+if (unlikely(order > MAX_ORDER))
return NULL;
npages = size >> IO_PAGE_SHIFT;


@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
/* Now allocate error trap reporting scoreboard. */
sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
-for (order = 0; order < MAX_ORDER; order++) {
+for (order = 0; order <= MAX_ORDER; order++) {
if ((PAGE_SIZE << order) >= sz)
break;
}


@@ -402,8 +402,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
unsigned long new_rss_limit;
gfp_t gfp_flags;
-if (max_tsb_size > (PAGE_SIZE << (MAX_ORDER - 1)))
-max_tsb_size = (PAGE_SIZE << (MAX_ORDER - 1));
+if (max_tsb_size > PAGE_SIZE << MAX_ORDER)
+max_tsb_size = PAGE_SIZE << MAX_ORDER;
new_cache_index = 0;
for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {


@@ -368,10 +368,10 @@ int __init linux_main(int argc, char **argv)
max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC;
/*
-* Zones have to begin on a 1 << MAX_ORDER-1 page boundary,
+* Zones have to begin on a 1 << MAX_ORDER page boundary,
* so this makes sure that's true for highmem
*/
-max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER - 1)) - 1);
+max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
if (physmem_size + iomem_size > max_physmem) {
highmem = physmem_size + iomem_size - max_physmem;
physmem_size -= highmem;


@@ -773,7 +773,7 @@ config HIGHMEM
config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
-default "11"
+default "10"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
@@ -782,9 +782,6 @@ config ARCH_FORCE_MAX_ORDER
blocks of physically contiguous memory, then you may need to
increase this value.
-This config option is actually maximum order plus one. For example,
-a value of 11 means that the largest free memory block is 2^10 pages.
endmenu
menu "Power management options"


@@ -226,8 +226,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
-if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
-count = PAGE_SIZE << (MAX_ORDER - 1);
+if (count > (PAGE_SIZE << MAX_ORDER))
+count = PAGE_SIZE << MAX_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
@@ -373,8 +373,8 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
-if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
-count = PAGE_SIZE << (MAX_ORDER - 1);
+if (count > (PAGE_SIZE << MAX_ORDER))
+count = PAGE_SIZE << MAX_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)


@@ -3079,7 +3079,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
}
}
-#define MAX_LEN (1UL << (MAX_ORDER - 1) << PAGE_SHIFT)
+#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)


@@ -886,7 +886,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
/*
* The length of the ID shouldn't be assumed by software since
* it may change in the future. The allocation size is limited
-* to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator.
+* to 1 << (PAGE_SHIFT + MAX_ORDER) by the page allocator.
* If the allocation fails, simply return ENOMEM rather than
* warning in the kernel log.
*/


@@ -70,11 +70,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
HISI_ACC_SGL_ALIGN_SIZE);
/*
-* the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1),
+* the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_ORDER,
* block size may exceed 2^31 on ia64, so the max of block size is 2^31
*/
-block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
-PAGE_SHIFT + MAX_ORDER - 1 : 31);
+block_size = 1 << (PAGE_SHIFT + MAX_ORDER < 32 ?
+PAGE_SHIFT + MAX_ORDER : 31);
sgl_num_per_block = block_size / sgl_size;
block_num = count / sgl_num_per_block;
remain_sgl = count % sgl_num_per_block;


@@ -36,7 +36,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
struct sg_table *st;
struct scatterlist *sg;
unsigned int npages; /* restricted by sg_alloc_table */
-int max_order = MAX_ORDER - 1;
+int max_order = MAX_ORDER;
unsigned int max_segment;
gfp_t gfp;


@@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
do {
struct page *page;
-GEM_BUG_ON(order >= MAX_ORDER);
+GEM_BUG_ON(order > MAX_ORDER);
page = alloc_pages(GFP | __GFP_ZERO, order);
if (!page)
goto err;


@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_uncached[MAX_ORDER];
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
+static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -405,7 +405,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
-for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
struct ttm_pool_type *pt;
@@ -542,7 +542,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
if (use_dma_alloc) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-for (j = 0; j < MAX_ORDER; ++j)
+for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
@@ -562,7 +562,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
if (pool->use_dma_alloc) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-for (j = 0; j < MAX_ORDER; ++j)
+for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
@@ -616,7 +616,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
-for (i = 0; i < MAX_ORDER; ++i)
+for (i = 0; i <= MAX_ORDER; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@@ -627,7 +627,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
-for (i = 0; i < MAX_ORDER; ++i)
+for (i = 0; i <= MAX_ORDER; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@@ -736,7 +736,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
-for (i = 0; i < MAX_ORDER; ++i) {
+for (i = 0; i <= MAX_ORDER; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -769,7 +769,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
-for (i = 0; i < MAX_ORDER; ++i) {
+for (i = 0; i <= MAX_ORDER; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);


@@ -182,7 +182,7 @@
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
-#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER)
#endif
/*


@@ -736,7 +736,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
struct page **pages;
unsigned int i = 0, nid = dev_to_node(dev);
-order_mask &= GENMASK(MAX_ORDER - 1, 0);
+order_mask &= GENMASK(MAX_ORDER, 0);
if (!order_mask)
return NULL;


@@ -2440,8 +2440,8 @@ static bool its_parse_indirect_baser(struct its_node *its,
* feature is not supported by hardware.
*/
new_order = max_t(u32, get_order(esz << ids), new_order);
-if (new_order >= MAX_ORDER) {
-new_order = MAX_ORDER - 1;
+if (new_order > MAX_ORDER) {
+new_order = MAX_ORDER;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
&its->phys_base, its_base_type_string[type],


@@ -408,7 +408,7 @@ static void __cache_size_refresh(void)
* If the allocation may fail we use __get_free_pages. Memory fragmentation
* won't have a fatal effect here, but it just causes flushes of some other
* buffers and more I/O will be performed. Don't use __get_free_pages if it
-* always fails (i.e. order >= MAX_ORDER).
+* always fails (i.e. order > MAX_ORDER).
*
* If the allocation shouldn't fail we use __vmalloc. This is only for the
* initial reserve allocation, so there's no risk of wasting all vmalloc


@@ -443,7 +443,7 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
if (vsize == 0)
return -EINVAL;
-if (get_order(vsize) >= MAX_ORDER)
+if (get_order(vsize) > MAX_ORDER)
return -ENOMEM;
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);


@@ -210,7 +210,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
dma_addr_t *dma_handle)
{
-if (get_order(size) >= MAX_ORDER)
+if (get_order(size) > MAX_ORDER)
return NULL;
return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
@@ -308,7 +308,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
-if (get_order(sgl->sgl_size) >= MAX_ORDER) {
+if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
return ret;


@@ -1041,7 +1041,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
return;
order = get_order(alloc_size);
-if (order >= MAX_ORDER) {
+if (order > MAX_ORDER) {
if (net_ratelimit())
dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
return;


@@ -75,7 +75,7 @@
* pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
* plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
*/
-#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_ORDER) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE (38 << 20)


@@ -946,7 +946,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
if (request_size == 0)
return -1;
-if (order < MAX_ORDER) {
+if (order <= MAX_ORDER) {
/* Call alloc_pages if the size is less than 2^MAX_ORDER */
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
@@ -977,7 +977,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
{
unsigned int order = get_order(size);
-if (order < MAX_ORDER)
+if (order <= MAX_ORDER)
__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
else
dma_free_coherent(&hdev->device,


@@ -197,7 +197,7 @@ static int vmlfb_alloc_vram(struct vml_info *vinfo,
va = &vinfo->vram[i];
order = 0;
-while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
+while (requested > (PAGE_SIZE << order) && order <= MAX_ORDER)
order++;
err = vmlfb_alloc_vram_area(va, order, 0);


@@ -33,7 +33,7 @@
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
-#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1)
+#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_ORDER
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))


@@ -1120,13 +1120,13 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
*/
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
-unsigned long order = MAX_ORDER - 1;
+unsigned long order = MAX_ORDER;
unsigned long i;
/*
* We might get called for ranges that don't cover properly aligned
-* MAX_ORDER - 1 pages; however, we can only online properly aligned
-* pages with an order of MAX_ORDER - 1 at maximum.
+* MAX_ORDER pages; however, we can only online properly aligned
+* pages with an order of MAX_ORDER at maximum.
*/
while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
order--;
@@ -1237,9 +1237,9 @@ static void virtio_mem_online_page(struct virtio_mem *vm,
bool do_online;
/*
-* We can get called with any order up to MAX_ORDER - 1. If our
-* subblock size is smaller than that and we have a mixture of plugged
-* and unplugged subblocks within such a page, we have to process in
+* We can get called with any order up to MAX_ORDER. If our subblock
+* size is smaller than that and we have a mixture of plugged and
+* unplugged subblocks within such a page, we have to process in
* smaller granularity. In that case we'll adjust the order exactly once
* within the loop.
*/


@@ -70,7 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
/* make various checks */
order = get_order(newsize);
-if (unlikely(order >= MAX_ORDER))
+if (unlikely(order > MAX_ORDER))
return -EFBIG;
ret = inode_newsize_ok(inode, newsize);


@@ -72,7 +72,7 @@ struct ttm_pool {
bool use_dma32;
struct {
-struct ttm_pool_type orders[MAX_ORDER];
+struct ttm_pool_type orders[MAX_ORDER + 1];
} caching[TTM_NUM_CACHING_TYPES];
};


@@ -818,7 +818,7 @@ static inline unsigned huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
{
-return huge_page_order(h) >= MAX_ORDER;
+return huge_page_order(h) > MAX_ORDER;
}
static inline unsigned int pages_per_huge_page(const struct hstate *h)


@@ -26,11 +26,11 @@
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
-#define MAX_ORDER 11
+#define MAX_ORDER 10
#else
#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
+#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
@@ -93,7 +93,7 @@ static inline bool migratetype_is_mergeable(int mt)
}
#define for_each_migratetype_order(order, type) \
-for (order = 0; order < MAX_ORDER; order++) \
+for (order = 0; order <= MAX_ORDER; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
@@ -922,7 +922,7 @@ struct zone {
CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
-struct free_area free_area[MAX_ORDER];
+struct free_area free_area[MAX_ORDER + 1];
/* zone flags, see below */
unsigned long flags;
@@ -1745,7 +1745,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
+#if (MAX_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
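
As a quick sanity check on the new arithmetic, here is a standalone sketch (not kernel code; the constants simply mirror the 4K-page defaults discussed above):

#include <assert.h>

#define PAGE_SHIFT         12		/* 4K pages, as in the arm64 table above */
#define SECTION_SIZE_BITS  27
#define MAX_ORDER          10		/* new inclusive default */
#define MAX_ORDER_NR_PAGES (1UL << MAX_ORDER)

int main(void)
{
	/* Largest buddy block: 2^MAX_ORDER pages = 4 MiB with 4K pages. */
	assert(MAX_ORDER_NR_PAGES == 1024);
	assert((MAX_ORDER_NR_PAGES << PAGE_SHIFT) == (4UL << 20));

	/* The constraint mmzone.h now enforces without the "- 1":
	 * a MAX_ORDER block must still fit within one memory section. */
	assert(MAX_ORDER + PAGE_SHIFT <= SECTION_SIZE_BITS);
	return 0;
}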


@@ -41,14 +41,14 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER - 1)
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#else /* CONFIG_HUGETLB_PAGE */
/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order (MAX_ORDER-1)
+#define pageblock_order MAX_ORDER
#endif /* CONFIG_HUGETLB_PAGE */


@@ -284,7 +284,7 @@ static inline unsigned int arch_slab_minalign(void)
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
@@ -292,7 +292,7 @@ static inline unsigned int arch_slab_minalign(void)
#ifdef CONFIG_SLUB
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -305,7 +305,7 @@ static inline unsigned int arch_slab_minalign(void)
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif


@@ -474,7 +474,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(list_head, prev);
VMCOREINFO_OFFSET(vmap_area, va_start);
VMCOREINFO_OFFSET(vmap_area, list);
-VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER + 1);
log_buf_vmcoreinfo_setup();
VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);


@@ -84,8 +84,8 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
void *addr;
int ret = -ENOMEM;
-/* Cannot allocate larger than MAX_ORDER-1 */
-order = min(get_order(pool_size), MAX_ORDER-1);
+/* Cannot allocate larger than MAX_ORDER */
+order = min(get_order(pool_size), MAX_ORDER);
do {
pool_size = 1 << (PAGE_SHIFT + order);
@@ -190,7 +190,7 @@ static int __init dma_atomic_pool_init(void)
/*
* If coherent_pool was not used on the command line, default the pool
-* sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
+* sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER.
*/
if (!atomic_pool_size) {
unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);


@@ -609,8 +609,8 @@ static struct page *rb_alloc_aux_page(int node, int order)
{
struct page *page;
-if (order >= MAX_ORDER)
-order = MAX_ORDER - 1;
+if (order > MAX_ORDER)
+order = MAX_ORDER;
do {
page = alloc_pages_node(node, PERF_AUX_GFP, order);
@@ -814,7 +814,7 @@ struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
size = sizeof(struct perf_buffer);
size += nr_pages * sizeof(void *);
-if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
+if (order_base_2(size) > PAGE_SHIFT+MAX_ORDER)
goto fail;
node = (cpu == -1) ? cpu : cpu_to_node(cpu);


@@ -346,9 +346,9 @@ config SHUFFLE_PAGE_ALLOCATOR
the presence of a memory-side-cache. There are also incidental
security benefits as it reduces the predictability of page
allocations to compliment SLAB_FREELIST_RANDOM, but the
-default granularity of shuffling on the "MAX_ORDER - 1" i.e,
-10th order of pages is selected based on cache utilization
-benefits on x86.
+default granularity of shuffling on the MAX_ORDER i.e, 10th
+order of pages is selected based on cache utilization benefits
+on x86.
While the randomization improves cache utilization it may
negatively impact workloads on platforms without a cache. For
@@ -666,8 +666,8 @@ config HUGETLB_PAGE_SIZE_VARIABLE
HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
on a platform.
-Note that the pageblock_order cannot exceed MAX_ORDER - 1 and will be
-clamped down to MAX_ORDER - 1.
+Note that the pageblock_order cannot exceed MAX_ORDER and will be
+clamped down to MAX_ORDER.
config CONTIG_ALLOC
def_bool (MEMORY_ISOLATION && COMPACTION) || CMA


@@ -583,7 +583,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (PageCompound(page)) {
const unsigned int order = compound_order(page);
-if (likely(order < MAX_ORDER)) {
+if (likely(order <= MAX_ORDER)) {
blockpfn += (1UL << order) - 1;
cursor += (1UL << order) - 1;
}
@@ -938,7 +938,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* a valid page order. Consider only values in the
* valid order range to prevent low_pfn overflow.
*/
-if (freepage_order > 0 && freepage_order < MAX_ORDER)
+if (freepage_order > 0 && freepage_order <= MAX_ORDER)
low_pfn += (1UL << freepage_order) - 1;
continue;
}
@@ -954,7 +954,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (PageCompound(page) && !cc->alloc_contig) {
const unsigned int order = compound_order(page);
-if (likely(order < MAX_ORDER))
+if (likely(order <= MAX_ORDER))
low_pfn += (1UL << order) - 1;
goto isolate_fail;
}
@@ -2124,7 +2124,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
/* Direct compactor: Is a suitable page free? */
ret = COMPACT_NO_SUITABLE_PAGE;
-for (order = cc->order; order < MAX_ORDER; order++) {
+for (order = cc->order; order <= MAX_ORDER; order++) {
struct free_area *area = &cc->zone->free_area[order];
bool can_steal;


@@ -1086,7 +1086,7 @@ debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
struct page *page = NULL;
#ifdef CONFIG_CONTIG_ALLOC
-if (order >= MAX_ORDER) {
+if (order > MAX_ORDER) {
page = alloc_contig_pages((1 << order), GFP_KERNEL,
first_online_node, NULL);
if (page) {
@@ -1096,7 +1096,7 @@ debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
}
#endif
-if (order < MAX_ORDER)
+if (order <= MAX_ORDER)
page = alloc_pages(GFP_KERNEL, order);
return page;


@@ -467,7 +467,7 @@ static int __init hugepage_init(void)
/*
* hugepages can't be allocated by the buddy allocator
*/
-MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
+MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
/*
* we use page->mapping and page->index in second tail page
* as list_head: assuming THP order >= 2


@ -2090,7 +2090,7 @@ pgoff_t hugetlb_basepage_index(struct page *page)
pgoff_t index = page_index(page_head); pgoff_t index = page_index(page_head);
unsigned long compound_idx; unsigned long compound_idx;
if (compound_order(page_head) >= MAX_ORDER) if (compound_order(page_head) > MAX_ORDER)
compound_idx = page_to_pfn(page) - page_to_pfn(page_head); compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
else else
compound_idx = page - page_head; compound_idx = page - page_head;
@ -4497,7 +4497,7 @@ static int __init default_hugepagesz_setup(char *s)
* The number of default huge pages (for this size) could have been * The number of default huge pages (for this size) could have been
* specified as the first hugetlb parameter: hugepages=X. If so, * specified as the first hugetlb parameter: hugepages=X. If so,
* then default_hstate_max_huge_pages is set. If the default huge * then default_hstate_max_huge_pages is set. If the default huge
* page size is gigantic (>= MAX_ORDER), then the pages must be * page size is gigantic (> MAX_ORDER), then the pages must be
* allocated here from bootmem allocator. * allocated here from bootmem allocator.
*/ */
if (default_hstate_max_huge_pages) { if (default_hstate_max_huge_pages) {
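
For scale, a hedged arithmetic sketch (not from this commit) of why a 1 GiB hugetlb page is "gigantic" under the assumed x86-64 defaults of 4 KiB base pages and MAX_ORDER = 10:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */
#define MAX_ORDER  10

/* Smallest order whose block is at least 'size' bytes. */
static unsigned int size_to_order(unsigned long size)
{
        unsigned int order = 0;

        while ((1UL << (order + PAGE_SHIFT)) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned int order = size_to_order(1UL << 30);  /* 1 GiB */

        /* order is 18 here, far above MAX_ORDER, so the buddy allocator
         * cannot provide it and bootmem/CMA must be used instead. */
        printf("1 GiB needs order %u, buddy limit is %d\n", order, MAX_ORDER);
        return 0;
}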

View File

@ -96,7 +96,7 @@ void __init kmsan_init_shadow(void)
struct metadata_page_pair { struct metadata_page_pair {
struct page *shadow, *origin; struct page *shadow, *origin;
}; };
static struct metadata_page_pair held_back[MAX_ORDER] __initdata; static struct metadata_page_pair held_back[MAX_ORDER + 1] __initdata;
/* /*
* Eager metadata allocation. When the memblock allocator is freeing pages to * Eager metadata allocation. When the memblock allocator is freeing pages to
@ -211,8 +211,8 @@ static void kmsan_memblock_discard(void)
* order=N-1, * order=N-1,
* - repeat. * - repeat.
*/ */
collect.order = MAX_ORDER - 1; collect.order = MAX_ORDER;
for (int i = MAX_ORDER - 1; i >= 0; i--) { for (int i = MAX_ORDER; i >= 0; i--) {
if (held_back[i].shadow) if (held_back[i].shadow)
smallstack_push(&collect, held_back[i].shadow); smallstack_push(&collect, held_back[i].shadow);
if (held_back[i].origin) if (held_back[i].origin)

View File

@ -2043,7 +2043,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
int order; int order;
while (start < end) { while (start < end) {
order = min(MAX_ORDER - 1UL, __ffs(start)); order = min_t(int, MAX_ORDER, __ffs(start));
while (start + (1UL << order) > end) while (start + (1UL << order) > end)
order--; order--;
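
The order picked here is capped by both the alignment of the start pfn and the buddy limit; min_t() is used rather than min() since MAX_ORDER is a plain int constant while __ffs() returns unsigned long. A userspace model of the same selection (illustrative only; __builtin_ctzl() stands in for __ffs() and MAX_ORDER = 10 is assumed):

#include <stdio.h>

#define MAX_ORDER 10

/* Largest order usable at 'start' without crossing 'end'. */
static int pick_order(unsigned long start, unsigned long end)
{
        int align_order = start ? (int)__builtin_ctzl(start) : MAX_ORDER;
        int order = align_order < MAX_ORDER ? align_order : MAX_ORDER;

        while (start + (1UL << order) > end)
                order--;
        return order;
}

int main(void)
{
        /* pfn 0x1400 is order-10 aligned, but only 48 pages remain -> order 5 */
        printf("order = %d\n", pick_order(0x1400, 0x1400 + 48));
        return 0;
}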

View File

@ -596,7 +596,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
unsigned long pfn; unsigned long pfn;
/* /*
* Online the pages in MAX_ORDER - 1 aligned chunks. The callback might * Online the pages in MAX_ORDER aligned chunks. The callback might
* decide to not expose all pages to the buddy (e.g., expose them * decide to not expose all pages to the buddy (e.g., expose them
* later). We account all pages as being online and belonging to this * later). We account all pages as being online and belonging to this
* zone ("present"). * zone ("present").
@ -605,7 +605,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
* this and the first chunk to online will be pageblock_nr_pages. * this and the first chunk to online will be pageblock_nr_pages.
*/ */
for (pfn = start_pfn; pfn < end_pfn;) { for (pfn = start_pfn; pfn < end_pfn;) {
int order = min(MAX_ORDER - 1UL, __ffs(pfn)); int order = min_t(int, MAX_ORDER, __ffs(pfn));
(*online_page_callback)(pfn_to_page(pfn), order); (*online_page_callback)(pfn_to_page(pfn), order);
pfn += (1UL << order); pfn += (1UL << order);

View File

@ -1063,7 +1063,7 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
unsigned long higher_page_pfn; unsigned long higher_page_pfn;
struct page *higher_page; struct page *higher_page;
if (order >= MAX_ORDER - 2) if (order >= MAX_ORDER - 1)
return false; return false;
higher_page_pfn = buddy_pfn & pfn; higher_page_pfn = buddy_pfn & pfn;
@ -1118,7 +1118,7 @@ static inline void __free_one_page(struct page *page,
VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page); VM_BUG_ON_PAGE(bad_range(zone, page), page);
while (order < MAX_ORDER - 1) { while (order < MAX_ORDER) {
if (compaction_capture(capc, page, order, migratetype)) { if (compaction_capture(capc, page, order, migratetype)) {
__mod_zone_freepage_state(zone, -(1 << order), __mod_zone_freepage_state(zone, -(1 << order),
migratetype); migratetype);
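
Freed blocks are merged with their buddy one order at a time, and with the inclusive definition the loop may now produce an order-MAX_ORDER block. The buddy of a block differs from it only in the bit selected by the order; a userspace sketch of that pfn arithmetic (illustrative, mirroring the kernel's __find_buddy_pfn()):

#include <stdio.h>

#define MAX_ORDER 10   /* assumed */

/* The buddy of an order-aligned block differs only in bit 'order' of its pfn. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);
}

int main(void)
{
        unsigned long pfn = 0x2400;   /* order-10 aligned example pfn */

        for (unsigned int order = 0; order < MAX_ORDER; order++)
                printf("order %2u: buddy of pfn %#lx is %#lx\n",
                       order, pfn, find_buddy_pfn(pfn, order));
        return 0;
}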
@ -2499,7 +2499,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
struct page *page; struct page *page;
/* Find a page of the appropriate size in the preferred list */ /* Find a page of the appropriate size in the preferred list */
for (current_order = order; current_order < MAX_ORDER; ++current_order) { for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
area = &(zone->free_area[current_order]); area = &(zone->free_area[current_order]);
page = get_page_from_free_area(area, migratetype); page = get_page_from_free_area(area, migratetype);
if (!page) if (!page)
@ -2871,7 +2871,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
continue; continue;
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order <= MAX_ORDER; order++) {
struct free_area *area = &(zone->free_area[order]); struct free_area *area = &(zone->free_area[order]);
page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
@ -2955,7 +2955,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
* approximates finding the pageblock with the most free pages, which * approximates finding the pageblock with the most free pages, which
* would be too costly to do exactly. * would be too costly to do exactly.
*/ */
for (current_order = MAX_ORDER - 1; current_order >= min_order; for (current_order = MAX_ORDER; current_order >= min_order;
--current_order) { --current_order) {
area = &(zone->free_area[current_order]); area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order, fallback_mt = find_suitable_fallback(area, current_order,
@ -2981,7 +2981,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
return false; return false;
find_smallest: find_smallest:
for (current_order = order; current_order < MAX_ORDER; for (current_order = order; current_order <= MAX_ORDER;
current_order++) { current_order++) {
area = &(zone->free_area[current_order]); area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order, fallback_mt = find_suitable_fallback(area, current_order,
@ -2994,7 +2994,7 @@ find_smallest:
* This should not happen - we already found a suitable fallback * This should not happen - we already found a suitable fallback
* when looking for the largest page. * when looking for the largest page.
*/ */
VM_BUG_ON(current_order == MAX_ORDER); VM_BUG_ON(current_order > MAX_ORDER);
do_steal: do_steal:
page = get_page_from_free_area(area, fallback_mt); page = get_page_from_free_area(area, fallback_mt);
@ -3955,7 +3955,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return true; return true;
/* For a high-order request, check at least one suitable page is free */ /* For a high-order request, check at least one suitable page is free */
for (o = order; o < MAX_ORDER; o++) { for (o = order; o <= MAX_ORDER; o++) {
struct free_area *area = &z->free_area[o]; struct free_area *area = &z->free_area[o];
int mt; int mt;
@ -5475,7 +5475,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
* There are several places where we assume that the order value is sane * There are several places where we assume that the order value is sane
* so bail out early if the request is out of bound. * so bail out early if the request is out of bound.
*/ */
if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp)) if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
return NULL; return NULL;
gfp &= gfp_allowed_mask; gfp &= gfp_allowed_mask;
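
After this check, order MAX_ORDER itself is an acceptable request rather than the first rejected one. An illustrative kernel-context sketch of a caller (not part of this commit):

/* Allocate and free the largest order the buddy allocator now accepts;
 * MAX_ORDER + 1 would hit the WARN_ON_ONCE_GFP() above and return NULL. */
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *grab_largest_buddy_block(void)
{
        return alloc_pages(GFP_KERNEL | __GFP_NOWARN, MAX_ORDER);
}

static void drop_largest_buddy_block(struct page *page)
{
        if (page)
                __free_pages(page, MAX_ORDER);
}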
@ -6205,8 +6205,8 @@ void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_i
for_each_populated_zone(zone) { for_each_populated_zone(zone) {
unsigned int order; unsigned int order;
unsigned long nr[MAX_ORDER], flags, total = 0; unsigned long nr[MAX_ORDER + 1], flags, total = 0;
unsigned char types[MAX_ORDER]; unsigned char types[MAX_ORDER + 1];
if (zone_idx(zone) > max_zone_idx) if (zone_idx(zone) > max_zone_idx)
continue; continue;
@ -6216,7 +6216,7 @@ void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_i
printk(KERN_CONT "%s: ", zone->name); printk(KERN_CONT "%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order <= MAX_ORDER; order++) {
struct free_area *area = &zone->free_area[order]; struct free_area *area = &zone->free_area[order];
int type; int type;
@ -6230,7 +6230,7 @@ void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_i
} }
} }
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order <= MAX_ORDER; order++) {
printk(KERN_CONT "%lu*%lukB ", printk(KERN_CONT "%lu*%lukB ",
nr[order], K(1UL) << order); nr[order], K(1UL) << order);
if (nr[order]) if (nr[order])
@ -7581,7 +7581,7 @@ static inline void setup_usemap(struct zone *zone) {}
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void) void __init set_pageblock_order(void)
{ {
unsigned int order = MAX_ORDER - 1; unsigned int order = MAX_ORDER;
/* Check that pageblock_nr_pages has not already been setup */ /* Check that pageblock_nr_pages has not already been setup */
if (pageblock_order) if (pageblock_order)
@ -9076,7 +9076,7 @@ void *__init alloc_large_system_hash(const char *tablename,
else else
table = memblock_alloc_raw(size, table = memblock_alloc_raw(size,
SMP_CACHE_BYTES); SMP_CACHE_BYTES);
} else if (get_order(size) >= MAX_ORDER || hashdist) { } else if (get_order(size) > MAX_ORDER || hashdist) {
table = vmalloc_huge(size, gfp_flags); table = vmalloc_huge(size, gfp_flags);
virt = true; virt = true;
if (table) if (table)
@ -9290,7 +9290,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
order = 0; order = 0;
outer_start = start; outer_start = start;
while (!PageBuddy(pfn_to_page(outer_start))) { while (!PageBuddy(pfn_to_page(outer_start))) {
if (++order >= MAX_ORDER) { if (++order > MAX_ORDER) {
outer_start = start; outer_start = start;
break; break;
} }
@ -9540,7 +9540,7 @@ bool is_free_buddy_page(struct page *page)
unsigned long pfn = page_to_pfn(page); unsigned long pfn = page_to_pfn(page);
unsigned int order; unsigned int order;
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order <= MAX_ORDER; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1)); struct page *page_head = page - (pfn & ((1 << order) - 1));
if (PageBuddy(page_head) && if (PageBuddy(page_head) &&
@ -9548,7 +9548,7 @@ bool is_free_buddy_page(struct page *page)
break; break;
} }
return order < MAX_ORDER; return order <= MAX_ORDER;
} }
EXPORT_SYMBOL(is_free_buddy_page); EXPORT_SYMBOL(is_free_buddy_page);
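
The scan above probes every order up to and including MAX_ORDER, aligning the pfn down to each candidate block head. A small userspace sketch of that masking (illustrative only; MAX_ORDER = 10 assumed):

#include <stdio.h>

#define MAX_ORDER 10

/* Align a pfn down to the start of its (potential) order-'order' block,
 * mirroring "page - (pfn & ((1 << order) - 1))" above. */
static unsigned long order_block_head(unsigned long pfn, unsigned int order)
{
        return pfn & ~((1UL << order) - 1);
}

int main(void)
{
        unsigned long pfn = 0x12345;

        for (unsigned int order = 0; order <= MAX_ORDER; order++)
                printf("order %2u: head pfn %#lx\n", order, order_block_head(pfn, order));
        return 0;
}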
@ -9599,7 +9599,7 @@ bool take_page_off_buddy(struct page *page)
bool ret = false; bool ret = false;
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order <= MAX_ORDER; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1)); struct page *page_head = page - (pfn & ((1 << order) - 1));
int page_order = buddy_order(page_head); int page_order = buddy_order(page_head);

View File

@ -226,7 +226,7 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
*/ */
if (PageBuddy(page)) { if (PageBuddy(page)) {
order = buddy_order(page); order = buddy_order(page);
if (order >= pageblock_order && order < MAX_ORDER - 1) { if (order >= pageblock_order && order < MAX_ORDER) {
buddy = find_buddy_page_pfn(page, page_to_pfn(page), buddy = find_buddy_page_pfn(page, page_to_pfn(page),
order, NULL); order, NULL);
if (buddy && !is_migrate_isolate_page(buddy)) { if (buddy && !is_migrate_isolate_page(buddy)) {
@ -290,11 +290,11 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* isolate_single_pageblock() * isolate_single_pageblock()
* @migratetype: migrate type to set in error recovery. * @migratetype: migrate type to set in error recovery.
* *
* Free and in-use pages can be as big as MAX_ORDER-1 and contain more than one * Free and in-use pages can be as big as MAX_ORDER and contain more than one
* pageblock. When not all pageblocks within a page are isolated at the same * pageblock. When not all pageblocks within a page are isolated at the same
* time, free page accounting can go wrong. For example, in the case of * time, free page accounting can go wrong. For example, in the case of
* MAX_ORDER-1 = pageblock_order + 1, a MAX_ORDER-1 page has two pageblocks. * MAX_ORDER = pageblock_order + 1, a MAX_ORDER page has two pageblocks.
* [ MAX_ORDER-1 ] * [ MAX_ORDER ]
* [ pageblock0 | pageblock1 ] * [ pageblock0 | pageblock1 ]
* When either pageblock is isolated, if it is a free page, the page is not * When either pageblock is isolated, if it is a free page, the page is not
* split into separate migratetype lists, as it is supposed to be; if it is an * split into separate migratetype lists, as it is supposed to be; if it is an
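
A worked example of the case the comment describes, assuming the typical x86-64 values pageblock_order = 9 and MAX_ORDER = 10 (illustrative, not part of the commit):

#include <stdio.h>

#define MAX_ORDER       10
#define pageblock_order 9    /* assumed: 2 MiB pageblocks with 4 KiB pages */

int main(void)
{
        unsigned long max_order_pages = 1UL << MAX_ORDER;        /* 1024 */
        unsigned long pageblock_pages = 1UL << pageblock_order;  /*  512 */

        /* When MAX_ORDER == pageblock_order + 1, a MAX_ORDER page spans
         * two pageblocks, which is exactly the case handled above. */
        printf("MAX_ORDER page covers %lu pageblock(s)\n",
               max_order_pages / pageblock_pages);
        return 0;
}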
@ -451,7 +451,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* the free page to the right migratetype list. * the free page to the right migratetype list.
* *
* head_pfn is not used here as a hugetlb page order * head_pfn is not used here as a hugetlb page order
* can be bigger than MAX_ORDER-1, but after it is * can be bigger than MAX_ORDER, but after it is
* freed, the free page order is not. Use pfn within * freed, the free page order is not. Use pfn within
* the range to find the head of the free page. * the range to find the head of the free page.
*/ */
@ -459,7 +459,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
outer_pfn = pfn; outer_pfn = pfn;
while (!PageBuddy(pfn_to_page(outer_pfn))) { while (!PageBuddy(pfn_to_page(outer_pfn))) {
/* stop if we cannot find the free page */ /* stop if we cannot find the free page */
if (++order >= MAX_ORDER) if (++order > MAX_ORDER)
goto failed; goto failed;
outer_pfn &= ~0UL << order; outer_pfn &= ~0UL << order;
} }

View File

@ -315,7 +315,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
unsigned long freepage_order; unsigned long freepage_order;
freepage_order = buddy_order_unsafe(page); freepage_order = buddy_order_unsafe(page);
if (freepage_order < MAX_ORDER) if (freepage_order <= MAX_ORDER)
pfn += (1UL << freepage_order) - 1; pfn += (1UL << freepage_order) - 1;
continue; continue;
} }
@ -549,7 +549,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (PageBuddy(page)) { if (PageBuddy(page)) {
unsigned long freepage_order = buddy_order_unsafe(page); unsigned long freepage_order = buddy_order_unsafe(page);
if (freepage_order < MAX_ORDER) if (freepage_order <= MAX_ORDER)
pfn += (1UL << freepage_order) - 1; pfn += (1UL << freepage_order) - 1;
continue; continue;
} }
@ -657,7 +657,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
if (PageBuddy(page)) { if (PageBuddy(page)) {
unsigned long order = buddy_order_unsafe(page); unsigned long order = buddy_order_unsafe(page);
if (order > 0 && order < MAX_ORDER) if (order > 0 && order <= MAX_ORDER)
pfn += (1UL << order) - 1; pfn += (1UL << order) - 1;
continue; continue;
} }

View File

@ -20,7 +20,7 @@ static int page_order_update_notify(const char *val, const struct kernel_param *
* If param is set beyond this limit, order is set to default * If param is set beyond this limit, order is set to default
* pageblock_order value * pageblock_order value
*/ */
return param_set_uint_minmax(val, kp, 0, MAX_ORDER-1); return param_set_uint_minmax(val, kp, 0, MAX_ORDER);
} }
static const struct kernel_param_ops page_reporting_param_ops = { static const struct kernel_param_ops page_reporting_param_ops = {
@ -276,7 +276,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
return err; return err;
/* Process each free list starting from lowest order/mt */ /* Process each free list starting from lowest order/mt */
for (order = page_reporting_order; order < MAX_ORDER; order++) { for (order = page_reporting_order; order <= MAX_ORDER; order++) {
for (mt = 0; mt < MIGRATE_TYPES; mt++) { for (mt = 0; mt < MIGRATE_TYPES; mt++) {
/* We do not pull pages from the isolate free list */ /* We do not pull pages from the isolate free list */
if (is_migrate_isolate(mt)) if (is_migrate_isolate(mt))
@ -370,7 +370,7 @@ int page_reporting_register(struct page_reporting_dev_info *prdev)
*/ */
if (page_reporting_order == -1) { if (page_reporting_order == -1) {
if (prdev->order > 0 && prdev->order < MAX_ORDER) if (prdev->order > 0 && prdev->order <= MAX_ORDER)
page_reporting_order = prdev->order; page_reporting_order = prdev->order;
else else
page_reporting_order = pageblock_order; page_reporting_order = pageblock_order;

View File

@ -4,7 +4,7 @@
#define _MM_SHUFFLE_H #define _MM_SHUFFLE_H
#include <linux/jump_label.h> #include <linux/jump_label.h>
#define SHUFFLE_ORDER (MAX_ORDER-1) #define SHUFFLE_ORDER MAX_ORDER
#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR #ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key); DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);

View File

@ -465,7 +465,7 @@ static int __init slab_max_order_setup(char *str)
{ {
get_option(&str, &slab_max_order); get_option(&str, &slab_max_order);
slab_max_order = slab_max_order < 0 ? 0 : slab_max_order = slab_max_order < 0 ? 0 :
min(slab_max_order, MAX_ORDER - 1); min(slab_max_order, MAX_ORDER);
slab_max_order_set = true; slab_max_order_set = true;
return 1; return 1;

View File

@ -4171,8 +4171,8 @@ static inline int calculate_order(unsigned int size)
/* /*
* Doh this slab cannot be placed using slub_max_order. * Doh this slab cannot be placed using slub_max_order.
*/ */
order = calc_slab_order(size, 1, MAX_ORDER - 1, 1); order = calc_slab_order(size, 1, MAX_ORDER, 1);
if (order < MAX_ORDER) if (order <= MAX_ORDER)
return order; return order;
return -ENOSYS; return -ENOSYS;
} }
@ -4697,7 +4697,7 @@ __setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str) static int __init setup_slub_max_order(char *str)
{ {
get_option(&str, (int *)&slub_max_order); get_option(&str, (int *)&slub_max_order);
slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);
return 1; return 1;
} }

View File

@ -7002,7 +7002,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
* scan_control uses s8 fields for order, priority, and reclaim_idx. * scan_control uses s8 fields for order, priority, and reclaim_idx.
* Confirm they are large enough for max values. * Confirm they are large enough for max values.
*/ */
BUILD_BUG_ON(MAX_ORDER > S8_MAX); BUILD_BUG_ON(MAX_ORDER >= S8_MAX);
BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);

View File

@ -1055,7 +1055,7 @@ static void fill_contig_page_info(struct zone *zone,
info->free_blocks_total = 0; info->free_blocks_total = 0;
info->free_blocks_suitable = 0; info->free_blocks_suitable = 0;
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order <= MAX_ORDER; order++) {
unsigned long blocks; unsigned long blocks;
/* /*
@ -1088,7 +1088,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in
{ {
unsigned long requested = 1UL << order; unsigned long requested = 1UL << order;
if (WARN_ON_ONCE(order >= MAX_ORDER)) if (WARN_ON_ONCE(order > MAX_ORDER))
return 0; return 0;
if (!info->free_blocks_total) if (!info->free_blocks_total)
@ -1462,7 +1462,7 @@ static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
int order; int order;
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
for (order = 0; order < MAX_ORDER; ++order) for (order = 0; order <= MAX_ORDER; ++order)
/* /*
* Access to nr_free is lockless as nr_free is used only for * Access to nr_free is lockless as nr_free is used only for
* printing purposes. Use data_race to avoid KCSAN warning. * printing purposes. Use data_race to avoid KCSAN warning.
@ -1491,7 +1491,7 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
pgdat->node_id, pgdat->node_id,
zone->name, zone->name,
migratetype_names[mtype]); migratetype_names[mtype]);
for (order = 0; order < MAX_ORDER; ++order) { for (order = 0; order <= MAX_ORDER; ++order) {
unsigned long freecount = 0; unsigned long freecount = 0;
struct free_area *area; struct free_area *area;
struct list_head *curr; struct list_head *curr;
@ -1531,7 +1531,7 @@ static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
/* Print header */ /* Print header */
seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
for (order = 0; order < MAX_ORDER; ++order) for (order = 0; order <= MAX_ORDER; ++order)
seq_printf(m, "%6d ", order); seq_printf(m, "%6d ", order);
seq_putc(m, '\n'); seq_putc(m, '\n');
@ -2153,7 +2153,7 @@ static void unusable_show_print(struct seq_file *m,
seq_printf(m, "Node %d, zone %8s ", seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id, pgdat->node_id,
zone->name); zone->name);
for (order = 0; order < MAX_ORDER; ++order) { for (order = 0; order <= MAX_ORDER; ++order) {
fill_contig_page_info(zone, order, &info); fill_contig_page_info(zone, order, &info);
index = unusable_free_index(order, &info); index = unusable_free_index(order, &info);
seq_printf(m, "%d.%03d ", index / 1000, index % 1000); seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
@ -2205,7 +2205,7 @@ static void extfrag_show_print(struct seq_file *m,
seq_printf(m, "Node %d, zone %8s ", seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id, pgdat->node_id,
zone->name); zone->name);
for (order = 0; order < MAX_ORDER; ++order) { for (order = 0; order <= MAX_ORDER; ++order) {
fill_contig_page_info(zone, order, &info); fill_contig_page_info(zone, order, &info);
index = __fragmentation_index(order, &info); index = __fragmentation_index(order, &info);
seq_printf(m, "%2d.%03d ", index / 1000, index % 1000); seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);

View File

@ -843,7 +843,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
goto out; goto out;
/* the calculated number of cq entries fits to mlx5 cq allocation */ /* the calculated number of cq entries fits to mlx5 cq allocation */
cqe_size_order = cache_line_size() == 128 ? 7 : 6; cqe_size_order = cache_line_size() == 128 ? 7 : 6;
smc_order = MAX_ORDER - cqe_size_order - 1; smc_order = MAX_ORDER - cqe_size_order;
if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE) if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2; cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev, smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
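
The expression changes but the resulting order does not: with a 128-byte cache line, the old MAX_ORDER(11) - 7 - 1 and the new MAX_ORDER(10) - 7 both give 3. A tiny sketch of the arithmetic (illustrative only):

#include <stdio.h>

#define MAX_ORDER 10    /* new inclusive definition */

int main(void)
{
        int cqe_size_order = 7;   /* 128-byte cache line case from the code above */

        /* Old: 11 - 7 - 1 = 3.  New: 10 - 7 = 3.  Purely mechanical conversion. */
        printf("smc_order = %d\n", MAX_ORDER - cqe_size_order);
        return 0;
}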

View File

@ -38,7 +38,7 @@ static int param_set_bufsize(const char *val, const struct kernel_param *kp)
size = memparse(val, NULL); size = memparse(val, NULL);
order = get_order(size); order = get_order(size);
if (order >= MAX_ORDER) if (order > MAX_ORDER)
return -EINVAL; return -EINVAL;
ima_maxorder = order; ima_maxorder = order;
ima_bufsize = PAGE_SIZE << order; ima_bufsize = PAGE_SIZE << order;

View File

@ -17,10 +17,10 @@ enum zone_type {
}; };
#define MAX_NR_ZONES __MAX_NR_ZONES #define MAX_NR_ZONES __MAX_NR_ZONES
#define MAX_ORDER 11 #define MAX_ORDER 10
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) #define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
#define pageblock_order (MAX_ORDER - 1) #define pageblock_order MAX_ORDER
#define pageblock_nr_pages BIT(pageblock_order) #define pageblock_nr_pages BIT(pageblock_order)
#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages) #define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages) #define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
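
Pulling the new definitions together, a minimal userspace model of the invariants this commit establishes (illustrative; the values mirror the generic header above):

#include <assert.h>
#include <stdio.h>

#define MAX_ORDER          10
#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
#define pageblock_order    MAX_ORDER

struct free_area { unsigned long nr_free; };

int main(void)
{
        /* One free_area slot per order, 0 through MAX_ORDER inclusive. */
        struct free_area free_area[MAX_ORDER + 1] = { { 0 } };

        assert(MAX_ORDER_NR_PAGES == 1024);
        printf("free_area has %zu slots for orders 0..%d; largest block is %d pages\n",
               sizeof(free_area) / sizeof(free_area[0]), MAX_ORDER, MAX_ORDER_NR_PAGES);
        return 0;
}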