diff --git a/.mailmap b/.mailmap
index 1f6ad79b45e4..cc0c58c560b3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -363,7 +363,6 @@ Maheshwar Ajja
 Malathi Gottam
 Manikanta Pubbisetty
 Manivannan Sadhasivam
-Manivannan Sadhasivam
 Manoj Basapathi
 Marcin Nowakowski
 Marc Zyngier
@@ -504,6 +503,9 @@ Ralf Baechle
 Ralf Wildenhues
 Ram Chandra Jangir
 Randy Dunlap
+Randy Dunlap
+Randy Dunlap
+Randy Dunlap
 Ravi Kumar Bokka
 Ravi Kumar Siddojigari
 Rémi Denis-Courmont
@@ -582,6 +584,7 @@ Surabhi Vishnoi
 Takashi YOSHII
 Tamizh Chelvam Raja
 Taniya Das
+Tanzir Hasan
 Tejun Heo
 Tomeu Vizoso
 Thomas Graf
diff --git a/MAINTAINERS b/MAINTAINERS
index 391bbb855cbe..2cd538369f3c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12724,12 +12724,11 @@ F:	Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
 F:	drivers/i2c/muxes/i2c-mux-ltc4306.c

 LTP (Linux Test Project)
-M:	Mike Frysinger
 M:	Cyril Hrubis
-M:	Wanlong Gao
 M:	Jan Stancek
-M:	Stanislav Kholmanskikh
-M:	Alexey Kodanev
+M:	Petr Vorel
+M:	Li Wang
+M:	Yang Xu
 L:	ltp@lists.linux.it (subscribers-only)
 S:	Maintained
 W:	http://linux-test-project.github.io/
@@ -19738,6 +19737,19 @@ T:	git git://linuxtv.org/media_tree.git
 F:	drivers/media/i2c/rj54n1cb0c.c
 F:	include/media/i2c/rj54n1cb0c.h

+SHRINKER
+M:	Andrew Morton
+M:	Dave Chinner
+R:	Qi Zheng
+R:	Roman Gushchin
+R:	Muchun Song
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	Documentation/admin-guide/mm/shrinker_debugfs.rst
+F:	include/linux/shrinker.h
+F:	mm/shrinker.c
+F:	mm/shrinker_debug.c
+
 SH_VOU V4L2 OUTPUT DRIVER
 L:	linux-media@vger.kernel.org
 S:	Orphan
@@ -24263,11 +24275,13 @@ N:	zstd
 K:	zstd

 ZSWAP COMPRESSED SWAP CACHING
-M:	Seth Jennings
-M:	Dan Streetman
-M:	Vitaly Wool
+M:	Johannes Weiner
+M:	Yosry Ahmed
+M:	Nhat Pham
 L:	linux-mm@kvack.org
 S:	Maintained
+F:	Documentation/admin-guide/mm/zswap.rst
+F:	include/linux/zswap.h
 F:	mm/zswap.c

 THE REST
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 62b16f42d5d2..3f78ebbb795f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2432,7 +2432,6 @@ static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)

 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 {
-	struct mmu_notifier_range range;
 	struct pagemap_scan_private p = {0};
 	unsigned long walk_start;
 	size_t n_ranges_out = 0;
@@ -2448,15 +2447,9 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 	if (ret)
 		return ret;

-	/* Protection change for the range is going to happen. */
-	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
-		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
-					mm, p.arg.start, p.arg.end);
-		mmu_notifier_invalidate_range_start(&range);
-	}
-
 	for (walk_start = p.arg.start; walk_start < p.arg.end;
 	     walk_start = p.arg.walk_end) {
+		struct mmu_notifier_range range;
 		long n_out;

 		if (fatal_signal_pending(current)) {
@@ -2467,8 +2460,20 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 		ret = mmap_read_lock_killable(mm);
 		if (ret)
 			break;
+
+		/* Protection change for the range is going to happen. */
+		if (p.arg.flags & PM_SCAN_WP_MATCHING) {
+			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
+						mm, walk_start, p.arg.end);
+			mmu_notifier_invalidate_range_start(&range);
+		}
+
 		ret = walk_page_range(mm, walk_start, p.arg.end,
 				      &pagemap_scan_ops, &p);
+
+		if (p.arg.flags & PM_SCAN_WP_MATCHING)
+			mmu_notifier_invalidate_range_end(&range);
+
 		mmap_read_unlock(mm);

 		n_out = pagemap_scan_flush_buffer(&p);
@@ -2494,9 +2499,6 @@
 	if (pagemap_scan_writeback_args(&p.arg, uarg))
 		ret = -EFAULT;

-	if (p.arg.flags & PM_SCAN_WP_MATCHING)
-		mmu_notifier_invalidate_range_end(&range);
-
 	kfree(p.vec_buf);
 	return ret;
 }
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index d48315667752..75cd6a736d03 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -376,7 +376,6 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)

 	crashk_low_res.start = low_base;
 	crashk_low_res.end = low_base + low_size - 1;
-	insert_resource(&iomem_resource, &crashk_low_res);
 #endif
 	return 0;
 }
@@ -458,8 +457,19 @@ retry:

 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
-	insert_resource(&iomem_resource, &crashk_res);
 }
+
+static __init int insert_crashkernel_resources(void)
+{
+	if (crashk_res.start < crashk_res.end)
+		insert_resource(&iomem_resource, &crashk_res);
+
+	if (crashk_low_res.start < crashk_low_res.end)
+		insert_resource(&iomem_resource, &crashk_low_res);
+
+	return 0;
+}
+early_initcall(insert_crashkernel_resources);
 #endif

 int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
@@ -867,7 +877,7 @@ subsys_initcall(crash_notes_memory_init);
  * regions are online. So mutex lock __crash_hotplug_lock is used to
  * serialize the crash hotplug handling specifically.
  */
-DEFINE_MUTEX(__crash_hotplug_lock);
+static DEFINE_MUTEX(__crash_hotplug_lock);
 #define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
 #define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)

diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index a08031b57a61..d08fc7b5db97 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1257,6 +1257,7 @@ int kernel_kexec(void)
 		kexec_in_progress = true;
 		kernel_restart_prepare("kexec reboot");
 		migrate_to_reboot_cpu();
+		syscore_shutdown();

 		/*
 		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97ce28f4d154..ba25129563ad 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -378,6 +378,8 @@ config DEBUG_INFO_BTF
 	depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
 	depends on BPF_SYSCALL
 	depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+	# pahole uses elfutils, which does not have support for Hexagon relocations
+	depends on !HEXAGON
 	help
 	  Generate deduplicated BTF type information from DWARF debug info.
 	  Turning this on expects presence of pahole tool, which will convert
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 24c13dfb1e94..df6627f62402 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -487,6 +487,7 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));

 		/*
+		 * Prepare the lock for saving auxiliary stack traces.
 		 * Temporarily disable KASAN bug reporting to allow instrumented
 		 * raw_spin_lock_init to access aux_lock, which resides inside
 		 * of a redzone.
@@ -510,8 +511,13 @@
 	stack_depot_put(meta->aux_stack[0]);
 	stack_depot_put(meta->aux_stack[1]);

-	/* Zero out alloc meta to mark it as invalid. */
-	__memset(meta, 0, sizeof(*meta));
+	/*
+	 * Zero out alloc meta to mark it as invalid but keep aux_lock
+	 * initialized to avoid having to reinitialize it when another object
+	 * is allocated in the same slot.
+	 */
+	__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
+	__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
 }

 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b3c0ff52bb72..21890994c1d3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -101,9 +101,11 @@ static int set_memmap_mode(const char *val, const struct kernel_param *kp)

 static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
 {
-	if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_FORCE)
-		return sprintf(buffer, "force\n");
-	return param_get_bool(buffer, kp);
+	int mode = *((int *)kp->arg);
+
+	if (mode == MEMMAP_ON_MEMORY_FORCE)
+		return sprintf(buffer, "force\n");
+	return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
 }

 static const struct kernel_param_ops memmap_mode_ops = {
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 89dc29f1e6c6..2c19f5515e36 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include
 #include "internal.h"
 #include "slab.h"
 #include "shuffle.h"
@@ -381,6 +382,11 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 			goto out;
 		}

+		if (is_kdump_kernel()) {
+			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
+			goto out;
+		}
+
 		for_each_mem_region(r) {
 			if (memblock_is_mirror(r))
 				continue;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 216ab4c8621f..20e3b0d9cf7e 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1393,6 +1393,12 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
 			err = -ENOENT;
 			break;
 		}
+		/* Avoid moving zeropages for now */
+		if (is_huge_zero_pmd(*src_pmd)) {
+			spin_unlock(ptl);
+			err = -EBUSY;
+			break;
+		}
 		/* Check if we can move the pmd without splitting it. */
 		if (move_splits_huge_pmd(dst_addr, src_addr,
 					 src_start + len) ||
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index cb980b144ca1..fa5be6f57b00 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -16,6 +16,21 @@ elif type c++filt >/dev/null 2>&1 ; then
 	cppfilt_opts=-i
 fi

+UTIL_SUFFIX=
+if [[ -z ${LLVM:-} ]]; then
+	UTIL_PREFIX=${CROSS_COMPILE:-}
+else
+	UTIL_PREFIX=llvm-
+	if [[ ${LLVM} == */ ]]; then
+		UTIL_PREFIX=${LLVM}${UTIL_PREFIX}
+	elif [[ ${LLVM} == -* ]]; then
+		UTIL_SUFFIX=${LLVM}
+	fi
+fi
+
+READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
+ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
+
 if [[ $1 == "-r" ]] ; then
 	vmlinux=""
 	basepath="auto"
@@ -75,7 +90,7 @@ find_module() {

 	if [[ "$modpath" != "" ]] ; then
 		for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
-			if readelf -WS "$fn" | grep -qwF .debug_line ; then
+			if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
 				echo $fn
 				return
 			fi
@@ -169,7 +184,7 @@ parse_symbol() {
 	if [[ $aarray_support == true && "${cache[$module,$address]+isset}" == "isset" ]]; then
 		local code=${cache[$module,$address]}
 	else
-		local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
+		local code=$(${ADDR2LINE} -i -e "$objfile" "$address" 2>/dev/null)
 		if [[ $aarray_support == true ]]; then
 			cache[$module,$address]=$code
 		fi
diff --git a/tools/testing/selftests/mm/hugepage-vmemmap.c b/tools/testing/selftests/mm/hugepage-vmemmap.c
index 5b354c209e93..894d28c3dd47 100644
--- a/tools/testing/selftests/mm/hugepage-vmemmap.c
+++ b/tools/testing/selftests/mm/hugepage-vmemmap.c
@@ -10,10 +10,7 @@
 #include
 #include
 #include
-
-#define MAP_LENGTH (2UL * 1024 * 1024)
-
-#define PAGE_SIZE 4096
+#include "vm_util.h"

 #define PAGE_COMPOUND_HEAD (1UL << 15)
 #define PAGE_COMPOUND_TAIL (1UL << 16)
@@ -39,6 +36,9 @@
 #define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
 #endif

+static size_t pagesize;
+static size_t maplength;
+
 static void write_bytes(char *addr, size_t length)
 {
 	unsigned long i;
@@ -56,7 +56,7 @@ static unsigned long virt_to_pfn(void *addr)
 	if (fd < 0)
 		return -1UL;

-	lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET);
+	lseek(fd, (unsigned long)addr / pagesize * sizeof(pagemap), SEEK_SET);
 	read(fd, &pagemap, sizeof(pagemap));
 	close(fd);

@@ -86,7 +86,7 @@ static int check_page_flags(unsigned long pfn)
 	 * this also verifies kernel has correctly set the fake page_head to tail
 	 * while hugetlb_free_vmemmap is enabled.
 	 */
-	for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
+	for (i = 1; i < maplength / pagesize; i++) {
 		read(fd, &pageflags, sizeof(pageflags));
 		if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
 		    (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
@@ -106,18 +106,25 @@ int main(int argc, char **argv)
 	void *addr;
 	unsigned long pfn;

-	addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
+	pagesize = psize();
+	maplength = default_huge_page_size();
+	if (!maplength) {
+		printf("Unable to determine huge page size\n");
+		exit(1);
+	}
+
+	addr = mmap(MAP_ADDR, maplength, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
 	if (addr == MAP_FAILED) {
 		perror("mmap");
 		exit(1);
 	}

 	/* Trigger allocation of HugeTLB page. */
-	write_bytes(addr, MAP_LENGTH);
+	write_bytes(addr, maplength);

 	pfn = virt_to_pfn(addr);
 	if (pfn == -1UL) {
-		munmap(addr, MAP_LENGTH);
+		munmap(addr, maplength);
 		perror("virt_to_pfn");
 		exit(1);
 	}
@@ -125,13 +132,13 @@
 	printf("Returned address is %p whose pfn is %lx\n", addr, pfn);

 	if (check_page_flags(pfn) < 0) {
-		munmap(addr, MAP_LENGTH);
+		munmap(addr, maplength);
 		perror("check_page_flags");
 		exit(1);
 	}

 	/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
-	if (munmap(addr, MAP_LENGTH)) {
+	if (munmap(addr, maplength)) {
 		perror("munmap");
 		exit(1);
 	}
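
A note on the hugepage-vmemmap.c conversion above: psize() and default_huge_page_size() come from the selftests' shared vm_util helpers rather than hardcoded constants, which is what lets the test run where the base page is not 4 KB and the default huge page is not 2 MB (e.g. arm64 with 64K base pages). The following is a standalone sketch of what those helpers roughly boil down to -- my approximation (base page size via sysconf, huge page size parsed from /proc/meminfo), not the verbatim vm_util.c code:

#include <stdio.h>
#include <unistd.h>

/* Approximation of psize(): the runtime base page size. */
static size_t psize(void)
{
	return (size_t)sysconf(_SC_PAGESIZE);
}

/*
 * Approximation of default_huge_page_size(): parse the "Hugepagesize:"
 * line from /proc/meminfo and convert from kB to bytes. Returns 0 on
 * failure, matching how the converted test bails out when the size
 * cannot be determined.
 */
static size_t default_huge_page_size(void)
{
	unsigned long kb = 0;
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1)
			break;
	}
	fclose(f);
	return (size_t)kb * 1024;
}

int main(void)
{
	printf("base page size: %zu\n", psize());
	printf("huge page size: %zu\n", default_huge_page_size());
	return 0;
}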