From b4e61fc031b11dd807dffc46cebbf0e25966d3d1 Mon Sep 17 00:00:00 2001
From: Xu Yu
Date: Thu, 28 Apr 2022 23:14:43 -0700
Subject: [PATCH 01/10] Revert "mm/memory-failure.c: skip huge_zero_page in
 memory_failure()"

Patch series "mm/memory-failure: rework fix on huge_zero_page splitting".

This patch (of 2):

This reverts commit d173d5417fb67411e623d394aab986d847e47dad.

The commit d173d5417fb6 ("mm/memory-failure.c: skip huge_zero_page in
memory_failure()") explicitly skips huge_zero_page in memory_failure(),
in order to avoid triggering VM_BUG_ON_PAGE on huge_zero_page in
split_huge_page_to_list().

This works, but Yang Shi thinks that:

    Raising a BUG is overkill for splitting huge_zero_page.  The
    huge_zero_page can't be met from normal paths other than memory
    failure, but memory failure is a valid caller.  So I tend to replace
    the BUG with a WARN + returning -EBUSY.  If we don't care about the
    reason code in memory failure, we don't have to touch memory failure.

And for the issue that huge_zero_page will be set PG_has_hwpoisoned, Yang
Shi comments that:

    The anonymous page fault doesn't check if the page is poisoned or not
    since it typically gets a freshly allocated page and assumes a
    poisoned page (isolated successfully) can't be reallocated again.
    But the huge zero page and base zero page are reused every time.  So
    no matter what fix we pick, the issue is always there.

Finally, Yang, David, Anshuman and Naoya all agree to fix the bug, i.e.,
to split huge_zero_page, in split_huge_page_to_list().

This reverts the commit d173d5417fb6 ("mm/memory-failure.c: skip
huge_zero_page in memory_failure()"), and the original bug will be fixed
by the next patch.

Link: https://lkml.kernel.org/r/872cefb182ba1dd686b0e7db1e6b2ebe5a4fff87.1651039624.git.xuyu@linux.alibaba.com
Fixes: d173d5417fb6 ("mm/memory-failure.c: skip huge_zero_page in memory_failure()")
Fixes: 6a46079cf57a ("HWPOISON: The high level memory error handler in the VM v7")
Signed-off-by: Xu Yu
Suggested-by: Yang Shi
Reviewed-by: Yang Shi
Reviewed-by: Miaohe Lin
Cc: Naoya Horiguchi
Cc:
Signed-off-by: Andrew Morton
---
 mm/memory-failure.c | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 27760c19bad7..2020944398c9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1860,19 +1860,6 @@ try_again:
 	}
 
 	if (PageTransHuge(hpage)) {
-		/*
-		 * Bail out before SetPageHasHWPoisoned() if hpage is
-		 * huge_zero_page, although PG_has_hwpoisoned is not
-		 * checked in set_huge_zero_page().
-		 *
-		 * TODO: Handle memory failure of huge_zero_page thoroughly.
-		 */
-		if (is_huge_zero_page(hpage)) {
-			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
-			res = -EBUSY;
-			goto unlock_mutex;
-		}
-
 		/*
 		 * The flag must be set after the refcount is bumped
 		 * otherwise it may race with THP split.

From 478d134e9506c7e9bfe2830ed03dd85e97966313 Mon Sep 17 00:00:00 2001
From: Xu Yu
Date: Thu, 28 Apr 2022 23:14:43 -0700
Subject: [PATCH 02/10] mm/huge_memory: do not overkill when splitting
 huge_zero_page

The kernel panics when a memory failure is injected for the global
huge_zero_page and CONFIG_DEBUG_VM is enabled, as follows.
Injecting memory failure for pfn 0x109ff9 at process virtual address 0x20ff9000
page:00000000fb053fc3 refcount:2 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x109e00
head:00000000fb053fc3 order:9 compound_mapcount:0 compound_pincount:0
flags: 0x17fffc000010001(locked|head|node=0|zone=2|lastcpupid=0x1ffff)
raw: 017fffc000010001 0000000000000000 dead000000000122 0000000000000000
raw: 0000000000000000 0000000000000000 00000002ffffffff 0000000000000000
page dumped because: VM_BUG_ON_PAGE(is_huge_zero_page(head))
------------[ cut here ]------------
kernel BUG at mm/huge_memory.c:2499!
invalid opcode: 0000 [#1] PREEMPT SMP PTI
CPU: 6 PID: 553 Comm: split_bug Not tainted 5.18.0-rc1+ #11
Hardware name: Alibaba Cloud Alibaba Cloud ECS, BIOS 3288b3c 04/01/2014
RIP: 0010:split_huge_page_to_list+0x66a/0x880
Code: 84 9b fb ff ff 48 8b 7c 24 08 31 f6 e8 9f 5d 2a 00 b8 b8 02 00 00 e9 e8 fb ff ff 48 c7 c6 e8 47 3c 82 4c b
RSP: 0018:ffffc90000dcbdf8 EFLAGS: 00010246
RAX: 000000000000003c RBX: 0000000000000001 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffffffff823e4c4f RDI: 00000000ffffffff
RBP: ffff88843fffdb40 R08: 0000000000000000 R09: 00000000fffeffff
R10: ffffc90000dcbc48 R11: ffffffff82d68448 R12: ffffea0004278000
R13: ffffffff823c6203 R14: 0000000000109ff9 R15: ffffea000427fe40
FS:  00007fc375a26740(0000) GS:ffff88842fd80000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fc3757c9290 CR3: 0000000102174006 CR4: 00000000003706e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 try_to_split_thp_page+0x3a/0x130
 memory_failure+0x128/0x800
 madvise_inject_error.cold+0x8b/0xa1
 __x64_sys_madvise+0x54/0x60
 do_syscall_64+0x35/0x80
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7fc3754f8bf9
Code: 01 00 48 81 c4 80 00 00 00 e9 f1 fe ff ff 0f 1f 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 8
RSP: 002b:00007ffeda93a1d8 EFLAGS: 00000217 ORIG_RAX: 000000000000001c
RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fc3754f8bf9
RDX: 0000000000000064 RSI: 0000000000003000 RDI: 0000000020ff9000
RBP: 00007ffeda93a200 R08: 0000000000000000 R09: 0000000000000000
R10: 00000000ffffffff R11: 0000000000000217 R12: 0000000000400490
R13: 00007ffeda93a2e0 R14: 0000000000000000 R15: 0000000000000000

We think that raising a BUG is overkill for splitting huge_zero_page: the
huge_zero_page can't be met from normal paths other than memory failure,
but memory failure is a valid caller.  So we replace the BUG with a WARN +
returning -EBUSY, and thus the panic above won't happen again.
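[editorial note] For reviewers, the call trace above can be reached from userspace roughly as in the following minimal sketch.  This is a hypothetical reproducer, not part of the patch; it assumes CAP_SYS_ADMIN, CONFIG_MEMORY_FAILURE, THP enabled with use_zero_page, 2 MiB PMD-sized huge pages, and a CONFIG_DEBUG_VM kernel to see the original BUG.

#define _GNU_SOURCE
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumed PMD size */

int main(void)
{
	size_t len = 2 * HPAGE_SIZE;
	char *map, *p;
	volatile char c;

	/* Over-allocate so that a PMD-aligned subrange is guaranteed. */
	map = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p = (char *)(((uintptr_t)map + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));
	madvise(p, HPAGE_SIZE, MADV_HUGEPAGE);

	/* A read fault (no write) can map the shared huge zero page. */
	c = p[0];
	(void)c;

	/*
	 * memory_failure() then calls try_to_split_thp_page() on the huge
	 * zero page: VM_BUG_ON_PAGE() before the fix, a graceful failure
	 * (-EBUSY from the split) after it.
	 */
	if (madvise(p, getpagesize(), MADV_HWPOISON))
		printf("MADV_HWPOISON: %s\n", strerror(errno));
	else
		printf("MADV_HWPOISON succeeded\n");

	munmap(map, len);
	return 0;
}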
Link: https://lkml.kernel.org/r/f35f8b97377d5d3ede1bc5ac3114da888c57cbce.1651052574.git.xuyu@linux.alibaba.com
Fixes: d173d5417fb6 ("mm/memory-failure.c: skip huge_zero_page in memory_failure()")
Fixes: 6a46079cf57a ("HWPOISON: The high level memory error handler in the VM v7")
Signed-off-by: Xu Yu
Suggested-by: Yang Shi
Reported-by: kernel test robot
Reviewed-by: Naoya Horiguchi
Reviewed-by: Yang Shi
Reviewed-by: Miaohe Lin
Cc:
Signed-off-by: Andrew Morton
---
 mm/huge_memory.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c468fee595ff..910a138e9859 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2495,11 +2495,16 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	struct address_space *mapping = NULL;
 	int extra_pins, ret;
 	pgoff_t end;
+	bool is_hzp;
 
-	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
 	VM_BUG_ON_PAGE(!PageLocked(head), head);
 	VM_BUG_ON_PAGE(!PageCompound(head), head);
 
+	is_hzp = is_huge_zero_page(head);
+	VM_WARN_ON_ONCE_PAGE(is_hzp, head);
+	if (is_hzp)
+		return -EBUSY;
+
 	if (PageWriteback(head))
 		return -EBUSY;
 

From 1825b93b626e99eb9a0f9f50342c7b2fa201b387 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Thu, 28 Apr 2022 23:14:44 -0700
Subject: [PATCH 03/10] mm/hwpoison: use pr_err() instead of dump_page() in
 get_any_page()

The following VM_BUG_ON_FOLIO() is triggered when a memory error event
happens on (thp/folio) pages which are about to be freed:

[ 1160.232771] page:00000000b36a8a0f refcount:1 mapcount:0 mapping:0000000000000000 index:0x1 pfn:0x16a000
[ 1160.236916] page:00000000b36a8a0f refcount:0 mapcount:0 mapping:0000000000000000 index:0x1 pfn:0x16a000
[ 1160.240684] flags: 0x57ffffc0800000(hwpoison|node=1|zone=2|lastcpupid=0x1fffff)
[ 1160.243458] raw: 0057ffffc0800000 dead000000000100 dead000000000122 0000000000000000
[ 1160.246268] raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000
[ 1160.249197] page dumped because: VM_BUG_ON_FOLIO(!folio_test_large(folio))
[ 1160.251815] ------------[ cut here ]------------
[ 1160.253438] kernel BUG at include/linux/mm.h:788!
[ 1160.256162] invalid opcode: 0000 [#1] PREEMPT SMP PTI
[ 1160.258172] CPU: 2 PID: 115368 Comm: mceinj.sh Tainted: G E 5.18.0-rc1-v5.18-rc1-220404-2353-005-g83111+ #3
[ 1160.262049] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1.fc35 04/01/2014
[ 1160.265103] RIP: 0010:dump_page.cold+0x27e/0x2bd
[ 1160.266757] Code: fe ff ff 48 c7 c6 81 f1 5a 98 e9 4c fe ff ff 48 c7 c6 a1 95 59 98 e9 40 fe ff ff 48 c7 c6 50 bf 5a 98 48 89 ef e8 9d 04 6d ff <0f> 0b 41 f7 c4 ff 0f 00 00 0f 85 9f fd ff ff 49 8b 04 24 a9 00 00
[ 1160.273180] RSP: 0018:ffffaa2c4d59fd18 EFLAGS: 00010292
[ 1160.274969] RAX: 000000000000003e RBX: 0000000000000001 RCX: 0000000000000000
[ 1160.277263] RDX: 0000000000000001 RSI: ffffffff985995a1 RDI: 00000000ffffffff
[ 1160.279571] RBP: ffffdc9c45a80000 R08: 0000000000000000 R09: 00000000ffffdfff
[ 1160.281794] R10: ffffaa2c4d59fb08 R11: ffffffff98940d08 R12: ffffdc9c45a80000
[ 1160.283920] R13: ffffffff985b6f94 R14: 0000000000000000 R15: ffffdc9c45a80000
[ 1160.286641] FS:  00007eff54ce1740(0000) GS:ffff99c67bd00000(0000) knlGS:0000000000000000
[ 1160.289498] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 1160.291106] CR2: 00005628381a5f68 CR3: 0000000104712003 CR4: 0000000000170ee0
[ 1160.293031] Call Trace:
[ 1160.293724]  <TASK>
[ 1160.294334]  get_hwpoison_page+0x47d/0x570
[ 1160.295474]  memory_failure+0x106/0xaa0
[ 1160.296474]  ? security_capable+0x36/0x50
[ 1160.297524]  hard_offline_page_store+0x43/0x80
[ 1160.298684]  kernfs_fop_write_iter+0x11c/0x1b0
[ 1160.299829]  new_sync_write+0xf9/0x160
[ 1160.300810]  vfs_write+0x209/0x290
[ 1160.301835]  ksys_write+0x4f/0xc0
[ 1160.302718]  do_syscall_64+0x3b/0x90
[ 1160.303664]  entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 1160.304981] RIP: 0033:0x7eff54b018b7

As shown in the RIP address, this VM_BUG_ON in folio_entire_mapcount() is
called from dump_page("hwpoison: unhandlable page") in get_any_page().
The below explains the mechanism of the race:

  CPU 0                                    CPU 1

  memory_failure
    get_hwpoison_page
      get_any_page
        dump_page
          compound = PageCompound
                                           free_pages_prepare
                                             page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP
          folio_entire_mapcount
            VM_BUG_ON_FOLIO(!folio_test_large(folio))

So replace dump_page() with the safer pr_err().

Link: https://lkml.kernel.org/r/20220427053220.719866-1-naoya.horiguchi@linux.dev
Fixes: 74e8ee4708a8 ("mm: Turn head_compound_mapcount() into folio_entire_mapcount()")
Signed-off-by: Naoya Horiguchi
Reviewed-by: John Hubbard
Reviewed-by: Miaohe Lin
Cc: Matthew Wilcox
Cc: Christoph Hellwig
Cc: Jason Gunthorpe
Cc: William Kucharski
Cc:
Signed-off-by: Andrew Morton
---
 mm/memory-failure.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2020944398c9..d4a4adcca01f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1274,7 +1274,7 @@ try_again:
 	}
 out:
 	if (ret == -EIO)
-		dump_page(p, "hwpoison: unhandlable page");
+		pr_err("Memory failure: %#lx: unhandlable page.\n", page_to_pfn(p));
 
 	return ret;
 }

From 7d1e6496616275f3830e2f2f91fa69a66953e95b Mon Sep 17 00:00:00 2001
From: Niels Dossche
Date: Mon, 9 May 2022 17:34:28 -0700
Subject: [PATCH 04/10] mm: mremap: fix sign for EFAULT error return value

The mremap syscall is supposed to return a pointer to the new virtual
memory area on success, and a negative value of the error code in case of
failure.  Currently, EFAULT is returned when the VMA is not found,
instead of -EFAULT.  Users of this syscall will therefore believe it
succeeded even though the VMA didn't exist, as it returns a pointer to
address 0xe (0xe being the value of EFAULT).

Fix the sign of the error value.

Link: https://lkml.kernel.org/r/20220427224439.23828-2-dossche.niels@gmail.com
Fixes: 550a7d60bd5e ("mm, hugepages: add mremap() support for hugepage backed vma")
Signed-off-by: Niels Dossche
Cc: Mina Almasry
Cc: Mike Kravetz
Cc:
Signed-off-by: Andrew Morton
---
 mm/mremap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index 303d3290b938..0b93fac76851 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -947,7 +947,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 		return -EINTR;
 	vma = vma_lookup(mm, addr);
 	if (!vma) {
-		ret = EFAULT;
+		ret = -EFAULT;
 		goto out;
 	}
 

From 1927e498aee1757b3df755a194cbfc5cc0f2b663 Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Mon, 9 May 2022 17:34:28 -0700
Subject: [PATCH 05/10] procfs: prevent unprivileged processes accessing
 fdinfo dir

The file permissions on the fdinfo dir were changed from S_IRUSR|S_IXUSR
to S_IRUGO|S_IXUGO, and a PTRACE_MODE_READ check was added for opening
the fdinfo files [1].  However, the ptrace permission check was not added
to the directory itself, allowing anyone to get the open FD numbers by
reading the fdinfo directory.

Add the missing ptrace permission check for opening the fdinfo directory.
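[editorial note] For illustration only (not part of the patch), an unprivileged process could previously enumerate another user's open FD numbers along these lines; with this fix the opendir() is expected to fail with EACCES.  The target PID argument is a placeholder.

#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	/* e.g. pass the PID of a process owned by another user */
	const char *pid = argc > 1 ? argv[1] : "1";
	struct dirent *d;
	char path[64];
	DIR *dir;

	snprintf(path, sizeof(path), "/proc/%s/fdinfo", pid);
	dir = opendir(path);
	if (!dir) {
		/* With the fix, the PTRACE_MODE_READ check rejects this open. */
		fprintf(stderr, "opendir(%s): %s\n", path, strerror(errno));
		return 1;
	}
	while ((d = readdir(dir)) != NULL)
		printf("fd %s\n", d->d_name);	/* leaks the set of open FDs */
	closedir(dir);
	return 0;
}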
[1] https://lkml.kernel.org/r/20210308170651.919148-1-kaleshsingh@google.com

Link: https://lkml.kernel.org/r/20210713162008.1056986-1-kaleshsingh@google.com
Fixes: 7bc3fa0172a4 ("procfs: allow reading fdinfo with PTRACE_MODE_READ")
Signed-off-by: Kalesh Singh
Cc: Kees Cook
Cc: Eric W. Biederman
Cc: Christian Brauner
Cc: Suren Baghdasaryan
Cc: Hridya Valsaraju
Cc: Jann Horn
Signed-off-by: Andrew Morton
---
 fs/proc/fd.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 172c86270b31..913bef0d2a36 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -72,7 +72,7 @@ out:
 	return 0;
 }
 
-static int seq_fdinfo_open(struct inode *inode, struct file *file)
+static int proc_fdinfo_access_allowed(struct inode *inode)
 {
 	bool allowed = false;
 	struct task_struct *task = get_proc_task(inode);
@@ -86,6 +86,16 @@ static int seq_fdinfo_open(struct inode *inode, struct file *file)
 	if (!allowed)
 		return -EACCES;
 
+	return 0;
+}
+
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+	int ret = proc_fdinfo_access_allowed(inode);
+
+	if (ret)
+		return ret;
+
 	return single_open(file, seq_show, inode);
 }
 
@@ -348,12 +358,23 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
 				  proc_fdinfo_instantiate);
 }
 
+static int proc_open_fdinfo(struct inode *inode, struct file *file)
+{
+	int ret = proc_fdinfo_access_allowed(inode);
+
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 const struct inode_operations proc_fdinfo_inode_operations = {
 	.lookup		= proc_lookupfdinfo,
 	.setattr	= proc_setattr,
 };
 
 const struct file_operations proc_fdinfo_operations = {
+	.open		= proc_open_fdinfo,
 	.read		= generic_read_dir,
 	.iterate_shared	= proc_readfdinfo,
 	.llseek		= generic_file_llseek,

From 260364d112bc822005224667c0c9b1b17a53eafd Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Mon, 9 May 2022 17:34:28 -0700
Subject: [PATCH 06/10] arm[64]/memremap: don't abuse pfn_valid() to ensure
 presence of linear map

The semantics of pfn_valid() is to check for the presence of the memory
map for a PFN, not whether a PFN is covered by the linear map.  The
memory map may be present for NOMAP memory regions, but they won't be
mapped in the linear mapping.  Accessing such regions via __va() when
they are memremap()'ed will cause a crash.

On v5.4.y the crash happens on qemu-arm with UEFI [1]:

<1>[ 0.084476] 8<--- cut here ---
<1>[ 0.084595] Unable to handle kernel paging request at virtual address dfb76000
<1>[ 0.084938] pgd = (ptrval)
<1>[ 0.085038] [dfb76000] *pgd=5f7fe801, *pte=00000000, *ppte=00000000
...
<4>[ 0.093923] [] (memcpy) from [] (dmi_setup+0x60/0x418)
<4>[ 0.094204] [] (dmi_setup) from [] (arm_dmi_init+0x8/0x10)
<4>[ 0.094408] [] (arm_dmi_init) from [] (do_one_initcall+0x50/0x228)
<4>[ 0.094619] [] (do_one_initcall) from [] (kernel_init_freeable+0x15c/0x1f8)
<4>[ 0.094841] [] (kernel_init_freeable) from [] (kernel_init+0x8/0x10c)
<4>[ 0.095057] [] (kernel_init) from [] (ret_from_fork+0x14/0x2c)

On kernels v5.10.y and newer the same crash won't reproduce on ARM
because commit b10d6bca8720 ("arch, drivers: replace for_each_membock()
with for_each_mem_range()") changed the way memory regions are registered
in the resource tree, but that merely covers up the problem.  On ARM64,
memory resources are registered in yet another way, and there the wrong
usage of pfn_valid() to check for availability of the linear map is
likewise only covered up.

Implement arch_memremap_can_ram_remap() on ARM and ARM64 to prevent
access to NOMAP regions via the linear mapping in memremap().
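[editorial note] For reference, the generic consumer of this hook is try_ram_remap() in kernel/iomem.c; a simplified sketch of it (paraphrased here, not part of this patch) shows why returning false for NOMAP regions keeps memremap(MEMREMAP_WB) off the linear map:

/* Simplified sketch of the generic side, for reference only. */
#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;	/* default: RAM with a memory map may be reused via __va() */
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/*
	 * Reuse the existing linear mapping only if the memory map exists,
	 * the page is not highmem, and the architecture confirms the region
	 * really is in the linear map (e.g. not NOMAP on arm/arm64).
	 */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL;	/* fall back to an ioremap-style mapping */
}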
Link: https://lore.kernel.org/all/Yl65zxGgFzF1Okac@sirena.org.uk
Link: https://lkml.kernel.org/r/20220426060107.7618-1-rppt@kernel.org
Signed-off-by: Mike Rapoport
Reported-by: "kernelci.org bot"
Tested-by: Mark Brown
Reviewed-by: Ard Biesheuvel
Acked-by: Catalin Marinas
Cc: Greg Kroah-Hartman
Cc: Mark Brown
Cc: Mark-PK Tsai
Cc: Russell King
Cc: Tony Lindgren
Cc: Will Deacon
Cc: [5.4+]
Signed-off-by: Andrew Morton
---
 arch/arm/include/asm/io.h   | 3 +++
 arch/arm/mm/ioremap.c       | 8 ++++++++
 arch/arm64/include/asm/io.h | 4 ++++
 arch/arm64/mm/ioremap.c     | 8 ++++++++
 4 files changed, 23 insertions(+)

diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 0c70eb688a00..2a0739a2350b 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -440,6 +440,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+					unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 #endif
 
 /*
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index aa08bcb72db9..290702328a33 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -493,3 +493,11 @@ void __init early_ioremap_init(void)
 {
 	early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+				 unsigned long flags)
+{
+	unsigned long pfn = PHYS_PFN(offset);
+
+	return memblock_is_map_memory(pfn);
+}
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7fd836bea7eb..3995652daf81 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+					unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
 #endif	/* __ASM_IO_H */
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b7c81dacabf0..b21f91cd830d 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
 {
 	early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+				 unsigned long flags)
+{
+	unsigned long pfn = PHYS_PFN(offset);
+
+	return pfn_is_map_memory(pfn);
+}

From 5fcaa7caf5fa7f3ad9eaba1897f226d520f963cc Mon Sep 17 00:00:00 2001
From: Martyna Szapar-Mudlaw
Date: Mon, 9 May 2022 17:34:29 -0700
Subject: [PATCH 07/10] mailmap: add entry for martyna.szapar-mudlaw@intel.com

A separate linux.intel.com account was created for submitting and
reviewing kernel patches, so the previously used primary Intel e-mail
address needs to be mapped to it.
Link: https://lkml.kernel.org/r/20220505132624.41802-1-martyna.szapar-mudlaw@linux.intel.com
Signed-off-by: Martyna Szapar-Mudlaw
Signed-off-by: Andrew Morton
---
 .mailmap | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.mailmap b/.mailmap
index 93458154ce7d..36a275ab8c52 100644
--- a/.mailmap
+++ b/.mailmap
@@ -249,6 +249,7 @@ Mark Yao
 Martin Kepplinger
 Martin Kepplinger
 Martin Kepplinger
+Martyna Szapar-Mudlaw
 Mathieu Othacehe
 Matthew Wilcox
 Matthew Wilcox

From 2839b0999c20c9f6bf353849c69370e121e2fa1a Mon Sep 17 00:00:00 2001
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Date: Mon, 9 May 2022 17:34:29 -0700
Subject: [PATCH 08/10] mm/kfence: reset PG_slab and memcg_data before freeing
 __kfence_pool

When kfence fails to initialize the kfence pool, it frees the pool, but
it does not reset memcg_data and the PG_slab flag.  Below is a BUG caused
by this.  Let's fix it by resetting memcg_data and the PG_slab flag
before the free.

[ 0.089149] BUG: Bad page state in process swapper/0 pfn:3d8e06
[ 0.089149] page:ffffea46cf638180 refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x3d8e06
[ 0.089150] memcg:ffffffff94a475d1
[ 0.089150] flags: 0x17ffffc0000200(slab|node=0|zone=2|lastcpupid=0x1fffff)
[ 0.089151] raw: 0017ffffc0000200 ffffea46cf638188 ffffea46cf638188 0000000000000000
[ 0.089152] raw: 0000000000000000 0000000000000000 00000000ffffffff ffffffff94a475d1
[ 0.089152] page dumped because: page still charged to cgroup
[ 0.089153] Modules linked in:
[ 0.089153] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G B W 5.18.0-rc1+ #965
[ 0.089154] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
[ 0.089154] Call Trace:
[ 0.089155]  <TASK>
[ 0.089155]  dump_stack_lvl+0x49/0x5f
[ 0.089157]  dump_stack+0x10/0x12
[ 0.089158]  bad_page.cold+0x63/0x94
[ 0.089159]  check_free_page_bad+0x66/0x70
[ 0.089160]  __free_pages_ok+0x423/0x530
[ 0.089161]  __free_pages_core+0x8e/0xa0
[ 0.089162]  memblock_free_pages+0x10/0x12
[ 0.089164]  memblock_free_late+0x8f/0xb9
[ 0.089165]  kfence_init+0x68/0x92
[ 0.089166]  start_kernel+0x789/0x992
[ 0.089167]  x86_64_start_reservations+0x24/0x26
[ 0.089168]  x86_64_start_kernel+0xa9/0xaf
[ 0.089170]  secondary_startup_64_no_verify+0xd5/0xdb
[ 0.089171]  </TASK>

Link: https://lkml.kernel.org/r/YnPG3pQrqfcgOlVa@hyeyoo
Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
Fixes: 8f0b36497303 ("mm: kfence: fix objcgs vector allocation")
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Marco Elver
Reviewed-by: Muchun Song
Cc: Alexander Potapenko
Cc: Dmitry Vyukov
Signed-off-by: Andrew Morton
---
 mm/kfence/core.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 9b2b5f56f4ae..11a954763be9 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -621,6 +621,16 @@ static bool __init kfence_init_pool_early(void)
 	 * fails for the first page, and therefore expect addr==__kfence_pool in
 	 * most failure cases.
 	 */
+	for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
+		struct slab *slab = virt_to_slab(p);
+
+		if (!slab)
+			continue;
+#ifdef CONFIG_MEMCG
+		slab->memcg_data = 0;
+#endif
+		__folio_clear_slab(slab_folio(slab));
+	}
 	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
 	__kfence_pool = NULL;
 	return false;

From 41c240099fe09377b6b9f8272e45d2267c843d3e Mon Sep 17 00:00:00 2001
From: Joel Savitz
Date: Mon, 9 May 2022 17:34:29 -0700
Subject: [PATCH 09/10] selftests: vm: Makefile: rename TARGETS to VMTARGETS

The tools/testing/selftests/vm/Makefile uses the variable TARGETS
internally to generate a list of platform-specific binary build targets
suffixed with _{32,64}.  When building the selftests using their own
Makefile directly, such as via a command run in a kernel tree that passes
TARGETS on the command line, one receives an error such as the following:

make: Entering directory '/root/linux/tools/testing/selftests'
make --no-builtin-rules ARCH=x86 -C ../../.. headers_install
make[1]: Entering directory '/root/linux'
  INSTALL ./usr/include
make[1]: Leaving directory '/root/linux'
make[1]: Entering directory '/root/linux/tools/testing/selftests/vm'
make[1]: *** No rule to make target 'vm.c', needed by '/root/linux/tools/testing/selftests/vm/vm_64'. Stop.
make[1]: Leaving directory '/root/linux/tools/testing/selftests/vm'
make: *** [Makefile:175: all] Error 2
make: Leaving directory '/root/linux/tools/testing/selftests'

The TARGETS variable passed to tools/testing/selftests/Makefile collides
with the TARGETS used in tools/testing/selftests/vm/Makefile, so rename
the latter to VMTARGETS, eliminating the collision with no functional
change.

Link: https://lkml.kernel.org/r/20220504213454.1282532-1-jsavitz@redhat.com
Fixes: f21fda8f6453 ("selftests: vm: pkeys: fix multilib builds for x86")
Signed-off-by: Joel Savitz
Acked-by: Nico Pache
Cc: Joel Savitz
Cc: Shuah Khan
Cc: Sandipan Das
Cc: Dave Hansen
Signed-off-by: Andrew Morton
---
 tools/testing/selftests/vm/Makefile | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 04a49e876a46..5b1ecd00695b 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -57,9 +57,9 @@ CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_prog
 CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c)
 CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie)
 
-TARGETS := protection_keys
-BINARIES_32 := $(TARGETS:%=%_32)
-BINARIES_64 := $(TARGETS:%=%_64)
+VMTARGETS := protection_keys
+BINARIES_32 := $(VMTARGETS:%=%_32)
+BINARIES_64 := $(VMTARGETS:%=%_64)
 
 ifeq ($(CAN_BUILD_WITH_NOPIE),1)
 CFLAGS += -no-pie
@@ -112,7 +112,7 @@ $(BINARIES_32): CFLAGS += -m32 -mxsave
 $(BINARIES_32): LDLIBS += -lrt -ldl -lm
 $(BINARIES_32): $(OUTPUT)/%_32: %.c
 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-32,$(t))))
 endif
 
 ifeq ($(CAN_BUILD_X86_64),1)
@@ -120,7 +120,7 @@ $(BINARIES_64): CFLAGS += -m64 -mxsave
 $(BINARIES_64): LDLIBS += -lrt -ldl
 $(BINARIES_64): $(OUTPUT)/%_64: %.c
 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-64,$(t))))
 endif
 
 # x86_64 users should be encouraged to install 32-bit libraries

From 9039b8335276569670caaf23606335946625e764 Mon Sep 17 00:00:00 2001
From: SeongJae Park
Date: Mon, 9 May 2022 17:34:29 -0700
Subject: [PATCH 10/10] MAINTAINERS: add a mailing list for DAMON development

This commit adds an open mailing list for DAMON development to the
MAINTAINERS file.

Link: https://lkml.kernel.org/r/20220503180741.137079-1-sj@kernel.org
Signed-off-by: SeongJae Park
Signed-off-by: Andrew Morton
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 2647adf30569..df6c341aca44 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5438,6 +5438,7 @@ F: net/ax25/sysctl_net_ax25.c
 
 DATA ACCESS MONITOR
 M:	SeongJae Park
+L:	damon@lists.linux.dev
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-kernel-mm-damon