Merge tag 'mm-hotfixes-stable-2023-10-24-09-40' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "20 hotfixes. 12 are cc:stable and the remainder address post-6.5
  issues or aren't considered necessary for earlier kernel versions"

* tag 'mm-hotfixes-stable-2023-10-24-09-40' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  maple_tree: add GFP_KERNEL to allocations in mas_expected_entries()
  selftests/mm: include mman header to access MREMAP_DONTUNMAP identifier
  mailmap: correct email aliasing for Oleksij Rempel
  mailmap: map Bartosz's old address to the current one
  mm/damon/sysfs: check DAMOS regions update progress from before_terminate()
  MAINTAINERS: Ondrej has moved
  kasan: disable kasan_non_canonical_hook() for HW tags
  kasan: print the original fault addr when access invalid shadow
  hugetlbfs: close race between MADV_DONTNEED and page fault
  hugetlbfs: extend hugetlb_vma_lock to private VMAs
  hugetlbfs: clear resv_map pointer if mmap fails
  mm: zswap: fix pool refcount bug around shrink_worker()
  mm/migrate: fix do_pages_move for compat pointers
  riscv: fix set_huge_pte_at() for NAPOT mappings when a swap entry is set
  riscv: handle VM_FAULT_[HWPOISON|HWPOISON_LARGE] faults instead of panicking
  mmap: fix error paths with dup_anon_vma()
  mmap: fix vma_iterator in error path of vma_merge()
  mm: fix vm_brk_flags() to not bail out while holding lock
  mm/mempolicy: fix set_mempolicy_home_node() previous VMA pointer
  mm/page_alloc: correct start page when guard page debug is enabled
Linus Torvalds 2023-10-24 09:52:16 -10:00
commit 4f82870119
19 changed files with 256 additions and 74 deletions


@@ -87,6 +87,7 @@ Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
 Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
+Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
 Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
 Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
 Ben Gardner <bgardner@wabtec.com>
@@ -450,9 +451,10 @@ Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
 Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
 Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
-Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
-Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de> <ore@pengutronix.de>
 Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
+Ondřej Jirman <megi@xff.cz> <megous@megous.com>
 Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org>
 Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>


@@ -6766,7 +6766,7 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7701.c
 DRM DRIVER FOR SITRONIX ST7703 PANELS
 M: Guido Günther <agx@sigxcpu.org>
 R: Purism Kernel Team <kernel@puri.sm>
-R: Ondrej Jirman <megous@megous.com>
+R: Ondrej Jirman <megi@xff.cz>
 S: Maintained
 F: Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
 F: drivers/gpu/drm/panel/panel-sitronix-st7703.c


@@ -72,7 +72,7 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 		}
 		pagefault_out_of_memory();
 		return;
-	} else if (fault & VM_FAULT_SIGBUS) {
+	} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
 		/* Kernel mode? Handle exceptions or die */
 		if (!user_mode(regs)) {
 			no_context(regs, addr);


@@ -183,15 +183,22 @@ void set_huge_pte_at(struct mm_struct *mm,
 		pte_t pte,
 		unsigned long sz)
 {
+	unsigned long hugepage_shift;
 	int i, pte_num;
 
-	if (!pte_napot(pte)) {
-		set_pte_at(mm, addr, ptep, pte);
-		return;
-	}
+	if (sz >= PGDIR_SIZE)
+		hugepage_shift = PGDIR_SHIFT;
+	else if (sz >= P4D_SIZE)
+		hugepage_shift = P4D_SHIFT;
+	else if (sz >= PUD_SIZE)
+		hugepage_shift = PUD_SHIFT;
+	else if (sz >= PMD_SIZE)
+		hugepage_shift = PMD_SHIFT;
+	else
+		hugepage_shift = PAGE_SHIFT;
 
-	pte_num = napot_pte_num(napot_cont_order(pte));
-	for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
+	pte_num = sz >> hugepage_shift;
+	for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
 		set_pte_at(mm, addr, ptep, pte);
 }


@@ -60,6 +60,7 @@ struct resv_map {
 	long adds_in_progress;
 	struct list_head region_cache;
 	long region_cache_count;
+	struct rw_semaphore rw_sema;
 #ifdef CONFIG_CGROUP_HUGETLB
 	/*
 	 * On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *,
 			  zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end,
 			  struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end);
 
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+				unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+			      struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+				     unsigned long *start, unsigned long *end)
+{
+	if (is_vm_hugetlb_page(vma))
+		__hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+				   struct zap_details *details)
+{
+	if (is_vm_hugetlb_page(vma))
+		__hugetlb_zap_end(vma, details);
+}
+
 void hugetlb_vma_lock_read(struct vm_area_struct *vma);
 void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
 void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -296,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
 {
 }
 
+static inline void hugetlb_zap_begin(
+				struct vm_area_struct *vma,
+				unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+				struct vm_area_struct *vma,
+				struct zap_details *details)
+{
+}
+
 static inline struct page *hugetlb_follow_page_mask(
 	struct vm_area_struct *vma, unsigned long address, unsigned int flags,
 	unsigned int *page_mask)
@@ -441,7 +473,7 @@ static inline long hugetlb_change_protection(
 	return 0;
 }
 
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 			struct vm_area_struct *vma, unsigned long start,
 			unsigned long end, struct page *ref_page,
 			zap_flags_t zap_flags)
@@ -1233,6 +1265,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
 	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
 }
 
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+	return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
 /*
  * Safe version of huge_pte_offset() to check the locks. See comments
  * above huge_pte_offset().


@@ -466,10 +466,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
 
 #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
 #endif /* LINUX_KASAN_H */


@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	/* Internal nodes */
 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
 	/* Add working room for split (2 nodes) + new parents */
-	mas_node_count(mas, nr_nodes + 3);
+	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
 
 	/* Detect if allocations run out */
 	mas->mas_flags |= MA_STATE_PREALLOC;


@@ -9,6 +9,7 @@
 
 #include <linux/maple_tree.h>
 #include <linux/module.h>
+#include <linux/rwsem.h>
 
 #define MTREE_ALLOC_MAX 0x2000000000000Ul
 #define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
 
 	for (i = 0; i <= nr_entries; i++)
 		mtree_store_range(mt, i*10, i*10 + 5,
 				  xa_mk_value(i), GFP_KERNEL);
 
 	mt_set_non_kernel(99999);
-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&newmt, &newmt_lock);
 	newmas.tree = &newmt;
 	mas_reset(&newmas);
 	mas_reset(&mas);
-	mas_lock(&newmas);
+	down_write(&newmt_lock);
 	mas.index = 0;
 	mas.last = 0;
 	if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
 	}
 	rcu_read_unlock();
 	mas_destroy(&newmas);
-	mas_unlock(&newmas);
 	mt_validate(&newmt);
 	mt_set_non_kernel(0);
-	mtree_destroy(&newmt);
+	__mt_destroy(&newmt);
+	up_write(&newmt_lock);
 }
 
 static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
+	mt_set_external_lock(&newmt, &newmt_lock);
 
 	for (i = 0; i <= nr_entries; i++)
 		mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 		mas.index = 0;
 		mas.last = 0;
 		rcu_read_lock();
-		mas_lock(&newmas);
+		down_write(&newmt_lock);
 		if (mas_expected_entries(&newmas, nr_entries)) {
 			printk("OOM!");
 			BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 			mas_store(&newmas, val);
 		}
 		mas_destroy(&newmas);
-		mas_unlock(&newmas);
 		rcu_read_unlock();
 		mt_validate(&newmt);
 		mt_set_non_kernel(0);
-		mtree_destroy(&newmt);
+		__mt_destroy(&newmt);
+		up_write(&newmt_lock);
 	}
 }
 #endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 	void *tmp;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
+	mt_set_external_lock(&newmt, &newmt_lock);
 
 	if (!zero_start)
 		i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 		mtree_store_range(mt, i*10, (i+1)*10 - gap,
 				  xa_mk_value(i), GFP_KERNEL);
 
-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
 	mt_set_non_kernel(99999);
-	mas_lock(&newmas);
+	down_write(&newmt_lock);
 	ret = mas_expected_entries(&newmas, nr_entries);
 	mt_set_non_kernel(0);
 	MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 	}
 	rcu_read_unlock();
 	mas_destroy(&newmas);
-	mas_unlock(&newmas);
 
-	mtree_destroy(&newmt);
+	__mt_destroy(&newmt);
+	up_write(&newmt_lock);
 }
 
 /* Duplicate many sizes of trees. Mainly to test expected entry values */


@@ -1208,6 +1208,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
 	return 0;
 }
 
+static bool damon_sysfs_schemes_regions_updating;
+
 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
 {
 	struct damon_target *t, *next;
@@ -1219,8 +1221,10 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
 	cmd = damon_sysfs_cmd_request.cmd;
 	if (kdamond && ctx == kdamond->damon_ctx &&
 	    (cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
-	     cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES)) {
+	     cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
+	    damon_sysfs_schemes_regions_updating) {
 		damon_sysfs_schemes_update_regions_stop(ctx);
+		damon_sysfs_schemes_regions_updating = false;
 		mutex_unlock(&damon_sysfs_lock);
 	}
 
@@ -1340,7 +1344,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
 static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
 {
 	struct damon_sysfs_kdamond *kdamond;
-	static bool damon_sysfs_schemes_regions_updating;
 	bool total_bytes_only = false;
 	int err = 0;


@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 		down_read(&vma_lock->rw_sema);
+	} else if (__vma_private_lock(vma)) {
+		struct resv_map *resv_map = vma_resv_map(vma);
+
+		down_read(&resv_map->rw_sema);
 	}
 }
 
@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 		up_read(&vma_lock->rw_sema);
+	} else if (__vma_private_lock(vma)) {
+		struct resv_map *resv_map = vma_resv_map(vma);
+
+		up_read(&resv_map->rw_sema);
 	}
 }
 
@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 		down_write(&vma_lock->rw_sema);
+	} else if (__vma_private_lock(vma)) {
+		struct resv_map *resv_map = vma_resv_map(vma);
+
+		down_write(&resv_map->rw_sema);
 	}
 }
 
@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 		up_write(&vma_lock->rw_sema);
+	} else if (__vma_private_lock(vma)) {
+		struct resv_map *resv_map = vma_resv_map(vma);
+
+		up_write(&resv_map->rw_sema);
 	}
 }
 
 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
 {
-	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-	if (!__vma_shareable_lock(vma))
-		return 1;
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
-	return down_write_trylock(&vma_lock->rw_sema);
+		return down_write_trylock(&vma_lock->rw_sema);
+	} else if (__vma_private_lock(vma)) {
+		struct resv_map *resv_map = vma_resv_map(vma);
+
+		return down_write_trylock(&resv_map->rw_sema);
+	}
+
+	return 1;
 }
 
 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 		lockdep_assert_held(&vma_lock->rw_sema);
+	} else if (__vma_private_lock(vma)) {
+		struct resv_map *resv_map = vma_resv_map(vma);
+
+		lockdep_assert_held(&resv_map->rw_sema);
 	}
 }
 
@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 		__hugetlb_vma_unlock_write_put(vma_lock);
+	} else if (__vma_private_lock(vma)) {
+		struct resv_map *resv_map = vma_resv_map(vma);
+
+		/* no free for anon vmas, but still need to unlock */
+		up_write(&resv_map->rw_sema);
 	}
 }
 
@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
 	kref_init(&resv_map->refs);
 	spin_lock_init(&resv_map->lock);
 	INIT_LIST_HEAD(&resv_map->regions);
+	init_rwsem(&resv_map->rw_sema);
 	resv_map->adds_in_progress = 0;
 
 	/*
@@ -1138,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
-	set_vma_private_data(vma, (get_vma_private_data(vma) &
-				HPAGE_RESV_MASK) | (unsigned long)map);
+	set_vma_private_data(vma, (unsigned long)map);
 }
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -5274,9 +5306,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	return len + old_addr - old_end;
 }
 
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
-			    unsigned long start, unsigned long end,
-			    struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			    unsigned long start, unsigned long end,
+			    struct page *ref_page, zap_flags_t zap_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -5405,16 +5437,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		tlb_flush_mmu_tlbonly(tlb);
 }
 
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
-			  struct vm_area_struct *vma, unsigned long start,
-			  unsigned long end, struct page *ref_page,
-			  zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+			 unsigned long *start, unsigned long *end)
 {
-	hugetlb_vma_lock_write(vma);
-	i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
+		return;
 
-	/* mmu notification performed in caller */
-	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+	adjust_range_if_pmd_sharing_possible(vma, start, end);
+	hugetlb_vma_lock_write(vma);
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+}
+
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+		       struct zap_details *details)
+{
+	zap_flags_t zap_flags = details ? details->zap_flags : 0;
+
+	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
+		return;
 
 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
 		/*
@@ -5427,11 +5468,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 		 * someone else.
 		 */
 		__hugetlb_vma_unlock_write_free(vma);
-		i_mmap_unlock_write(vma->vm_file->f_mapping);
 	} else {
-		i_mmap_unlock_write(vma->vm_file->f_mapping);
 		hugetlb_vma_unlock_write(vma);
 	}
+
+	if (vma->vm_file)
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6811,8 +6853,10 @@ out_err:
 	 */
 	if (chg >= 0 && add < 0)
 		region_abort(resv_map, from, to, regions_needed);
-	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 		kref_put(&resv_map->refs, resv_map_release);
+		set_vma_resv_map(vma, NULL);
+	}
 	return false;
 }


@@ -621,7 +621,7 @@ void kasan_report_async(void)
 }
 #endif /* CONFIG_KASAN_HW_TAGS */
 
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 /*
  * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
  * canonical half of the address space) cause out-of-bounds shadow memory reads


@@ -1683,7 +1683,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 			if (vma->vm_file) {
 				zap_flags_t zap_flags = details ?
 					details->zap_flags : 0;
-				__unmap_hugepage_range_final(tlb, vma, start, end,
+				__unmap_hugepage_range(tlb, vma, start, end,
 							     NULL, zap_flags);
 			}
 		} else
@@ -1728,8 +1728,12 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 				 start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
 	do {
-		unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+		unsigned long start = start_addr;
+		unsigned long end = end_addr;
+		hugetlb_zap_begin(vma, &start, &end);
+		unmap_single_vma(tlb, vma, start, end, &details,
 				 mm_wr_locked);
+		hugetlb_zap_end(vma, &details);
 	} while ((vma = mas_find(mas, tree_end - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
@@ -1753,9 +1757,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
 				address, end);
-	if (is_vm_hugetlb_page(vma))
-		adjust_range_if_pmd_sharing_possible(vma, &range.start,
-						     &range.end);
+	hugetlb_zap_begin(vma, &range.start, &range.end);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
@@ -1766,6 +1768,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	unmap_single_vma(&tlb, vma, address, end, details, false);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
+	hugetlb_zap_end(vma, details);
 }
 
 /**


@@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
 		 * the home node for vmas we already updated before.
 		 */
 		old = vma_policy(vma);
-		if (!old)
+		if (!old) {
+			prev = vma;
 			continue;
+		}
 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
 			err = -EOPNOTSUPP;
 			break;


@@ -2162,6 +2162,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 			 const int __user *nodes,
 			 int __user *status, int flags)
 {
+	compat_uptr_t __user *compat_pages = (void __user *)pages;
 	int current_node = NUMA_NO_NODE;
 	LIST_HEAD(pagelist);
 	int start, i;
@@ -2174,8 +2175,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 		int node;
 
 		err = -EFAULT;
-		if (get_user(p, pages + i))
-			goto out_flush;
+		if (in_compat_syscall()) {
+			compat_uptr_t cp;
+
+			if (get_user(cp, compat_pages + i))
+				goto out_flush;
+
+			p = compat_ptr(cp);
+		} else {
+			if (get_user(p, pages + i))
+				goto out_flush;
+		}
 		if (get_user(node, nodes + i))
 			goto out_flush;
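
The mm/migrate hunk above is the whole fix for 32-bit (compat) callers of do_pages_move(): such callers pass an array of 32-bit user pointers, so each entry has to be read as a compat_uptr_t and widened with compat_ptr(). Below is a hedged sketch of that read pattern pulled out into a standalone helper; the helper name is illustrative and does not exist in the kernel.

#include <linux/compat.h>
#include <linux/uaccess.h>

/*
 * Hedged sketch, not kernel source: read entry i from a user-supplied
 * array of user pointers, handling both native and compat callers.
 */
static int demo_get_user_ptr(const void __user * __user *pages,
			     unsigned long i, const void __user **p)
{
	if (in_compat_syscall()) {
		compat_uptr_t __user *compat_pages = (void __user *)pages;
		compat_uptr_t cp;

		if (get_user(cp, compat_pages + i))
			return -EFAULT;
		*p = compat_ptr(cp);
	} else {
		if (get_user(*p, pages + i))
			return -EFAULT;
	}
	return 0;
}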


@@ -583,11 +583,12 @@ again:
  * dup_anon_vma() - Helper function to duplicate anon_vma
  * @dst: The destination VMA
  * @src: The source VMA
+ * @dup: Pointer to the destination VMA when successful.
  *
  * Returns: 0 on success.
  */
 static inline int dup_anon_vma(struct vm_area_struct *dst,
-		struct vm_area_struct *src)
+		struct vm_area_struct *src, struct vm_area_struct **dup)
 {
 	/*
 	 * Easily overlooked: when mprotect shifts the boundary, make sure the
@@ -595,9 +596,15 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
 	 * anon pages imported.
 	 */
 	if (src->anon_vma && !dst->anon_vma) {
+		int ret;
+
 		vma_assert_write_locked(dst);
 		dst->anon_vma = src->anon_vma;
-		return anon_vma_clone(dst, src);
+		ret = anon_vma_clone(dst, src);
+		if (ret)
+			return ret;
+
+		*dup = dst;
 	}
 
 	return 0;
@@ -624,6 +631,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	       unsigned long start, unsigned long end, pgoff_t pgoff,
 	       struct vm_area_struct *next)
 {
+	struct vm_area_struct *anon_dup = NULL;
 	bool remove_next = false;
 	struct vma_prepare vp;
 
@@ -633,7 +641,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		remove_next = true;
 		vma_start_write(next);
-		ret = dup_anon_vma(vma, next);
+		ret = dup_anon_vma(vma, next, &anon_dup);
 		if (ret)
 			return ret;
 	}
@@ -661,6 +669,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	return 0;
 
 nomem:
+	if (anon_dup)
+		unlink_anon_vmas(anon_dup);
 	return -ENOMEM;
 }
 
@@ -860,6 +870,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 {
 	struct vm_area_struct *curr, *next, *res;
 	struct vm_area_struct *vma, *adjust, *remove, *remove2;
+	struct vm_area_struct *anon_dup = NULL;
 	struct vma_prepare vp;
 	pgoff_t vma_pgoff;
 	int err = 0;
@@ -927,18 +938,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 			vma_start_write(next);
 			remove = next;			/* case 1 */
 			vma_end = next->vm_end;
-			err = dup_anon_vma(prev, next);
+			err = dup_anon_vma(prev, next, &anon_dup);
 			if (curr) {			/* case 6 */
 				vma_start_write(curr);
 				remove = curr;
 				remove2 = next;
 				if (!next->anon_vma)
-					err = dup_anon_vma(prev, curr);
+					err = dup_anon_vma(prev, curr, &anon_dup);
 			}
 		} else if (merge_prev) {		/* case 2 */
 			if (curr) {
 				vma_start_write(curr);
-				err = dup_anon_vma(prev, curr);
+				err = dup_anon_vma(prev, curr, &anon_dup);
 				if (end == curr->vm_end) {	/* case 7 */
 					remove = curr;
 				} else {			/* case 5 */
@@ -954,7 +965,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 			vma_end = addr;
 			adjust = next;
 			adj_start = -(prev->vm_end - addr);
-			err = dup_anon_vma(next, prev);
+			err = dup_anon_vma(next, prev, &anon_dup);
 		} else {
 			/*
 			 * Note that cases 3 and 8 are the ONLY ones where prev
@@ -968,14 +979,14 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 				vma_pgoff = curr->vm_pgoff;
 				vma_start_write(curr);
 				remove = curr;
-				err = dup_anon_vma(next, curr);
+				err = dup_anon_vma(next, curr, &anon_dup);
 			}
 		}
 	}
 
 	/* Error in anon_vma clone. */
 	if (err)
-		return NULL;
+		goto anon_vma_fail;
 
 	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
 		vma_expanded = true;
@@ -988,7 +999,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 	}
 
 	if (vma_iter_prealloc(vmi, vma))
-		return NULL;
+		goto prealloc_fail;
 
 	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
 	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
@@ -1016,6 +1027,15 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 	vma_complete(&vp, vmi, mm);
 	khugepaged_enter_vma(res, vm_flags);
 	return res;
+
+prealloc_fail:
+	if (anon_dup)
+		unlink_anon_vmas(anon_dup);
+
+anon_vma_fail:
+	vma_iter_set(vmi, addr);
+	vma_iter_load(vmi);
+	return NULL;
 }
 
 /*
@@ -3143,13 +3163,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 	if (!len)
 		return 0;
 
-	if (mmap_write_lock_killable(mm))
-		return -EINTR;
-
 	/* Until we need other flags, refuse anything except VM_EXEC. */
 	if ((flags & (~VM_EXEC)) != 0)
 		return -EINVAL;
 
+	if (mmap_write_lock_killable(mm))
+		return -EINTR;
+
 	ret = check_brk_limits(addr, len);
 	if (ret)
 		goto limits_failed;


@@ -6475,6 +6475,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
 			next_page = page;
 			current_buddy = page + size;
 		}
+		page = next_page;
 
 		if (set_page_guard(zone, current_buddy, high, migratetype))
 			continue;
@@ -6482,7 +6483,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
 		if (current_buddy != target) {
 			add_to_free_list(current_buddy, zone, high, migratetype);
 			set_buddy_order(current_buddy, high);
-			page = next_page;
 		}
 	}
 }


@@ -1383,8 +1383,8 @@ reject:
 
 shrink:
 	pool = zswap_pool_last_get();
-	if (pool)
-		queue_work(shrink_wq, &pool->shrink_work);
+	if (pool && !queue_work(shrink_wq, &pool->shrink_work))
+		zswap_pool_put(pool);
 	goto reject;
 }
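
The zswap hunk above plugs a pool reference leak: zswap_pool_last_get() takes a reference for the shrink work item, but queue_work() returns false when that work is already queued, and previously nothing dropped the extra reference in that case. A hedged sketch of the general take-then-drop pattern follows; the struct and helper names are illustrative, not zswap's.

#include <linux/kref.h>
#include <linux/workqueue.h>

struct demo_pool {
	struct kref ref;
	struct work_struct shrink_work;
};

static void demo_pool_release(struct kref *ref)
{
	/* illustrative only: free the pool embedding this kref */
}

static void demo_schedule_shrink(struct workqueue_struct *wq,
				 struct demo_pool *pool)
{
	kref_get(&pool->ref);	/* reference owned by the queued work */
	if (!queue_work(wq, &pool->shrink_work))
		kref_put(&pool->ref, demo_pool_release);	/* already queued */
}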


@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _TOOLS__RWSEM_H
+#define _TOOLS__RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+	pthread_rwlock_t lock;
+};
+
+static inline int init_rwsem(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+static inline int exit_rwsem(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_destroy(&sem->lock);
+}
+
+static inline int down_read(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int up_read(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline int down_write(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int up_write(struct rw_semaphore *sem)
+{
+	return pthread_rwlock_unlock(&sem->lock);
+}
+#endif /* _TOOLS_RWSEM_H */
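
The new header above gives the userspace maple tree test a pthread-backed stand-in for the kernel's rw_semaphore API. A hedged usage sketch (names are illustrative) of taking such an external lock around a bulk update, mirroring the down_write()/up_write() pairs added in the test hunks earlier:

#include <linux/rwsem.h>	/* the pthread-backed shim shown above */

static struct rw_semaphore demo_lock;

static void demo_bulk_update(void)
{
	init_rwsem(&demo_lock);

	down_write(&demo_lock);
	/* ... modify the structure guarded by the external lock ... */
	up_write(&demo_lock);

	exit_rwsem(&demo_lock);
}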


@@ -7,6 +7,7 @@
  */
 #define _GNU_SOURCE
 #include <sys/mman.h>
+#include <linux/mman.h>
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>