Merge tag 'mm-hotfixes-stable-2023-03-04-13-12' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "17 hotfixes.

  Eight are for MM and seven are for other parts of the kernel. Seven
  are cc:stable and eight address post-6.3 issues or were judged
  unsuitable for -stable backporting"

* tag 'mm-hotfixes-stable-2023-03-04-13-12' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mailmap: map Dikshita Agarwal's old address to his current one
  mailmap: map Vikash Garodia's old address to his current one
  fs/cramfs/inode.c: initialize file_ra_state
  fs: hfsplus: fix UAF issue in hfsplus_put_super
  panic: fix the panic_print NMI backtrace setting
  lib: parser: update documentation for match_NUMBER functions
  kasan, x86: don't rename memintrinsics in uninstrumented files
  kasan: test: fix test for new meminstrinsic instrumentation
  kasan: treat meminstrinsic as builtins in uninstrumented files
  kasan: emit different calls for instrumentable memintrinsics
  ocfs2: fix non-auto defrag path not working issue
  ocfs2: fix defrag path triggering jbd2 ASSERT
  mailmap: map Georgi Djakov's old Linaro address to his current one
  mm/hwpoison: convert TTU_IGNORE_HWPOISON to TTU_HWPOISON
  lib/zlib: DFLTCC deflate does not write all available bits for Z_NO_FLUSH
  mm/damon/paddr: fix missing folio_put()
  mm/mremap: fix dup_anon_vma() in vma_merge() case 4
commit 20fdfd55ab by Linus Torvalds, 2023-03-04 13:32:50 -08:00
19 changed files with 147 additions and 82 deletions

View file

@@ -121,6 +121,7 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
 <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
+Dikshita Agarwal <dikshita@qti.qualcomm.com> <dikshita@codeaurora.org>
 Dmitry Baryshkov <dbaryshkov@gmail.com>
 Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
 Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
@@ -150,6 +151,7 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
+Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
@@ -441,6 +443,7 @@ Vasily Averin <vasily.averin@linux.dev> <vvs@openvz.org>
 Vasily Averin <vasily.averin@linux.dev> <vvs@parallels.com>
 Vasily Averin <vasily.averin@linux.dev> <vvs@sw.ru>
 Valentin Schneider <vschneid@redhat.com> <valentin.schneider@arm.com>
+Vikash Garodia <quic_vgarodia@quicinc.com> <vgarodia@codeaurora.org>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
 Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>

View file

@@ -85,25 +85,6 @@ char *strcpy(char *dest, const char *src);
 char *strcat(char *dest, const char *src);
 int strcmp(const char *cs, const char *ct);
-#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
-/*
- * For files that not instrumented (e.g. mm/slub.c) we
- * should use not instrumented version of mem* functions.
- */
-#undef memcpy
-#define memcpy(dst, src, len) __memcpy(dst, src, len)
-#undef memmove
-#define memmove(dst, src, len) __memmove(dst, src, len)
-#undef memset
-#define memset(s, c, n) __memset(s, c, n)
-#ifndef __NO_FORTIFY
-#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
-#endif
-#endif
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 #define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
 void __memcpy_flushcache(void *dst, const void *src, size_t cnt);

View file

@@ -183,7 +183,7 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
 unsigned int len)
 {
 struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
-struct file_ra_state ra;
+struct file_ra_state ra = {};
 struct page *pages[BLKS_PER_BUF];
 unsigned i, blocknr, buffer;
 unsigned long devsize;
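
Note: the one-line change above is the whole fix: the on-stack readahead state was previously handed to the readahead code with indeterminate contents. A minimal userspace sketch of the same pitfall and the '= {}' cure (illustrative only, not cramfs code):

    #include <stdio.h>

    /* stand-in for struct file_ra_state: a few bookkeeping fields */
    struct ra_state { unsigned long start, size, async_size; };

    int main(void)
    {
        /* A plain 'struct ra_state ra;' would leave every field indeterminate;
         * reading them, as the readahead code does, is undefined behaviour. */
        struct ra_state ra = {};   /* '= {}' zero-initializes all fields */

        printf("%lu %lu %lu\n", ra.start, ra.size, ra.async_size);
        return 0;
    }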

View file

@@ -295,11 +295,11 @@ static void hfsplus_put_super(struct super_block *sb)
 hfsplus_sync_fs(sb, 1);
 }
-iput(sbi->alloc_file);
-iput(sbi->hidden_dir);
 hfs_btree_close(sbi->attr_tree);
 hfs_btree_close(sbi->cat_tree);
 hfs_btree_close(sbi->ext_tree);
+iput(sbi->alloc_file);
+iput(sbi->hidden_dir);
 kfree(sbi->s_vhdr_buf);
 kfree(sbi->s_backup_vhdr_buf);
 unload_nls(sbi->nls);
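
Note: the reordering above is the entire fix: hfs_btree_close() still dereferences the inodes backing the trees, so those inodes may only be dropped after the trees are closed. A self-contained userspace sketch of the same teardown-ordering rule (the names here are illustrative, not hfsplus APIs):

    #include <stdlib.h>
    #include <string.h>

    struct fake_inode { char *data; };
    struct fake_btree { struct fake_inode *backing; };

    static void fake_iput(struct fake_inode *i) { free(i->data); free(i); }

    static void fake_btree_close(struct fake_btree *t)
    {
        memset(t->backing->data, 0, 16);  /* closing still touches the inode */
        free(t);
    }

    int main(void)
    {
        struct fake_inode *ino = malloc(sizeof(*ino));
        struct fake_btree *tree = malloc(sizeof(*tree));

        ino->data = malloc(16);
        tree->backing = ino;

        fake_btree_close(tree);  /* close the user of the inode first ...  */
        fake_iput(ino);          /* ... then drop the inode; the old order
                                    would have had close() touch freed memory */
        return 0;
    }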

View file

@@ -105,14 +105,6 @@ static int __ocfs2_move_extent(handle_t *handle,
 */
 replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
-ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
-context->et.et_root_bh,
-OCFS2_JOURNAL_ACCESS_WRITE);
-if (ret) {
-mlog_errno(ret);
-goto out;
-}
 ret = ocfs2_split_extent(handle, &context->et, path, index,
 &replace_rec, context->meta_ac,
 &context->dealloc);
@@ -121,8 +113,6 @@ static int __ocfs2_move_extent(handle_t *handle,
 goto out;
 }
-ocfs2_journal_dirty(handle, context->et.et_root_bh);
 context->new_phys_cpos = new_p_cpos;
 /*
@@ -444,7 +434,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
 bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
-le16_to_cpu(bg->bg_bits))) {
+(le16_to_cpu(bg->bg_bits) << bits_per_unit))) {
 *ret_bh = gd_bh;
 *vict_bit = (vict_blkno - blkno) >>
@@ -559,6 +549,7 @@ static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
 last_free_bits++;
 if (last_free_bits == move_len) {
+i -= move_len;
 *goal_bit = i;
 *phys_cpos = base_cpos + i;
 break;
@@ -1030,18 +1021,19 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
 context->range = &range;
+/*
+ * ok, the default theshold for the defragmentation
+ * is 1M, since our maximum clustersize was 1M also.
+ * any thought?
+ */
+if (!range.me_threshold)
+range.me_threshold = 1024 * 1024;
+if (range.me_threshold > i_size_read(inode))
+range.me_threshold = i_size_read(inode);
 if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
 context->auto_defrag = 1;
-/*
- * ok, the default theshold for the defragmentation
- * is 1M, since our maximum clustersize was 1M also.
- * any thought?
- */
-if (!range.me_threshold)
-range.me_threshold = 1024 * 1024;
-if (range.me_threshold > i_size_read(inode))
-range.me_threshold = i_size_read(inode);
 if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
 context->partial = 1;

View file

@@ -94,7 +94,7 @@ enum ttu_flags {
 TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
 TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
 TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
-TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
+TTU_HWPOISON = 0x20, /* do convert pte to hwpoison entry */
 TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
 * and caller guarantees they will
 * do a final flush if necessary */
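
Note: besides the rename, the flag's sense is inverted: try_to_unmap() now converts PTEs to hwpoison entries only when TTU_HWPOISON is passed, rather than doing so unless TTU_IGNORE_HWPOISON was passed. A sketch of how a caller's logic changes (it mirrors the memory-failure.c hunks further down; the condition name is made up):

    /* old, opt-out style: conversion happened unless the caller opted out */
    enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
    if (keep_page_mapped_usable)            /* hypothetical condition */
            ttu |= TTU_IGNORE_HWPOISON;

    /* new, opt-in style: the caller requests the conversion explicitly */
    enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
    if (keep_page_mapped_usable)
            ttu &= ~TTU_HWPOISON;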

View file

@@ -212,9 +212,6 @@ static void panic_print_sys_info(bool console_flush)
 return;
 }
-if (panic_print & PANIC_PRINT_ALL_CPU_BT)
-trigger_all_cpu_backtrace();
 if (panic_print & PANIC_PRINT_TASK_INFO)
 show_state();
@@ -244,6 +241,30 @@ void check_panic_on_warn(const char *origin)
 origin, limit);
 }
+/*
+ * Helper that triggers the NMI backtrace (if set in panic_print)
+ * and then performs the secondary CPUs shutdown - we cannot have
+ * the NMI backtrace after the CPUs are off!
+ */
+static void panic_other_cpus_shutdown(bool crash_kexec)
+{
+if (panic_print & PANIC_PRINT_ALL_CPU_BT)
+trigger_all_cpu_backtrace();
+/*
+ * Note that smp_send_stop() is the usual SMP shutdown function,
+ * which unfortunately may not be hardened to work in a panic
+ * situation. If we want to do crash dump after notifier calls
+ * and kmsg_dump, we will need architecture dependent extra
+ * bits in addition to stopping other CPUs, hence we rely on
+ * crash_smp_send_stop() for that.
+ */
+if (!crash_kexec)
+smp_send_stop();
+else
+crash_smp_send_stop();
+}
 /**
 * panic - halt the system
 * @fmt: The text string to print
@@ -334,23 +355,10 @@ void panic(const char *fmt, ...)
 *
 * Bypass the panic_cpu check and call __crash_kexec directly.
 */
-if (!_crash_kexec_post_notifiers) {
+if (!_crash_kexec_post_notifiers)
 __crash_kexec(NULL);
-/*
- * Note smp_send_stop is the usual smp shutdown function, which
- * unfortunately means it may not be hardened to work in a
- * panic situation.
- */
-smp_send_stop();
-} else {
-/*
- * If we want to do crash dump after notifier calls and
- * kmsg_dump, we will need architecture dependent extra
- * works in addition to stopping other CPUs.
- */
-crash_smp_send_stop();
-}
+panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
 /*
 * Run any panic handlers, including those that might need to

View file

@@ -49,6 +49,15 @@ menuconfig KASAN
 if KASAN
+config CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=kernel-address -mllvm -asan-kernel-mem-intrinsic-prefix=1)) || \
+(CC_IS_GCC && $(cc-option,-fsanitize=kernel-address --param asan-kernel-mem-intrinsic-prefix=1))
+# Don't define it if we don't need it: compilation of the test uses
+# this variable to decide how the compiler should treat builtins.
+depends on !KASAN_HW_TAGS
+help
+The compiler is able to prefix memintrinsics with __asan or __hwasan.
 choice
 prompt "KASAN mode"
 default KASAN_GENERIC
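
Note: in effect, the new CC_HAS_KASAN_MEMINTRINSIC_PREFIX option records whether the compiler supports Clang's "-mllvm -asan-kernel-mem-intrinsic-prefix=1" or GCC's "--param asan-kernel-mem-intrinsic-prefix=1". A conceptual sketch (not literal compiler output) of what that prefixing means for instrumented files:

    /* source in an instrumented translation unit */
    memcpy(dst, src, len);

    /* is emitted against the checked KASAN entry points instead, roughly: */
    __asan_memcpy(dst, src, len);     /* generic KASAN            */
    __hwasan_memcpy(dst, src, len);   /* software tag-based KASAN */

    /* uninstrumented files keep calling the plain builtin memcpy(). */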

View file

@@ -133,7 +133,7 @@ EXPORT_SYMBOL(match_token);
 * as a number in that base.
 *
 * Return: On success, sets @result to the integer represented by the
-* string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+* string and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
 static int match_number(substring_t *s, int *result, int base)
 {
@@ -165,7 +165,7 @@ static int match_number(substring_t *s, int *result, int base)
 * as a number in that base.
 *
 * Return: On success, sets @result to the integer represented by the
-* string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+* string and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
 static int match_u64int(substring_t *s, u64 *result, int base)
 {
@@ -189,7 +189,7 @@ static int match_u64int(substring_t *s, u64 *result, int base)
 * Description: Attempts to parse the &substring_t @s as a decimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
-* and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+* and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
 int match_int(substring_t *s, int *result)
 {
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(match_int);
 * Description: Attempts to parse the &substring_t @s as a decimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
-* and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+* and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
 int match_uint(substring_t *s, unsigned int *result)
 {
@@ -228,7 +228,7 @@ EXPORT_SYMBOL(match_uint);
 * integer.
 *
 * Return: On success, sets @result to the integer represented by the string
-* and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+* and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
 int match_u64(substring_t *s, u64 *result)
 {
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(match_u64);
 * Description: Attempts to parse the &substring_t @s as an octal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
-* and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+* and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
 int match_octal(substring_t *s, int *result)
 {
@@ -260,7 +260,7 @@ EXPORT_SYMBOL(match_octal);
 * Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
 *
 * Return: On success, sets @result to the integer represented by the string
-* and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+* and returns 0. Returns -EINVAL or -ERANGE on failure.
 */
 int match_hex(substring_t *s, int *result)
 {
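
Note: for context on the corrected kerneldoc, these helpers are normally reached through match_token() when parsing option strings. A kernel-style sketch of the usual calling pattern (the option table below is made up; the parser API is the one documented above):

    #include <linux/errno.h>
    #include <linux/parser.h>

    enum { Opt_size, Opt_err };

    static const match_table_t tokens = {
            { Opt_size, "size=%d" },
            { Opt_err,  NULL },
    };

    static int parse_size_option(char *p)
    {
            substring_t args[MAX_OPT_ARGS];
            int size;

            if (match_token(p, tokens, args) != Opt_size)
                    return -EINVAL;
            /* per the updated docs: only -EINVAL or -ERANGE on failure */
            if (match_int(&args[0], &size))
                    return -EINVAL;
            return size;
    }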

View file

@@ -420,9 +420,11 @@ static inline void flush_pending(
 z_streamp strm
 )
 {
+unsigned len;
 deflate_state *s = (deflate_state *) strm->state;
-unsigned len = s->pending;
 bi_flush(s);
+len = s->pending;
 if (len > strm->avail_out) len = strm->avail_out;
 if (len == 0) return;
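
Note: the hunk above fixes an order-of-operations bug: the old code snapshotted s->pending before bi_flush(), so bytes that bi_flush() moves into the pending buffer (the DFLTCC Z_NO_FLUSH case named in the commit title) were never copied out. A tiny standalone illustration of the pattern (not zlib code):

    #include <stdio.h>

    struct buf { unsigned pending; };

    /* stands in for bi_flush(): flushing bit-buffer remainders grows 'pending' */
    static void flush_bits(struct buf *b) { b->pending += 1; }

    int main(void)
    {
        struct buf b = { .pending = 4 };
        unsigned before = b.pending;   /* old code: snapshot taken too early */

        flush_bits(&b);

        unsigned after = b.pending;    /* fixed code: read after flushing */
        printf("old copies %u bytes, fixed copies %u\n", before, after);
        return 0;
    }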

View file

@@ -250,12 +250,11 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 folio_put(folio);
 continue;
 }
-if (folio_test_unevictable(folio)) {
+if (folio_test_unevictable(folio))
 folio_putback_lru(folio);
-} else {
+else
 list_add(&folio->lru, &folio_list);
-folio_put(folio);
-}
+folio_put(folio);
 }
 applied = reclaim_pages(&folio_list);
 cond_resched();

View file

@@ -35,7 +35,14 @@ CFLAGS_shadow.o := $(CC_FLAGS_KASAN_RUNTIME)
 CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
 CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
-CFLAGS_KASAN_TEST := $(CFLAGS_KASAN) -fno-builtin $(call cc-disable-warning, vla)
+CFLAGS_KASAN_TEST := $(CFLAGS_KASAN) $(call cc-disable-warning, vla)
+ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+# If compiler instruments memintrinsics by prefixing them with __asan/__hwasan,
+# we need to treat them normally (as builtins), otherwise the compiler won't
+# recognize them as instrumentable. If it doesn't instrument them, we need to
+# pass -fno-builtin, so the compiler doesn't inline them.
+CFLAGS_KASAN_TEST += -fno-builtin
+endif
 CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
 CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)

View file

@@ -666,4 +666,8 @@ void __hwasan_storeN_noabort(unsigned long addr, size_t size);
 void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);
+void *__hwasan_memset(void *addr, int c, size_t len);
+void *__hwasan_memmove(void *dest, const void *src, size_t len);
+void *__hwasan_memcpy(void *dest, const void *src, size_t len);
 #endif /* __MM_KASAN_KASAN_H */

View file

@@ -165,6 +165,15 @@ static void kasan_test_exit(struct kunit *test)
 kunit_skip((test), "Test requires " #config "=n"); \
 } while (0)
+#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do { \
+if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
+break; /* No compiler instrumentation. */ \
+if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)) \
+break; /* Should always be instrumented! */ \
+if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) \
+kunit_skip((test), "Test requires checked mem*()"); \
+} while (0)
 static void kmalloc_oob_right(struct kunit *test)
 {
 char *ptr;
@@ -454,6 +463,8 @@ static void kmalloc_oob_16(struct kunit *test)
 u64 words[2];
 } *ptr1, *ptr2;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 /* This test is specifically crafted for the generic mode. */
 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -476,6 +487,8 @@ static void kmalloc_uaf_16(struct kunit *test)
 u64 words[2];
 } *ptr1, *ptr2;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -498,6 +511,8 @@ static void kmalloc_oob_memset_2(struct kunit *test)
 char *ptr;
 size_t size = 128 - KASAN_GRANULE_SIZE;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 ptr = kmalloc(size, GFP_KERNEL);
 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -511,6 +526,8 @@ static void kmalloc_oob_memset_4(struct kunit *test)
 char *ptr;
 size_t size = 128 - KASAN_GRANULE_SIZE;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 ptr = kmalloc(size, GFP_KERNEL);
 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -524,6 +541,8 @@ static void kmalloc_oob_memset_8(struct kunit *test)
 char *ptr;
 size_t size = 128 - KASAN_GRANULE_SIZE;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 ptr = kmalloc(size, GFP_KERNEL);
 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -537,6 +556,8 @@ static void kmalloc_oob_memset_16(struct kunit *test)
 char *ptr;
 size_t size = 128 - KASAN_GRANULE_SIZE;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 ptr = kmalloc(size, GFP_KERNEL);
 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -550,6 +571,8 @@ static void kmalloc_oob_in_memset(struct kunit *test)
 char *ptr;
 size_t size = 128 - KASAN_GRANULE_SIZE;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 ptr = kmalloc(size, GFP_KERNEL);
 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -566,6 +589,8 @@ static void kmalloc_memmove_negative_size(struct kunit *test)
 size_t size = 64;
 size_t invalid_size = -2;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 /*
 * Hardware tag-based mode doesn't check memmove for negative size.
 * As a result, this test introduces a side-effect memory corruption,
@@ -590,6 +615,8 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
 size_t size = 64;
 size_t invalid_size = size;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 ptr = kmalloc(size, GFP_KERNEL);
 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -618,6 +645,8 @@ static void kmalloc_uaf_memset(struct kunit *test)
 char *ptr;
 size_t size = 33;
+KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 /*
 * Only generic KASAN uses quarantine, which is required to avoid a
 * kernel memory corruption this test causes.

View file

@@ -38,11 +38,14 @@ bool __kasan_check_write(const volatile void *p, unsigned int size)
 }
 EXPORT_SYMBOL(__kasan_check_write);
-#ifndef CONFIG_GENERIC_ENTRY
+#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
 /*
 * CONFIG_GENERIC_ENTRY relies on compiler emitted mem*() calls to not be
 * instrumented. KASAN enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
+*
+* If we have a compiler that can instrument meminstrinsics, never override
+* these, so that non-instrumented files can safely consider them as builtins.
 */
 #undef memset
 void *memset(void *addr, int c, size_t len)
@@ -107,6 +110,17 @@ void *__asan_memcpy(void *dest, const void *src, size_t len)
 }
 EXPORT_SYMBOL(__asan_memcpy);
+#ifdef CONFIG_KASAN_SW_TAGS
+void *__hwasan_memset(void *addr, int c, size_t len) __alias(__asan_memset);
+EXPORT_SYMBOL(__hwasan_memset);
+#ifdef __HAVE_ARCH_MEMMOVE
+void *__hwasan_memmove(void *dest, const void *src, size_t len) __alias(__asan_memmove);
+EXPORT_SYMBOL(__hwasan_memmove);
+#endif
+void *__hwasan_memcpy(void *dest, const void *src, size_t len) __alias(__asan_memcpy);
+EXPORT_SYMBOL(__hwasan_memcpy);
+#endif
 void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
 void *shadow_start, *shadow_end;
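
Note: the __hwasan_* definitions above are pure aliases: in kernel headers, __alias(sym) expands to __attribute__((__alias__(#sym))), so each __hwasan_mem* symbol resolves to the matching __asan_mem* body. A standalone illustration of the idiom (GCC/Clang; the names are made up):

    #include <stdio.h>
    #include <string.h>

    void *my_memset(void *s, int c, size_t n) { return memset(s, c, n); }

    /* a second symbol for the same code, which is what __alias() arranges above */
    void *my_memset_alias(void *s, int c, size_t n)
            __attribute__((alias("my_memset")));

    int main(void)
    {
        char buf[9] = "";
        my_memset_alias(buf, 'x', 8);
        printf("%s\n", buf);
        return 0;
    }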

View file

@@ -1069,7 +1069,7 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p)
 * cache and swap cache(ie. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
-* try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
+* try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 * - clear dirty bit to prevent IO
 * - remove from LRU
@@ -1486,7 +1486,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 int flags, struct page *hpage)
 {
 struct folio *folio = page_folio(hpage);
-enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
+enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
 struct address_space *mapping;
 LIST_HEAD(tokill);
 bool unmap_success;
@@ -1516,7 +1516,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 if (PageSwapCache(p)) {
 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
-ttu |= TTU_IGNORE_HWPOISON;
+ttu &= ~TTU_HWPOISON;
 }
 /*
@@ -1531,7 +1531,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 if (page_mkclean(hpage)) {
 SetPageDirty(hpage);
 } else {
-ttu |= TTU_IGNORE_HWPOISON;
+ttu &= ~TTU_HWPOISON;
 pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
 pfn);
 }

View file

@@ -973,7 +973,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 vma_end = addr;
 adjust = mid;
 adj_next = -(vma->vm_end - addr);
-err = dup_anon_vma(res, adjust);
+err = dup_anon_vma(adjust, prev);
 } else {
 vma = next; /* case 3 */
 vma_start = addr;

View file

@@ -1602,7 +1602,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 /* Update high watermark before we lower rss */
 update_hiwater_rss(mm);
-if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
+if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 if (folio_test_hugetlb(folio)) {
 hugetlb_count_sub(folio_nr_pages(folio), mm);

View file

@@ -1,5 +1,14 @@
 # SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+# Safe for compiler to generate meminstrinsic calls in uninstrumented files.
+CFLAGS_KASAN_NOSANITIZE :=
+else
+# Don't let compiler generate memintrinsic calls in uninstrumented files
+# because they are instrumented.
+CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+endif
 KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
 cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
@@ -38,6 +47,11 @@ endif
 CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+# Instrument memcpy/memset/memmove calls by using instrumented __asan_mem*()
+# instead. With compilers that don't support this option, compiler-inserted
+# memintrinsics won't be checked by KASAN on GENERIC_ENTRY architectures.
+CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1)
 endif # CONFIG_KASAN_GENERIC
 ifdef CONFIG_KASAN_SW_TAGS
@@ -54,6 +68,9 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
 $(call cc-param,hwasan-inline-all-checks=0) \
 $(instrumentation_flags)
+# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
+CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
 endif # CONFIG_KASAN_SW_TAGS
 export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE