mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 12:57:53 +00:00
726ccdba15
syzbot is reporting lockdep warning in __stack_depot_save(), for the caller of __stack_depot_save() (i.e. __kasan_record_aux_stack() in this report) is responsible for masking __GFP_KSWAPD_RECLAIM flag in order not to wake kswapd which in turn wakes kcompactd. Since kasan/kmsan functions might be called with arbitrary locks held, mask __GFP_KSWAPD_RECLAIM flag from all GFP_NOWAIT/GFP_ATOMIC allocations in kasan/kmsan. Note that kmsan_save_stack_with_flags() is changed to mask both __GFP_DIRECT_RECLAIM flag and __GFP_KSWAPD_RECLAIM flag, for wakeup_kswapd() from wake_all_kswapds() from __alloc_pages_slowpath() calls wakeup_kcompactd() if __GFP_KSWAPD_RECLAIM flag is set and __GFP_DIRECT_RECLAIM flag is not set. Link: https://lkml.kernel.org/r/656cb4f5-998b-c8d7-3c61-c2d37aa90f9a@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Reported-by: syzbot <syzbot+ece2915262061d6e0ac1@syzkaller.appspotmail.com> Closes: https://syzkaller.appspot.com/bug?extid=ece2915262061d6e0ac1 Reviewed-by: "Huang, Ying" <ying.huang@intel.com> Reviewed-by: Alexander Potapenko <glider@google.com> Cc: Andrey Konovalov <andreyknvl@gmail.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Marco Elver <elver@google.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
144 lines
3.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* This file contains common tag-based KASAN code.
|
|
*
|
|
* Copyright (c) 2018 Google, Inc.
|
|
* Copyright (c) 2020 Google, Inc.
|
|
*/
|
|
|
|
#include <linux/atomic.h>
|
|
#include <linux/init.h>
|
|
#include <linux/kasan.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/memblock.h>
|
|
#include <linux/memory.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/static_key.h>
|
|
#include <linux/string.h>
|
|
#include <linux/types.h>
|
|
|
|
#include "kasan.h"
|
|
#include "../slab.h"
|
|
|
|
/* Default number of stack ring entries when kasan.stack_ring_size= is not given. */
#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)

/* Parsed state of the kasan.stacktrace= early boot parameter. */
enum kasan_arg_stacktrace {
	KASAN_ARG_STACKTRACE_DEFAULT,
	KASAN_ARG_STACKTRACE_OFF,
	KASAN_ARG_STACKTRACE_ON,
};

/* Boot-parameter value; __initdata, only consulted during kasan_init_tags(). */
static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

/* Non-zero, as initial pointer values are 0. */
#define STACK_RING_BUSY_PTR ((void *)1)

/*
 * Global ring of alloc/free stack-trace records, written by save_stack_info()
 * and walked by the report code. Only the rwlock is initialized statically;
 * .size and .entries are set up in kasan_init_tags().
 */
struct kasan_stack_ring stack_ring = {
	.lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
};
|
|
|
|
/* kasan.stacktrace=off/on */
|
|
static int __init early_kasan_flag_stacktrace(char *arg)
|
|
{
|
|
if (!arg)
|
|
return -EINVAL;
|
|
|
|
if (!strcmp(arg, "off"))
|
|
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
|
|
else if (!strcmp(arg, "on"))
|
|
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
|
|
else
|
|
return -EINVAL;
|
|
|
|
return 0;
|
|
}
|
|
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
|
|
|
|
/* kasan.stack_ring_size=<number of entries> */
|
|
static int __init early_kasan_flag_stack_ring_size(char *arg)
|
|
{
|
|
if (!arg)
|
|
return -EINVAL;
|
|
|
|
return kstrtoul(arg, 0, &stack_ring.size);
|
|
}
|
|
early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
|
|
|
|
/*
 * Late tag-based KASAN init: apply the kasan.stacktrace= boot parameter and,
 * if stack collection ends up enabled, allocate the stack ring from memblock.
 * On allocation failure, stack collection is disabled (with a warning).
 */
void __init kasan_init_tags(void)
{
	if (kasan_arg_stacktrace == KASAN_ARG_STACKTRACE_OFF)
		static_branch_disable(&kasan_flag_stacktrace);
	else if (kasan_arg_stacktrace == KASAN_ARG_STACKTRACE_ON)
		static_branch_enable(&kasan_flag_stacktrace);
	/* KASAN_ARG_STACKTRACE_DEFAULT: keep kasan_flag_stacktrace as defined. */

	if (!kasan_stack_collection_enabled())
		return;

	if (!stack_ring.size)
		stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;

	stack_ring.entries = memblock_alloc(
			sizeof(stack_ring.entries[0]) * stack_ring.size,
			SMP_CACHE_BYTES);
	if (WARN_ON(!stack_ring.entries))
		static_branch_disable(&kasan_flag_stacktrace);
}
|
|
|
|
/*
 * Record the current stack trace for @object into the global stack ring,
 * tagged as an allocation (is_free == false) or a free (is_free == true).
 * Claims a ring slot lock-free via a busy marker; only takes the ring rwlock
 * for reading, so concurrent writers proceed in parallel and are excluded
 * only while a report is walking the ring.
 */
static void save_stack_info(struct kmem_cache *cache, void *object,
				gfp_t gfp_flags, bool is_free)
{
	unsigned long flags;
	depot_stack_handle_t stack;
	u64 pos;
	struct kasan_stack_ring_entry *entry;
	void *old_ptr;

	/* Capture the stack trace first, outside of the ring-slot protocol. */
	stack = kasan_save_stack(gfp_flags, true);

	/*
	 * Prevent save_stack_info() from modifying stack ring
	 * when kasan_complete_mode_report_info() is walking it.
	 */
	read_lock_irqsave(&stack_ring.lock, flags);

next:
	/* Claim the next ring position; the ring wraps modulo its size. */
	pos = atomic64_fetch_add(1, &stack_ring.pos);
	entry = &stack_ring.entries[pos % stack_ring.size];

	/* Detect stack ring entry slots that are being written to. */
	old_ptr = READ_ONCE(entry->ptr);
	if (old_ptr == STACK_RING_BUSY_PTR)
		goto next; /* Busy slot. */
	/* Atomically mark the slot busy; retry if another writer beat us. */
	if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
		goto next; /* Busy slot. */

	/* Fill the claimed slot; entry->ptr still holds the busy marker. */
	WRITE_ONCE(entry->size, cache->object_size);
	WRITE_ONCE(entry->pid, current->pid);
	WRITE_ONCE(entry->stack, stack);
	WRITE_ONCE(entry->is_free, is_free);

	/*
	 * Publish the entry: releases the busy marker and orders the field
	 * writes above before the pointer becomes visible.
	 * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
	 * NOTE(review): the (s64) cast relies on smp_store_release() casting
	 * the value back to typeof(entry->ptr) internally — confirm against
	 * the arch barrier implementation.
	 */
	smp_store_release(&entry->ptr, (s64)object);

	read_unlock_irqrestore(&stack_ring.lock, flags);
}
|
|
|
|
/*
 * Record an allocation stack trace for @object in the stack ring.
 * Thin wrapper around save_stack_info() with is_free == false.
 */
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	save_stack_info(cache, object, flags, false);
}
|
|
|
|
void kasan_save_free_info(struct kmem_cache *cache, void *object)
|
|
{
|
|
save_stack_info(cache, object, 0, true);
|
|
}
|