2021-06-29 02:40:55 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
|
|
|
* This file contains common tag-based KASAN code.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2018 Google, Inc.
|
|
|
|
* Copyright (c) 2020 Google, Inc.
|
|
|
|
*/
|
|
|
|
|
2022-09-05 21:05:45 +00:00
|
|
|
#include <linux/atomic.h>
|
2021-06-29 02:40:55 +00:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/kasan.h>
|
|
|
|
#include <linux/kernel.h>
|
2022-09-05 21:05:47 +00:00
|
|
|
#include <linux/memblock.h>
|
2021-06-29 02:40:55 +00:00
|
|
|
#include <linux/memory.h>
|
|
|
|
#include <linux/mm.h>
|
kasan: record and report more information
Record and report more information to help us find the cause of the bug
and to help us correlate the error with other system events.
This patch adds recording and showing CPU number and timestamp at
allocation and free (controlled by CONFIG_KASAN_EXTRA_INFO). The
timestamps in the report use the same format and source as printk.
Error occurrence timestamp is already implicit in the printk log, and CPU
number is already shown by dump_stack_lvl, so there is no need to add it.
In order to record CPU number and timestamp at allocation and free,
corresponding members need to be added to the relevant data structures,
which will lead to increased memory consumption.
In Generic KASAN, members are added to struct kasan_track. Since in most
cases, alloc meta is stored in the redzone and free meta is stored in the
object or the redzone, memory consumption will not increase much.
In SW_TAGS KASAN and HW_TAGS KASAN, members are added to struct
kasan_stack_ring_entry. Memory consumption increases as the size of
struct kasan_stack_ring_entry increases (this part of the memory is
allocated by memblock), but since this is configurable, it is up to the
user to choose.
Link: https://lkml.kernel.org/r/VI1P193MB0752BD991325D10E4AB1913599BDA@VI1P193MB0752.EURP193.PROD.OUTLOOK.COM
Signed-off-by: Juntong Deng <juntong.deng@outlook.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2023-11-27 21:17:31 +00:00
|
|
|
#include <linux/sched/clock.h>
|
2023-11-20 17:47:13 +00:00
|
|
|
#include <linux/stackdepot.h>
|
2021-06-29 02:40:55 +00:00
|
|
|
#include <linux/static_key.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
|
|
|
|
#include "kasan.h"
|
2022-09-05 21:05:45 +00:00
|
|
|
#include "../slab.h"
|
|
|
|
|
2022-09-05 21:05:47 +00:00
|
|
|
/* Default number of stack ring entries, used when kasan.stack_ring_size is not set. */
#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)

/* Parsed state of the kasan.stacktrace= boot parameter. */
enum kasan_arg_stacktrace {
	KASAN_ARG_STACKTRACE_DEFAULT,
	KASAN_ARG_STACKTRACE_OFF,
	KASAN_ARG_STACKTRACE_ON,
};

/* Early-parsed kasan.stacktrace= value; only consumed during init. */
static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

/* Non-zero, as initial pointer values are 0. */
#define STACK_RING_BUSY_PTR ((void *)1)

/*
 * Global ring buffer of alloc/free stack trace entries.
 * .entries and .size are set up in kasan_init_tags(); the rwlock
 * arbitrates between writers (save_stack_info()) and the report code.
 */
struct kasan_stack_ring stack_ring = {
	.lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
};
|
|
|
|
|
2022-09-05 21:05:46 +00:00
|
|
|
/* kasan.stacktrace=off/on */
|
|
|
|
static int __init early_kasan_flag_stacktrace(char *arg)
|
|
|
|
{
|
|
|
|
if (!arg)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!strcmp(arg, "off"))
|
|
|
|
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
|
|
|
|
else if (!strcmp(arg, "on"))
|
|
|
|
kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
|
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
|
|
|
|
|
2022-09-05 21:05:47 +00:00
|
|
|
/* kasan.stack_ring_size=<number of entries> */
|
|
|
|
static int __init early_kasan_flag_stack_ring_size(char *arg)
|
|
|
|
{
|
|
|
|
if (!arg)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return kstrtoul(arg, 0, &stack_ring.size);
|
|
|
|
}
|
|
|
|
early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
|
|
|
|
|
2022-09-05 21:05:46 +00:00
|
|
|
/*
 * Initialize the tag-based KASAN mode: apply the early-parsed
 * kasan.stacktrace= setting and, if stack collection is enabled,
 * allocate the stack ring from memblock.
 */
void __init kasan_init_tags(void)
{
	switch (kasan_arg_stacktrace) {
	case KASAN_ARG_STACKTRACE_DEFAULT:
		/* Default is specified by kasan_flag_stacktrace definition. */
		break;
	case KASAN_ARG_STACKTRACE_OFF:
		static_branch_disable(&kasan_flag_stacktrace);
		break;
	case KASAN_ARG_STACKTRACE_ON:
		static_branch_enable(&kasan_flag_stacktrace);
		break;
	}

	if (kasan_stack_collection_enabled()) {
		if (!stack_ring.size)
			stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
		/*
		 * stack_ring.size comes from the kernel command line
		 * (kasan.stack_ring_size=) unvalidated. Reject values whose
		 * entry-array byte count would overflow size_t: the wrapped
		 * product would yield a short allocation and later
		 * out-of-bounds writes when the ring is indexed.
		 */
		if (WARN_ON(stack_ring.size >
			    SIZE_MAX / sizeof(stack_ring.entries[0]))) {
			static_branch_disable(&kasan_flag_stacktrace);
			return;
		}
		stack_ring.entries = memblock_alloc(
			sizeof(stack_ring.entries[0]) * stack_ring.size,
			SMP_CACHE_BYTES);
		/* On allocation failure, fall back to not collecting traces. */
		if (WARN_ON(!stack_ring.entries))
			static_branch_disable(&kasan_flag_stacktrace);
	}
}
|
|
|
|
|
2022-09-05 21:05:45 +00:00
|
|
|
/*
 * Save an alloc or free stack trace for @object into the global stack ring.
 *
 * A ring slot is claimed lock-free by cmpxchg'ing entry->ptr to
 * STACK_RING_BUSY_PTR, so concurrent writers never fill the same slot.
 * The ring rwlock is taken for reading only to exclude the report code,
 * which takes it for writing while walking the ring.
 */
static void save_stack_info(struct kmem_cache *cache, void *object,
			gfp_t gfp_flags, bool is_free)
{
	unsigned long flags;
	depot_stack_handle_t stack, old_stack;
	u64 pos;
	struct kasan_stack_ring_entry *entry;
	void *old_ptr;

	/*
	 * STACK_DEPOT_FLAG_GET takes a reference on the saved stack record;
	 * the reference of the record this entry displaces is dropped below
	 * via stack_depot_put().
	 */
	stack = kasan_save_stack(gfp_flags,
			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);

	/*
	 * Prevent save_stack_info() from modifying stack ring
	 * when kasan_complete_mode_report_info() is walking it.
	 */
	read_lock_irqsave(&stack_ring.lock, flags);

next:
	/* Atomically claim the next position; wraps via the modulo below. */
	pos = atomic64_fetch_add(1, &stack_ring.pos);
	entry = &stack_ring.entries[pos % stack_ring.size];

	/* Detect stack ring entry slots that are being written to. */
	old_ptr = READ_ONCE(entry->ptr);
	if (old_ptr == STACK_RING_BUSY_PTR)
		goto next; /* Busy slot. */
	if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
		goto next; /* Busy slot. */

	/* Keep the displaced stack handle so its depot reference can be dropped. */
	old_stack = entry->track.stack;

	entry->size = cache->object_size;
	kasan_set_track(&entry->track, stack);
	entry->is_free = is_free;

	/*
	 * Written last, after all other fields: storing the real object
	 * pointer replaces STACK_RING_BUSY_PTR and releases the slot claim.
	 */
	entry->ptr = object;

	read_unlock_irqrestore(&stack_ring.lock, flags);

	/*
	 * Drop the depot reference held for the evicted entry's stack record
	 * (taken with STACK_DEPOT_FLAG_GET when that entry was saved).
	 * Done outside the lock; zero means the slot held no prior record.
	 */
	if (old_stack)
		stack_depot_put(old_stack);
}
|
2021-06-29 02:40:55 +00:00
|
|
|
|
2022-09-05 21:05:19 +00:00
|
|
|
/* Record an allocation stack trace for @object in the stack ring. */
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	/* is_free == false: this entry describes an allocation. */
	save_stack_info(cache, object, flags, false);
}
|
|
|
|
|
2022-09-05 21:05:34 +00:00
|
|
|
/* Record a free stack trace for @object in the stack ring. */
void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	/* No gfp context on the free path, hence 0; is_free == true. */
	save_stack_info(cache, object, 0, true);
}
|