e4b7818b9a
Move the free track from kasan_alloc_meta to kasan_free_meta so that struct kasan_alloc_meta and struct kasan_free_meta are both 16 bytes. This is a good size because it matches the minimal redzone size and aligns well.

For the free track, the following modifications are made:

1) Remove free_track from struct kasan_alloc_meta.
2) Add free_track to struct kasan_free_meta.
3) Add a macro, KASAN_KMALLOC_FREETRACK, used to check whether the free stack can be printed in the KASAN report.

[1] https://bugzilla.kernel.org/show_bug.cgi?id=198437

[walter-zh.wu@mediatek.com: build fix]
  Link: http://lkml.kernel.org/r/20200710162440.23887-1-walter-zh.wu@mediatek.com

Suggested-by: Dmitry Vyukov <dvyukov@google.com>
Co-developed-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Walter Wu <walter-zh.wu@mediatek.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Matthias Brugger <matthias.bgg@gmail.com>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Link: http://lkml.kernel.org/r/20200601051022.1230-1-walter-zh.wu@mediatek.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
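For orientation, here is a minimal sketch of the metadata layout the commit message describes. It is not the verbatim kernel definition: the real structs live in mm/kasan/kasan.h and carry additional mode-specific fields (for example the CONFIG_KASAN_SW_TAGS_IDENTIFY bookkeeping used by the tag-based code below), but it illustrates why moving free_track brings both structs to 16 bytes on a 64-bit build.

struct kasan_track {
	u32 pid;			/* task that allocated or freed the object */
	depot_stack_handle_t stack;	/* handle into the stack depot */
};					/* 8 bytes */

struct kasan_alloc_meta {
	struct kasan_track alloc_track;
	/* ... mode-specific fields; free_track no longer lives here ... */
};					/* 16 bytes after this change */

struct kasan_free_meta {
	struct qlist_node quarantine_link;	/* used while the object is quarantined */
	struct kasan_track free_track;		/* moved here by this change */
};					/* 16 bytes after this change */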
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static DEFINE_PER_CPU(u32, prng_state);

void kasan_init_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = (u32)get_cycles();
}

/*
 * If a preemption happens between this_cpu_read and this_cpu_write, the only
 * side effect is that we'll give the same tag to a few objects allocated in
 * different contexts. Since tag-based KASAN is meant to be used as a
 * probabilistic bug-detection debug feature, this doesn't have a significant
 * negative impact.
 *
 * Ideally the tags would use strong randomness to prevent any attempts to
 * predict them during explicit exploit attempts. But strong randomness is
 * expensive, and we made an intentional trade-off to use a PRNG. This
 * non-atomic RMW sequence in fact has a positive effect, since interrupts
 * that randomly skew the PRNG at unpredictable points do only good.
 */
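/*
 * The multiplier and increment below are the classic 32-bit linear
 * congruential generator parameters from Numerical Recipes. The final modulo
 * keeps generated tags at or below KASAN_TAG_MAX, i.e. below the 0xff
 * KASAN_TAG_KERNEL value reserved for untagged native kernel pointers.
 */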
u8 random_tag(void)
{
	u32 state = this_cpu_read(prng_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(prng_state, state);

	return (u8)(state % (KASAN_TAG_MAX + 1));
}

void *kasan_reset_tag(const void *addr)
{
	return reset_tag(addr);
}

bool check_memory_region(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	u8 tag;
	u8 *shadow_first, *shadow_last, *shadow;
	void *untagged_addr;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	tag = get_tag((const void *)addr);

	/*
	 * Ignore accesses for pointers tagged with 0xff (native kernel
	 * pointer tag) to suppress false positives caused by kmap.
	 *
	 * Some kernel code was written to account for archs that don't keep
	 * high memory mapped all the time, but rather map and unmap particular
	 * pages when needed. Instead of storing a pointer to the kernel memory,
	 * this code saves the address of the page structure and offset within
	 * that page for later use. Those pages are then mapped and unmapped
	 * with kmap/kunmap when necessary and virt_to_page is used to get the
	 * virtual address of the page. For arm64 (that keeps the high memory
	 * mapped all the time), kmap is turned into a page_address call.
	 *
	 * The issue is that with use of the page_address + virt_to_page
	 * sequence the top byte value of the original pointer gets lost (gets
	 * set to KASAN_TAG_KERNEL (0xFF)).
	 */
	if (tag == KASAN_TAG_KERNEL)
		return true;

	untagged_addr = reset_tag((const void *)addr);
	if (unlikely(untagged_addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
	}
	shadow_first = kasan_mem_to_shadow(untagged_addr);
	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
		if (*shadow != tag) {
			return !kasan_report(addr, size, write, ret_ip);
		}
	}

	return true;
}
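/*
 * The __hwasan_* entry points below are called by the compiler's software
 * tag-based instrumentation around memory accesses (the _noabort variants
 * report the bug and let execution continue); fixed-size and variable-size
 * accesses all funnel into check_memory_region() above.
 */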
#define DEFINE_HWASAN_LOAD_STORE(size)					\
	void __hwasan_load##size##_noabort(unsigned long addr)		\
	{								\
		check_memory_region(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
	void __hwasan_store##size##_noabort(unsigned long addr)	\
	{								\
		check_memory_region(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__hwasan_store##size##_noabort)

DEFINE_HWASAN_LOAD_STORE(1);
DEFINE_HWASAN_LOAD_STORE(2);
DEFINE_HWASAN_LOAD_STORE(4);
DEFINE_HWASAN_LOAD_STORE(8);
DEFINE_HWASAN_LOAD_STORE(16);

void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_loadN_noabort);

void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_storeN_noabort);

void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
	kasan_poison_shadow((void *)addr, size, tag);
}
EXPORT_SYMBOL(__hwasan_tag_memory);
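/*
 * Free-stack bookkeeping: with CONFIG_KASAN_SW_TAGS_IDENTIFY,
 * kasan_set_free_info() records the freed pointer's tag next to its free
 * stack in a small ring of KASAN_NR_FREE_STACKS entries, and
 * kasan_get_free_track() later matches a faulting pointer's tag against
 * those entries so the report can show the free stack that most likely
 * corresponds to the bad access.
 */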
void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	u8 idx = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	idx = alloc_meta->free_track_idx;
	alloc_meta->free_pointer_tag[idx] = tag;
	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

	kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	int i = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
		if (alloc_meta->free_pointer_tag[i] == tag)
			break;
	}
	if (i == KASAN_NR_FREE_STACKS)
		i = alloc_meta->free_track_idx;
#endif

	return &alloc_meta->free_track[i];
}