kfence: shorten critical sections of alloc/free
Initializing memory and setting/checking the canary bytes is relatively
expensive, and doing so in the meta->lock critical sections extends the
duration with preemption and interrupts disabled unnecessarily.

Any reads to meta->addr and meta->size in kfence_guarded_alloc() and
kfence_guarded_free() don't require locking meta->lock as long as the
object is removed from the freelist: only kfence_guarded_alloc() sets
meta->addr and meta->size after removing it from the freelist, which
requires a preceding kfence_guarded_free() returning it to the list or
the initial state.

Therefore move reads to meta->addr and meta->size, including expensive
memory initialization using them, out of meta->lock critical sections.

Link: https://lkml.kernel.org/r/20210930153706.2105471-1-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

parent f51733e2fc
commit 4933295622

1 changed file with 21 additions and 17 deletions
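The change follows the usual pattern for shrinking a critical section: do only the bookkeeping that genuinely needs meta->lock while holding it, drop the lock, and run the expensive per-object work (memory initialization, canary bytes) afterwards, which is safe because the object is exclusively owned once it has been taken off the freelist. Below is a minimal userspace sketch of that pattern, not kernel code; the names (obj_pool, obj_alloc_slow, obj_alloc_fast) are invented for illustration.

/* Sketch: shrink a critical section by moving expensive initialization
 * outside the lock, assuming exclusive ownership after freelist removal.
 * Hypothetical userspace analogue of the kfence change; names are
 * illustrative only.
 */
#include <pthread.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct obj {
	struct obj *next;		/* freelist linkage */
	size_t size;
	unsigned char data[256];
};

struct obj_pool {
	pthread_mutex_t lock;
	struct obj *freelist;
};

/* Before: everything, including the memset(), runs under pool->lock. */
static struct obj *obj_alloc_slow(struct obj_pool *pool, size_t size)
{
	struct obj *o;

	pthread_mutex_lock(&pool->lock);
	o = pool->freelist;
	if (o) {
		pool->freelist = o->next;
		o->size = size;
		memset(o->data, 0, sizeof(o->data));	/* expensive, but locked */
	}
	pthread_mutex_unlock(&pool->lock);
	return o;
}

/* After: once the object is off the freelist no other thread can reach it,
 * so the expensive initialization can run with the lock dropped.
 */
static struct obj *obj_alloc_fast(struct obj_pool *pool, size_t size)
{
	struct obj *o;

	pthread_mutex_lock(&pool->lock);
	o = pool->freelist;
	if (o) {
		pool->freelist = o->next;
		o->size = size;			/* only bookkeeping under the lock */
	}
	pthread_mutex_unlock(&pool->lock);

	if (o)
		memset(o->data, 0, sizeof(o->data));	/* unlocked: object is private */
	return o;
}

int main(void)
{
	struct obj node = { .next = NULL };
	struct obj_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER, .freelist = &node };
	struct obj *o = obj_alloc_fast(&pool, 64);

	printf("allocated object of size %zu\n", o ? o->size : 0);
	printf("second alloc (freelist now empty): %p\n", (void *)obj_alloc_slow(&pool, 64));
	return 0;
}

The kernel code in the diff below does the same with raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() on meta->lock.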
@@ -309,12 +309,19 @@ static inline bool set_canary_byte(u8 *addr)
 /* Check canary byte at @addr. */
 static inline bool check_canary_byte(u8 *addr)
 {
+	struct kfence_metadata *meta;
+	unsigned long flags;
+
 	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
 		return true;
 
 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
-	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
-			    KFENCE_ERROR_CORRUPTION);
+
+	meta = addr_to_metadata((unsigned long)addr);
+	raw_spin_lock_irqsave(&meta->lock, flags);
+	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
+
 	return false;
 }
 
@@ -324,8 +331,6 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
 	unsigned long addr;
 
-	lockdep_assert_held(&meta->lock);
-
 	/*
 	 * We'll iterate over each canary byte per-side until fn() returns
 	 * false. However, we'll still iterate over the canary bytes to the
@@ -414,8 +419,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	WRITE_ONCE(meta->cache, cache);
 	meta->size = size;
 	meta->alloc_stack_hash = alloc_stack_hash;
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
 
-	for_each_canary(meta, set_canary_byte);
+	alloc_covered_add(alloc_stack_hash, 1);
 
 	/* Set required struct page fields. */
 	page = virt_to_page(meta->addr);
@@ -425,11 +431,8 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	if (IS_ENABLED(CONFIG_SLAB))
 		page->s_mem = addr;
 
-	raw_spin_unlock_irqrestore(&meta->lock, flags);
-
-	alloc_covered_add(alloc_stack_hash, 1);
-
 	/* Memory initialization. */
+	for_each_canary(meta, set_canary_byte);
 
 	/*
 	 * We check slab_want_init_on_alloc() ourselves, rather than letting
@@ -454,6 +457,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 {
 	struct kcsan_scoped_access assert_page_exclusive;
 	unsigned long flags;
+	bool init;
 
 	raw_spin_lock_irqsave(&meta->lock, flags);
 
@@ -481,6 +485,13 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 		meta->unprotected_page = 0;
 	}
 
+	/* Mark the object as freed. */
+	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
+	init = slab_want_init_on_free(meta->cache);
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+	alloc_covered_add(meta->alloc_stack_hash, -1);
+
 	/* Check canary bytes for memory corruption. */
 	for_each_canary(meta, check_canary_byte);
 
@@ -489,16 +500,9 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 	 * data is still there, and after a use-after-free is detected, we
 	 * unprotect the page, so the data is still accessible.
 	 */
-	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
+	if (!zombie && unlikely(init))
 		memzero_explicit(addr, meta->size);
 
-	/* Mark the object as freed. */
-	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
-
-	raw_spin_unlock_irqrestore(&meta->lock, flags);
-
-	alloc_covered_add(meta->alloc_stack_hash, -1);
-
 	/* Protect to detect use-after-frees. */
 	kfence_protect((unsigned long)addr);
 