lib/stackdepot: add refcount for records

Add a reference counter for how many times a stack record has been
added to stack depot.

Add a new STACK_DEPOT_FLAG_GET flag to stack_depot_save_flags that
instructs the stack depot to increment the refcount.

Do not yet decrement the refcount; this is implemented in one of the
following patches.

Do not yet enable any users to use the flag, to avoid overflowing the
refcount.

This is a preparatory patch for implementing the eviction of stack
records from the stack depot.

Link: https://lkml.kernel.org/r/a3fc14a2359d019d2a008d4ff8b46a665371ffee.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

include/linux/stackdepot.h

@@ -39,8 +39,9 @@ typedef u32 depot_flags_t;
* to its declaration for more details.
*/
#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
#define STACK_DEPOT_FLAGS_NUM 1
#define STACK_DEPOT_FLAGS_NUM 2
#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
/*
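
For reference, bumping STACK_DEPOT_FLAGS_NUM to 2 makes the mask (1 << 2) - 1 == 0x3, which is exactly the two defined flag bits. A compile-time check along these lines (illustrative only, not part of this patch) captures that invariant:

#include <linux/build_bug.h>
#include <linux/stackdepot.h>

/* The flags mask must cover precisely the defined flag bits. */
static_assert(STACK_DEPOT_FLAGS_MASK ==
	      (STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET));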
@@ -94,6 +95,9 @@ static inline int stack_depot_early_init(void) { return 0; }
* flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
* fails if no space is left to store the stack trace.
*
* If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
* the refcount on the saved stack trace if it already exists in stack depot.
*
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
@@ -116,8 +120,11 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
*
* Context: Contexts where allocations via alloc_pages() are allowed.
* See stack_depot_save_flags() for more details.
* Does not increment the refcount on the saved stack trace; see
* stack_depot_save_flags() for more details.
*
* Context: Contexts where allocations via alloc_pages() are allowed;
* see stack_depot_save_flags() for more details.
*
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
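
To illustrate the documented semantics above, here is a minimal caller sketch (hypothetical code, since this patch intentionally adds no users of the flag yet; the function name and buffer size are made up):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/*
 * Save the current stack trace and take a reference on the stack
 * record, so that a later patch in this series can drop the reference
 * again and eventually evict the record.
 */
static depot_stack_handle_t example_save_stack(gfp_t gfp)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

Plain stack_depot_save(), by contrast, keeps its existing behaviour and never touches the refcount.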

lib/stackdepot.c

@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>
@@ -60,6 +61,7 @@ struct stack_record {
u32 hash; /* Hash in hash table */
u32 size; /* Number of stored frames */
union handle_parts handle;
refcount_t count;
unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
};
@@ -373,6 +375,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
stack->hash = hash;
stack->size = size;
/* stack->handle is already filled in by depot_init_pool(). */
refcount_set(&stack->count, 1);
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
/*
@@ -489,6 +492,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
/* Fast path: look the stack trace up without full locking. */
found = find_stack(bucket, entries, nr_entries, hash);
if (found) {
if (depot_flags & STACK_DEPOT_FLAG_GET)
refcount_inc(&found->count);
read_unlock_irqrestore(&pool_rwlock, flags);
goto exit;
}
@@ -528,12 +533,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
list_add(&new->list, bucket);
found = new;
}
} else if (prealloc) {
} else {
if (depot_flags & STACK_DEPOT_FLAG_GET)
refcount_inc(&found->count);
/*
* Stack depot already contains this stack trace, but let's
* keep the preallocated memory for future.
*/
depot_keep_new_pool(&prealloc);
if (prealloc)
depot_keep_new_pool(&prealloc);
}
write_unlock_irqrestore(&pool_rwlock, flags);
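
Putting the pieces together, the resulting refcount behaviour can be summarized as follows (a descriptive sketch of the code above, not additional patch content):

/*
 * depot_alloc_stack():                new record, count set to 1
 * save, record found, FLAG_GET set:   count incremented (fast and slow path)
 * save, record found, flag not set:   count unchanged
 * Nothing decrements the count yet; eviction based on the count
 * dropping to zero is added by a later patch in this series.
 */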