mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 12:57:53 +00:00
bpf: Convert hash map to bpf_mem_alloc.
Convert bpf hash map to use bpf memory allocator. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Acked-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/bpf/20220902211058.60789-3-alexei.starovoitov@gmail.com
This commit is contained in:
parent
7c8199e24f
commit
fba1a1c6c9
1 changed files with 16 additions and 5 deletions
|
@@ -14,6 +14,7 @@
|
|||
#include "percpu_freelist.h"
|
||||
#include "bpf_lru_list.h"
|
||||
#include "map_in_map.h"
|
||||
#include <linux/bpf_mem_alloc.h>
|
||||
|
||||
#define HTAB_CREATE_FLAG_MASK \
|
||||
(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
|
||||
|
@@ -92,6 +93,7 @@ struct bucket {
|
|||
|
||||
struct bpf_htab {
|
||||
struct bpf_map map;
|
||||
struct bpf_mem_alloc ma;
|
||||
struct bucket *buckets;
|
||||
void *elems;
|
||||
union {
|
||||
|
@@ -576,6 +578,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
|
|||
if (err)
|
||||
goto free_prealloc;
|
||||
}
|
||||
} else {
|
||||
err = bpf_mem_alloc_init(&htab->ma, htab->elem_size);
|
||||
if (err)
|
||||
goto free_map_locked;
|
||||
}
|
||||
|
||||
return &htab->map;
|
||||
|
@@ -586,6 +592,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
|
|||
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
|
||||
free_percpu(htab->map_locked[i]);
|
||||
bpf_map_area_free(htab->buckets);
|
||||
bpf_mem_alloc_destroy(&htab->ma);
|
||||
free_htab:
|
||||
lockdep_unregister_key(&htab->lockdep_key);
|
||||
bpf_map_area_free(htab);
|
||||
|
@@ -862,7 +869,7 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
|
|||
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
|
||||
free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
|
||||
check_and_free_fields(htab, l);
|
||||
kfree(l);
|
||||
bpf_mem_cache_free(&htab->ma, l);
|
||||
}
|
||||
|
||||
static void htab_elem_free_rcu(struct rcu_head *head)
|
||||
|
@@ -986,9 +993,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
|
|||
l_new = ERR_PTR(-E2BIG);
|
||||
goto dec_count;
|
||||
}
|
||||
l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
|
||||
GFP_NOWAIT | __GFP_NOWARN,
|
||||
htab->map.numa_node);
|
||||
l_new = bpf_mem_cache_alloc(&htab->ma);
|
||||
if (!l_new) {
|
||||
l_new = ERR_PTR(-ENOMEM);
|
||||
goto dec_count;
|
||||
|
@@ -1007,7 +1012,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
|
|||
pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
|
||||
GFP_NOWAIT | __GFP_NOWARN);
|
||||
if (!pptr) {
|
||||
kfree(l_new);
|
||||
bpf_mem_cache_free(&htab->ma, l_new);
|
||||
l_new = ERR_PTR(-ENOMEM);
|
||||
goto dec_count;
|
||||
}
|
||||
|
@@ -1429,6 +1434,10 @@ static void delete_all_elements(struct bpf_htab *htab)
|
|||
{
|
||||
int i;
|
||||
|
||||
/* It's called from a worker thread, so disable migration here,
|
||||
* since bpf_mem_cache_free() relies on that.
|
||||
*/
|
||||
migrate_disable();
|
||||
for (i = 0; i < htab->n_buckets; i++) {
|
||||
struct hlist_nulls_head *head = select_bucket(htab, i);
|
||||
struct hlist_nulls_node *n;
|
||||
|
@@ -1439,6 +1448,7 @@ static void delete_all_elements(struct bpf_htab *htab)
|
|||
htab_elem_free(htab, l);
|
||||
}
|
||||
}
|
||||
migrate_enable();
|
||||
}
|
||||
|
||||
static void htab_free_malloced_timers(struct bpf_htab *htab)
|
||||
|
@@ -1502,6 +1512,7 @@ static void htab_map_free(struct bpf_map *map)
|
|||
bpf_map_free_kptr_off_tab(map);
|
||||
free_percpu(htab->extra_elems);
|
||||
bpf_map_area_free(htab->buckets);
|
||||
bpf_mem_alloc_destroy(&htab->ma);
|
||||
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
|
||||
free_percpu(htab->map_locked[i]);
|
||||
lockdep_unregister_key(&htab->lockdep_key);
|
||||
|
|
Loading…
Reference in a new issue