bpf: Optimize element count in non-preallocated hash map.
The atomic_inc/dec might cause extreme cache line bouncing when multiple cpus access the same bpf map. Based on the specified max_entries for the hash map, calculate when percpu_counter becomes faster than atomic_t and use it for such maps. For example, samples/bpf/map_perf_test uses a hash map with max_entries 1000. On a system with 16 cpus, 'map_perf_test 4' shows 14k events per second using atomic_t; on a system with 15 cpus it shows 100k events per second using percpu_counter. map_perf_test is an extreme case where all cpus are colliding on atomic_t, which causes extreme cache bouncing. Note that the slow path of percpu_counter is 5k events per second vs 14k for atomic, so the heuristic is necessary. See the comment in the code for why the heuristic is based on num_online_cpus().

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-7-alexei.starovoitov@gmail.com
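To make the threshold concrete, the following is a minimal userspace sketch (not kernel code; the helper would_use_percpu_counter() and the standalone main() are illustrative) that evaluates the same condition htab_map_alloc() applies below, using the map_perf_test numbers quoted above:

#include <stdbool.h>
#include <stdio.h>

#define PERCPU_COUNTER_BATCH 32 /* same batch value the patch defines */

/* Same condition htab_map_alloc() applies: only switch to percpu_counter when
 * half of max_entries exceeds batch * nr_cpus, so the comparison against
 * max_entries stays on the fast path at least until the map is half full. */
static bool would_use_percpu_counter(unsigned int max_entries, unsigned int nr_cpus)
{
        return max_entries / 2 > nr_cpus * PERCPU_COUNTER_BATCH;
}

int main(void)
{
        /* map_perf_test: max_entries 1000. On 16 cpus, 500 > 512 is false, so
         * atomic_t is kept; on 15 cpus, 500 > 480 is true, so percpu_counter
         * is chosen - matching the two measurements quoted above. */
        printf("1000 entries, 16 cpus: %d\n", would_use_percpu_counter(1000, 16));
        printf("1000 entries, 15 cpus: %d\n", would_use_percpu_counter(1000, 15));
        /* the 10k-entry example from the code comment: 5000 > 64 * 32 = 2048 */
        printf("10000 entries, 64 cpus: %d\n", would_use_percpu_counter(10000, 64));
        return 0;
}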
This commit is contained in:
parent 34dd3bad1a
commit 86fe28f769

1 changed file with 62 additions and 8 deletions: kernel/bpf/hashtab.c
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -101,7 +101,12 @@ struct bpf_htab {
                 struct bpf_lru lru;
         };
         struct htab_elem *__percpu *extra_elems;
-        atomic_t count; /* number of elements in this hashtable */
+        /* number of elements in non-preallocated hashtable are kept
+         * in either pcount or count
+         */
+        struct percpu_counter pcount;
+        atomic_t count;
+        bool use_percpu_counter;
         u32 n_buckets;  /* number of hash buckets */
         u32 elem_size;  /* size of each element in bytes */
         u32 hashrnd;
@@ -565,6 +570,29 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 
         htab_init_buckets(htab);
 
+/* compute_batch_value() computes batch value as num_online_cpus() * 2
+ * and __percpu_counter_compare() needs
+ * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
+ * for percpu_counter to be faster than atomic_t. In practice the average bpf
+ * hash map size is 10k, which means that a system with 64 cpus will fill
+ * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
+ * define our own batch count as 32 then 10k hash map can be filled up to 80%:
+ * 10k - 8k > 32 _batch_ * 64 _cpus_
+ * and __percpu_counter_compare() will still be fast. At that point hash map
+ * collisions will dominate its performance anyway. Assume that hash map filled
+ * to 50+% isn't going to be O(1) and use the following formula to choose
+ * between percpu_counter and atomic_t.
+ */
+#define PERCPU_COUNTER_BATCH 32
+        if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
+                htab->use_percpu_counter = true;
+
+        if (htab->use_percpu_counter) {
+                err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
+                if (err)
+                        goto free_map_locked;
+        }
+
         if (prealloc) {
                 err = prealloc_init(htab);
                 if (err)
@@ -891,6 +919,31 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
         }
 }
 
+static bool is_map_full(struct bpf_htab *htab)
+{
+        if (htab->use_percpu_counter)
+                return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
+                                                PERCPU_COUNTER_BATCH) >= 0;
+        return atomic_read(&htab->count) >= htab->map.max_entries;
+}
+
+static void inc_elem_count(struct bpf_htab *htab)
+{
+        if (htab->use_percpu_counter)
+                percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
+        else
+                atomic_inc(&htab->count);
+}
+
+static void dec_elem_count(struct bpf_htab *htab)
+{
+        if (htab->use_percpu_counter)
+                percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
+        else
+                atomic_dec(&htab->count);
+}
+
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
         htab_put_fd_value(htab, l);
@@ -899,7 +952,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
                 check_and_free_fields(htab, l);
                 __pcpu_freelist_push(&htab->freelist, &l->fnode);
         } else {
-                atomic_dec(&htab->count);
+                dec_elem_count(htab);
                 l->htab = htab;
                 call_rcu(&l->rcu, htab_elem_free_rcu);
         }
@@ -983,16 +1036,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                         l_new = container_of(l, struct htab_elem, fnode);
                 }
         } else {
-                if (atomic_inc_return(&htab->count) > htab->map.max_entries)
-                        if (!old_elem) {
+                if (is_map_full(htab))
+                        if (!old_elem)
                                 /* when map is full and update() is replacing
                                  * old element, it's ok to allocate, since
                                  * old element will be freed immediately.
                                  * Otherwise return an error
                                  */
-                                l_new = ERR_PTR(-E2BIG);
-                                goto dec_count;
-                        }
+                                return ERR_PTR(-E2BIG);
+                inc_elem_count(htab);
                 l_new = bpf_mem_cache_alloc(&htab->ma);
                 if (!l_new) {
                         l_new = ERR_PTR(-ENOMEM);
@@ -1034,7 +1086,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
         l_new->hash = hash;
         return l_new;
 dec_count:
-        atomic_dec(&htab->count);
+        dec_elem_count(htab);
         return l_new;
 }
 
@@ -1513,6 +1565,8 @@ static void htab_map_free(struct bpf_map *map)
         free_percpu(htab->extra_elems);
         bpf_map_area_free(htab->buckets);
         bpf_mem_alloc_destroy(&htab->ma);
+        if (htab->use_percpu_counter)
+                percpu_counter_destroy(&htab->pcount);
         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
                 free_percpu(htab->map_locked[i]);
         lockdep_unregister_key(&htab->lockdep_key);
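For readers unfamiliar with the two percpu_counter operations the patch leans on, the following is a rough userspace model of their batching behaviour (an illustrative simplification with made-up names such as pcpu_counter_model, not the kernel implementation): updates stay in a per-cpu delta until the delta reaches the batch, and the comparison only does the expensive per-cpu summation when the approximate count is within batch * nr_cpus of the limit. That error margin is why htab_map_alloc() keeps atomic_t for small maps, where the margin would be hit almost immediately.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

/* Illustrative model only: the real percpu_counter uses per-cpu variables, a
 * spinlock and preemption control; this mirrors just the arithmetic. */
struct pcpu_counter_model {
        long long count;        /* shared part, the one that cache-bounces */
        long per_cpu[NR_CPUS];  /* cheap per-cpu deltas */
};

/* Model of percpu_counter_add_batch(): fold the local delta into the shared
 * count only once it reaches the batch, so most updates never touch it. */
static void model_add_batch(struct pcpu_counter_model *c, int cpu, long amount, long batch)
{
        long v = c->per_cpu[cpu] + amount;

        if (labs(v) >= batch) {
                c->count += v;
                c->per_cpu[cpu] = 0;
        } else {
                c->per_cpu[cpu] = v;
        }
}

/* Model of __percpu_counter_compare(): the shared count can be off by up to
 * batch * NR_CPUS, so sum the per-cpu deltas only when rhs falls inside that
 * margin; that summation is the slow path the commit message measures at
 * roughly 5k events per second. */
static int model_compare(struct pcpu_counter_model *c, long long rhs, long batch)
{
        long long count = c->count;
        int cpu;

        if (llabs(count - rhs) > batch * NR_CPUS)
                return count > rhs ? 1 : -1;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                count += c->per_cpu[cpu];
        return (count > rhs) - (count < rhs);
}

int main(void)
{
        struct pcpu_counter_model c = { 0 };
        int i;

        /* 100 insertions spread over 4 cpus: each per-cpu delta only reaches
         * 25, below the batch of 32, so the shared count is never touched,
         * yet model_compare() can still answer "not full" on its fast path. */
        for (i = 0; i < 100; i++)
                model_add_batch(&c, i % NR_CPUS, 1, 32);

        printf("approximate count %lld, full? %d\n", c.count,
               model_compare(&c, 1000, 32) >= 0);
        return 0;
}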