diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 56d3da7d0bc6..a8c7e1c5abfa 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -302,6 +302,7 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
 	struct htab_elem *l;
 
 	if (node) {
+		bpf_map_inc_elem_count(&htab->map);
 		l = container_of(node, struct htab_elem, lru_node);
 		memcpy(l->key, key, htab->map.key_size);
 		return l;
@@ -510,12 +511,16 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
 		goto free_htab;
 
+	err = bpf_map_init_elem_count(&htab->map);
+	if (err)
+		goto free_htab;
+
 	err = -ENOMEM;
 	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 					   sizeof(struct bucket),
 					   htab->map.numa_node);
 	if (!htab->buckets)
-		goto free_htab;
+		goto free_elem_count;
 
 	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
 		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
@@ -593,6 +598,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
+free_elem_count:
+	bpf_map_free_elem_count(&htab->map);
 free_htab:
 	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
@@ -804,6 +811,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 		if (l == tgt_l) {
 			hlist_nulls_del_rcu(&l->hash_node);
 			check_and_free_fields(htab, l);
+			bpf_map_dec_elem_count(&htab->map);
 			break;
 		}
 
@@ -900,6 +908,8 @@ static bool is_map_full(struct bpf_htab *htab)
 
 static void inc_elem_count(struct bpf_htab *htab)
 {
+	bpf_map_inc_elem_count(&htab->map);
+
 	if (htab->use_percpu_counter)
 		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
 	else
@@ -908,6 +918,8 @@ static void inc_elem_count(struct bpf_htab *htab)
 
 static void dec_elem_count(struct bpf_htab *htab)
 {
+	bpf_map_dec_elem_count(&htab->map);
+
 	if (htab->use_percpu_counter)
 		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
 	else
@@ -920,6 +932,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 	htab_put_fd_value(htab, l);
 
 	if (htab_is_prealloc(htab)) {
+		bpf_map_dec_elem_count(&htab->map);
 		check_and_free_fields(htab, l);
 		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
@@ -1000,6 +1013,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			if (!l)
 				return ERR_PTR(-E2BIG);
 			l_new = container_of(l, struct htab_elem, fnode);
+			bpf_map_inc_elem_count(&htab->map);
 		}
 	} else {
 		if (is_map_full(htab))
@@ -1168,6 +1182,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
 {
 	check_and_free_fields(htab, elem);
+	bpf_map_dec_elem_count(&htab->map);
 	bpf_lru_push_free(&htab->lru, &elem->lru_node);
 }
 
@@ -1357,8 +1372,10 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 err:
 	htab_unlock_bucket(htab, b, hash, flags);
 err_lock_bucket:
-	if (l_new)
+	if (l_new) {
+		bpf_map_dec_elem_count(&htab->map);
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
+	}
 	return ret;
 }
 
@@ -1523,6 +1540,7 @@ static void htab_map_free(struct bpf_map *map)
 		prealloc_destroy(htab);
 	}
 
+	bpf_map_free_elem_count(map);
 	free_percpu(htab->extra_elems);
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
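
--

Note: the bpf_map_{init,free,inc,dec}_elem_count() helpers this patch calls are
per-CPU element counters introduced earlier in the same series. A minimal
sketch of their expected shape, assuming an "s64 __percpu *elem_count" field
on struct bpf_map (shown here for context only, not part of this diff):

static inline int bpf_map_init_elem_count(struct bpf_map *map)
{
	size_t size = sizeof(*map->elem_count), align = size;
	gfp_t flags = GFP_KERNEL | __GFP_NOWARN;

	/* one s64 slot per CPU; freed again in bpf_map_free_elem_count() */
	map->elem_count = __alloc_percpu_gfp(size, align, flags);
	if (!map->elem_count)
		return -ENOMEM;
	return 0;
}

static inline void bpf_map_free_elem_count(struct bpf_map *map)
{
	free_percpu(map->elem_count);
}

static inline void bpf_map_inc_elem_count(struct bpf_map *map)
{
	/* only touches this CPU's slot, so no cross-CPU contention */
	this_cpu_inc(*map->elem_count);
}

static inline void bpf_map_dec_elem_count(struct bpf_map *map)
{
	this_cpu_dec(*map->elem_count);
}

Keeping the counter per-CPU means the hot insert/delete paths above only
touch a CPU-local cache line; a reader that wants the current element count
sums the per-CPU values, accepting a slightly stale total in exchange for
not serializing updates.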