bpf: Convert bpf_cpumask to bpf_mem_cache_free_rcu.

Convert bpf_cpumask to bpf_mem_cache_free_rcu.
Note that migrate_disable() in bpf_cpumask_release() is still necessary, since
bpf_cpumask_release() is a dtor. bpf_obj_free_fields() can be converted to do
migrate_disable() there in a follow-up.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-14-alexei.starovoitov@gmail.com
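
For readers skimming the diff, here is the converted release path shown in isolation, with editorial comments added; the code mirrors the final hunk below and introduces nothing beyond it:

__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
	if (!refcount_dec_and_test(&cpumask->usage))
		return;

	/* bpf_cpumask_release() is also the kptr dtor, so it can be entered
	 * from a preemptible context; stay on one CPU while touching the
	 * per-CPU caches of the BPF allocator.
	 */
	migrate_disable();
	/* Hand the object back to bpf_cpumask_ma; reuse is deferred past an
	 * RCU grace period, replacing the old call_rcu() + cpumask_free_cb()
	 * pair.
	 */
	bpf_mem_cache_free_rcu(&bpf_cpumask_ma, cpumask);
	migrate_enable();
}
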
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -9,7 +9,6 @@
 /**
  * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
  * @cpumask: The actual cpumask embedded in the struct.
- * @rcu: The RCU head used to free the cpumask with RCU safety.
  * @usage: Object reference counter. When the refcount goes to 0, the
  *	   memory is released back to the BPF allocator, which provides
  *	   RCU safety.
@@ -25,7 +24,6 @@
  */
 struct bpf_cpumask {
 	cpumask_t cpumask;
-	struct rcu_head rcu;
 	refcount_t usage;
 };
 
@@ -82,16 +80,6 @@ __bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
 	return cpumask;
 }
 
-static void cpumask_free_cb(struct rcu_head *head)
-{
-	struct bpf_cpumask *cpumask;
-
-	cpumask = container_of(head, struct bpf_cpumask, rcu);
-	migrate_disable();
-	bpf_mem_cache_free(&bpf_cpumask_ma, cpumask);
-	migrate_enable();
-}
-
 /**
  * bpf_cpumask_release() - Release a previously acquired BPF cpumask.
  * @cpumask: The cpumask being released.
@@ -102,8 +90,12 @@ static void cpumask_free_cb(struct rcu_head *head)
  */
 __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
 {
-	if (refcount_dec_and_test(&cpumask->usage))
-		call_rcu(&cpumask->rcu, cpumask_free_cb);
+	if (!refcount_dec_and_test(&cpumask->usage))
+		return;
+
+	migrate_disable();
+	bpf_mem_cache_free_rcu(&bpf_cpumask_ma, cpumask);
+	migrate_enable();
 }
 
 /**
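
As a usage-side illustration (not part of the patch), here is a minimal, selftest-style BPF program sketch exercising the acquire/release pair whose free path the patch converts. The program and section names are illustrative; the kfunc prototypes are the cpumask kfuncs exported by kernel/bpf/cpumask.c, and vmlinux.h is assumed to provide the referenced kernel types.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Cpumask kfuncs exported by kernel/bpf/cpumask.c. */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;

char _license[] SEC("license") = "GPL";

SEC("tp_btf/task_newtask")
int BPF_PROG(cpumask_lifetime_demo, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask, *extra;

	mask = bpf_cpumask_create();
	if (!mask)
		return 0;

	bpf_cpumask_set_cpu(0, mask);

	/* Take and drop an extra reference; only when the final reference is
	 * released is the object handed back to the BPF allocator, now via
	 * bpf_mem_cache_free_rcu() instead of call_rcu().
	 */
	extra = bpf_cpumask_acquire(mask);
	bpf_cpumask_release(extra);

	bpf_cpumask_release(mask);
	return 0;
}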