netfilter: nf_tables: split async and sync catchall in two functions

[ Upstream commit 8837ba3e58 ]

list_for_each_entry_safe() does not work for the async case, which runs
under RCU. Therefore, split the GC logic for catchall elements into two
functions instead, one for each of the sync and async GC variants.
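
As a side note, not part of the patch: a minimal sketch of why the two
traversal styles cannot be shared. The struct and helpers below are
invented for illustration; only the list iterators and RCU calls are real
kernel APIs. list_for_each_entry_safe() pre-fetches the next entry, which
is only safe while the walker holds the lock that serializes removals; an
RCU-side walker must instead use list_for_each_entry_rcu(), which reads
each next pointer through the RCU accessors.

  #include <linux/list.h>
  #include <linux/rculist.h>
  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct item {
  	struct list_head list;
  	struct rcu_head rcu;
  };

  /* Async-style walk: runs under rcu_read_lock(), so the next entry may
   * be unlinked and freed at any time and must not be cached;
   * list_for_each_entry_rcu() is required here.
   */
  static void walk_async(struct list_head *head)
  {
  	struct item *it;

  	rcu_read_lock();
  	list_for_each_entry_rcu(it, head, list) {
  		/* inspect 'it' only; destruction is deferred to an RCU
  		 * callback by the writer side.
  		 */
  	}
  	rcu_read_unlock();
  }

  /* Sync-style walk: the caller holds the mutex that serializes list
   * mutation, so entries can be unlinked while iterating and the _safe
   * variant (which pre-fetches the next entry) is fine.
   */
  static void walk_sync(struct list_head *head)
  {
  	struct item *it, *next;

  	list_for_each_entry_safe(it, next, head, list) {
  		list_del_rcu(&it->list);
  		kfree_rcu(it, rcu);
  	}
  }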

The catchall sync GC variant never sees the _DEAD bit set on any
element, so that handling is removed there; moreover, the sync GC batch
is now allocated via GFP_KERNEL.
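
For illustration only (the helper below is hypothetical, not from the
patch): the sync GC variant runs with the commit mutex held, as the
WARN_ON_ONCE(!lockdep_commit_lock_is_held(...)) in the new function
documents, so it may sleep and can use GFP_KERNEL; the async variant
runs under RCU, where sleeping is not allowed, so it has to stay with
GFP_ATOMIC.

  #include <linux/gfp.h>
  #include <linux/slab.h>

  /* Hypothetical helper showing the allocation-context rule. */
  static void *gc_batch_alloc(size_t size, bool holds_commit_mutex)
  {
  	/* Sleeping allocations are only allowed when the commit mutex,
  	 * not an RCU read-side section, protects the walk.
  	 */
  	return kzalloc(size, holds_commit_mutex ? GFP_KERNEL : GFP_ATOMIC);
  }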

Fixes: 93995bf4af ("netfilter: nf_tables: remove catchall element in GC sync path")
Reported-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Pablo Neira Ayuso 2023-11-21 13:14:22 +01:00 committed by Greg Kroah-Hartman
parent e3e68e617b
commit e31eb7d9b7
1 changed file with 32 additions and 29 deletions

@@ -9637,16 +9637,14 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
 	call_rcu(&trans->rcu, nft_trans_gc_trans_free);
 }
 
-static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
-						  unsigned int gc_seq,
-						  bool sync)
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+						 unsigned int gc_seq)
 {
-	struct nft_set_elem_catchall *catchall, *next;
+	struct nft_set_elem_catchall *catchall;
 	const struct nft_set *set = gc->set;
-	struct nft_elem_priv *elem_priv;
 	struct nft_set_ext *ext;
 
-	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
 		ext = nft_set_elem_ext(set, catchall->elem);
 
 		if (!nft_set_elem_expired(ext))
@@ -9656,39 +9654,44 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
 
 		nft_set_elem_dead(ext);
 dead_elem:
-		if (sync)
-			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
-		else
-			gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
-
+		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
 		if (!gc)
 			return NULL;
 
-		elem_priv = catchall->elem;
-		if (sync) {
-			struct nft_set_elem elem = {
-				.priv = elem_priv,
-			};
-
-			nft_setelem_data_deactivate(gc->net, gc->set, &elem);
-			nft_setelem_catchall_destroy(catchall);
-		}
-
-		nft_trans_gc_elem_add(gc, elem_priv);
+		nft_trans_gc_elem_add(gc, catchall->elem);
 	}
 
 	return gc;
 }
 
-struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
-						 unsigned int gc_seq)
-{
-	return nft_trans_gc_catchall(gc, gc_seq, false);
-}
-
 struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
 {
-	return nft_trans_gc_catchall(gc, 0, true);
+	struct nft_set_elem_catchall *catchall, *next;
+	const struct nft_set *set = gc->set;
+	struct nft_set_elem elem;
+	struct nft_set_ext *ext;
+
+	WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net));
+
+	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+		ext = nft_set_elem_ext(set, catchall->elem);
+
+		if (!nft_set_elem_expired(ext))
+			continue;
+
+		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
+		if (!gc)
+			return NULL;
+
+		memset(&elem, 0, sizeof(elem));
+		elem.priv = catchall->elem;
+
+		nft_setelem_data_deactivate(gc->net, gc->set, &elem);
+		nft_setelem_catchall_destroy(catchall);
+		nft_trans_gc_elem_add(gc, elem.priv);
+	}
+
+	return gc;
 }
 
 static void nf_tables_module_autoload_cleanup(struct net *net)