netfilter: nf_flow_table: count pending offload workqueue tasks

To improve hardware offload debuggability, count pending 'add', 'del' and
'stats' flow_table offload workqueue tasks. The counters are incremented
before a new task is scheduled and decremented when the workqueue handler
finishes executing. They allow the user to diagnose congestion on the
hardware offload workqueues, which can happen either when the CPU is starved
and workqueue jobs are executed at a lower rate than new ones are added, or
when the hardware/driver can't keep up with the rate.
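
For illustration only, the counting pattern can be sketched in a few lines of
generic kernel C (the demo_* names below are invented for this sketch; the
patch itself uses per-netns counters and the NF_FLOW_TABLE_STAT_* helpers
shown in the hunks that follow): a per-CPU counter is incremented just before
a work item is queued and decremented at the end of its handler, so the sum
of the counter over all CPUs is the number of still-pending items.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/percpu.h>
#include <linux/workqueue.h>

/* one counter per CPU; "pending" is the sum of this counter over all CPUs */
static DEFINE_PER_CPU(unsigned int, demo_pending);

static void demo_work_handler(struct work_struct *work)
{
	/* ... perform the actual offload operation here ... */

	/*
	 * The handler may run on a different CPU than the one that queued
	 * the work, so an individual per-CPU value may drift below zero;
	 * only the sum across all CPUs is meaningful.
	 */
	this_cpu_dec(demo_pending);
}

static DECLARE_WORK(demo_work, demo_work_handler);

static void demo_schedule(void)
{
	this_cpu_inc(demo_pending);	/* count before scheduling the task */
	schedule_work(&demo_work);
}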

Implement the described counters as percpu counters inside a new struct
netns_ft, which is stored inside struct net. Expose them via a new procfs
file '/proc/net/stat/nf_flowtable' that is similar to the existing
'nf_conntrack' file.
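
For reference, the new file prints one header line followed by one row per
possible CPU, each row being three 8-wide decimal fields (see
nf_flow_table_cpu_seq_show() below). The numbers in the sample reading of
'/proc/net/stat/nf_flowtable' below are purely hypothetical; since a task may
be queued on one CPU and completed on another, a single row can even show
negative values, and only the per-column sums across all rows are meaningful:

wq_add wq_del wq_stats
     106       82       33
      -3        5       12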

Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Vlad Buslov <vladbu@nvidia.com>, 2022-06-15 12:43:55 +02:00
Committed by: Pablo Neira Ayuso

commit b038177636 (parent fc54d9065f)
8 changed files with 206 additions and 4 deletions

include/net/net_namespace.h

@@ -26,6 +26,9 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netns/conntrack.h>
 #endif
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+#include <net/netns/flow_table.h>
+#endif
 #include <net/netns/nftables.h>
 #include <net/netns/xfrm.h>
 #include <net/netns/mpls.h>
@@ -142,6 +145,9 @@ struct net {
 #if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
 	struct netns_nftables	nft;
 #endif
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+	struct netns_ft ft;
+#endif
 #endif
 #ifdef CONFIG_WEXT_CORE
 	struct sk_buff_head	wext_nlevents;

include/net/netfilter/nf_flow_table.h

@@ -335,4 +335,25 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
 	return 0;
 }
 
+#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)	\
+	this_cpu_inc((net)->ft.stat->count)
+#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count)	\
+	this_cpu_dec((net)->ft.stat->count)
+
+#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
+int nf_flow_table_init_proc(struct net *net);
+void nf_flow_table_fini_proc(struct net *net);
+#else
+static inline int nf_flow_table_init_proc(struct net *net)
+{
+	return 0;
+}
+
+static inline void nf_flow_table_fini_proc(struct net *net)
+{
+}
+#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */
+
 #endif /* _NF_FLOW_TABLE_H */

include/net/netns/flow_table.h (new file)

@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NETNS_FLOW_TABLE_H
+#define __NETNS_FLOW_TABLE_H
+struct nf_flow_table_stat {
+	unsigned int count_wq_add;
+	unsigned int count_wq_del;
+	unsigned int count_wq_stats;
+};
+
+struct netns_ft {
+	struct nf_flow_table_stat __percpu *stat;
+};
+#endif

net/netfilter/Kconfig

@@ -734,6 +734,15 @@ config NF_FLOW_TABLE
 
 	  To compile it as a module, choose M here.
 
+config NF_FLOW_TABLE_PROCFS
+	bool "Supply flow table statistics in procfs"
+	default y
+	depends on PROC_FS
+	depends on SYSCTL
+	help
+	  This option enables for the flow table offload statistics
+	  to be shown in procfs under net/netfilter/nf_flowtable.
+
 config NETFILTER_XTABLES
 	tristate "Netfilter Xtables support (required for ip_tables)"
 	default m if NETFILTER_ADVANCED=n

net/netfilter/Makefile

@@ -128,6 +128,7 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
 
 obj-$(CONFIG_NF_FLOW_TABLE)	+= nf_flow_table.o
 nf_flow_table-objs		:= nf_flow_table_core.o nf_flow_table_ip.o \
 				   nf_flow_table_offload.o
+nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o
 
 obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o

net/netfilter/nf_flow_table_core.c

@@ -614,14 +614,74 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
 
+static int nf_flow_table_init_net(struct net *net)
+{
+	net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
+	return net->ft.stat ? 0 : -ENOMEM;
+}
+
+static void nf_flow_table_fini_net(struct net *net)
+{
+	free_percpu(net->ft.stat);
+}
+
+static int nf_flow_table_pernet_init(struct net *net)
+{
+	int ret;
+
+	ret = nf_flow_table_init_net(net);
+	if (ret < 0)
+		return ret;
+
+	ret = nf_flow_table_init_proc(net);
+	if (ret < 0)
+		goto out_proc;
+
+	return 0;
+
+out_proc:
+	nf_flow_table_fini_net(net);
+	return ret;
+}
+
+static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
+{
+	struct net *net;
+
+	list_for_each_entry(net, net_exit_list, exit_list) {
+		nf_flow_table_fini_proc(net);
+		nf_flow_table_fini_net(net);
+	}
+}
+
+static struct pernet_operations nf_flow_table_net_ops = {
+	.init = nf_flow_table_pernet_init,
+	.exit_batch = nf_flow_table_pernet_exit,
+};
+
 static int __init nf_flow_table_module_init(void)
 {
-	return nf_flow_table_offload_init();
+	int ret;
+
+	ret = register_pernet_subsys(&nf_flow_table_net_ops);
+	if (ret < 0)
+		return ret;
+
+	ret = nf_flow_table_offload_init();
+	if (ret)
+		goto out_offload;
+
+	return 0;
+
+out_offload:
+	unregister_pernet_subsys(&nf_flow_table_net_ops);
+	return ret;
 }
 
 static void __exit nf_flow_table_module_exit(void)
 {
 	nf_flow_table_offload_exit();
+	unregister_pernet_subsys(&nf_flow_table_net_ops);
 }
 
 module_init(nf_flow_table_module_init);

net/netfilter/nf_flow_table_offload.c

@@ -967,17 +967,22 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
 static void flow_offload_work_handler(struct work_struct *work)
 {
 	struct flow_offload_work *offload;
+	struct net *net;
 
 	offload = container_of(work, struct flow_offload_work, work);
+	net = read_pnet(&offload->flowtable->net);
 	switch (offload->cmd) {
 	case FLOW_CLS_REPLACE:
 		flow_offload_work_add(offload);
+		NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_add);
 		break;
 	case FLOW_CLS_DESTROY:
 		flow_offload_work_del(offload);
+		NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_del);
 		break;
 	case FLOW_CLS_STATS:
 		flow_offload_work_stats(offload);
+		NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_stats);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -989,12 +994,18 @@ static void flow_offload_work_handler(struct work_struct *work)
 
 static void flow_offload_queue_work(struct flow_offload_work *offload)
 {
-	if (offload->cmd == FLOW_CLS_REPLACE)
+	struct net *net = read_pnet(&offload->flowtable->net);
+
+	if (offload->cmd == FLOW_CLS_REPLACE) {
+		NF_FLOW_TABLE_STAT_INC(net, count_wq_add);
 		queue_work(nf_flow_offload_add_wq, &offload->work);
-	else if (offload->cmd == FLOW_CLS_DESTROY)
+	} else if (offload->cmd == FLOW_CLS_DESTROY) {
+		NF_FLOW_TABLE_STAT_INC(net, count_wq_del);
 		queue_work(nf_flow_offload_del_wq, &offload->work);
-	else
+	} else {
+		NF_FLOW_TABLE_STAT_INC(net, count_wq_stats);
 		queue_work(nf_flow_offload_stats_wq, &offload->work);
+	}
 }
 
 static struct flow_offload_work *

net/netfilter/nf_flow_table_procfs.c (new file)

@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <net/netfilter/nf_flow_table.h>
+
+static void *nf_flow_table_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct net *net = seq_file_net(seq);
+	int cpu;
+
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu + 1;
+		return per_cpu_ptr(net->ft.stat, cpu);
+	}
+
+	return NULL;
+}
+
+static void *nf_flow_table_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct net *net = seq_file_net(seq);
+	int cpu;
+
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu + 1;
+		return per_cpu_ptr(net->ft.stat, cpu);
+	}
+	(*pos)++;
+	return NULL;
+}
+
+static void nf_flow_table_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int nf_flow_table_cpu_seq_show(struct seq_file *seq, void *v)
+{
+	const struct nf_flow_table_stat *st = v;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "wq_add wq_del wq_stats\n");
+		return 0;
+	}
+
+	seq_printf(seq, "%8d %8d %8d\n",
+		   st->count_wq_add,
+		   st->count_wq_del,
+		   st->count_wq_stats
+		);
+	return 0;
+}
+
+static const struct seq_operations nf_flow_table_cpu_seq_ops = {
+	.start = nf_flow_table_cpu_seq_start,
+	.next = nf_flow_table_cpu_seq_next,
+	.stop = nf_flow_table_cpu_seq_stop,
+	.show = nf_flow_table_cpu_seq_show,
+};
+
+int nf_flow_table_init_proc(struct net *net)
+{
+	struct proc_dir_entry *pde;
+
+	pde = proc_create_net("nf_flowtable", 0444, net->proc_net_stat,
+			      &nf_flow_table_cpu_seq_ops,
+			      sizeof(struct seq_net_private));
+	return pde ? 0 : -ENOMEM;
+}
+
+void nf_flow_table_fini_proc(struct net *net)
+{
+	remove_proc_entry("nf_flowtable", net->proc_net_stat);
+}