drivers: vxlan: vnifilter: per vni stats

Add per-vni statistics for vni filter mode, counting Rx/Tx
bytes/packets/drops/errors at the relevant points in the receive and
transmit paths.

This patch also changes vxlan_vs_find_vni to return the
vxlan_vni_node in cases where the vni belongs to a vni-filtering
vxlan device.

Signed-off-by: Nikolay Aleksandrov <nikolay@nvidia.com>
Signed-off-by: Roopa Prabhu <roopa@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Nikolay Aleksandrov, 2022-03-01 05:04:38 +00:00; committed by David S. Miller
commit 4095e0e132, parent 3edf5f66c1
4 changed files with 128 additions and 9 deletions
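
In short, the lookup helper gains an optional out-parameter for the matched
per-vni node, and a counting helper is introduced that is a no-op on
non-vnifilter devices. The following is a condensed summary of the API shape
as it appears in the diffs below, not a complete listing:

	/* Lookup now optionally returns the matched per-vni node;
	 * callers that do not need it pass NULL.
	 */
	static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs,
						   int ifindex, __be32 vni,
						   struct vxlan_vni_node **vninode);

	/* Per-vni counting helper; returns early unless the device has
	 * VXLAN_F_VNIFILTER set, and looks up the vni node itself when
	 * vninode is NULL.
	 */
	void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
				   struct vxlan_vni_node *vninode,
				   int type, unsigned int len);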

drivers/net/vxlan/vxlan_core.c

@@ -139,9 +139,11 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
return NULL;
}
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
__be32 vni)
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs,
int ifindex, __be32 vni,
struct vxlan_vni_node **vninode)
{
struct vxlan_vni_node *vnode;
struct vxlan_dev_node *node;
/* For flow based devices, map all packets to VNI 0 */
@@ -152,8 +154,10 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
if (!node->vxlan)
continue;
vnode = NULL;
if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
if (!vxlan_vnifilter_lookup(node->vxlan, vni))
vnode = vxlan_vnifilter_lookup(node->vxlan, vni);
if (!vnode)
continue;
} else if (node->vxlan->default_dst.remote_vni != vni) {
continue;
@@ -167,6 +171,8 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
continue;
}
if (vninode)
*vninode = vnode;
return node->vxlan;
}
@@ -184,7 +190,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
if (!vs)
return NULL;
return vxlan_vs_find_vni(vs, ifindex, vni);
return vxlan_vs_find_vni(vs, ifindex, vni, NULL);
}
/* Fill in neighbour message in skbuff. */
@@ -1644,6 +1650,7 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr unparsed;
@@ -1676,7 +1683,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan)
goto drop;
@@ -1746,6 +1753,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
}
@@ -1754,10 +1763,13 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
atomic_long_inc(&vxlan->dev->rx_dropped);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_DROPS, 0);
goto drop;
}
dev_sw_netstats_rx_add(vxlan->dev, skb->len);
vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
@@ -1791,7 +1803,7 @@ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
return -ENOENT;
vni = vxlan_vni(hdr->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL);
if (!vxlan)
return -ENOENT;
@@ -1865,8 +1877,12 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
if (netif_rx_ni(reply) == NET_RX_DROP)
if (netif_rx_ni(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
@@ -2020,9 +2036,11 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (reply == NULL)
goto out;
if (netif_rx_ni(reply) == NET_RX_DROP)
if (netif_rx_ni(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
@@ -2356,15 +2374,20 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
tx_stats->tx_packets++;
tx_stats->tx_bytes += len;
u64_stats_update_end(&tx_stats->syncp);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += len;
u64_stats_update_end(&rx_stats->syncp);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
} else {
drop:
dev->stats.rx_dropped++;
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
rcu_read_unlock();
}
@@ -2394,6 +2417,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
return -ENOENT;
@@ -2417,6 +2442,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
union vxlan_addr remote_ip, local_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
__u8 tos, ttl;
@@ -2644,12 +2670,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
label, src_port, dst_port, !udp_sum);
#endif
}
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
out_unlock:
rcu_read_unlock();
return;
drop:
dev->stats.tx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2661,6 +2689,7 @@ tx_error:
dev->stats.tx_carrier_errors++;
dst_release(ndst);
dev->stats.tx_errors++;
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2693,6 +2722,8 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
drop:
dev->stats.tx_dropped++;
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
}
@@ -2767,6 +2798,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
return NETDEV_TX_OK;
}

drivers/net/vxlan/vxlan_private.h

@@ -136,6 +136,9 @@ void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
void vxlan_vnifilter_init(void);
void vxlan_vnifilter_uninit(void);
void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
struct vxlan_vni_node *vninode,
int type, unsigned int len);
void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,

drivers/net/vxlan/vxlan_vnifilter.c

@@ -116,6 +116,57 @@ void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
spin_unlock(&vn->sock_lock);
}
static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode,
int type, unsigned int len)
{
struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats);
u64_stats_update_begin(&pstats->syncp);
switch (type) {
case VXLAN_VNI_STATS_RX:
pstats->stats.rx_bytes += len;
pstats->stats.rx_packets++;
break;
case VXLAN_VNI_STATS_RX_DROPS:
pstats->stats.rx_drops++;
break;
case VXLAN_VNI_STATS_RX_ERRORS:
pstats->stats.rx_errors++;
break;
case VXLAN_VNI_STATS_TX:
pstats->stats.tx_bytes += len;
pstats->stats.tx_packets++;
break;
case VXLAN_VNI_STATS_TX_DROPS:
pstats->stats.tx_drops++;
break;
case VXLAN_VNI_STATS_TX_ERRORS:
pstats->stats.tx_errors++;
break;
}
u64_stats_update_end(&pstats->syncp);
}
void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
struct vxlan_vni_node *vninode,
int type, unsigned int len)
{
struct vxlan_vni_node *vnode;
if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
return;
if (vninode) {
vnode = vninode;
} else {
vnode = vxlan_vnifilter_lookup(vxlan, vni);
if (!vnode)
return;
}
vxlan_vnifilter_stats_add(vnode, type, len);
}
static u32 vnirange(struct vxlan_vni_node *vbegin,
struct vxlan_vni_node *vend)
{
@@ -562,6 +613,11 @@ static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
vninode = kzalloc(sizeof(*vninode), GFP_ATOMIC);
if (!vninode)
return NULL;
vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu);
if (!vninode->stats) {
kfree(vninode);
return NULL;
}
vninode->vni = vni;
vninode->hlist4.vxlan = vxlan;
#if IS_ENABLED(CONFIG_IPV6)
@@ -621,6 +677,7 @@ static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
struct vxlan_vni_node *v;
v = container_of(rcu, struct vxlan_vni_node, rcu);
free_percpu(v->stats);
kfree(v);
}

include/net/vxlan.h

@@ -227,6 +227,31 @@ struct vxlan_config {
enum ifla_vxlan_df df;
};
enum {
VXLAN_VNI_STATS_RX,
VXLAN_VNI_STATS_RX_DROPS,
VXLAN_VNI_STATS_RX_ERRORS,
VXLAN_VNI_STATS_TX,
VXLAN_VNI_STATS_TX_DROPS,
VXLAN_VNI_STATS_TX_ERRORS,
};
struct vxlan_vni_stats {
u64 rx_packets;
u64 rx_bytes;
u64 rx_drops;
u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
u64 tx_drops;
u64 tx_errors;
};
struct vxlan_vni_stats_pcpu {
struct vxlan_vni_stats stats;
struct u64_stats_sync syncp;
};
struct vxlan_dev_node {
struct hlist_node hlist;
struct vxlan_dev *vxlan;
@@ -241,6 +266,7 @@ struct vxlan_vni_node {
struct list_head vlist;
__be32 vni;
union vxlan_addr remote_ip; /* default remote ip for this vni */
struct vxlan_vni_stats_pcpu __percpu *stats;
struct rcu_head rcu;
};
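
For context, a dump path would fold these per-CPU counters into a single
struct vxlan_vni_stats before reporting them to userspace. Below is a minimal
sketch of such a fold, relying on the structures defined above and assuming a
helper named vxlan_vnifilter_stats_get() that is not part of this excerpt:

	/* Sum the per-CPU counters of one VNI node into @dest.
	 * Uses the usual u64_stats_fetch_begin()/u64_stats_fetch_retry()
	 * pattern so 64-bit counters are read consistently on 32-bit hosts.
	 */
	static void vxlan_vnifilter_stats_get(struct vxlan_vni_node *vninode,
					      struct vxlan_vni_stats *dest)
	{
		int i;

		memset(dest, 0, sizeof(*dest));
		for_each_possible_cpu(i) {
			struct vxlan_vni_stats_pcpu *pstats;
			struct vxlan_vni_stats temp;
			unsigned int start;

			pstats = per_cpu_ptr(vninode->stats, i);
			do {
				start = u64_stats_fetch_begin(&pstats->syncp);
				temp = pstats->stats;
			} while (u64_stats_fetch_retry(&pstats->syncp, start));

			dest->rx_packets += temp.rx_packets;
			dest->rx_bytes   += temp.rx_bytes;
			dest->rx_drops   += temp.rx_drops;
			dest->rx_errors  += temp.rx_errors;
			dest->tx_packets += temp.tx_packets;
			dest->tx_bytes   += temp.tx_bytes;
			dest->tx_drops   += temp.tx_drops;
			dest->tx_errors  += temp.tx_errors;
		}
	}

The fetch_begin/fetch_retry loop re-reads a CPU's snapshot if a writer updated
it concurrently, which keeps the counters consistent without adding locking to
the hot RX/TX paths that call vxlan_vnifilter_count().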