Merge branch 'net-speedup-vxlan-geneve-tunnel-dismantle'

Haishuang Yan says:

====================
net: speedup geneve/vxlan tunnels dismantle

This patch series adds batching to vxlan/geneve tunnels so that netns
dismantles are less costly.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit 8e6d60435d by David S. Miller, 2017-12-19 10:59:44 -05:00
2 changed files with 33 additions and 17 deletions
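Both drivers get the same treatment, so the shape of the change is worth spelling out once. The per-netns .exit hook, which took rtnl_lock() and called unregister_netdevice_many() once for every namespace being destroyed, becomes an .exit_batch hook that receives the whole list of dying namespaces, gathers every tunnel device onto one list, and unregisters them in a single RTNL critical section. A minimal sketch of the pattern; the foo_* names below are illustrative placeholders, not identifiers from this series:

	/* Sketch only: foo_link_ops and foo_init_net are assumed placeholders. */
	static void foo_destroy_tunnels(struct net *net, struct list_head *head)
	{
		struct net_device *dev, *aux;

		/* queue this namespace's devices on @head; caller holds RTNL */
		for_each_netdev_safe(net, dev, aux)
			if (dev->rtnl_link_ops == &foo_link_ops)
				unregister_netdevice_queue(dev, head);
	}

	static void __net_exit foo_exit_batch_net(struct list_head *net_list)
	{
		struct net *net;
		LIST_HEAD(list);

		rtnl_lock();
		/* one pass over every dying namespace ... */
		list_for_each_entry(net, net_list, exit_list)
			foo_destroy_tunnels(net, &list);
		/* ... and one unregister call for all of their devices */
		unregister_netdevice_many(&list);
		rtnl_unlock();
	}

	static struct pernet_operations foo_net_ops = {
		.init	    = foo_init_net,
		.exit_batch = foo_exit_batch_net,
	};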

drivers/net/geneve.c

@@ -1638,19 +1638,16 @@ static __net_init int geneve_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_exit geneve_exit_net(struct net *net)
+static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_dev *geneve, *next;
 	struct net_device *dev, *aux;
-	LIST_HEAD(list);
-
-	rtnl_lock();
 
 	/* gather any geneve devices that were moved into this ns */
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &geneve_link_ops)
-			unregister_netdevice_queue(dev, &list);
+			unregister_netdevice_queue(dev, head);
 
 	/* now gather any other geneve devices that were created in this ns */
 	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
@@ -1658,18 +1655,29 @@ static void __net_exit geneve_exit_net(struct net *net)
 		 * to the list by the previous loop.
 		 */
 		if (!net_eq(dev_net(geneve->dev), net))
-			unregister_netdevice_queue(geneve->dev, &list);
+			unregister_netdevice_queue(geneve->dev, head);
 	}
 
+	WARN_ON_ONCE(!list_empty(&gn->sock_list));
+}
+
+static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+{
+	struct net *net;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(net, net_list, exit_list)
+		geneve_destroy_tunnels(net, &list);
+
 	/* unregister the devices gathered above */
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
-	WARN_ON_ONCE(!list_empty(&gn->sock_list));
 }
 
 static struct pernet_operations geneve_net_ops = {
 	.init = geneve_init_net,
-	.exit = geneve_exit_net,
+	.exit_batch = geneve_exit_batch_net,
 	.id = &geneve_net_id,
 	.size = sizeof(struct geneve_net),
 };
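The speedup comes from how the core drives these hooks: during namespace teardown, a pernet .exit is invoked once per dying namespace, while .exit_batch is invoked once with the entire list, so the expensive unregister_netdevice_many() call (and the RCU synchronization inside device unregistration) runs once per batch instead of once per namespace. Roughly, paraphrased here for context from the v4.15-era net/core/net_namespace.c (not part of this commit):

	static void ops_exit_list(const struct pernet_operations *ops,
				  struct list_head *net_exit_list)
	{
		struct net *net;

		if (ops->exit) {
			/* legacy hook: called for each namespace */
			list_for_each_entry(net, net_exit_list, exit_list)
				ops->exit(net);
		}
		if (ops->exit_batch)
			/* batch hook: called once with the whole list */
			ops->exit_batch(net_exit_list);
	}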

drivers/net/vxlan.c

@@ -3692,18 +3692,16 @@ static __net_init int vxlan_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_exit vxlan_exit_net(struct net *net)
+static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_dev *vxlan, *next;
 	struct net_device *dev, *aux;
 	unsigned int h;
-	LIST_HEAD(list);
 
-	rtnl_lock();
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &vxlan_link_ops)
-			unregister_netdevice_queue(dev, &list);
+			unregister_netdevice_queue(dev, head);
 
 	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
 		/* If vxlan->dev is in the same netns, it has already been added
@@ -3711,20 +3709,30 @@ static void __net_exit vxlan_exit_net(struct net *net)
 		 */
 		if (!net_eq(dev_net(vxlan->dev), net)) {
 			gro_cells_destroy(&vxlan->gro_cells);
-			unregister_netdevice_queue(vxlan->dev, &list);
+			unregister_netdevice_queue(vxlan->dev, head);
 		}
 	}
 
-	unregister_netdevice_many(&list);
-	rtnl_unlock();
-
 	for (h = 0; h < PORT_HASH_SIZE; ++h)
 		WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
 }
 
+static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+{
+	struct net *net;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(net, net_list, exit_list)
+		vxlan_destroy_tunnels(net, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+}
+
 static struct pernet_operations vxlan_net_ops = {
 	.init = vxlan_init_net,
-	.exit = vxlan_exit_net,
+	.exit_batch = vxlan_exit_batch_net,
 	.id = &vxlan_net_id,
 	.size = sizeof(struct vxlan_net),
 };