net: sched: drop qdisc_reset from dev_graft_qdisc
[ Upstream commit 7bbde83b18 ]
In dev_graft_qdisc() a "new" qdisc is attached and the 'qdisc_destroy'
operation is called on the old qdisc. The destroy operation waits an
RCU grace period and then calls qdisc_rcu_free(), at which point
gso_cpu_skb is freed along with all stats, so there is no need to zero
stats and gso_cpu_skb from the graft operation itself.
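
For reference, the destroy side this paragraph relies on works roughly
as in the sketch below. It is condensed from the net/sched/sch_generic.c
of that era and is not the verbatim source; qdisc_destroy() ends by
handing the qdisc to call_rcu(), and only the RCU callback frees the
per-cpu state:

	/* Condensed sketch, not verbatim: the free runs one RCU grace
	 * period after qdisc_destroy(), so the per-cpu stats (and any
	 * per-cpu gso skb state) die together with the qdisc itself.
	 */
	static void qdisc_rcu_free(struct rcu_head *head)
	{
		struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

		if (qdisc_is_percpu_stats(qdisc)) {
			free_percpu(qdisc->cpu_bstats);
			free_percpu(qdisc->cpu_qstats);
		}

		kfree((char *)qdisc - qdisc->padded);
	}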
Further, after dropping the qdisc locks we must not call qdisc_reset()
until an RCU grace period has elapsed, so that the qdisc has been
detached from all cpus. Removing the qdisc_reset() call here gives the
correct behaviour: wait an RCU grace period, then let the qdisc_destroy
operation clean up the qdisc correctly.
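
The only safe ordering, in other words, is publish first, wait a grace
period, unwind second. A minimal illustration of that ordering follows,
with hypothetical 'old' and 'new' variables; the real code reaches the
same point via dev_deactivate_many() in the diff below:

	old = dev_queue->qdisc_sleeping;		/* qdisc being replaced */
	rcu_assign_pointer(dev_queue->qdisc, new);	/* publish under the root lock */
	spin_unlock_bh(root_lock);

	synchronize_net();	/* grace period: no cpu can still be running 'old' */

	qdisc_reset(old);	/* only now is unwinding 'old' safe */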
Note: a refcnt greater than 1 would cause the destroy operation to be
aborted. However, if this ever happened, the reference to the qdisc
would be lost and we would have a memory leak.
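
The abort referred to is the early return at the top of qdisc_destroy();
a simplified sketch, assuming the refcount_t-based refcnt this tree uses:

	void qdisc_destroy(struct Qdisc *qdisc)
	{
		if (qdisc->flags & TCQ_F_BUILTIN ||
		    !refcount_dec_and_test(&qdisc->refcnt))
			return;	/* destroy aborted: another reference exists */

		/* ... unlink and tear down, then defer the free ... */
		call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
	}

With qdisc_reset() gone from dev_graft_qdisc(), nothing in the graft
path would clean up a qdisc whose destroy was aborted this way, hence
the leak the note warns about.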
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 127bda4b09
commit a80de288d6

1 changed file with 19 additions and 9 deletions
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -743,10 +743,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	root_lock = qdisc_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
-	/* Prune old scheduler */
-	if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
-		qdisc_reset(oqdisc);
-
 	/* ... and graft new one */
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
@@ -897,6 +893,16 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
+static void dev_qdisc_reset(struct net_device *dev,
+			    struct netdev_queue *dev_queue,
+			    void *none)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+	if (qdisc)
+		qdisc_reset(qdisc);
+}
+
 /**
  * dev_deactivate_many - deactivate transmissions on several devices
  * @head: list of devices to deactivate
@@ -907,7 +913,6 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate_many(struct list_head *head)
 {
 	struct net_device *dev;
-	bool sync_needed = false;
 
 	list_for_each_entry(dev, head, close_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -917,20 +922,25 @@ void dev_deactivate_many(struct list_head *head)
 					     &noop_qdisc);
 
 		dev_watchdog_down(dev);
-		sync_needed |= !dev->dismantle;
 	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
 	 * This is avoided if all devices are in dismantle phase :
 	 * Caller will call synchronize_net() for us
 	 */
-	if (sync_needed)
-		synchronize_net();
+	synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	list_for_each_entry(dev, head, close_list)
+	list_for_each_entry(dev, head, close_list) {
 		while (some_qdisc_is_busy(dev))
 			yield();
+		/* The new qdisc is assigned at this point so we can safely
+		 * unwind stale skb lists and qdisc statistics
+		 */
+		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
+		if (dev_ingress_queue(dev))
+			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
+	}
 }
 
 void dev_deactivate(struct net_device *dev)