pkt_sched: Kill qdisc_lock_tree and qdisc_unlock_tree.

No longer used.

Signed-off-by: David S. Miller <davem@davemloft.net>
Author: David S. Miller
Date:   2008-07-16 03:22:39 -07:00
Commit: c7e4f3bbb4
Parent: 78a5b30b73

2 changed files with 3 additions and 36 deletions

--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h

@@ -180,9 +180,6 @@ static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
 	return qdisc->dev_queue->dev;
 }
 
-extern void qdisc_lock_tree(struct net_device *dev);
-extern void qdisc_unlock_tree(struct net_device *dev);
-
 static inline void sch_tree_lock(struct Qdisc *q)
 {
 	spin_lock_bh(qdisc_root_lock(q));
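
For reference, the per-qdisc helpers that replace the killed tree-wide pair can be sketched as below. sch_tree_lock() appears in the context lines above; sch_tree_unlock() is not part of this hunk, so its body here is an assumption that it simply mirrors the lock side:

static inline void sch_tree_lock(struct Qdisc *q)
{
	/* Serialize qdisc changes against enqueue/dequeue, which run
	 * in softirq context; hence the BH-disabling lock. */
	spin_lock_bh(qdisc_root_lock(q));
}

/* Assumed counterpart, mirroring sch_tree_lock(). */
static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_lock(q));
}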

--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c

@@ -29,44 +29,14 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * queue->lock spinlock.
+ * qdisc_root_lock(qdisc) spinlock.
  *
  * The idea is the following:
- * - enqueue, dequeue are serialized via top level device
- *   spinlock queue->lock.
- * - ingress filtering is serialized via top level device
- *   spinlock dev->rx_queue.lock.
+ * - enqueue, dequeue are serialized via qdisc root lock
+ * - ingress filtering is also serialized via qdisc root lock
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->rx_queue.lock)
-{
-	unsigned int i;
-
-	local_bh_disable();
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_lock(&txq->lock);
-	}
-	spin_lock(&dev->rx_queue.lock);
-}
-EXPORT_SYMBOL(qdisc_lock_tree);
-
-void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->rx_queue.lock)
-{
-	unsigned int i;
-
-	spin_unlock(&dev->rx_queue.lock);
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_unlock(&txq->lock);
-	}
-	local_bh_enable();
-}
-EXPORT_SYMBOL(qdisc_unlock_tree);
-
 static inline int qdisc_qlen(struct Qdisc *q)
 {
 	return q->q.qlen;
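
Taken together, the change means a writer no longer locks every TX queue plus dev->rx_queue.lock; it takes only the root lock of the tree it is modifying. A minimal caller sketch, assuming the sch_tree_lock()/sch_tree_unlock() pair shown earlier (the qdisc type and change function below are hypothetical illustrations, not part of this diff):

struct example_sched_data {	/* hypothetical qdisc private data */
	u32 limit;
};

static int example_change(struct Qdisc *sch, u32 new_limit)
{
	struct example_sched_data *d = qdisc_priv(sch);

	sch_tree_lock(sch);	/* was: qdisc_lock_tree(qdisc_dev(sch)) */
	d->limit = new_limit;
	sch_tree_unlock(sch);	/* was: qdisc_unlock_tree(qdisc_dev(sch)) */
	return 0;
}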