author     David S. Miller <davem@davemloft.net>  2008-07-08 17:33:13 -0700
committer  David S. Miller <davem@davemloft.net>  2008-07-08 17:33:13 -0700
commit     555353cfa1aee293de445bfa6de43276138ddd82 (patch)
tree       b5daba85806b8e36731c4a474aac97f1a0140a51 /net
parent     dc2b48475a0a36f8b3bbb2da60d3a006dc5c2c84 (diff)
netdev: The ingress_lock member is no longer needed.
Every qdisc is associated with a queue, and in the case of ingress qdiscs that will now be netdev->rx_queue, so using that queue's lock is the thing to do.

Signed-off-by: David S. Miller <davem@davemloft.net>
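For orientation, here is a rough sketch (not part of this patch) of the structures the change relies on: struct netdev_queue carries its own spinlock, and the device now embeds one such queue for the ingress path, so the per-device ingress_lock has nothing left to protect. Only the members relevant to the diff below are shown; the layout is illustrative, not the full in-tree definition.

/*
 * Rough sketch, illustrative only: trimmed to the fields this diff uses.
 */
struct netdev_queue {
	spinlock_t		lock;		/* serializes enqueue on this queue */
	struct net_device	*dev;
};

struct net_device {
	/* ... */
	struct netdev_queue	rx_queue;	/* its lock takes over from ingress_lock */
	struct netdev_queue	tx_queue;	/* already carries the egress queue lock */
	struct Qdisc		*qdisc_ingress;	/* still here, now guarded by rx_queue.lock */
	/* ... */
};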
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c           12
-rw-r--r--  net/sched/sch_api.c       3
-rw-r--r--  net/sched/sch_generic.c  10
3 files changed, 13 insertions, 12 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 0501104..2322fb6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2014,10 +2014,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
*/
static int ing_filter(struct sk_buff *skb)
{
- struct Qdisc *q;
struct net_device *dev = skb->dev;
- int result = TC_ACT_OK;
u32 ttl = G_TC_RTTL(skb->tc_verd);
+ struct netdev_queue *rxq;
+ int result = TC_ACT_OK;
+ struct Qdisc *q;
if (MAX_RED_LOOP < ttl++) {
printk(KERN_WARNING
@@ -2029,10 +2030,12 @@ static int ing_filter(struct sk_buff *skb)
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- spin_lock(&dev->ingress_lock);
+ rxq = &dev->rx_queue;
+
+ spin_lock(&rxq->lock);
if ((q = dev->qdisc_ingress) != NULL)
result = q->enqueue(skb, q);
- spin_unlock(&dev->ingress_lock);
+ spin_unlock(&rxq->lock);
return result;
}
@@ -3795,7 +3798,6 @@ int register_netdevice(struct net_device *dev)
spin_lock_init(&dev->_xmit_lock);
netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
dev->xmit_lock_owner = -1;
- spin_lock_init(&dev->ingress_lock);
dev->iflink = -1;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2a1834f..570cef2 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -601,12 +601,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
sch->parent = parent;
+ sch->stats_lock = &dev_queue->lock;
if (handle == TC_H_INGRESS) {
sch->flags |= TCQ_F_INGRESS;
- sch->stats_lock = &dev->ingress_lock;
handle = TC_H_MAKE(TC_H_INGRESS, 0);
} else {
- sch->stats_lock = &dev_queue->lock;
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
err = -ENOMEM;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ee8f9f7..804d44b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -35,24 +35,24 @@
* - enqueue, dequeue are serialized via top level device
* spinlock queue->lock.
* - ingress filtering is serialized via top level device
- * spinlock dev->ingress_lock.
+ * spinlock dev->rx_queue.lock.
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
void qdisc_lock_tree(struct net_device *dev)
__acquires(dev->tx_queue.lock)
- __acquires(dev->ingress_lock)
+ __acquires(dev->rx_queue.lock)
{
spin_lock_bh(&dev->tx_queue.lock);
- spin_lock(&dev->ingress_lock);
+ spin_lock(&dev->rx_queue.lock);
}
EXPORT_SYMBOL(qdisc_lock_tree);
void qdisc_unlock_tree(struct net_device *dev)
- __releases(dev->ingress_lock)
+ __releases(dev->rx_queue.lock)
__releases(dev->tx_queue.lock)
{
- spin_unlock(&dev->ingress_lock);
+ spin_unlock(&dev->rx_queue.lock);
spin_unlock_bh(&dev->tx_queue.lock);
}
EXPORT_SYMBOL(qdisc_unlock_tree);
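As a usage note, a hypothetical caller (the function name below is made up) showing the lock ordering these helpers establish after the change: tx_queue.lock is taken with BHs disabled, rx_queue.lock nests inside it, and the release happens in reverse order, just as ingress_lock used to nest inside the egress queue lock.

/*
 * Hypothetical caller, for illustration only.
 */
static void example_rewire_qdisc_tree(struct net_device *dev)
{
	qdisc_lock_tree(dev);		/* spin_lock_bh(&dev->tx_queue.lock);
					 * spin_lock(&dev->rx_queue.lock);   */

	/* ... detach/attach qdiscs while both queues are quiesced ... */

	qdisc_unlock_tree(dev);		/* unlock in reverse order; BHs re-enabled last */
}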