author      Herbert Xu <herbert@gondor.apana.org.au>          2007-05-10 14:12:47 -0700
committer   David S. Miller <davem@sunset.davemloft.net>      2007-05-10 23:47:42 -0700
commit      41a23b0788610b27ecb4c4ee857f3fe7168f1070 (patch)
tree        8f239d889253a7d7d9dd9f12bf4c76f7ff184274 /net
parent      cce1fa36a8ed36e8a3f64455e2a830f48e904c64 (diff)
[NET_SCHED]: Avoid requeue warning on dev_deactivate
When we relinquish queue_lock in qdisc_restart and then retake it for requeueing, we might race against dev_deactivate and end up requeueing onto noop_qdisc. This causes a warning to be printed.

This patch fixes it by checking for noop_qdisc before we requeue. As an added bonus, we can remove the same check in __qdisc_run, which was added to prevent dev->gso_skb from being requeued while we are shutting down. Even though we have had to add a new conditional in its place, this is better because the new one is evaluated only on requeues rather than every single time qdisc_run is called.

For this to work we also need to move the clearing of gso_skb up in dev_deactivate, as qdisc_restart can now occur even after we wait for __LINK_STATE_QDISC_RUNNING to clear (but it will not do anything as long as the queue and gso_skb are already clear).

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
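The core of the fix is easiest to read in isolation. Below is a minimal sketch of the requeue path after the patch; the helper name requeue_skb and its signature are illustrative only (in the real patch this logic lives inline under the requeue: label in qdisc_restart, see the diff below). Once queue_lock has been dropped for the driver's transmit routine and retaken, dev->qdisc may already have been swapped to noop_qdisc by dev_deactivate, so the packet is freed rather than requeued onto the dummy qdisc.

/* Illustrative sketch only, not the verbatim kernel code. */
static void requeue_skb(struct net_device *dev, struct sk_buff *skb,
			struct Qdisc *q)
{
	if (unlikely(q == &noop_qdisc))
		kfree_skb(skb);		/* device was deactivated meanwhile */
	else if (skb->next)
		dev->gso_skb = skb;	/* partially sent GSO segment list */
	else
		q->ops->requeue(skb, q);
	netif_schedule(dev);
}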
Diffstat (limited to 'net')
-rw-r--r--    net/sched/sch_generic.c    18
1 file changed, 8 insertions, 10 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 816d311..f28bb2d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -162,7 +162,9 @@ static inline int qdisc_restart(struct net_device *dev)
 	 */
 requeue:
-	if (skb->next)
+	if (unlikely(q == &noop_qdisc))
+		kfree_skb(skb);
+	else if (skb->next)
 		dev->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
@@ -177,15 +179,11 @@ out:
 void __qdisc_run(struct net_device *dev)
 {
-	if (unlikely(dev->qdisc == &noop_qdisc))
-		goto out;
-
 	do {
 		if (!qdisc_restart(dev))
			break;
 	} while (!netif_queue_stopped(dev));
-out:
 	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
 }
@@ -547,6 +545,7 @@ void dev_activate(struct net_device *dev)
 void dev_deactivate(struct net_device *dev)
 {
 	struct Qdisc *qdisc;
+	struct sk_buff *skb;
 	spin_lock_bh(&dev->queue_lock);
 	qdisc = dev->qdisc;
@@ -554,8 +553,12 @@ void dev_deactivate(struct net_device *dev)
 	qdisc_reset(qdisc);
+	skb = dev->gso_skb;
+	dev->gso_skb = NULL;
 	spin_unlock_bh(&dev->queue_lock);
+	kfree_skb(skb);
+
 	dev_watchdog_down(dev);
 	/* Wait for outstanding dev_queue_xmit calls. */
@@ -564,11 +567,6 @@ void dev_deactivate(struct net_device *dev)
 	/* Wait for outstanding qdisc_run calls. */
 	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 		yield();
-
-	if (dev->gso_skb) {
-		kfree_skb(dev->gso_skb);
-		dev->gso_skb = NULL;
-	}
 }
 void dev_init_scheduler(struct net_device *dev)
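For context, a hedged sketch of how dev_deactivate() ends up ordered after this change (simplified: the dev_queue_xmit synchronization and other details between the unlock and the wait loop are elided, so this is not the verbatim function body). The point is that gso_skb is snapshotted and cleared while queue_lock is still held, so any qdisc_restart() that runs after the wait loop finds both the queue and gso_skb empty and does nothing.

void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;
	qdisc_reset(qdisc);

	/* Grab any pending GSO skb under queue_lock so a racing
	 * qdisc_restart() can no longer pick it up. */
	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc_run calls; they see an empty
	 * queue and a NULL gso_skb and exit without touching the
	 * now-dead qdisc. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();
}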