Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	40
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b1e4c5e..74d4a1d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -90,14 +90,17 @@ void qdisc_unlock_tree(struct net_device *dev)
    NOTE: Called under dev->queue_lock with locally disabled BH.
 */
 
-int qdisc_restart(struct net_device *dev)
+static inline int qdisc_restart(struct net_device *dev)
 {
 	struct Qdisc *q = dev->qdisc;
 	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if ((skb = q->dequeue(q)) != NULL) {
+	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
 		unsigned nolock = (dev->features & NETIF_F_LLTX);
+
+		dev->gso_skb = NULL;
+
 		/*
 		 * When the driver has LLTX set it does its own locking
 		 * in start_xmit. No need to add additional overhead by
@@ -134,10 +137,8 @@ int qdisc_restart(struct net_device *dev)
 
 			if (!netif_queue_stopped(dev)) {
 				int ret;
-				if (netdev_nit)
-					dev_queue_xmit_nit(skb, dev);
 
-				ret = dev->hard_start_xmit(skb, dev);
+				ret = dev_hard_start_xmit(skb, dev);
 				if (ret == NETDEV_TX_OK) {
 					if (!nolock) {
 						netif_tx_unlock(dev);
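
Note: the hunk above folds the open-coded tap delivery and dev->hard_start_xmit() call into a single dev_hard_start_xmit() helper, added to net/core/dev.c elsewhere in this series. The sketch below only illustrates the behaviour qdisc_restart() now relies on; apart from identifiers already visible in the diff, the names and details are illustrative rather than the exact upstream code.

/* Illustrative sketch only, not the exact helper from net/core/dev.c. */
static int dev_hard_start_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
		/* Ordinary packet: same path the removed lines used to take. */
		if (netdev_nit)
			dev_queue_xmit_nit(skb, dev);
		return dev->hard_start_xmit(skb, dev);
	}

	/* GSO path: skb heads a list of already-built segments chained
	 * through skb->next (in the real helper the segmentation itself
	 * is also performed here).  Send one segment at a time; on
	 * failure leave the unsent tail chained so the requeue hunk
	 * below can park it in dev->gso_skb. */
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
	} while (skb->next);

	kfree_skb(skb);		/* every segment went out; drop the list head */
	return NETDEV_TX_OK;
}

The property this patch depends on is the failure path: when a segment cannot be sent, the unsent remainder stays chained off the original skb, which is exactly what the skb->next test in the next hunk keys on.
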
@@ -171,7 +172,10 @@ int qdisc_restart(struct net_device *dev)
 		 */
 
 requeue:
-		q->ops->requeue(skb, q);
+		if (skb->next)
+			dev->gso_skb = skb;
+		else
+			q->ops->requeue(skb, q);
 		netif_schedule(dev);
 		return 1;
 	}
@@ -179,6 +183,18 @@ requeue:
 	return q->q.qlen;
 }
 
+void __qdisc_run(struct net_device *dev)
+{
+	if (unlikely(dev->qdisc == &noop_qdisc))
+		goto out;
+
+	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
+		/* NOTHING */;
+
+out:
+	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+}
+
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
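
Note: __qdisc_run() only clears __LINK_STATE_QDISC_RUNNING; setting the bit is left to a caller-side wrapper, presumably part of the companion include/net/pkt_sched.h change. A minimal sketch of that wrapper, assuming the usual test_and_set_bit() pattern:

/* Sketch of the expected caller-side wrapper; the real one is expected
 * in include/net/pkt_sched.h and may differ in detail. */
static inline void qdisc_run(struct net_device *dev)
{
	/* Whichever CPU wins the bit drains the queue for everyone else;
	 * losers simply return and leave the work to the owner. */
	if (!test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(dev);
}

Because exactly one CPU owns the bit at a time, dev_deactivate() below can wait for the qdisc to go idle simply by watching the same bit.
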
@@ -575,10 +591,17 @@ void dev_deactivate(struct net_device *dev)
 
 	dev_watchdog_down(dev);
 
-	while (test_bit(__LINK_STATE_SCHED, &dev->state))
+	/* Wait for outstanding dev_queue_xmit calls. */
+	synchronize_rcu();
+
+	/* Wait for outstanding qdisc_run calls. */
+	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 		yield();
 
-	spin_unlock_wait(&dev->_xmit_lock);
+	if (dev->gso_skb) {
+		kfree_skb(dev->gso_skb);
+		dev->gso_skb = NULL;
+	}
 }
 
 void dev_init_scheduler(struct net_device *dev)
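
Note: the synchronize_rcu() added above pairs with the read-side critical section in dev_queue_xmit(). The schematic below is trimmed to just the locking that matters for that pairing; it is a from-memory sketch of the transmit path in net/core/dev.c of this era, not the literal function:

/* Schematic sender path, not the literal dev_queue_xmit(). */
static int dev_queue_xmit_schematic(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc;

	rcu_read_lock_bh();			/* read side that synchronize_rcu() waits for */
	q = rcu_dereference(dev->qdisc);	/* may still observe the old qdisc */

	spin_lock(&dev->queue_lock);
	rc = q->enqueue(skb, q);
	qdisc_run(dev);				/* may set __LINK_STATE_QDISC_RUNNING */
	spin_unlock(&dev->queue_lock);

	rcu_read_unlock_bh();
	return rc;
}

Once synchronize_rcu() returns, every sender that could still have seen the old qdisc has left this section, and once __LINK_STATE_QDISC_RUNNING reads clear no CPU is inside __qdisc_run(), so dev->gso_skb can be freed without racing a transmit in flight.
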
@@ -620,6 +643,5 @@ EXPORT_SYMBOL(qdisc_create_dflt);
 EXPORT_SYMBOL(qdisc_alloc);
 EXPORT_SYMBOL(qdisc_destroy);
 EXPORT_SYMBOL(qdisc_reset);
-EXPORT_SYMBOL(qdisc_restart);
 EXPORT_SYMBOL(qdisc_lock_tree);
 EXPORT_SYMBOL(qdisc_unlock_tree);