-rw-r--r--  include/net/sch_generic.h    3
-rw-r--r--  net/core/dev.c              29
-rw-r--r--  net/sched/sch_generic.c      1
3 files changed, 28 insertions, 5 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index b3591e4a..b35301b 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -80,7 +80,8 @@ struct Qdisc {
 	struct gnet_stats_basic_packed bstats;
 	unsigned long		__state;
 	struct gnet_stats_queue	qstats;
-	struct rcu_head     rcu_head;
+	struct rcu_head		rcu_head;
+	spinlock_t		busylock;
 };
 
 static inline bool qdisc_is_running(struct Qdisc *qdisc)
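
Only the busylock member is new here; the paired -/+ rcu_head lines differ just in whitespace (the field is re-aligned to match). The new lock is taken and released in net/core/dev.c below, and initialized in net/sched/sch_generic.c.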
diff --git a/net/core/dev.c b/net/core/dev.c
index 2733226..ffca5c10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2040,8 +2040,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
+	bool contended = qdisc_is_running(q);
 	int rc;
 
+	/*
+	 * Heuristic to force contended enqueues to serialize on a
+	 * separate lock before trying to get qdisc main lock.
+	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
+	 * and dequeue packets faster.
+	 */
+	if (unlikely(contended))
+		spin_lock(&q->busylock);
+
 	spin_lock(root_lock);
 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
 		kfree_skb(skb);
@@ -2056,19 +2066,30 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
 		__qdisc_update_bstats(q, skb->len);
-		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
 			__qdisc_run(q);
-		else
+		} else
 			qdisc_run_end(q);
 
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		skb_dst_force(skb);
 		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
+		if (qdisc_run_begin(q)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
+			__qdisc_run(q);
+		}
 	}
 	spin_unlock(root_lock);
-
+	if (unlikely(contended))
+		spin_unlock(&q->busylock);
 	return rc;
 }
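
The hunk above is the whole mechanism: an enqueuer that observes the qdisc already running takes busylock before competing for the root lock, and releases it as soon as it either becomes the dequeue owner (qdisc_run_begin() succeeded) or finishes its enqueue. Since every other contended enqueuer is parked on busylock, the running owner typically competes with a single rival each time it retakes the root lock. The following is a self-contained userspace sketch of that choreography -- an analogy only, not kernel code: names such as qdisc_demo and demo_xmit are hypothetical, pthread mutexes stand in for spinlocks, and a C11 atomic flag stands in for __QDISC_STATE_RUNNING.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical userspace model of the busylock heuristic (not kernel code). */
struct qdisc_demo {
	pthread_mutex_t main_lock;	/* stands in for the qdisc root lock */
	pthread_mutex_t busylock;	/* staging lock for contended enqueuers */
	atomic_bool	running;	/* stands in for __QDISC_STATE_RUNNING */
	int		qlen;		/* "packets" waiting in the queue */
};

/* One enqueue, following the shape of the patched __dev_xmit_skb(). */
static void demo_xmit(struct qdisc_demo *q)
{
	bool contended = atomic_load(&q->running);

	if (contended)			/* the unlikely() branch above */
		pthread_mutex_lock(&q->busylock);

	pthread_mutex_lock(&q->main_lock);
	q->qlen++;			/* qdisc_enqueue_root() stand-in */
	if (!atomic_exchange(&q->running, true)) {	/* qdisc_run_begin() */
		if (contended) {	/* we own the queue now: stop blocking */
			pthread_mutex_unlock(&q->busylock);
			contended = false;
		}
		/* __qdisc_run() stand-in: drain, dropping main_lock around
		 * each "transmit" -- the window where enqueuers pile up */
		while (q->qlen > 0) {
			q->qlen--;
			pthread_mutex_unlock(&q->main_lock);
			/* hardware transmit would happen here, unlocked */
			pthread_mutex_lock(&q->main_lock);
		}
		atomic_store(&q->running, false);	/* qdisc_run_end() */
	}
	pthread_mutex_unlock(&q->main_lock);

	if (contended)
		pthread_mutex_unlock(&q->busylock);
}

static struct qdisc_demo q;	/* zero-initialized: not running, empty */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		demo_xmit(&q);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	pthread_mutex_init(&q.main_lock, NULL);
	pthread_mutex_init(&q.busylock, NULL);	/* cf. spin_lock_init() below */
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("drained, qlen = %d\n", q.qlen);	/* expect 0 */
	return 0;
}

Build with "cc -pthread"; whichever thread wins the exchange drains the queue on behalf of everyone, so the final queue length is 0.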
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 37b86ea..d20fcd2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -561,6 +561,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
+	spin_lock_init(&sch->busylock);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
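
Two details of the one-line initialization above are worth noting: spin_lock_init() is required before a spinlock's first use (on debug kernels it also sets up the lock-debugging state), and doing it in qdisc_alloc() gives every qdisc instance its own busylock, so the extra serialization stays per-queue instead of introducing a new global point of contention.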