| author | Stephen Hemminger <shemminger@linux-foundation.org> | 2007-03-22 12:17:42 -0700 |
| --- | --- | --- |
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-04-25 22:27:22 -0700 |
| commit | 11274e5a43266d531140530adebead6903380caf (patch) | |
| tree | 38c365a3835c9e5973fb1941a31dde8cc4856724 /net/sched | |
| parent | 075aa573b74a732aeff487ab77d3fbd627c10856 (diff) | |
[NETEM]: avoid excessive requeues
The netem code would call getnstimeofday() and dequeue/requeue after
every packet, even while the qdisc was still throttled and waiting for
the head packet's departure time. Avoid this overhead by checking the
throttled flag at the start of netem_dequeue().
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
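
The pattern the patch applies, in isolation: the watchdog path sets a "throttled" flag before arming the delay timer, dequeue tests that flag first and returns NULL without touching the clock or requeueing, and the timer callback clears the flag once the delay has expired (the smp_wmb()/smp_mb() calls keep the flag update visible across CPUs). Below is a minimal userspace sketch of that pattern, not the kernel code itself; the names (toy_qdisc, toy_dequeue, toy_watchdog_fire) are invented for illustration, and C11 atomics stand in for the kernel's flag update plus memory barriers.

```c
/* Minimal userspace sketch (NOT the kernel code) of the throttled-flag
 * pattern: dequeue bails out cheaply while a delay timer is pending,
 * instead of reading the clock and requeueing the head packet on
 * every call.  All names here are invented for illustration. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_packet {
	long long time_to_send;		/* models netem_skb_cb.time_to_send */
	struct toy_packet *next;
};

struct toy_qdisc {
	atomic_bool throttled;		/* models TCQ_F_THROTTLED */
	struct toy_packet *head;	/* queue of delayed packets */
};

/* Timer callback: the delay has expired, allow dequeue to run again.
 * The release store pairs with the acquire load in toy_dequeue(),
 * playing the role of the smp_wmb()/smp_mb() pair in the patch. */
static void toy_watchdog_fire(struct toy_qdisc *q)
{
	atomic_store_explicit(&q->throttled, false, memory_order_release);
}

static struct toy_packet *toy_dequeue(struct toy_qdisc *q, long long now)
{
	/* Cheap early exit: while throttled there is nothing to do. */
	if (atomic_load_explicit(&q->throttled, memory_order_acquire))
		return NULL;

	struct toy_packet *p = q->head;
	if (!p)
		return NULL;

	if (p->time_to_send <= now) {	/* due: hand the packet out */
		q->head = p->next;
		return p;
	}

	/* Not due yet: mark the qdisc throttled, then arm a timer for
	 * p->time_to_send (the timer itself is omitted in this sketch). */
	atomic_store_explicit(&q->throttled, true, memory_order_release);
	return NULL;
}

int main(void)
{
	struct toy_packet pkt = { .time_to_send = 100, .next = NULL };
	struct toy_qdisc q = { .head = &pkt };

	atomic_init(&q.throttled, false);

	printf("t=50:  %p\n", (void *)toy_dequeue(&q, 50));	/* NULL, throttles  */
	printf("t=60:  %p\n", (void *)toy_dequeue(&q, 60));	/* NULL, early exit */
	toy_watchdog_fire(&q);					/* "timer" expires  */
	printf("t=150: %p\n", (void *)toy_dequeue(&q, 150));	/* returns packet   */
	return 0;
}
```

The point of the design is that the common case while throttled becomes a single flag test, rather than a clock read plus a dequeue/requeue round trip on every dequeue attempt.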
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/sch_api.c | 3
-rw-r--r-- | net/sched/sch_netem.c | 23
2 files changed, 16 insertions, 10 deletions
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b06f202..fcaa4ad 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -298,6 +298,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 						 timer);
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+	smp_wmb();
 	netif_schedule(wd->qdisc->dev);
 	return HRTIMER_NORESTART;
 }
@@ -315,6 +316,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 	ktime_t time;
 
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
+	smp_wmb();
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_US2NS(expires));
 	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -325,6 +327,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
 	hrtimer_cancel(&wd->timer);
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+	smp_wmb();
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7e9e658..fb49e9e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -273,6 +273,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
+	smp_mb();
+	if (sch->flags & TCQ_F_THROTTLED)
+		return NULL;
+
 	skb = q->qdisc->dequeue(q->qdisc);
 	if (skb) {
 		const struct netem_skb_cb *cb
@@ -285,18 +289,17 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 		if (PSCHED_TLESS(cb->time_to_send, now)) {
 			pr_debug("netem_dequeue: return skb=%p\n", skb);
 			sch->q.qlen--;
-			sch->flags &= ~TCQ_F_THROTTLED;
 			return skb;
-		} else {
-			qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
-
-			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
-				qdisc_tree_decrease_qlen(q->qdisc, 1);
-				sch->qstats.drops++;
-				printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
-				       q->qdisc->ops->id);
-			}
 		}
+
+		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
+			qdisc_tree_decrease_qlen(q->qdisc, 1);
+			sch->qstats.drops++;
+			printk(KERN_ERR "netem: %s could not requeue\n",
+			       q->qdisc->ops->id);
+		}
+
+		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
 	}
 
 	return NULL;