path: root/net/sched/sch_fq_codel.c
author     David S. Miller <davem@davemloft.net>  2016-06-10 11:52:24 -0700
committer  David S. Miller <davem@davemloft.net>  2016-06-10 11:52:24 -0700
commit     1578b0a5e92825334760741e5c166b8873886f1b
tree       ac8299191f37990111f7d4b615601f4356e24fea  /net/sched/sch_fq_codel.c
parent     3d5479e92087f6249231e26a2d7327e86a8d0dfc
parent     698ea54dde6768d4a96080d0fb796cb3a4eadaf8
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/sched/act_police.c
	net/sched/sch_drr.c
	net/sched/sch_hfsc.c
	net/sched/sch_prio.c
	net/sched/sch_red.c
	net/sched/sch_tbf.c

In net-next the drop methods of the packet schedulers got removed, so the
bug fixes to them in 'net' are irrelevant.

A packet action unload crash fix conflicts with the addition of the new
firstuse timestamp.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_fq_codel.c')
-rw-r--r--  net/sched/sch_fq_codel.c  26
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 176a7e2..a302e8e 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -190,6 +190,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	unsigned int idx, prev_backlog, prev_qlen;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);
+	unsigned int pkt_len;
 	bool memory_limited;
 
 	idx = fq_codel_classify(skb, sch, &ret);
@@ -221,6 +222,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	prev_backlog = sch->qstats.backlog;
 	prev_qlen = sch->q.qlen;
 
+	/* save this packet length as it might be dropped by fq_codel_drop() */
+	pkt_len = qdisc_pkt_len(skb);
 	/* fq_codel_drop() is quite expensive, as it performs a linear search
 	 * in q->backlogs[] to find a fat flow.
 	 * So instead of dropping a single packet, drop half of its backlog
@@ -228,14 +231,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 */
 	ret = fq_codel_drop(sch, q->drop_batch_size);
 
-	q->drop_overlimit += prev_qlen - sch->q.qlen;
+	prev_qlen -= sch->q.qlen;
+	prev_backlog -= sch->qstats.backlog;
+	q->drop_overlimit += prev_qlen;
 	if (memory_limited)
-		q->drop_overmemory += prev_qlen - sch->q.qlen;
-	/* As we dropped packet(s), better let upper stack know this */
-	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-				  prev_backlog - sch->qstats.backlog);
+		q->drop_overmemory += prev_qlen;
 
-	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+	/* As we dropped packet(s), better let upper stack know this.
+	 * If we dropped a packet for this flow, return NET_XMIT_CN,
+	 * but in this case, our parents wont increase their backlogs.
+	 */
+	if (ret == idx) {
+		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+					  prev_backlog - pkt_len);
+		return NET_XMIT_CN;
+	}
+	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+	return NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
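Note: the hunk above changes how the enqueue path accounts for the batched drop. Below is a minimal userspace sketch of that accounting, not kernel code; toy_sch and account_drop are illustrative stand-ins and the numbers in main() are made up. It shows why, when the trimmed flow is the same one the new packet joined (ret == idx), the packet just enqueued is excluded from the reduction reported upward: NET_XMIT_CN tells the parent not to count that packet as enqueued, so it must not be subtracted from the parent's backlog either.

#include <stdio.h>

/* Hypothetical stand-in for the qdisc fields touched by the hunk above. */
struct toy_sch {
	unsigned int qlen;	/* models sch->q.qlen */
	unsigned int backlog;	/* models sch->qstats.backlog */
};

/* Models the post-drop accounting.  Returns 1 for the NET_XMIT_CN case
 * (the batch drop hit the flow the new packet was queued on), 0 for the
 * plain-success case.  reduce_qlen/reduce_backlog receive the values the
 * patch would hand to qdisc_tree_reduce_backlog().
 */
static int account_drop(const struct toy_sch *before, const struct toy_sch *after,
			unsigned int dropped_flow, unsigned int enq_flow,
			unsigned int pkt_len,
			unsigned int *reduce_qlen, unsigned int *reduce_backlog)
{
	unsigned int dq = before->qlen - after->qlen;		/* packets dropped */
	unsigned int db = before->backlog - after->backlog;	/* bytes dropped   */

	if (dropped_flow == enq_flow) {
		/* CN return: the parent will not count the new packet as
		 * enqueued, so leave it out of the reduction as well.
		 */
		*reduce_qlen = dq - 1;
		*reduce_backlog = db - pkt_len;
		return 1;
	}
	*reduce_qlen = dq;
	*reduce_backlog = db;
	return 0;
}

int main(void)
{
	struct toy_sch before = { 64, 96000 };	/* state before fq_codel_drop() */
	struct toy_sch after  = { 32, 48000 };	/* state after the batch drop   */
	unsigned int rq, rb;
	int cn = account_drop(&before, &after, 3, 3, 1500, &rq, &rb);

	printf("cn=%d reduce_qlen=%u reduce_backlog=%u\n", cn, rq, rb);
	return 0;
}

Compiled as plain C, the example prints reduce_qlen=31 and reduce_backlog=46500 for the CN case, mirroring the prev_qlen - 1 / prev_backlog - pkt_len adjustment in the patch.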
@@ -647,7 +659,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		qs.backlog = q->backlogs[idx];
 		qs.drops = flow->dropped;
 	}
-	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
 		return -1;
 	if (idx < q->flows_cnt)
 		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
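Note: the last hunk passes the per-flow qs.qlen that was just counted, instead of a literal 0, as the qlen argument of gnet_stats_copy_queue(). A rough userspace model of the effect is sketched below, under the assumption that this argument is what ends up as the queue length reported for the class; toy_queue_stats and toy_copy_queue are illustrative stand-ins, not the kernel API.

#include <stdio.h>

/* Illustrative stand-in for struct gnet_stats_queue; not the kernel layout. */
struct toy_queue_stats {
	unsigned int qlen;
	unsigned int backlog;
	unsigned int drops;
};

/* Toy model of the copy step: whatever qlen the caller passes is the
 * queue length that shows up in the dumped statistics.
 */
static void toy_copy_queue(struct toy_queue_stats *out,
			   const struct toy_queue_stats *in, unsigned int qlen)
{
	*out = *in;
	out->qlen = qlen;
}

int main(void)
{
	struct toy_queue_stats flow = { 5, 7500, 2 };	/* per-flow stats just counted */
	struct toy_queue_stats dumped;

	toy_copy_queue(&dumped, &flow, 0);		/* old call: reported qlen is 0 */
	printf("old: qlen=%u\n", dumped.qlen);

	toy_copy_queue(&dumped, &flow, flow.qlen);	/* new call: real per-flow qlen */
	printf("new: qlen=%u\n", dumped.qlen);
	return 0;
}

With the old call the dumped qlen reads 0 even though the flow holds packets; with the new call it reflects the counted per-flow queue length.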