author		Eric Dumazet <edumazet@google.com>	2012-05-16 04:39:09 +0000
committer	David S. Miller <davem@davemloft.net>	2012-05-16 15:30:26 -0400
commit		865ec5523dadbedefbc5710a68969f686a28d928 (patch)
tree		b9f056cdea6922d5fc3ed035764660bb6fb152b3
parent		c27b46e7f1cbf3be95a4cf5840c76a7b7d54b26f (diff)
download	op-kernel-dev-865ec5523dadbedefbc5710a68969f686a28d928.zip
		op-kernel-dev-865ec5523dadbedefbc5710a68969f686a28d928.tar.gz
fq_codel: should use qdisc backlog as threshold
The codel_should_drop() logic allows a packet not to be dropped if the queue
size is under one max packet size.

In fq_codel, we have two possible backlogs: the qdisc global one, and the
per-flow one. The meaningful one for codel_should_drop() is the global
backlog, not the per-flow one, so that thin flows can have a non-zero
drop/mark probability.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

The effect is easiest to see in isolation: measured against its own per-flow
backlog, a thin flow rarely holds more than one max-size packet, so
codel_should_drop() can never signal it, whereas the shared qdisc backlog
does cross the threshold whenever the queue as a whole is bloated. Below is
a minimal, self-contained C sketch of just that threshold test; the helper
should_drop(), the maxpacket value, and the sample backlogs are illustrative
assumptions, not code from the patch.
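/*
 * Standalone sketch (not kernel code) of the threshold test used by
 * codel_should_drop().  CoDel declines to drop while the measured backlog
 * is at or below one maximum-sized packet, since no standing queue exists.
 * The numbers below are assumed for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_drop(bool over_target, unsigned int backlog,
			unsigned int maxpacket)
{
	/* Below the delay target, or backlog no bigger than one packet:
	 * never drop/mark.
	 */
	if (!over_target || backlog <= maxpacket)
		return false;
	return true;	/* otherwise this packet is a drop/mark candidate */
}

int main(void)
{
	unsigned int maxpacket = 1514;		/* assumed MTU-sized packet */
	unsigned int flow_backlog = 1000;	/* thin flow: less than one packet queued */
	unsigned int qdisc_backlog = 200000;	/* shared queue is clearly bloated */

	/* Per-flow backlog as threshold: the thin flow is never signalled. */
	printf("per-flow threshold: drop=%d\n",
	       should_drop(true, flow_backlog, maxpacket));

	/* Global qdisc backlog as threshold: the same packet can be dropped/marked. */
	printf("global threshold:   drop=%d\n",
	       should_drop(true, qdisc_backlog, maxpacket));
	return 0;
}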
-rw-r--r--	include/net/codel.h	15
-rw-r--r--	net/sched/sch_codel.c	4
-rw-r--r--	net/sched/sch_fq_codel.c	5
3 files changed, 12 insertions, 12 deletions
diff --git a/include/net/codel.h b/include/net/codel.h
index 7546517..550debf 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(codel_time_t t,
static bool codel_should_drop(const struct sk_buff *skb,
- unsigned int *backlog,
+ struct Qdisc *sch,
struct codel_vars *vars,
struct codel_params *params,
struct codel_stats *stats,
@@ -219,13 +219,13 @@ static bool codel_should_drop(const struct sk_buff *skb,
}
vars->ldelay = now - codel_get_enqueue_time(skb);
- *backlog -= qdisc_pkt_len(skb);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
stats->maxpacket = qdisc_pkt_len(skb);
if (codel_time_before(vars->ldelay, params->target) ||
- *backlog <= stats->maxpacket) {
+ sch->qstats.backlog <= stats->maxpacket) {
/* went below - stay below for at least interval */
vars->first_above_time = 0;
return false;
@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
struct codel_params *params,
struct codel_vars *vars,
struct codel_stats *stats,
- codel_skb_dequeue_t dequeue_func,
- u32 *backlog)
+ codel_skb_dequeue_t dequeue_func)
{
struct sk_buff *skb = dequeue_func(vars, sch);
codel_time_t now;
@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
return skb;
}
now = codel_get_time();
- drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+ drop = codel_should_drop(skb, sch, vars, params, stats, now);
if (vars->dropping) {
if (!drop) {
/* sojourn time below target - leave dropping state */
@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
- if (!codel_should_drop(skb, backlog,
+ if (!codel_should_drop(skb, sch,
vars, params, stats, now)) {
/* leave dropping state */
vars->dropping = false;
@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
stats->drop_count++;
skb = dequeue_func(vars, sch);
- drop = codel_should_drop(skb, backlog, vars, params,
+ drop = codel_should_drop(skb, sch, vars, params,
stats, now);
}
vars->dropping = true;
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 213ef60..2f9ab17 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
struct codel_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
- dequeue, &sch->qstats.backlog);
+ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 337ff20..9fc1c62 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
*/
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
struct fq_codel_flow *flow;
struct sk_buff *skb = NULL;
flow = container_of(vars, struct fq_codel_flow, cvars);
if (flow->head) {
skb = dequeue_head(flow);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
sch->q.qlen--;
}
return skb;
@@ -256,7 +257,7 @@ begin:
prev_ecn_mark = q->cstats.ecn_mark;
skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
- dequeue, &q->backlogs[flow - q->flows]);
+ dequeue);
flow->dropped += q->cstats.drop_count - prev_drop_count;
flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;