Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--   net/sched/sch_htb.c   104
1 file changed, 61 insertions, 43 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0..3e86fd3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -99,9 +99,10 @@ struct htb_class {
                         struct rb_root feed[TC_HTB_NUMPRIO];    /* feed trees */
                         struct rb_node *ptr[TC_HTB_NUMPRIO];    /* current class ptr */
                         /* When class changes from state 1->2 and disconnects from
-                           parent's feed then we lost ptr value and start from the
-                           first child again. Here we store classid of the
-                           last valid ptr (used when ptr is NULL). */
+                        * parent's feed then we lost ptr value and start from the
+                        * first child again. Here we store classid of the
+                        * last valid ptr (used when ptr is NULL).
+                        */
                         u32 last_ptr_id[TC_HTB_NUMPRIO];
                 } inner;
         } un;
@@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
  * then finish and return direct queue.
  */
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
 
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                       int *qerr)
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
         int result;
 
         /* allow to select class by setting skb->priority to valid classid;
-           note that nfmark can be used too by attaching filter fw with no
-           rules in it */
+        * note that nfmark can be used too by attaching filter fw with no
+        * rules in it
+        */
         if (skb->priority == sch->handle)
                 return HTB_DIRECT;      /* X:0 (direct flow) selected */
-        if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+        cl = htb_find(skb->priority, sch);
+        if (cl && cl->level == 0)
                 return cl;
 
         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                         return NULL;
                 }
 #endif
-                if ((cl = (void *)res.class) == NULL) {
+                cl = (void *)res.class;
+                if (!cl) {
                         if (res.classid == sch->handle)
                                 return HTB_DIRECT;      /* X:0 (direct flow) */
-                        if ((cl = htb_find(res.classid, sch)) == NULL)
+                        cl = htb_find(res.classid, sch);
+                        if (!cl)
                                 break;  /* filter selected invalid classid */
                 }
                 if (!cl->level)
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 
                         if (p->un.inner.feed[prio].rb_node)
                                 /* parent already has its feed in use so that
-                                   reset bit in mask as parent is already ok */
+                                * reset bit in mask as parent is already ok
+                                */
                                 mask &= ~(1 << prio);
 
                         htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 
                         if (p->un.inner.ptr[prio] == cl->node + prio) {
                                 /* we are removing child which is pointed to from
-                                   parent feed - forget the pointer but remember
-                                   classid */
+                                * parent feed - forget the pointer but remember
+                                * classid
+                                */
                                 p->un.inner.last_ptr_id[prio] = cl->common.classid;
                                 p->un.inner.ptr[prio] = NULL;
                         }
@@ -664,8 +671,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
                                    unsigned long start)
 {
         /* don't run for longer than 2 jiffies; 2 is used instead of
-           1 to simplify things when jiffy is going to be incremented
-           too soon */
+        * 1 to simplify things when jiffy is going to be incremented
+        * too soon
+        */
         unsigned long stop_at = start + 2;
         while (time_before(jiffies, stop_at)) {
                 struct htb_class *cl;
@@ -688,7 +696,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 
         /* too much load - let's continue after a break for scheduling */
         if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
-                printk(KERN_WARNING "htb: too many events!\n");
+                pr_warning("htb: too many events!\n");
                 q->warned |= HTB_WARN_TOOMANYEVENTS;
         }
 
@@ -696,7 +704,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
-   is no such one exists. */
+ * is no such one exists.
+ */
 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                               u32 id)
 {
@@ -740,12 +749,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
         for (i = 0; i < 65535; i++) {
                 if (!*sp->pptr && *sp->pid) {
                         /* ptr was invalidated but id is valid - try to recover
-                           the original or next ptr */
+                        * the original or next ptr
+                        */
                         *sp->pptr =
                             htb_id_find_next_upper(prio, sp->root, *sp->pid);
                 }
                 *sp->pid = 0;   /* ptr is valid now so that remove this hint as it
-                                   can become out of date quickly */
+                                * can become out of date quickly
+                                */
                 if (!*sp->pptr) {       /* we are at right end; rewind & go up */
                         *sp->pptr = sp->root;
                         while ((*sp->pptr)->rb_left)
@@ -773,7 +784,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 }
 
 /* dequeues packet at given priority and level; call only if
-   you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
                                         int level)
 {
@@ -790,9 +802,10 @@ next:
                 return NULL;
 
         /* class can be empty - it is unlikely but can be true if leaf
-           qdisc drops packets in enqueue routine or if someone used
-           graft operation on the leaf since last dequeue;
-           simply deactivate and skip such class */
+        * qdisc drops packets in enqueue routine or if someone used
+        * graft operation on the leaf since last dequeue;
+        * simply deactivate and skip such class
+        */
         if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                 struct htb_class *next;
                 htb_deactivate(q, cl);
@@ -832,7 +845,8 @@ next:
                                           ptr[0]) + prio);
                 }
                 /* this used to be after charge_class but this constelation
-                   gives us slightly better performance */
+                * gives us slightly better performance
+                */
                 if (!cl->un.leaf.q->q.qlen)
                         htb_deactivate(q, cl);
                 htb_charge_class(q, cl, level, skb);
@@ -882,6 +896,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
                 m = ~q->row_mask[level];
                 while (m != (int)(-1)) {
                         int prio = ffz(m);
+
                         m |= 1 << prio;
                         skb = htb_dequeue_tree(q, prio, level);
                         if (likely(skb != NULL)) {
@@ -989,13 +1004,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
                 return err;
 
         if (tb[TCA_HTB_INIT] == NULL) {
-                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+                pr_err("HTB: hey probably you have bad tc tool ?\n");
                 return -EINVAL;
         }
         gopt = nla_data(tb[TCA_HTB_INIT]);
         if (gopt->version != HTB_VER >> 16) {
-                printk(KERN_ERR
-                       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+                pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
                        HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
                 return -EINVAL;
         }
@@ -1208,9 +1222,10 @@ static void htb_destroy(struct Qdisc *sch)
         cancel_work_sync(&q->work);
         qdisc_watchdog_cancel(&q->watchdog);
         /* This line used to be after htb_destroy_class call below
-           and surprisingly it worked in 2.4. But it must precede it
-           because filter need its target class alive to be able to call
-           unbind_filter on it (without Oops). */
+        * and surprisingly it worked in 2.4. But it must precede it
+        * because filter need its target class alive to be able to call
+        * unbind_filter on it (without Oops).
+        */
         tcf_destroy_chain(&q->filter_list);
 
         for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1344,11 +1359,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
                 /* check maximal depth */
                 if (parent && parent->parent && parent->parent->level < 2) {
-                        printk(KERN_ERR "htb: tree is too deep\n");
+                        pr_err("htb: tree is too deep\n");
                         goto failure;
                 }
                 err = -ENOBUFS;
-                if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+                cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+                if (!cl)
                         goto failure;
 
                 err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1368,8 +1384,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                         RB_CLEAR_NODE(&cl->node[prio]);
 
                 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
-                   so that can't be used inside of sch_tree_lock
-                   -- thanks to Karlis Peisenieks */
+                * so that can't be used inside of sch_tree_lock
+                * -- thanks to Karlis Peisenieks
+                */
                 new_q = qdisc_create_dflt(sch->dev_queue,
                                           &pfifo_qdisc_ops, classid);
                 sch_tree_lock(sch);
@@ -1421,17 +1438,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
         }
 
         /* it used to be a nasty bug here, we have to check that node
-           is really leaf before changing cl->un.leaf ! */
+        * is really leaf before changing cl->un.leaf !
+        */
         if (!cl->level) {
                 cl->quantum = rtab->rate.rate / q->rate2quantum;
                 if (!hopt->quantum && cl->quantum < 1000) {
-                        printk(KERN_WARNING
+                        pr_warning(
                                "HTB: quantum of class %X is small. Consider r2q change.\n",
                                cl->common.classid);
                         cl->quantum = 1000;
                 }
                 if (!hopt->quantum && cl->quantum > 200000) {
-                        printk(KERN_WARNING
+                        pr_warning(
                                "HTB: quantum of class %X is big. Consider r2q change.\n",
                                cl->common.classid);
                         cl->quantum = 200000;
@@ -1480,13 +1498,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
         struct htb_class *cl = htb_find(classid, sch);
 
         /*if (cl && !cl->level) return 0;
-           The line above used to be there to prevent attaching filters to
-           leaves. But at least tc_index filter uses this just to get class
-           for other reasons so that we have to allow for it.
-           ----
-           19.6.2002 As Werner explained it is ok - bind filter is just
-           another way to "lock" the class - unlike "get" this lock can
-           be broken by class during destroy IIUC.
+        * The line above used to be there to prevent attaching filters to
+        * leaves. But at least tc_index filter uses this just to get class
+        * for other reasons so that we have to allow for it.
+        * ----
+        * 19.6.2002 As Werner explained it is ok - bind filter is just
+        * another way to "lock" the class - unlike "get" this lock can
+        * be broken by class during destroy IIUC.
          */
         if (cl)
                 cl->filter_cnt++;