author	Patrick McHardy <kaber@trash.net>	2007-03-23 11:28:55 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-25 22:27:53 -0700
commit	8edc0c31d6b7849b0fb50db86824830769241939 (patch)
tree	103cc1cf516e6317abad41579d5dd3af5a523d6e	/net/sched/sch_cbq.c
parent	a084980dcbf56c896e4b6c19aff2b082d5db7006 (diff)
[NET_SCHED]: kill PSCHED_TDIFF
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
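Note (not part of the patch): the definition of PSCHED_TDIFF() lives in include/net/pkt_sched.h and is not shown in this diff. As a rough standalone sketch, with simplified typedefs and an assumed macro shape, it amounts to nothing more than a subtraction once the scheduler clock is a plain scalar, which is why the patch simply open-codes it:

/* Hypothetical illustration, not kernel code: assumed shapes of psched_time_t,
 * psched_tdiff_t and PSCHED_TDIFF(), showing the macro reduces to subtraction. */
#include <stdio.h>

typedef unsigned long long psched_time_t;   /* scheduler clock value */
typedef long long psched_tdiff_t;           /* signed difference of two clock values */

#define PSCHED_TDIFF(tv1, tv2) ((psched_tdiff_t)((tv1) - (tv2)))

int main(void)
{
	psched_time_t now = 1000, now_rt = 400;

	psched_tdiff_t incr_macro = PSCHED_TDIFF(now, now_rt);  /* old style */
	psched_tdiff_t incr_plain = now - now_rt;                /* what the patch writes */

	printf("%lld %lld\n", incr_macro, incr_plain);           /* both print 600 */
	return 0;
}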
Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--	net/sched/sch_cbq.c	14
1 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 2bb271b..f9e8403 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -386,7 +386,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 		psched_tdiff_t incr;
 
 		PSCHED_GET_TIME(now);
-		incr = PSCHED_TDIFF(now, q->now_rt);
+		incr = now - q->now_rt;
 		now = q->now + incr;
 
 		do {
@@ -474,7 +474,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 static void cbq_ovl_classic(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-	psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+	psched_tdiff_t delay = cl->undertime - q->now;
 
 	if (!cl->delayed) {
 		delay += cl->offtime;
@@ -509,7 +509,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
 		psched_tdiff_t base_delay = q->wd_expires;
 
 		for (b = cl->borrow; b; b = b->borrow) {
-			delay = PSCHED_TDIFF(b->undertime, q->now);
+			delay = b->undertime - q->now;
 			if (delay < base_delay) {
 				if (delay <= 0)
 					delay = 1;
@@ -547,7 +547,7 @@ static void cbq_ovl_rclassic(struct cbq_class *cl)
 static void cbq_ovl_delay(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-	psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+	psched_tdiff_t delay = cl->undertime - q->now;
 
 	if (!cl->delayed) {
 		psched_time_t sched = q->now;
@@ -776,7 +776,7 @@ cbq_update(struct cbq_sched_data *q)
 		   idle = (now - last) - last_pktlen/rate
 		 */
 
-		idle = PSCHED_TDIFF(q->now, cl->last);
+		idle = q->now - cl->last;
 		if ((unsigned long)idle > 128*1024*1024) {
 			avgidle = cl->maxidle;
 		} else {
@@ -1004,7 +1004,7 @@ cbq_dequeue(struct Qdisc *sch)
 	psched_tdiff_t incr;
 
 	PSCHED_GET_TIME(now);
-	incr = PSCHED_TDIFF(now, q->now_rt);
+	incr = now - q->now_rt;
 
 	if (q->tx_class) {
 		psched_tdiff_t incr2;
@@ -1650,7 +1650,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	cl->xstats.undertime = 0;
 
 	if (cl->undertime != PSCHED_PASTPERFECT)
-		cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
+		cl->xstats.undertime = cl->undertime - q->now;
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 #ifdef CONFIG_NET_ESTIMATOR