author | Jens Axboe <jens.axboe@oracle.com> | 2007-01-19 11:56:49 +1100 |
---|---|---|
committer | Jens Axboe <axboe@carl.home.kernel.dk> | 2007-02-11 23:14:45 +0100 |
commit | c5b680f3b7593f2b066c683df799d19f807fb23d (patch) | |
tree | 657df453cdf7b872f5ea713e66f2e090048a6c1f /block | |
parent | 44f7c16065c83060cbb9dd9b367141682a6e2b8e (diff) | |
cfq-iosched: account for slice over/under time
If a queue uses less of its slice than it is entitled to (or perhaps
more), include that difference in the decision on how much time to give
it the next time it is serviced.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 32 |
1 files changed, 12 insertions, 20 deletions
```diff
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d44402a..039b38c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -147,8 +147,8 @@ struct cfq_queue {
 	struct list_head fifo;
 
 	unsigned long slice_end;
-	unsigned long slice_left;
 	unsigned long service_last;
+	long slice_resid;
 
 	/* number of requests that are on the dispatch list */
 	int on_dispatch[2];
@@ -251,6 +251,14 @@ static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+	cfqq->slice_end += cfqq->slice_resid;
+
+	/*
+	 * Don't carry over residual for more than one slice, we only want
+	 * to slightly correct the fairness. Carrying over forever would
+	 * easily introduce oscillations.
+	 */
+	cfqq->slice_resid = 0;
 }
 
 /*
@@ -667,7 +675,6 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		del_timer(&cfqd->idle_class_timer);
 
 		cfqq->slice_end = 0;
-		cfqq->slice_left = 0;
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
@@ -683,8 +690,6 @@ static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    int preempted)
 {
-	unsigned long now = jiffies;
-
 	if (cfq_cfqq_wait_request(cfqq))
 		del_timer(&cfqd->idle_slice_timer);
 
@@ -699,10 +704,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * store what was left of this slice, if the queue idled out
 	 * or was preempted
 	 */
-	if (cfq_slice_used(cfqq))
-		cfqq->slice_left = cfqq->slice_end - now;
-	else
-		cfqq->slice_left = 0;
+	if (!cfq_cfqq_slice_new(cfqq))
+		cfqq->slice_resid = cfqq->slice_end - jiffies;
 
 	cfq_resort_rr_list(cfqq, preempted);
 
@@ -1364,10 +1367,7 @@ retry:
 		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
 		atomic_set(&cfqq->ref, 0);
 		cfqq->cfqd = cfqd;
-		/*
-		 * set ->slice_left to allow preemption for a new process
-		 */
-		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
+
 		cfq_mark_cfqq_idle_window(cfqq);
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_mark_cfqq_queue_new(cfqq);
@@ -1586,11 +1586,6 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (!cfq_cfqq_wait_request(new_cfqq))
 		return 0;
 	/*
-	 * if it doesn't have slice left, forget it
-	 */
-	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
-		return 0;
-	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
@@ -1614,9 +1609,6 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	cfq_slice_expired(cfqd, 1);
 
-	if (!cfqq->slice_left)
-		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
-
 	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
```
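To see the accounting in isolation, here is a minimal user-space sketch of the same idea; it is illustrative only, not the kernel implementation, and the `struct queue`, `set_slice()` and `expire_slice()` names are made up for the example. The difference between a queue's slice deadline and the time the slice actually expired is remembered in a residual field, folded into the next slice the queue is granted, and then cleared so the correction is applied only once.

```c
/*
 * Toy model of the slice-residual correction (hypothetical names,
 * not kernel code): unused slice time (+) or overrun (-) from the
 * last slice is added to the next one, then reset.
 */
#include <stdio.h>

struct queue {
	long slice_end;    /* absolute time the current slice expires */
	long slice_resid;  /* time left over (+) or overrun (-) last time */
};

/* Grant a new slice of 'base_slice' ticks starting at 'now'. */
static void set_slice(struct queue *q, long now, long base_slice)
{
	q->slice_end = now + base_slice + q->slice_resid;
	/* correct only once; carrying the residual forever would oscillate */
	q->slice_resid = 0;
}

/* Expire the current slice at time 'now' and remember the difference. */
static void expire_slice(struct queue *q, long now)
{
	q->slice_resid = q->slice_end - now;  /* >0: underused, <0: overran */
}

int main(void)
{
	struct queue q = { 0, 0 };
	long now = 0, base = 100;

	set_slice(&q, now, base);   /* slice ends at 100 */
	now = 60;
	expire_slice(&q, now);      /* queue idled out early, resid = +40 */
	set_slice(&q, now, base);   /* next slice ends at 60 + 100 + 40 = 200 */
	printf("next slice_end = %ld\n", q.slice_end);
	return 0;
}
```

Expiring past the deadline produces a negative residual, which shortens the next slice by the overrun; that mirrors how `cfqq->slice_resid` is consumed in `cfq_set_prio_slice()` above.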