author	Shaohua Li <shaohua.li@intel.com>	2009-11-03 20:25:02 +0100
committer	Jens Axboe <jens.axboe@oracle.com>	2009-11-03 20:25:02 +0100
commit	4b27e1bb442e964903f8a3fa6bdf33a602dc0941 (patch)
tree	d7eecb66f6a75dbff292fbd03b643b04ed075289 /block
parent	e6ec4fe24572ee265723d895ec4159e5559c8266 (diff)
cfq-iosched: limit coop preemption
CFQ has an optimization for cooperating applications: if several io contexts issue requests that are close to each other, they get a boost. But the optimization can be abused. Consider threads a and b working on one file: a reads sectors s, s+2, s+4, ...; b reads sectors s+1, s+3, s+5, ... Both a and b are sequential readers, so each can open an idle window. a reads a sector s, enters its idle window, and wakes up b. b issues a read for sector s+1; since the current implementation of cfq_should_preempt() considers a and b cooperators, b preempts a. b then reads sector s+1, enters its idle window, and wakes up a. For the same reason, a preempts b and reads s+2, and the two keep going around this cycle. The cycle can run for a very long time, with a and b occupying the whole disk queue, so other applications get almost no chance to run. Fix this by limiting coop preemption until a queue is scheduled normally again.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
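For illustration only (this sketch is not part of the patch): a minimal user-space program showing the access pattern described above, two threads pread()ing alternating 512-byte sectors of one shared file. The file name, sector size and iteration count are made-up assumptions, and details needed to actually hit the disk on a real system (O_DIRECT, a file larger than the page cache, and so on) are glossed over.

#define _XOPEN_SOURCE 500	/* for pread() */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define SECTOR	512
#define NSECT	(1L << 20)	/* sector pairs to read; arbitrary */

static int fd;

/* arg == 0: read sectors s, s+2, s+4, ...; arg == 1: read s+1, s+3, s+5, ... */
static void *reader(void *arg)
{
        long offset = (long)arg;
        char buf[SECTOR];
        long i;

        for (i = 0; i < NSECT; i++)
                if (pread(fd, buf, SECTOR, (2 * i + offset) * SECTOR) <= 0)
                        break;
        return NULL;
}

int main(int argc, char **argv)
{
        pthread_t a, b;

        fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        pthread_create(&a, NULL, reader, (void *)0);	/* thread a: even sectors */
        pthread_create(&b, NULL, reader, (void *)1);	/* thread b: odd sectors */
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        close(fd);
        return 0;
}

Each thread on its own is a clean sequential reader, which is exactly why CFQ opens an idle window for it; together they form the interleaved pattern that triggers the preemption ping-pong the patch addresses.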
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5802e32..aa1e953 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -196,6 +196,7 @@ enum cfqq_state_flags {
         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
         CFQ_CFQQ_FLAG_coop,             /* has done a coop jump of the queue */
+        CFQ_CFQQ_FLAG_coop_preempt,     /* coop preempt */
 };
 
 #define CFQ_CFQQ_FNS(name) \
@@ -222,6 +223,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
@@ -945,10 +947,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 {
         if (!cfqq) {
                 cfqq = cfq_get_next_queue(cfqd);
-                if (cfqq)
+                if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
                         cfq_clear_cfqq_coop(cfqq);
         }
 
+        if (cfqq)
+                cfq_clear_cfqq_coop_preempt(cfqq);
+
         __cfq_set_active_queue(cfqd, cfqq);
         return cfqq;
 }
@@ -2066,8 +2071,16 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
          * if this request is as-good as one we would expect from the
          * current cfqq, let it preempt
          */
-        if (cfq_rq_close(cfqd, rq))
+        if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+            cfqd->busy_queues == 1)) {
+                /*
+                 * Mark new queue coop_preempt, so its coop flag will not be
+                 * cleared when new queue gets scheduled at the very first time
+                 */
+                cfq_mark_cfqq_coop_preempt(new_cfqq);
+                cfq_mark_cfqq_coop(new_cfqq);
                 return true;
+        }
 
         return false;
 }
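For context (not part of the diff), the new CFQ_CFQQ_FNS(coop_preempt) line uses the existing CFQ_CFQQ_FNS() helper-generating macro visible in the second hunk, so the mark/clear/test functions called above come for free. Roughly, and simplified from the 2.6.32-era source, the expansion looks like:

/* Simplified sketch of what CFQ_CFQQ_FNS(coop_preempt) generates */
static inline void cfq_mark_cfqq_coop_preempt(struct cfq_queue *cfqq)
{
        cfqq->flags |= (1 << CFQ_CFQQ_FLAG_coop_preempt);
}
static inline void cfq_clear_cfqq_coop_preempt(struct cfq_queue *cfqq)
{
        cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_coop_preempt);
}
static inline int cfq_cfqq_coop_preempt(struct cfq_queue *cfqq)
{
        return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_coop_preempt)) != 0;
}

With these helpers, a queue that wins a preemption via the close-request path is marked with both coop and coop_preempt; while coop stays set, cfq_should_preempt() refuses to grant it another coop preemption (unless it is the only busy queue), and cfq_set_active_queue() only drops the coop mark after the queue has been scheduled normally again, which breaks the a/b ping-pong described in the commit message.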