path: root/block/cfq-iosched.c
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	98
1 files changed, 61 insertions, 37 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 01c416b..6200d9b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -33,7 +33,7 @@ static int cfq_slice_idle = HZ / 70;
#define CFQ_KEY_ASYNC (0)
-static DEFINE_RWLOCK(cfq_exit_lock);
+static DEFINE_SPINLOCK(cfq_exit_lock);
/*
* for the hash of cfqq inside the cfqd
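The cfq_exit_lock definition above changes from an rwlock to a spinlock, and every read_lock/write_lock call site in the later hunks becomes a plain spin_lock, so the lock is now always taken exclusively and the reader/writer distinction buys nothing. A rough userspace analogy of that simplification (pthreads, not the kernel locking API):

#include <pthread.h>

/* analogy only: an rwlock whose every user takes it for writing gains
 * nothing over a plain mutex, which is the shape of this conversion */
static pthread_mutex_t exit_lock = PTHREAD_MUTEX_INITIALIZER;

static void drop_cic(void)
{
	pthread_mutex_lock(&exit_lock);     /* was: read_lock()/write_lock() */
	/* ... erase the cic from the rbtree, unlink it from the queue list ... */
	pthread_mutex_unlock(&exit_lock);
}

int main(void)
{
	drop_cic();
	return 0;
}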
@@ -128,6 +128,7 @@ struct cfq_data {
mempool_t *crq_pool;
int rq_in_driver;
+ int hw_tag;
/*
* schedule slice state info
@@ -495,10 +496,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
/*
* if queue was preempted, just add to front to be fair. busy_rr
- * isn't sorted.
+ * isn't sorted, but insert at the back for fairness.
*/
if (preempted || list == &cfqd->busy_rr) {
- list_add(&cfqq->cfq_list, list);
+ if (preempted)
+ list = list->prev;
+
+ list_add_tail(&cfqq->cfq_list, list);
return;
}
@@ -658,6 +662,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
+
+ /*
+ * If the depth is larger 1, it really could be queueing. But lets
+ * make the mark a little higher - idling could still be good for
+ * low queueing, and a low queueing number could also just indicate
+ * a SCSI mid layer like behaviour where limit+1 is often seen.
+ */
+ if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+ cfqd->hw_tag = 1;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
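A minimal, self-contained sketch (ordinary userspace C, not kernel code) of the heuristic added above: hw_tag flips on permanently once more than four requests have been observed in flight at the driver, at which point the device is assumed to do its own command queueing:

#include <stdio.h>

struct cfqd_sim { int rq_in_driver; int hw_tag; };

static void activate_request(struct cfqd_sim *cfqd)
{
	cfqd->rq_in_driver++;
	/* depth > 1 could already mean queueing; the patch uses the higher
	 * mark of 4 so that shallow SCSI-style limit+1 patterns don't count */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;
}

int main(void)
{
	struct cfqd_sim sim = { 0, 0 };
	for (int i = 0; i < 6; i++) {
		activate_request(&sim);
		printf("in flight %d -> hw_tag %d\n", sim.rq_in_driver, sim.hw_tag);
	}
	return 0;
}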
@@ -873,6 +886,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
cfqq = list_entry_cfqq(cfqd->cur_rr.next);
/*
+ * If no new queues are available, check if the busy list has some
+ * before falling back to idle io.
+ */
+ if (!cfqq && !list_empty(&cfqd->busy_rr))
+ cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
+ /*
* if we have idle queues and no rt or be queues had pending
* requests, either allow immediate service if the grace period
* has passed or arm the idle grace timer
@@ -1278,7 +1298,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
/*
* put the reference this task is holding to the various queues
*/
- read_lock_irqsave(&cfq_exit_lock, flags);
+ spin_lock_irqsave(&cfq_exit_lock, flags);
n = rb_first(&ioc->cic_root);
while (n != NULL) {
@@ -1288,7 +1308,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
n = rb_next(n);
}
- read_unlock_irqrestore(&cfq_exit_lock, flags);
+ spin_unlock_irqrestore(&cfq_exit_lock, flags);
}
static struct cfq_io_context *
@@ -1297,17 +1317,12 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
if (cic) {
- RB_CLEAR(&cic->rb_node);
- cic->key = NULL;
- cic->cfqq[ASYNC] = NULL;
- cic->cfqq[SYNC] = NULL;
+ memset(cic, 0, sizeof(*cic));
+ RB_CLEAR_COLOR(&cic->rb_node);
cic->last_end_request = jiffies;
- cic->ttime_total = 0;
- cic->ttime_samples = 0;
- cic->ttime_mean = 0;
+ INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- INIT_LIST_HEAD(&cic->queue_list);
atomic_inc(&ioc_count);
}
@@ -1394,17 +1409,17 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
struct cfq_io_context *cic;
struct rb_node *n;
- write_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
n = rb_first(&ioc->cic_root);
while (n != NULL) {
cic = rb_entry(n, struct cfq_io_context, rb_node);
-
+
changed_ioprio(cic);
n = rb_next(n);
}
- write_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
return 0;
}
@@ -1452,7 +1467,8 @@ retry:
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
- cfq_mark_cfqq_idle_window(cfqq);
+ if (!cfqd->hw_tag)
+ cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
cfq_init_prio_data(cfqq);
}
@@ -1469,9 +1485,10 @@ out:
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
- read_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
rb_erase(&cic->rb_node, &ioc->cic_root);
- read_unlock(&cfq_exit_lock);
+ list_del_init(&cic->queue_list);
+ spin_unlock(&cfq_exit_lock);
kmem_cache_free(cfq_ioc_pool, cic);
atomic_dec(&ioc_count);
}
@@ -1539,11 +1556,11 @@ restart:
BUG();
}
- read_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
list_add(&cic->queue_list, &cfqd->cic_list);
- read_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
}
/*
@@ -1642,7 +1659,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
int enable_idle = cfq_cfqq_idle_window(cfqq);
- if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
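Pulling the patched idle-window decision out into a small standalone function may make the hw_tag effect easier to see. This is an editorial sketch of the logic visible in the hunk above, with sample_valid() folded into a boolean parameter:

#include <stdbool.h>
#include <stdio.h>

static bool keep_idle_window(bool has_task, unsigned slice_idle, bool hw_tag,
                             bool samples_valid, unsigned ttime_mean,
                             bool currently_enabled)
{
	/* never idle without a task, with idling disabled, or on a
	 * hardware-queueing (hw_tag) device */
	if (!has_task || !slice_idle || hw_tag)
		return false;
	/* with enough samples, idle only if the task thinks quickly enough */
	if (samples_valid)
		return ttime_mean <= slice_idle;
	/* otherwise keep whatever state the queue already had */
	return currently_enabled;
}

int main(void)
{
	printf("NCQ/TCQ disk, fast thinker:  %d\n",
	       keep_idle_window(true, 8, true, true, 2, true));
	printf("plain disk, fast thinker:    %d\n",
	       keep_idle_window(true, 8, false, true, 2, true));
	return 0;
}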
@@ -1733,14 +1750,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+ cic = crq->io_context;
+
/*
* we never wait for an async request and we don't allow preemption
* of an async request. so just return early
*/
- if (!cfq_crq_is_sync(crq))
+ if (!cfq_crq_is_sync(crq)) {
+ /*
+ * sync process issued an async request, if it's waiting
+ * then expire it and kick rq handling.
+ */
+ if (cic == cfqd->active_cic &&
+ del_timer(&cfqd->idle_slice_timer)) {
+ cfq_slice_expired(cfqd, 0);
+ cfq_start_queueing(cfqd, cfqq);
+ }
return;
-
- cic = crq->io_context;
+ }
cfq_update_io_thinktime(cfqd, cic);
cfq_update_io_seektime(cfqd, cic, crq);
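The early-return path for async requests above now also breaks an idle wait when the request comes from the very process CFQ is idling for. A hedged, standalone sketch of that decision (plain C, with the pending idle timer reduced to a flag):

#include <stdbool.h>
#include <stdio.h>

struct enqueue_ctx { bool is_sync; bool from_active_process; bool idle_timer_armed; };

static bool should_break_idle(const struct enqueue_ctx *c)
{
	/* only async requests take this path */
	if (c->is_sync)
		return false;
	/* stop idling only if we were idling for this very process and the
	 * idle slice timer was still pending */
	return c->from_active_process && c->idle_timer_armed;
}

int main(void)
{
	struct enqueue_ctx c = { false, true, true };
	printf("expire slice and kick dispatch: %s\n",
	       should_break_idle(&c) ? "yes" : "no");
	return 0;
}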
@@ -2158,10 +2185,9 @@ static void cfq_idle_class_timer(unsigned long data)
* race with a non-idle queue, reset timer
*/
end = cfqd->last_end_request + CFQ_IDLE_GRACE;
- if (!time_after_eq(jiffies, end)) {
- cfqd->idle_class_timer.expires = end;
- add_timer(&cfqd->idle_class_timer);
- } else
+ if (!time_after_eq(jiffies, end))
+ mod_timer(&cfqd->idle_class_timer, end);
+ else
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2181,7 +2207,7 @@ static void cfq_exit_queue(elevator_t *e)
cfq_shutdown_timer_wq(cfqd);
- write_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
spin_lock_irq(q->queue_lock);
if (cfqd->active_queue)
@@ -2204,7 +2230,7 @@ static void cfq_exit_queue(elevator_t *e)
}
spin_unlock_irq(q->queue_lock);
- write_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
cfq_shutdown_timer_wq(cfqd);
@@ -2214,14 +2240,14 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
int i;
cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
if (!cfqd)
- return -ENOMEM;
+ return NULL;
memset(cfqd, 0, sizeof(*cfqd));
@@ -2251,8 +2277,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
- e->elevator_data = cfqd;
-
cfqd->queue = q;
cfqd->max_queued = q->nr_requests / 4;
@@ -2279,14 +2303,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
- return 0;
+ return cfqd;
out_crqpool:
kfree(cfqd->cfq_hash);
out_cfqhash:
kfree(cfqd->crq_hash);
out_crqhash:
kfree(cfqd);
- return -ENOMEM;
+ return NULL;
}
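The last hunks track an elevator interface change: the init function now hands back its private data pointer, or NULL on failure, instead of 0/-ENOMEM, and no longer stores it in e->elevator_data itself (presumably the elevator core does that with the returned pointer). A simplified userspace sketch of that calling convention, with made-up names (sched_data, init_sched):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sched_data { int max_queued; };

static void *init_sched(void)
{
	struct sched_data *d = malloc(sizeof(*d));
	if (!d)
		return NULL;            /* was: return -ENOMEM */
	memset(d, 0, sizeof(*d));
	d->max_queued = 32;
	return d;                       /* was: e->elevator_data = d; return 0; */
}

int main(void)
{
	void *data = init_sched();      /* the caller now keeps the pointer */
	printf("init %s\n", data ? "ok" : "failed");
	free(data);
	return 0;
}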
static void cfq_slab_kill(void)