path: root/block/elevator.c
author	Jens Axboe <jaxboe@fusionio.com>	2011-03-21 10:14:27 +0100
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-21 10:14:27 +0100
commit	5e84ea3a9c662dc2d7a48703a4468fad954a3b7f (patch)
tree	3fa0fb26a7c8a970213584104cc2498ef46d60a3 /block/elevator.c
parent	4345caba340f051e10847924fc078ae18ed6695c (diff)
block: attempt to merge with existing requests on plug flush
One of the disadvantages of on-stack plugging is that we potentially lose out on merging, since all pending IO isn't always visible to everybody. When we flush the on-stack plugs, right now we don't do any checks to see if potential merge candidates could be utilized.

Correct this by adding a new insert variant, ELEVATOR_INSERT_SORT_MERGE. It works just like ELEVATOR_INSERT_SORT, but first checks whether we can merge with an existing request before doing the insertion (the sorted insertion only happens if merging fails).

This fixes a regression with multiple processes issuing IO that can be merged.

Thanks to Shaohua Li <shaohua.li@intel.com> for testing and fixing an accounting bug.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
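The decision flow of the new helper can be modeled outside the kernel. Below is a minimal stand-alone C sketch of the same ordering: try the one-hit last_merge cache first, then fall back to a hash-style lookup for a pending request that ends exactly where the new one begins. All names here (struct req, try_req_merge, rq_hash_find, attempt_insert_merge) are hypothetical stand-ins for the kernel machinery, and the nomerges/noxmerges queue-flag checks are omitted for brevity.

/* cc -o insert_merge insert_merge.c && ./insert_merge */
#include <stdbool.h>
#include <stdio.h>

struct req {
        unsigned long pos;      /* starting sector */
        unsigned long len;      /* length in sectors */
};

#define NR_PENDING 3

/* Requests already sitting in the (modeled) queue. */
static struct req pending[NR_PENDING] = {
        { .pos = 0,   .len = 8 },
        { .pos = 64,  .len = 8 },
        { .pos = 128, .len = 8 },
};

static struct req *last_merge;  /* one-hit merge cache, like q->last_merge */

/* Back merge: append 'rq' to '__rq' if 'rq' starts where '__rq' ends. */
static bool try_req_merge(struct req *__rq, struct req *rq)
{
        if (__rq->pos + __rq->len != rq->pos)
                return false;
        __rq->len += rq->len;
        return true;
}

/* Stand-in for elv_rqhash_find(): find a pending request ending at 'pos'. */
static struct req *rq_hash_find(unsigned long pos)
{
        int i;

        for (i = 0; i < NR_PENDING; i++)
                if (pending[i].pos + pending[i].len == pos)
                        return &pending[i];
        return NULL;
}

/* Same ordering as elv_attempt_insert_merge(): cache first, then hash. */
static bool attempt_insert_merge(struct req *rq)
{
        struct req *__rq;

        if (last_merge && try_req_merge(last_merge, rq))
                return true;

        __rq = rq_hash_find(rq->pos);
        if (__rq && try_req_merge(__rq, rq)) {
                last_merge = __rq;
                return true;
        }
        return false;
}

int main(void)
{
        struct req rq = { .pos = 72, .len = 8 };  /* abuts pending[1] */

        if (attempt_insert_merge(&rq))
                printf("merged: request absorbed, sectors %lu..%lu\n",
                       pending[1].pos, pending[1].pos + pending[1].len);
        else
                printf("no candidate: would fall through to a sorted insert\n");
        return 0;
}

Running this prints "merged: request absorbed, sectors 64..80"; change rq.pos to, say, 100 and the no-candidate path is taken instead, which is exactly the case where the real code falls through to ELEVATOR_INSERT_SORT.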
Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c	52
1 file changed, 49 insertions(+), 3 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 542ce82..c387d31 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -521,6 +521,40 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
+/*
+ * Attempt to do an insertion back merge. Only check for the case where
+ * we can append 'rq' to an existing request, so we can throw 'rq' away
+ * afterwards.
+ *
+ * Returns true if we merged, false otherwise
+ */
+static bool elv_attempt_insert_merge(struct request_queue *q,
+ struct request *rq)
+{
+ struct request *__rq;
+
+ if (blk_queue_nomerges(q))
+ return false;
+
+ /*
+ * First try one-hit cache.
+ */
+ if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+ return true;
+
+ if (blk_queue_noxmerges(q))
+ return false;
+
+ /*
+ * See if our hash lookup can find a potential backmerge.
+ */
+ __rq = elv_rqhash_find(q, blk_rq_pos(rq));
+ if (__rq && blk_attempt_req_merge(q, __rq, rq))
+ return true;
+
+ return false;
+}
+
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
struct elevator_queue *e = q->elevator;
@@ -538,14 +572,18 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct elevator_queue *e = q->elevator;
+ const int next_sorted = next->cmd_flags & REQ_SORTED;
- if (e->ops->elevator_merge_req_fn)
+ if (next_sorted && e->ops->elevator_merge_req_fn)
e->ops->elevator_merge_req_fn(q, rq, next);
elv_rqhash_reposition(q, rq);
- elv_rqhash_del(q, next);
- q->nr_sorted--;
+ if (next_sorted) {
+ elv_rqhash_del(q, next);
+ q->nr_sorted--;
+ }
+
q->last_merge = rq;
}
@@ -647,6 +685,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
__blk_run_queue(q, false);
break;
+ case ELEVATOR_INSERT_SORT_MERGE:
+ /*
+ * If we succeed in merging this request with one in the
+ * queue already, we are done - rq has now been freed,
+ * so no need to do anything further.
+ */
+ if (elv_attempt_insert_merge(q, rq))
+ break;
case ELEVATOR_INSERT_SORT:
BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
!(rq->cmd_flags & REQ_DISCARD));
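Note the deliberate absence of a break at the end of the ELEVATOR_INSERT_SORT_MERGE case above: when the merge attempt fails, control falls through into ELEVATOR_INSERT_SORT and the request is queued normally. A stand-alone sketch of just that control flow (illustrative names only, not kernel API):

#include <stdbool.h>
#include <stdio.h>

enum { INSERT_SORT = 1, INSERT_SORT_MERGE };

static void insert(int where, bool merge_succeeds)
{
        switch (where) {
        case INSERT_SORT_MERGE:
                if (merge_succeeds) {
                        printf("merged; request freed, nothing to insert\n");
                        break;
                }
                /* fall through: merge failed, do the normal sorted insert */
        case INSERT_SORT:
                printf("sorted insert into the elevator\n");
                break;
        }
}

int main(void)
{
        insert(INSERT_SORT_MERGE, true);   /* breaks out early */
        insert(INSERT_SORT_MERGE, false);  /* falls through to INSERT_SORT */
        return 0;
}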