author		Tejun Heo <tj@kernel.org>	2009-04-23 11:05:17 +0900
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-28 07:37:33 +0200
commit		e4025f6c21f1389696c069be2dc647f364925c45 (patch)
tree		4d7ee21062293c9a9d398063c22339b47f581283 /block
parent		a7f557923441186a3cdbabc54f1bcacf42b63bf5 (diff)
block: don't set REQ_NOMERGE unnecessarily
RQ_NOMERGE_FLAGS already defines which REQ flags aren't mergeable, so
there is no reason to set REQ_NOMERGE explicitly as well; it only adds
to confusion.  Don't set REQ_NOMERGE for barriers or for requests
inserted with a specific queueing directive.  REQ_NOMERGE is now used
exclusively by the merging code.
[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	5
-rw-r--r--	block/blk-exec.c	1
2 files changed, 1 insertion(+), 5 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 8b4a0af..7e0fab5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1082,16 +1082,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	if (bio_failfast_driver(bio))
 		req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
-	/*
-	 * REQ_BARRIER implies no merging, but lets make it explicit
-	 */
 	if (unlikely(bio_discard(bio))) {
 		req->cmd_flags |= REQ_DISCARD;
 		if (bio_barrier(bio))
 			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
 	} else if (unlikely(bio_barrier(bio)))
-		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+		req->cmd_flags |= REQ_HARDBARRIER;
 
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 6af716d..49557e9 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	rq->rq_disk = bd_disk;
-	rq->cmd_flags |= REQ_NOMERGE;
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
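
To see why the extra flag was redundant, recall how the merge path tests
mergeability: it masks a request's cmd_flags against RQ_NOMERGE_FLAGS, which
already contains the barrier flags.  The standalone C sketch below mimics that
masking pattern; the flag values, the struct request stub and the
rq_mergeable() body are simplified stand-ins for the include/linux/blkdev.h
definitions of this era, not verbatim copies.

/*
 * Minimal userspace sketch: a barrier request is rejected by the
 * merge check whether or not REQ_NOMERGE is also set, because
 * RQ_NOMERGE_FLAGS already includes the barrier flags.
 * Flag values below are illustrative, not the kernel's.
 */
#include <stdio.h>

#define REQ_NOMERGE		(1U << 0)
#define REQ_STARTED		(1U << 1)
#define REQ_SOFTBARRIER		(1U << 2)
#define REQ_HARDBARRIER		(1U << 3)

/* any request carrying one of these flags must never be merged */
#define RQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_HARDBARRIER)

struct request {
	unsigned int cmd_flags;
};

/* mirrors the shape of the kernel's rq_mergeable() test */
static int rq_mergeable(const struct request *rq)
{
	return !(rq->cmd_flags & RQ_NOMERGE_FLAGS);
}

int main(void)
{
	/* barrier request as init_request_from_bio() builds it after the patch */
	struct request barrier_new = { .cmd_flags = REQ_HARDBARRIER };
	/* the old code additionally or'ed in REQ_NOMERGE */
	struct request barrier_old = {
		.cmd_flags = REQ_HARDBARRIER | REQ_NOMERGE,
	};

	printf("new barrier mergeable: %d\n", rq_mergeable(&barrier_new));
	printf("old barrier mergeable: %d\n", rq_mergeable(&barrier_old));
	return 0;
}

Both requests fail the check, which is why or'ing REQ_NOMERGE into a barrier
request bought nothing for the merging code.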