path: root/block
author    Tejun Heo <tj@kernel.org>    2010-09-03 11:56:17 +0200
committer Jens Axboe <jaxboe@fusionio.com>    2010-09-10 12:35:37 +0200
commit    47f70d5a6ca78c40a1c799d43506efbfed914f7b (patch)
tree      fff5197359b989197eda76dd019746fbba054e88 /block
parent    337238be1bf52e1242f940fc6fe83fb395e55057 (diff)
block: kick queue after sequencing REQ_FLUSH/FUA
While completing a request from a REQ_FLUSH/FUA sequence, another request can be pushed to the request queue. If a driver tests elv_queue_empty() before completing a request and runs the queue again only if the queue wasn't empty, this may lead to a hang. Please note that most drivers either kick the queue unconditionally or test queue emptiness after completing the current request, and so don't have this problem.

This patch removes this possibility by making the REQ_FLUSH/FUA sequence code kick the queue if the queue was empty before completing a request from the REQ_FLUSH/FUA sequence.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
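To make the failure mode concrete, here is a minimal sketch (not part of the patch) of the driver completion pattern the commit message describes. The driver name and completion handler are hypothetical; only elv_queue_empty(), __blk_end_request_all() and __blk_run_queue() are real block-layer calls of this kernel era.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/*
 * Hypothetical driver completion path showing the hang window this patch
 * closes.  Everything except the block-layer calls is illustrative.
 */
static void mydrv_complete_rq(struct request_queue *q, struct request *rq,
			      int error)
{
	/* The queue looks empty *before* the request is completed ... */
	bool was_empty = elv_queue_empty(q);

	/*
	 * ... but completing a REQ_FLUSH/FUA sequence request can push the
	 * next request of the sequence onto queue_head right here.
	 */
	__blk_end_request_all(rq, error);

	/*
	 * The driver restarts the queue only if it believed the queue was
	 * non-empty, so the freshly queued flush-sequence request is never
	 * dispatched and I/O stalls.  With this patch the flush sequence
	 * code itself kicks the queue in that case.
	 */
	if (!was_empty)
		__blk_run_queue(q);
}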
Diffstat (limited to 'block')
-rw-r--r--  block/blk-flush.c | 22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index f357f1f..cb4c8440 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -56,22 +56,38 @@ static struct request *blk_flush_complete_seq(struct request_queue *q,
return next_rq;
}
+static void blk_flush_complete_seq_end_io(struct request_queue *q,
+ unsigned seq, int error)
+{
+ bool was_empty = elv_queue_empty(q);
+ struct request *next_rq;
+
+ next_rq = blk_flush_complete_seq(q, seq, error);
+
+ /*
+ * Moving a request silently to empty queue_head may stall the
+ * queue. Kick the queue in those cases.
+ */
+ if (was_empty && next_rq)
+ __blk_run_queue(q);
+}
+
static void pre_flush_end_io(struct request *rq, int error)
{
elv_completed_request(rq->q, rq);
- blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
+ blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error);
}
static void flush_data_end_io(struct request *rq, int error)
{
elv_completed_request(rq->q, rq);
- blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
+ blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error);
}
static void post_flush_end_io(struct request *rq, int error)
{
elv_completed_request(rq->q, rq);
- blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
+ blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
}
static void init_flush_request(struct request *rq, struct gendisk *disk)