commit 8ba61435d73f2274e12d4d823fde06735e8f6a54
tree   4b63993dc0fdc26918bd990fb47a142b8d24ef80
parent 481a7d64790cd7ca61a8bbcbd9d017ce58e6fe39
author    Tejun Heo <tj@kernel.org>        2011-12-14 00:33:37 +0100
committer Jens Axboe <axboe@kernel.dk>     2011-12-14 00:33:37 +0100

block: add missing blk_queue_dead() checks
blk_insert_cloned_request(), blk_execute_rq_nowait() and
blk_flush_plug_list() either didn't check whether the queue was dead
or did it without holding queue_lock. Update them so that dead state
is checked while holding queue_lock.
AFAICS, this plugs all holes (requeue doesn't matter as the request is
transitioning atomically from in_flight to queued).
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
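
The race the message describes is a classic check-then-act window: testing blk_queue_dead() before taking queue_lock leaves a gap in which the queue can be killed, after which the request is inserted into a dead queue anyway. The sketch below contrasts the two orderings in a user-space analogue; the pthread mutex stands in for queue_lock and all names (struct queue, submit_racy, submit_fixed) are hypothetical, not kernel code.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	bool dead;		/* stands in for the queue's dead flag */
	int nr_queued;
};

/* Pre-patch shape: unlocked test, then lock. Another thread can mark
 * the queue dead in the window between the two. */
static int submit_racy(struct queue *q)
{
	if (q->dead)				/* unlocked check */
		return -ENXIO;
	/* <-- the queue can die right here */
	pthread_mutex_lock(&q->lock);
	q->nr_queued++;				/* may touch a dead queue */
	pthread_mutex_unlock(&q->lock);
	return 0;
}

/* Post-patch shape: lock first, test under the lock, unlock on the
 * bail-out path -- the same ordering the blk-exec.c hunk introduces. */
static int submit_fixed(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (q->dead) {
		pthread_mutex_unlock(&q->lock);
		return -ENXIO;
	}
	q->nr_queued++;				/* dead flag cannot flip under us */
	pthread_mutex_unlock(&q->lock);
	return 0;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	printf("live: racy=%d fixed=%d\n", submit_racy(&q), submit_fixed(&q));
	q.dead = true;
	printf("dead: racy=%d fixed=%d\n", submit_racy(&q), submit_fixed(&q));
	return 0;
}

A single-threaded run cannot exhibit the race; both variants return -ENXIO on a dead queue here. The difference only shows up under concurrency, which is exactly why the unlocked check could look correct in testing.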
 block/blk-core.c | 21 +++++++++++++++++++++
 block/blk-exec.c |  6 ++++--
 2 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c37e9e7..30add45 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1731,6 +1731,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2705,6 +2709,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	trace_block_unplug(q, depth, !from_schedule);
 
 	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
+	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
 	 * this lock).
@@ -2780,6 +2792,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 6053285..fb2cbd5 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,7 +50,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+
 	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
 
-	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
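
One subtlety in the queue_unplugged() hunk: the function is entered with queue_lock held and is expected to release it on every path (the surrounding context drops it either before waking kblockd or after running the queue), so the dead-queue bail-out must spin_unlock() before returning rather than simply return. A toy analogue of that "callee unlocks on all paths" contract, again with hypothetical names and a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	bool dead;
};

/* Called with q->lock held; must return with it released on every
 * path, mirroring queue_unplugged() above. Forgetting the unlock on
 * the early-exit path would leave the caller deadlocked. */
static void run_queue(struct queue *q)
{
	if (q->dead) {
		pthread_mutex_unlock(&q->lock);	/* early exit still unlocks */
		return;
	}
	/* ... dispatch queued requests ... */
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, true };

	pthread_mutex_lock(&q.lock);
	run_queue(&q);				/* returns with the lock dropped */
	printf("dead-queue path released the lock\n");
	return 0;
}

The blk_flush_plug_list() hunk takes the other option: the loop still needs the lock for the next request, so a request bound for a dead queue is completed with -ENODEV via __blk_end_request_all() and the iteration continues with the lock held.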