| | | |
|---|---|---|
| author | Jens Axboe <axboe@fb.com> | 2014-12-22 14:04:42 -0700 |
| committer | Jens Axboe <axboe@fb.com> | 2014-12-31 09:39:16 -0700 |
| commit | aed3ea94bdd2ac0a21ed0103d34097e202ee77f6 | |
| tree | e432ae8b2224a4172e4c91350b0135b14c587442 /block/blk-core.c | |
| parent | 2b25d981790b830f0e045881386866b970bf9066 | |
block: wake up waiters when a queue is marked dying
If the queue is dying, we can't expect new requests to complete
and come in and wake up other tasks waiting for requests. So after
we have marked it as dying, wake up everybody currently waiting
for a request. Once they wake, they will retry their allocation
and fail appropriately due to the state of the queue.
Tested-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 21 |
1 file changed, 20 insertions, 1 deletion
diff --git a/block/blk-core.c b/block/blk-core.c
index 30f6153..3ad4055 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+	if (q->mq_ops)
+		blk_mq_wake_waiters(q);
+	else {
+		struct request_list *rl;
+
+		blk_queue_for_each_rl(rl, q) {
+			if (rl->rq_pool) {
+				wake_up(&rl->wait[BLK_RW_SYNC]);
+				wake_up(&rl->wait[BLK_RW_ASYNC]);
+			}
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+	blk_set_queue_dying(q);
 	spin_lock_irq(lock);
 
 	/*
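
For context on the waiter side that these wake_up() calls (and blk_mq_wake_waiters()) are aimed at: per the commit message, a task that was sleeping for a free request wakes up, rechecks the queue state, and fails its allocation instead of sleeping forever. Below is a minimal, self-contained userspace sketch of that wait/recheck pattern using pthreads. The names (fake_queue, alloc_request, set_queue_dying) are illustrative only; this is a model of the idea, not the kernel's actual get_request() path.

```c
/*
 * Userspace model of the pattern this patch relies on: a task waiting
 * for a free request must be woken when the queue is marked dying, so
 * it can recheck the queue state and fail its allocation rather than
 * sleep forever.  Illustrative names only, not kernel APIs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_queue {
	pthread_mutex_t lock;
	pthread_cond_t  wait;		/* stands in for rl->wait[] */
	int             free_requests;
	bool            dying;		/* stands in for QUEUE_FLAG_DYING */
};

/* Waiter side: sleep until a request frees up or the queue dies. */
static int alloc_request(struct fake_queue *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	while (q->free_requests == 0 && !q->dying)
		pthread_cond_wait(&q->wait, &q->lock);

	if (q->dying)
		ret = -1;		/* fail: queue is going away */
	else
		q->free_requests--;	/* got one */
	pthread_mutex_unlock(&q->lock);
	return ret;
}

/* Teardown side: mark dying *and* wake everybody, as the patch does. */
static void set_queue_dying(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->dying = true;
	pthread_cond_broadcast(&q->wait);
	pthread_mutex_unlock(&q->lock);
}

static void *waiter(void *arg)
{
	int ret = alloc_request(arg);

	printf("allocation %s\n", ret ? "failed (queue dying)" : "succeeded");
	return NULL;
}

int main(void)
{
	struct fake_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.free_requests = 0,
	};
	pthread_t t;

	pthread_create(&t, NULL, waiter, &q);
	sleep(1);		/* let the waiter block on an empty pool */
	set_queue_dying(&q);	/* without this wakeup, the waiter hangs */
	pthread_join(t, NULL);
	return 0;
}
```

Build with `gcc -pthread`. If set_queue_dying() only set the flag without the broadcast, the waiter thread would stay blocked indefinitely, which is exactly the hang this patch avoids on the kernel side by waking waiters when the queue is marked dying.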