| author | Tejun Heo <tj@kernel.org> | 2012-03-05 13:14:59 -0800 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 21:27:22 +0100 |
| commit | 6ecf23afab13c39d3bb0e2d826d0984b0dd53733 (patch) | |
| tree | 48436e2eb507d623ff2c2332aa34e9b7380f33e1 /block | |
| parent | d732580b4eb31553c63744a47d590f770cafb8f0 (diff) | |
| download | op-kernel-dev-6ecf23afab13c39d3bb0e2d826d0984b0dd53733.zip, op-kernel-dev-6ecf23afab13c39d3bb0e2d826d0984b0dd53733.tar.gz | |
block: extend queue bypassing to cover blkcg policies
Extend queue bypassing such that a dying queue is always bypassing and
blk-throttle is drained on bypass. With blkcg policies updated to
test blk_queue_bypass() instead of blk_queue_dead(), this ensures that
no bio or request is held by, or passing through, blkcg policies on a
bypassing queue.
This will be used to implement blkg cleanup on elevator switches and
policy changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
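A minimal sketch (not part of the patch) of the check pattern the updated blkcg policies follow: test blk_queue_bypass() rather than blk_queue_dead() before handing out per-cgroup state, so that a bypassing queue, which after this change includes every dying queue, never acquires new blkcg state. The policy_group type and lookup_or_create_group() below are hypothetical stand-ins for a policy's own lookup path.

static struct policy_group *policy_get_group(struct request_queue *q)
{
	/*
	 * blk_queue_bypass() is true both while the queue is
	 * temporarily bypassing and once it is dead, so a single
	 * test covers both cases.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return NULL;

	/* normal lookup/creation, under q->queue_lock as in blk-throttle */
	return lookup_or_create_group(q);
}

This mirrors the throtl_get_tg() change in the diff below, where the same test replaces blk_queue_dead() at both the fast-path check and the re-check after reacquiring q->queue_lock.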
Diffstat (limited to 'block')

| -rw-r--r-- | block/blk-core.c | 12 |
| -rw-r--r-- | block/blk-throttle.c | 4 |

2 files changed, 10 insertions, 6 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 98ddef4..7713c73 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -372,8 +372,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (q->elevator)
 			elv_drain_elevator(q);
 
-		if (drain_all)
-			blk_throtl_drain(q);
+		blk_throtl_drain(q);
 
 		/*
 		 * This function might be called on a queue which failed
@@ -415,8 +414,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  *
  * In bypass mode, only the dispatch FIFO queue of @q is used. This
  * function makes @q enter bypass mode and drains all requests which were
- * issued before. On return, it's guaranteed that no request has ELVPRIV
- * set.
+ * throttled or issued before. On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set.
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
@@ -461,6 +460,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 
 	spin_lock_irq(lock);
+
+	/* dead queue is permanently in bypass mode till released */
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a7..702c0e6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_bypass(q))) {
 		kfree(tg);
 		return NULL;
 	}
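To see how the extended semantics are meant to be used, here is a hedged sketch of the bypass bracket around the planned blkg cleanup, assuming blk_queue_bypass_start()/blk_queue_bypass_end() from the parent commit (d732580b); cleanup_blkgs() is a hypothetical stand-in for the cleanup this change enables on elevator switches and policy changes.

static void cleanup_policies_on_switch(struct request_queue *q)
{
	/* drains throttled and issued requests; @q stays bypassing */
	blk_queue_bypass_start(q);

	/*
	 * Safe now: with policies testing blk_queue_bypass(), no bio
	 * or request is held by or passing through blkcg policies.
	 */
	cleanup_blkgs(q);		/* hypothetical cleanup helper */

	blk_queue_bypass_end(q);	/* resume normal dispatch */
}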