author		Christoph Hellwig <hch@lst.de>		2015-05-07 09:38:13 +0200
committer	Jens Axboe <axboe@fb.com>		2015-05-19 09:12:59 -0600
commit		4ecd4fef3a074c8bb43c391a57742c422469ebbd (patch)
tree		76916ca92a584daf7dd398eff0997e7e82bfa4f9 /block/blk-mq.c
parent		5b3f341f098d60da2970758db6a05bd851eb6b39 (diff)
block: use an atomic_t for mq_freeze_depth
lockdep gets unhappy about not disabling irqs when using the queue_lock
around it. Instead of trying to fix that up, just switch to an atomic_t
and get rid of the lock.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
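(A minimal userspace sketch of the counter pattern the patch adopts, assuming
C11 <stdatomic.h> as a stand-in for the kernel's atomic_t API: atomic_fetch_add()
and atomic_fetch_sub() below play the role of atomic_inc_return() and
atomic_dec_return(). All names and side effects here are illustrative only,
not kernel code.)

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int freeze_depth;	/* replaces the lock-protected int */

	static void freeze_queue_start(void)
	{
		/* fetch_add returns the old value; +1 yields the new depth,
		 * matching atomic_inc_return() semantics. */
		int depth = atomic_fetch_add(&freeze_depth, 1) + 1;

		if (depth == 1)
			printf("first freezer: kill usage counter, run queues\n");
	}

	static void unfreeze_queue(void)
	{
		int depth = atomic_fetch_sub(&freeze_depth, 1) - 1;

		assert(depth >= 0);	/* stands in for WARN_ON_ONCE() */
		if (depth == 0)
			printf("last unfreezer: reinit counter, wake waiters\n");
	}

	int main(void)
	{
		freeze_queue_start();	/* 0 -> 1: performs the freeze work */
		freeze_queue_start();	/* 1 -> 2: nested, no side effects */
		unfreeze_queue();	/* 2 -> 1: still frozen */
		unfreeze_queue();	/* 1 -> 0: wakes waiters */
		return 0;
	}

Because the read-modify-write returns the updated depth, the "first freezer" /
"last unfreezer" decision needs no lock, and nested freezes compose naturally.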
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	24
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 31df4744..c382a34 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -89,7 +89,8 @@ static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 			return -EBUSY;
 
 		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!q->mq_freeze_depth || blk_queue_dying(q));
+				!atomic_read(&q->mq_freeze_depth) ||
+				blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
 		if (ret)
@@ -112,13 +113,10 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
-	bool freeze;
+	int freeze_depth;
 
-	spin_lock_irq(q->queue_lock);
-	freeze = !q->mq_freeze_depth++;
-	spin_unlock_irq(q->queue_lock);
-
-	if (freeze) {
+	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
+	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->mq_usage_counter);
 		blk_mq_run_hw_queues(q, false);
 	}
@@ -143,13 +141,11 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	bool wake;
+	int freeze_depth;
 
-	spin_lock_irq(q->queue_lock);
-	wake = !--q->mq_freeze_depth;
-	WARN_ON_ONCE(q->mq_freeze_depth < 0);
-	spin_unlock_irq(q->queue_lock);
-	if (wake) {
+	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
+	WARN_ON_ONCE(freeze_depth < 0);
+	if (!freeze_depth) {
 		percpu_ref_reinit(&q->mq_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
@@ -2081,7 +2077,7 @@ void blk_mq_free_queue(struct request_queue *q)
 
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-	WARN_ON_ONCE(!q->mq_freeze_depth);
+	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
 	blk_mq_sysfs_unregister(q);
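(One subtlety in the unfreeze hunk: the WARN_ON_ONCE() now tests the value
returned by atomic_dec_return() rather than re-reading q->mq_freeze_depth
afterwards. A hedged C11 sketch of why that snapshot matters; helper names
below are hypothetical, not kernel code.)

	#include <assert.h>
	#include <stdatomic.h>

	static atomic_int depth = 2;

	/* Racy variant: another thread can change `depth` between the
	 * decrement and the re-read, so the check may test a stale or
	 * foreign value. This mirrors a naive conversion of the old
	 * "WARN_ON_ONCE(q->mq_freeze_depth < 0)" check. */
	static void unfreeze_reread(void)
	{
		atomic_fetch_sub(&depth, 1);
		assert(atomic_load(&depth) >= 0);
	}

	/* Snapshot variant, matching the patch: the RMW's return value
	 * is this thread's own transition, so the check is unambiguous
	 * no matter what other threads do concurrently. */
	static void unfreeze_snapshot(void)
	{
		int new_depth = atomic_fetch_sub(&depth, 1) - 1;

		assert(new_depth >= 0);
	}

	int main(void)
	{
		unfreeze_reread();	/* 2 -> 1: happens to pass single-threaded */
		unfreeze_snapshot();	/* 1 -> 0 */
		return 0;
	}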