author		Bart Van Assche <bart.vanassche@wdc.com>	2018-03-07 17:10:04 -0800
committer	Jens Axboe <axboe@kernel.dk>	2018-03-08 14:13:48 -0700
commit		8814ce8a0f680599a837af18aefdec774e5c7b97 (patch)
tree		1f825afafde21e04bd53a20c23e0fb83eeb84347
parent		f78bac2c8e69144781e271d9771bae8dbb4e7098 (diff)
download	op-kernel-dev-8814ce8a0f680599a837af18aefdec774e5c7b97.zip
		op-kernel-dev-8814ce8a0f680599a837af18aefdec774e5c7b97.tar.gz
block: Introduce blk_queue_flag_{set,clear,test_and_{set,clear}}()
Introduce functions that modify the queue flags and that protect
these modifications with the request queue lock. Except for moving
one wake_up_all() call from inside to outside a critical section,
this patch does not change any functionality.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
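For illustration only (not part of the commit): a minimal sketch of how a
caller might use the new helpers in place of the open-coded locking this
patch removes. The my_driver_* wrapper functions are hypothetical; the
helpers and QUEUE_FLAG_* constants are the ones touched by this patch.

	#include <linux/blkdev.h>

	/* Hypothetical caller; the helper takes q->queue_lock internally,
	 * so no locking is needed here. */
	static void my_driver_set_fail_io(struct request_queue *q, bool val)
	{
		if (val)
			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
	}

	/* The test_and_set variant returns the previous value of the flag:
	 * true if QUEUE_FLAG_PREEMPT_ONLY was already set. */
	static bool my_driver_mark_preempt_only(struct request_queue *q)
	{
		return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
	}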
-rw-r--r--	block/blk-core.c	91
-rw-r--r--	block/blk-mq.c		12
-rw-r--r--	block/blk-settings.c	6
-rw-r--r--	block/blk-sysfs.c	22
-rw-r--r--	block/blk-timeout.c	6
-rw-r--r--	include/linux/blkdev.h	5
6 files changed, 93 insertions, 49 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 241b730..74c6283 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -71,6 +71,78 @@ struct kmem_cache *blk_requestq_cachep;
*/
static struct workqueue_struct *kblockd_workqueue;
+/**
+ * blk_queue_flag_set - atomically set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ */
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_set(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_set);
+
+/**
+ * blk_queue_flag_clear - atomically clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ */
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_clear(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_clear);
+
+/**
+ * blk_queue_flag_test_and_set - atomically test and set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was already set.
+ */
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ res = queue_flag_test_and_set(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
+
+/**
+ * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was set.
+ */
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ res = queue_flag_test_and_clear(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
+
static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -361,25 +433,14 @@ EXPORT_SYMBOL(blk_sync_queue);
*/
int blk_set_preempt_only(struct request_queue *q)
{
- unsigned long flags;
- int res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
+ return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
}
EXPORT_SYMBOL_GPL(blk_set_preempt_only);
void blk_clear_preempt_only(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+ blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
wake_up_all(&q->mq_freeze_wq);
- spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
@@ -629,9 +690,7 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
void blk_set_queue_dying(struct request_queue *q)
{
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
/*
* When queue DYING flag is set, we need to block new req
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e70cc7d..a868990 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -194,11 +194,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
*/
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_set(QUEUE_FLAG_QUIESCED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
@@ -239,11 +235,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
*/
void blk_mq_unquiesce_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
/* dispatch requests which are inserted during quiescing */
blk_mq_run_hw_queues(q, true);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7f719da..d1de711 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -859,12 +859,10 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
- spin_lock_irq(q->queue_lock);
if (queueable)
- queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
+ blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
else
- queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fd71a00..d00d1b0 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -276,12 +276,10 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
if (neg) \
val = !val; \
\
- spin_lock_irq(q->queue_lock); \
if (val) \
- queue_flag_set(QUEUE_FLAG_##flag, q); \
+ blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
else \
- queue_flag_clear(QUEUE_FLAG_##flag, q); \
- spin_unlock_irq(q->queue_lock); \
+ blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
return ret; \
}
@@ -414,12 +412,10 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
if (poll_on)
- queue_flag_set(QUEUE_FLAG_POLL, q);
+ blk_queue_flag_set(QUEUE_FLAG_POLL, q);
else
- queue_flag_clear(QUEUE_FLAG_POLL, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
return ret;
}
@@ -487,12 +483,10 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
if (set == -1)
return -EINVAL;
- spin_lock_irq(q->queue_lock);
if (set)
- queue_flag_set(QUEUE_FLAG_WC, q);
+ blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
- queue_flag_clear(QUEUE_FLAG_WC, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_WC, q);
return count;
}
@@ -946,9 +940,7 @@ void blk_unregister_queue(struct gendisk *disk)
*/
mutex_lock(&q->sysfs_lock);
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
/*
* Remove the sysfs attributes before unregistering the queue data
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a05e367..34a5525 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -57,12 +57,10 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
char *p = (char *) buf;
val = simple_strtoul(p, &p, 10);
- spin_lock_irq(q->queue_lock);
if (val)
- queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+ blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
else
- queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
}
return count;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c351aae..f84b3c7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -707,6 +707,11 @@ struct request_queue {
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
+
/*
* @q->queue_lock is set while a queue is being initialized. Since we know
* that no other threads access the queue object before @q->queue_lock has