Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c       8
-rw-r--r--  block/blk-exec.c       1
-rw-r--r--  block/blk-mq-sysfs.c   6
-rw-r--r--  block/blk-mq.c        35
-rw-r--r--  block/blk-sysfs.c     11
-rw-r--r--  block/genhd.c          4
6 files changed, 44 insertions, 21 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index bf930f4..9c888bd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -240,7 +240,7 @@ EXPORT_SYMBOL(blk_stop_queue);
* this function.
*
* This function does not cancel any asynchronous activity arising
- * out of elevator or throttling code. That would require elevaotor_exit()
+ * out of elevator or throttling code. That would require elevator_exit()
* and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
@@ -933,7 +933,7 @@ static struct io_context *rq_ioc(struct bio *bio)
* Get a free request from @q. This function may fail under memory
* pressure or if @q is dead.
*
- * Must be callled with @q->queue_lock held and,
+ * Must be called with @q->queue_lock held and,
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
@@ -1110,7 +1110,7 @@ rq_starved:
* Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
* function keeps retrying under memory pressure and fails iff @q is dead.
*
- * Must be callled with @q->queue_lock held and,
+ * Must be called with @q->queue_lock held and,
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
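The locking contract documented here is easy to get wrong on the caller side. As a minimal sketch (modelled on how blk_old_get_request() drives get_request(); example_get_rq() itself is hypothetical), a caller honouring the contract looks roughly like this:

static struct request *example_get_rq(struct request_queue *q, int rw,
				      gfp_t gfp_mask)
{
	struct request *rq;

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);	/* failure: the lock is still ours */
	/* on success get_request() has already dropped q->queue_lock */
	return rq;
}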
@@ -1241,7 +1241,7 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
EXPORT_SYMBOL(blk_make_request);
/**
- * blk_rq_set_block_pc - initialize a requeest to type BLOCK_PC
+ * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
* @rq: request to be initialized
*
*/
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f4d27b1..9924725 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
bool is_pm_resume;
WARN_ON(irqs_disabled());
+ WARN_ON(rq->cmd_type == REQ_TYPE_FS);
rq->rq_disk = bd_disk;
rq->end_io = done;
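The new WARN_ON() documents that blk_execute_rq_nowait() is only for passthrough requests, which ties in with the blk_rq_set_block_pc() comment fix above. A hedged sketch of the expected caller pattern, loosely following the SCSI ioctl paths (example_submit_pc(), my_done() and the 30-second timeout are illustrative; this also assumes the 3.17-era blk_get_request() that returns NULL on failure):

/* hypothetical completion callback: just drop the request again */
static void my_done(struct request *rq, int error)
{
	blk_put_request(rq);
}

static int example_submit_pc(struct request_queue *q, struct gendisk *disk,
			     unsigned char *cdb, unsigned int cdb_len)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	blk_rq_set_block_pc(rq);	/* cmd_type = REQ_TYPE_BLOCK_PC, not REQ_TYPE_FS */
	memcpy(rq->cmd, cdb, cdb_len);	/* cdb_len must fit the request CDB buffer */
	rq->cmd_len = cdb_len;
	rq->timeout = 30 * HZ;

	blk_execute_rq_nowait(q, disk, rq, 0, my_done);
	return 0;
}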
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ed52178..371d880 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -402,6 +402,12 @@ static void blk_mq_sysfs_init(struct request_queue *q)
}
}
+/* see blk_register_queue() */
+void blk_mq_finish_init(struct request_queue *q)
+{
+ percpu_ref_switch_to_percpu(&q->mq_usage_counter);
+}
+
int blk_mq_register_disk(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 383ea0c..38f4a165 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -203,7 +203,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
if (tag != BLK_MQ_TAG_FAIL) {
rq = data->hctx->tags->rqs[tag];
- rq->cmd_flags = 0;
if (blk_mq_tag_busy(data->hctx)) {
rq->cmd_flags = REQ_MQ_INFLIGHT;
atomic_inc(&data->hctx->nr_active);
@@ -258,6 +257,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_flags & REQ_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
+ rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -393,6 +393,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
blk_add_timer(rq);
/*
+ * Ensure that ->deadline is visible before we set the started
+ * flag and clear the completed flag.
+ */
+ smp_mb__before_atomic();
+
+ /*
* Mark us as started and clear complete. Complete might have been
* set if requeue raced with timeout, which then marked it as
* complete. So be sure to clear complete again when we start
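The barrier only helps if the consumer orders its accesses the same way; in blk-mq the consumer is the timeout path, which tests REQ_ATOM_STARTED before trusting ->deadline. An illustrative (not verbatim) sketch of the intended pairing, using the block layer's internal REQ_ATOM_* flags from block/blk.h:

static void example_start(struct request *rq, unsigned long timeout)
{
	rq->deadline = jiffies + timeout;

	/* publish ->deadline before the STARTED bit becomes visible */
	smp_mb__before_atomic();
	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static bool example_expired(struct request *rq)
{
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return false;

	smp_rmb();	/* pairs with smp_mb__before_atomic() in example_start() */
	return time_after_eq(jiffies, rq->deadline);
}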
@@ -473,7 +479,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
blk_mq_insert_request(rq, false, false, false);
}
- blk_mq_run_queues(q, false);
+ /*
+ * Use the start variant of queue running here, so that running
+ * the requeue work will kick stopped queues.
+ */
+ blk_mq_start_hw_queues(q);
}
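In practice this lets a driver stop a hardware queue when it runs out of resources and rely on the requeue machinery to restart it. A hedged sketch of such a driver-side path (example_defer_request() is hypothetical; the three blk-mq calls are the exported helpers of this era):

static void example_defer_request(struct blk_mq_hw_ctx *hctx,
				  struct request *rq)
{
	/* out of device resources: park the queue and requeue the request */
	blk_mq_stop_hw_queue(hctx);
	blk_mq_requeue_request(rq);
	blk_mq_kick_requeue_list(rq->q);
	/* the requeue work now calls blk_mq_start_hw_queues(), restarting us */
}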
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
@@ -957,14 +967,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
- !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
- blk_insert_flush(rq);
- } else {
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, at_head);
- spin_unlock(&ctx->lock);
- }
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq, at_head);
+ spin_unlock(&ctx->lock);
if (run_queue)
blk_mq_run_hw_queue(hctx, async);
@@ -1404,6 +1409,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
left -= to_do * rq_size;
for (j = 0; j < to_do; j++) {
tags->rqs[i] = p;
+ tags->rqs[i]->atomic_flags = 0;
+ tags->rqs[i]->cmd_flags = 0;
if (set->ops->init_request) {
if (set->ops->init_request(set->driver_data,
tags->rqs[i], hctx_idx, i,
@@ -1788,7 +1795,12 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
if (!q)
goto err_hctxs;
- if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release))
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+ * See blk_register_queue() for details.
+ */
+ if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
+ PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto err_map;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
@@ -1956,7 +1968,6 @@ out_unwind:
while (--i >= 0)
blk_mq_free_rq_map(set, set->tags[i], i);
- set->tags = NULL;
return -ENOMEM;
}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 17f5c84..521ae90 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -551,12 +551,19 @@ int blk_register_queue(struct gendisk *disk)
return -ENXIO;
/*
- * Initialization must be complete by now. Finish the initial
- * bypass from queue allocation.
+ * SCSI probing may synchronously create and destroy a lot of
+ * request_queues for non-existent devices. Shutting down a fully
+ * functional queue takes measurable wallclock time as RCU grace
+ * periods are involved. To avoid excessive latency in these
+ * cases, a request_queue starts out in a degraded mode which is
+ * faster to shut down and is made fully functional here as
+ * request_queues for non-existent devices never get registered.
*/
if (!blk_queue_init_done(q)) {
queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
blk_queue_bypass_end(q);
+ if (q->mq_ops)
+ blk_mq_finish_init(q);
}
ret = blk_trace_init_sysfs(dev);
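Putting the pieces together: blk_mq_init_queue() now creates mq_usage_counter in atomic mode, and blk_mq_finish_init() flips it to percpu mode only once blk_register_queue() runs, i.e. once the queue is known to belong to a real device. A hedged, self-contained sketch of that percpu_ref lifecycle (struct example_obj and the example_* functions are illustrative, not block-layer code):

struct example_obj {
	struct percpu_ref ref;
};

static void example_release(struct percpu_ref *ref)
{
	/* last reference dropped; tear the object down here */
}

static int example_alloc(struct example_obj *obj)
{
	/* atomic mode: cheap to shut down, no RCU grace period on early exit */
	return percpu_ref_init(&obj->ref, example_release,
			       PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
}

static void example_register(struct example_obj *obj)
{
	/* the object turned out to be long-lived: switch to the percpu fast path */
	percpu_ref_switch_to_percpu(&obj->ref);
}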
diff --git a/block/genhd.c b/block/genhd.c
index 09da5e4..bd30606 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -445,8 +445,6 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
*/
void blk_free_devt(dev_t devt)
{
- might_sleep();
-
if (devt == MKDEV(0, 0))
return;
@@ -1547,7 +1545,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
/**
* disk_clear_events - synchronously check, clear and return pending events
* @disk: disk to fetch and clear events from
- * @mask: mask of events to be fetched and clearted
+ * @mask: mask of events to be fetched and cleared
*
* Disk events are synchronously checked and pending events in @mask
* are cleared and returned. This ignores the block count.
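As a usage note, callers normally pass a single event bit and test the returned mask, along the lines of check_disk_change() (example_media_changed() is a hypothetical wrapper):

static bool example_media_changed(struct gendisk *disk)
{
	unsigned int pending;

	pending = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE);
	return pending & DISK_EVENT_MEDIA_CHANGE;
}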