author | Ming Lei <ming.lei@canonical.com> | 2014-09-25 23:23:39 +0800 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-09-25 15:22:34 -0600 |
commit | 1bcb1eada4f11a713cbe586d1b5a5d93a48277cb (patch) | |
tree | 89e14d77cb1e95f7742ee7395ed5ff7e84afc057 /block | |
parent | 08e98fc6016c890c2f4ffba6decc0ca9d2d5d7f8 (diff) | |
blk-mq: allocate flush_rq in blk_mq_init_flush()
It is reasonable to allocate the flush request in blk_mq_init_flush(), next to the rest of the flush initialization, instead of open-coding the allocation in blk_mq_init_queue().
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-flush.c | 11 |
-rw-r--r-- | block/blk-mq.c | 16 |
-rw-r--r-- | block/blk-mq.h | 2 |
3 files changed, 17 insertions, 12 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c8e2576..55028a7 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -472,7 +472,16 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-void blk_mq_init_flush(struct request_queue *q)
+int blk_mq_init_flush(struct request_queue *q)
 {
+	struct blk_mq_tag_set *set = q->tag_set;
+
 	spin_lock_init(&q->mq_flush_lock);
+
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
+				set->cmd_size, cache_line_size()),
+				GFP_KERNEL);
+	if (!q->flush_rq)
+		return -ENOMEM;
+	return 0;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 66ef1fb..78bcf8b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1848,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	if (set->ops->complete)
 		blk_queue_softirq_done(q, set->ops->complete);
 
-	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-				set->cmd_size, cache_line_size()),
-				GFP_KERNEL);
-	if (!q->flush_rq)
-		goto err_hw;
-
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_flush_rq;
+		goto err_hw;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1866,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
+	if (blk_mq_init_flush(q))
+		goto err_hw_queues;
+
 	blk_mq_map_swqueue(q);
 
 	return q;
 
-err_flush_rq:
-	kfree(q->flush_rq);
+err_hw_queues:
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
 	blk_cleanup_queue(q);
 err_hctxs:
diff --git a/block/blk-mq.h b/block/blk-mq.h
index a3c613a..ecac69c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,7 +27,7 @@ struct blk_mq_ctx {
 
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-void blk_mq_init_flush(struct request_queue *q);
+int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
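For readers unfamiliar with the pattern, below is a minimal, self-contained userspace C sketch of what the patch does: the flush-init helper now owns the allocation (request size plus the driver's per-command payload, rounded up to a cache line) and reports failure as -ENOMEM, so the caller only has to check the return value and unwind what it has already set up. Everything here (fake_queue, fake_request, init_flush, CACHE_LINE_SIZE) is hypothetical and only mirrors the shape of the kernel code above; it is not the kernel API.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE_SIZE 64	/* assumed constant; the kernel queries cache_line_size() at runtime */

struct fake_request { char payload[96]; };

struct fake_queue {
	size_t cmd_size;	/* per-driver command payload, like set->cmd_size */
	void  *flush_rq;	/* the preallocated flush request */
};

/* Round x up to the next multiple of a (a must be a power of two). */
static size_t round_up_pow2(size_t x, size_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/*
 * Mirrors the reworked blk_mq_init_flush(): the helper performs the
 * allocation itself and returns -ENOMEM on failure instead of leaving
 * the allocation to the caller.
 */
static int init_flush(struct fake_queue *q)
{
	size_t sz = round_up_pow2(sizeof(struct fake_request) + q->cmd_size,
				  CACHE_LINE_SIZE);

	q->flush_rq = calloc(1, sz);	/* calloc zeroes, like kzalloc */
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	struct fake_queue q = { .cmd_size = 32 };

	/* The caller (blk_mq_init_queue() in the patch) just checks the
	 * return value and unwinds its own state on failure. */
	if (init_flush(&q)) {
		fprintf(stderr, "flush request allocation failed\n");
		return 1;
	}

	free(q.flush_rq);
	return 0;
}
```

Letting the init helper own both the allocation and its failure report keeps setup and teardown symmetric: in the patch the caller needs only the err_hw_queues label to undo the hardware-queue setup, instead of tracking which pieces it allocated itself.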