Diffstat (limited to 'block')
-rw-r--r--  block/blk-exec.c   |   2
-rw-r--r--  block/blk-flush.c  |   4
-rw-r--r--  block/blk-mq-cpu.c |  14
-rw-r--r--  block/blk-mq.c     | 102
-rw-r--r--  block/blk-mq.h     |   1
5 files changed, 29 insertions, 94 deletions
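
The functional core of this diff is an API consolidation: the internal-only blk_mq_run_request() is folded into blk_mq_insert_request(), which now derives the queue from rq->q and gains an async flag. A minimal before/after sketch of a call site (the function names and arguments come straight from the hunks below; the surrounding caller is hypothetical):

        /* Before: two entry points, with the request_queue passed explicitly. */
        blk_mq_insert_request(q, rq, at_head, true);   /* insert, then run queue */
        blk_mq_run_request(rq, true, false);           /* flush-bypass variant   */

        /* After: one entry point; the queue is taken from rq->q, and the last
         * argument selects asynchronous queue running via blk_mq_run_hw_queue().
         */
        blk_mq_insert_request(rq, at_head, true, false);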
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613b..dbf4502 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
 		return;
 	}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b69..f598f79 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
 	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146bef..136ef86 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
 #include "blk-mq.h"
 
 static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
 
 static int blk_mq_main_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
 	unsigned int cpu = (unsigned long) hcpu;
 	struct blk_mq_cpu_notifier *notify;
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 
 	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
 		notify->notify(notify->data, action, cpu);
 
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 	return NOTIFY_OK;
 }
 
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
 	BUG_ON(!notifier->notify);
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_del(&notifier->list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd1..883f720 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 	set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
-				       bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+					      gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved)
-{
-	return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
-	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
-
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-	struct bio *bio = rq->bio;
-	unsigned int bytes = 0;
-
-	trace_block_rq_complete(rq->q, rq);
-
-	while (bio) {
-		struct bio *next = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bytes += bio->bi_iter.bi_size;
-		blk_mq_bio_endio(rq, bio, error);
-		bio = next;
-	}
-
-	blk_account_io_completion(rq, bytes);
+	if (blk_update_request(rq, error, nr_bytes))
+		return true;
 
 	blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
+	return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+			   bool async)
 {
+	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
 		blk_insert_flush(rq);
 	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
-	}
-
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
-	struct request_queue *q = rq->q;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
 	}
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
 
 	blk_mq_put_ctx(current_ctx);
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
+	if (is_sync)
+		rw |= REQ_SYNC;
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ed0035c..72beba1 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };
 
 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
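
blk-mq.c now exports blk_mq_end_io_partial() in place of blk_mq_end_io(); it returns true while the request still has bytes outstanding, letting drivers complete requests piecewise. The matching header change falls outside this 'block'-limited diffstat, so the full-completion wrapper is not shown here; a minimal sketch of what such a wrapper in include/linux/blk-mq.h could look like (an assumption, not part of this diff):

        /* Complete a request in one shot: hand blk_mq_end_io_partial() the
         * request's entire byte count and assert that nothing was left over.
         */
        static inline void blk_mq_end_io(struct request *rq, int error)
        {
                bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
                BUG_ON(!done);
        }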