author     Jens Axboe <axboe@fb.com>   2015-11-05 10:41:40 -0700
committer  Jens Axboe <axboe@fb.com>   2015-11-07 10:40:47 -0700
commit     7b371636fb6d187873d9d2730c2b1febc48a9b47 (patch)
tree       28044eb235fb576771b171ade4e40cfd5d9343a8 /block
parent     dece16353ef47d8d33f5302bc158072a9d65e26f (diff)
blk-mq: return tag/queue combo in the make_request_fn handlers
Return a cookie, blk_qc_t, from the blk-mq make_request functions so that a later caller can uniquely identify a specific IO. The cookie is opaque to the caller; it only has meaning when passed back to the block layer, which can then identify the hardware queue and the request from it.

Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
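As a rough illustration of how such a cookie can carry both pieces of information, the sketch below packs the request tag and the hardware queue number into one 32-bit value with an assumed 16-bit split. The real blk_tag_to_qc_t()/BLK_QC_T_NONE definitions live in the blk-mq headers rather than in this diff, so the constants and the blk_qc_t_valid() helper here are illustrative assumptions, not the kernel's exact definitions.

    /* Illustrative sketch only: assumed 16-bit tag/queue split. */
    typedef unsigned int blk_qc_t;

    #define BLK_QC_T_NONE   (-1U)   /* "no cookie" marker used on failure paths */
    #define BLK_QC_T_SHIFT  16      /* assumed split between tag and queue number */

    /* Pack a request tag and hardware queue index into one opaque cookie. */
    static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
    {
            return tag | (queue_num << BLK_QC_T_SHIFT);
    }

    /* Recover the hardware queue index from a cookie. */
    static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
    {
            return cookie >> BLK_QC_T_SHIFT;
    }

    /* Recover the request tag from a cookie. */
    static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
    {
            return cookie & ((1U << BLK_QC_T_SHIFT) - 1);
    }

    /* A cookie of BLK_QC_T_NONE means the IO cannot be identified later. */
    static inline int blk_qc_t_valid(blk_qc_t cookie)
    {
            return cookie != BLK_QC_T_NONE;
    }

With an encoding along these lines, a later caller only needs the value returned by the make_request path: the queue number selects the hardware queue and the tag selects the request within it, which is what the *cookie assignments in the diff below provide.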
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c   45
1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 65f43bd..66f3cf9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1198,7 +1198,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
return rq;
}
-static int blk_mq_direct_issue_request(struct request *rq)
+static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
int ret;
struct request_queue *q = rq->q;
@@ -1209,6 +1209,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
.list = NULL,
.last = 1
};
+ blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
/*
* For OK queue, we are done. For error, kill it. Any other
@@ -1216,18 +1217,21 @@ static int blk_mq_direct_issue_request(struct request *rq)
* would have done
*/
ret = q->mq_ops->queue_rq(hctx, &bd);
- if (ret == BLK_MQ_RQ_QUEUE_OK)
+ if (ret == BLK_MQ_RQ_QUEUE_OK) {
+ *cookie = new_cookie;
return 0;
- else {
- __blk_mq_requeue_request(rq);
+ }
- if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
- rq->errors = -EIO;
- blk_mq_end_request(rq, rq->errors);
- return 0;
- }
- return -1;
+ __blk_mq_requeue_request(rq);
+
+ if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+ *cookie = BLK_QC_T_NONE;
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ return 0;
}
+
+ return -1;
}
/*
@@ -1244,6 +1248,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
unsigned int request_count = 0;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
+ blk_qc_t cookie;
blk_queue_bounce(q, &bio);
@@ -1265,6 +1270,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(!rq))
return BLK_QC_T_NONE;
+ cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
+
if (unlikely(is_flush_fua)) {
blk_mq_bio_to_request(rq, bio);
blk_insert_flush(rq);
@@ -1302,11 +1309,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
old_rq = rq;
blk_mq_put_ctx(data.ctx);
if (!old_rq)
- return BLK_QC_T_NONE;
- if (!blk_mq_direct_issue_request(old_rq))
- return BLK_QC_T_NONE;
+ goto done;
+ if (!blk_mq_direct_issue_request(old_rq, &cookie))
+ goto done;
blk_mq_insert_request(old_rq, false, true, true);
- return BLK_QC_T_NONE;
+ goto done;
}
if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1320,7 +1327,8 @@ run_queue:
blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
}
blk_mq_put_ctx(data.ctx);
- return BLK_QC_T_NONE;
+done:
+ return cookie;
}
/*
@@ -1335,6 +1343,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
unsigned int request_count = 0;
struct blk_map_ctx data;
struct request *rq;
+ blk_qc_t cookie;
blk_queue_bounce(q, &bio);
@@ -1353,6 +1362,8 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(!rq))
return BLK_QC_T_NONE;
+ cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
+
if (unlikely(is_flush_fua)) {
blk_mq_bio_to_request(rq, bio);
blk_insert_flush(rq);
@@ -1375,7 +1386,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
}
list_add_tail(&rq->queuelist, &plug->mq_list);
blk_mq_put_ctx(data.ctx);
- return BLK_QC_T_NONE;
+ return cookie;
}
if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1390,7 +1401,7 @@ run_queue:
}
blk_mq_put_ctx(data.ctx);
- return BLK_QC_T_NONE;
+ return cookie;
}
/*