author     Tejun Heo <tj@kernel.org>      2012-04-19 16:29:21 -0700
committer  Jens Axboe <axboe@kernel.dk>   2012-04-20 10:06:40 +0200
commit     29e2b09ab5fa790514d47838f3c05497130908b3
tree       aa430587f78d90d3108c1885f8049da484631935
parent     f9fcc2d3919b8eb575b3cee9274feefafb641bca
block: collapse blk_alloc_request() into get_request()
Allocation failure handling in get_request() is about to be updated.
To ease the update, collapse blk_alloc_request() into get_request().

This patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
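The point of the collapse is easier to see side by side: once the helper is gone, an allocation failure no longer has to be signalled by a NULL return and re-checked by the caller; it can jump straight to the caller's existing unwinding label. Below is a minimal, self-contained sketch of that refactoring pattern in plain userspace C; the item/item_alloc/item_get names are hypothetical and this is not the kernel code itself.

#include <stdlib.h>
#include <string.h>

struct item { char *buf; };

/* Before: a helper that reports failure by returning NULL. */
static struct item *item_alloc(size_t len)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		return NULL;

	it->buf = malloc(len);
	if (!it->buf) {
		free(it);
		return NULL;
	}
	memset(it->buf, 0, len);
	return it;
}

/*
 * After: the helper collapsed into its caller, so every failure reuses
 * the caller's single unwinding label instead of an extra NULL check.
 */
static struct item *item_get(size_t len)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		goto fail;

	it->buf = malloc(len);
	if (!it->buf) {
		free(it);
		goto fail;
	}
	memset(it->buf, 0, len);
	return it;

fail:
	return NULL;
}

int main(void)
{
	struct item *a = item_alloc(16);
	struct item *b = item_get(16);

	if (a) { free(a->buf); free(a); }
	if (b) { free(b->buf); free(b); }
	return 0;
}

In the patch below the same move means the upcoming allocation-failure rework only has to touch the single fail_alloc path inside get_request().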
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c   46
1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3b02ba3..f6f68b0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -719,33 +719,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	mempool_free(rq, q->rq.rq_pool);
 }
 
-static struct request *
-blk_alloc_request(struct request_queue *q, struct bio *bio, struct io_cq *icq,
-		  unsigned int flags, gfp_t gfp_mask)
-{
-	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-	if (!rq)
-		return NULL;
-
-	blk_rq_init(q, rq);
-
-	rq->cmd_flags = flags | REQ_ALLOCED;
-
-	if (flags & REQ_ELVPRIV) {
-		rq->elv.icq = icq;
-		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		/* @rq->elv.icq holds on to io_context until @rq is freed */
-		if (icq)
-			get_io_context(icq->ioc);
-	}
-
-	return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -968,10 +941,25 @@ retry:
 			goto fail_alloc;
 	}
 
-	rq = blk_alloc_request(q, bio, icq, rw_flags, gfp_mask);
-	if (unlikely(!rq))
+	/* allocate and init request */
+	rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+	if (!rq)
 		goto fail_alloc;
 
+	blk_rq_init(q, rq);
+	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+	if (rw_flags & REQ_ELVPRIV) {
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			goto fail_alloc;
+		}
+		/* @rq->elv.icq holds on to io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
+	}
+
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
 	 * OK, if the queue is under the request limit then requests need
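The inlined path above takes requests from the queue's request mempool (q->rq.rq_pool) and returns them with mempool_free() when elevator setup fails. For readers who haven't used that API, here is a small kernel-module-style sketch of the mempool_create_slab_pool()/mempool_alloc()/mempool_free() pattern; the demo_* names and the module scaffolding are illustrative assumptions, an untested sketch rather than code from this patch.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>

struct demo_elem { int payload; };

static struct kmem_cache *demo_cache;
static mempool_t *demo_pool;

static int __init demo_init(void)
{
	demo_cache = kmem_cache_create("demo_elem", sizeof(struct demo_elem),
				       0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/*
	 * Keep at least 4 pre-allocated elements so allocation can make
	 * forward progress even under memory pressure.
	 */
	demo_pool = mempool_create_slab_pool(4, demo_cache);
	if (!demo_pool) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	{
		/* allocate one element and give it back, as get_request()
		 * does with q->rq.rq_pool */
		struct demo_elem *e = mempool_alloc(demo_pool, GFP_KERNEL);

		if (e) {
			e->payload = 42;
			mempool_free(e, demo_pool);
		}
	}
	return 0;
}

static void __exit demo_exit(void)
{
	mempool_destroy(demo_pool);
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");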