author		Goldwyn Rodrigues <rgoldwyn@suse.com>	2017-06-20 07:05:46 -0500
committer	Jens Axboe <axboe@kernel.dk>		2017-06-20 07:12:03 -0600
commit		03a07c92a9ed9938d828ca7f1d11b8bc63a7bb89
tree		aced260faa3cdf56d6f1140579ffaf46f8ba9c25 /block/blk-core.c
parent		a38d1243704f501a4c42de1db1062ff6eba83453
block: return on congested block device
A new bio operation flag, REQ_NOWAIT, is introduced to identify bios
originating from an iocb with IOCB_NOWAIT set. The flag tells the
block layer to return immediately if a request cannot be made,
rather than blocking and retrying.
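
For illustration (not from the commit itself): userspace reaches this path
via the RWF_NOWAIT flag to preadv2()/pwritev2(), which sets IOCB_NOWAIT on
the kernel iocb; this patch then carries it down to the bio as REQ_NOWAIT.
A minimal sketch, assuming a glibc/kernel new enough to expose RWF_NOWAIT
(it landed alongside this series) and a file opened with O_DIRECT, since
the initial nowait support covers direct I/O only:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	void *buf;
	struct iovec iov;
	int fd;
	ssize_t ret;

	/* O_DIRECT requires an aligned buffer. */
	if (argc < 2 || posix_memalign(&buf, 4096, 4096))
		return 1;
	iov.iov_base = buf;
	iov.iov_len = 4096;

	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		/* Congested device: back off and retry later instead of blocking. */
		fprintf(stderr, "would block; retry later\n");
	else if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes without blocking\n", ret);

	free(buf);
	close(fd);
	return 0;
}

On a congested request-based device the call fails fast with EAGAIN
rather than sleeping in the block layer.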
Stacked devices such as md (the ones with make_request_fn hooks) are not
currently supported, because they may block for housekeeping work; for
example, part of an md device can be suspended. For this reason, only
request-based devices are supported. In the future, this feature will be
extended to stacked devices by teaching them how to handle the
REQ_NOWAIT flag.
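
For context, a hedged sketch of what "request based" means here:
queue_is_rq_based(), as it read in this era's include/linux/blkdev.h,
checks whether the queue has a legacy request_fn or blk-mq mq_ops.
Bio-based stacked drivers (md, dm) set only a make_request_fn and so
fail the test, which is how the hunk below rejects them:

static inline bool queue_is_rq_based(struct request_queue *q)
{
	/*
	 * True for legacy request_fn queues and blk-mq queues;
	 * false for bio-based (make_request_fn only) stacked drivers.
	 */
	return q->mq_ops || q->request_fn;
}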
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	22
1 file changed, 20 insertions, 2 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 62cf925..279e3c4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -143,6 +143,7 @@ static const struct {
 	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
+	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
 
 	/* device mapper special case, should not leak out: */
 	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
@@ -1314,6 +1315,11 @@ retry:
 	if (!IS_ERR(rq))
 		return rq;
 
+	if (op & REQ_NOWAIT) {
+		blk_put_rl(rl);
+		return ERR_PTR(-EAGAIN);
+	}
+
 	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return rq;
@@ -1961,6 +1967,14 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	/*
+	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+	 * if queue is not a request based queue.
+	 */
+
+	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+		goto not_supported;
+
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
@@ -2118,7 +2132,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-		if (likely(blk_queue_enter(q, false) == 0)) {
+		if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
@@ -2143,7 +2157,11 @@ blk_qc_t generic_make_request(struct bio *bio)
 			bio_list_merge(&bio_list_on_stack[0], &same);
 			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
-			bio_io_error(bio);
+			if (unlikely(!blk_queue_dying(q) &&
+					(bio->bi_opf & REQ_NOWAIT)))
+				bio_wouldblock_error(bio);
+			else
+				bio_io_error(bio);
 		}
 		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
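
For reference, a hedged sketch of the companion helper the last hunk calls;
it was added in include/linux/bio.h by the same patch series and completes
the bio with BLK_STS_AGAIN, which the status table extended above maps back
to -EAGAIN. Note also that blk_queue_enter()'s second argument is its nowait
flag, so a frozen queue now fails entry immediately for REQ_NOWAIT bios
rather than sleeping:

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;	/* caller sees -EAGAIN */
	bio_endio(bio);
}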