author	Christoph Hellwig <hch@lst.de>	2014-09-13 16:40:11 -0700
committer	Jens Axboe <axboe@fb.com>	2014-09-22 12:00:07 -0600
commit	81481eb423c295c5480a3fab9bb961cf286c91e7 (patch)
tree	3d4f0eaed9f784ba5beb41aa9fec3e76b867c1e8 /block/blk-mq.c
parent	c8a446ad695ada43a885ec12b38411dbd190a11b (diff)
blk-mq: fix and simplify tag iteration for the timeout handler
Don't do a kmalloc from the timer to handle timeouts; chances are we could
be under heavy load or similar and thus just miss out on the timeouts.

Fortunately it is very easy to just iterate over all in-use tags, and doing
this properly actually cleans up the blk_mq_tag_busy_iter API as well, and
prepares us for the next patch by passing a reserved argument to the
iterator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
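The shape of the change is easy to see in the diff below: the removed
blk_mq_timeout_check had to walk a snapshot bitmap of free tags handed to it
by the tag code, while the new blk_mq_check_expired is simply invoked once
per busy request by blk_mq_tag_busy_iter, with an opaque priv pointer
carrying the aggregation state on the timer handler's stack. What follows is
a minimal userspace sketch of that callback-iterator shape, compilable on
its own; every type and helper in it (hw_ctx, tag_busy_iter, and so on) is a
simplified stand-in invented for illustration, not the kernel's definition.

/*
 * Sketch of the pattern the patch adopts: instead of building a temporary
 * copy of the tag bitmap (which needed an allocation in timer context), the
 * iterator walks the in-use tags in place and invokes a callback for each
 * request, passing an opaque 'priv' pointer so the caller can aggregate
 * results on its own stack.
 */
#include <stdbool.h>
#include <stdio.h>

struct request { unsigned long deadline; bool started; };

struct hw_ctx {
	struct request *rqs;	/* statically sized request pool */
	unsigned int nr_tags;
	unsigned long *in_use;	/* bitmap of allocated tags */
};

typedef void (busy_iter_fn)(struct hw_ctx *hctx, struct request *rq,
			    void *priv, bool reserved);

/* Visit every in-use tag and hand its request to the callback: no allocation. */
static void tag_busy_iter(struct hw_ctx *hctx, busy_iter_fn *fn, void *priv)
{
	unsigned int tag;

	for (tag = 0; tag < hctx->nr_tags; tag++) {
		unsigned int word = tag / (8 * sizeof(unsigned long));
		unsigned long mask = 1UL << (tag % (8 * sizeof(unsigned long)));

		if (hctx->in_use[word] & mask)
			fn(hctx, &hctx->rqs[tag], priv, false);
	}
}

/* Aggregation state lives on the caller's stack, reached through 'priv'. */
struct timeout_data { unsigned long next; bool next_set; };

static void check_expired(struct hw_ctx *hctx, struct request *rq,
			  void *priv, bool reserved)
{
	struct timeout_data *data = priv;

	if (!rq->started)	/* allocated but not yet in flight: skip */
		return;
	if (!data->next_set || rq->deadline < data->next) {
		data->next = rq->deadline;	/* track the earliest deadline */
		data->next_set = true;
	}
}

int main(void)
{
	struct request rqs[3] = {
		{ .deadline = 300, .started = true },
		{ .deadline = 100, .started = true },
		{ .deadline = 200, .started = false },
	};
	unsigned long in_use = 0x7;	/* tags 0-2 are allocated */
	struct hw_ctx hctx = { .rqs = rqs, .nr_tags = 3, .in_use = &in_use };
	struct timeout_data data = { 0 };

	tag_busy_iter(&hctx, check_expired, &data);
	if (data.next_set)
		printf("earliest deadline: %lu\n", data.next);	/* prints 100 */
	return 0;
}

Because the iterator visits requests in place, nothing needs to be
allocated on the timeout path, which is exactly what the commit message is
after.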
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	85
1 file changed, 25 insertions(+), 60 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1713686..3baebca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -525,58 +525,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
-struct blk_mq_timeout_data {
-	struct blk_mq_hw_ctx *hctx;
-	unsigned long *next;
-	unsigned int *next_set;
-};
-
-static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
-{
-	struct blk_mq_timeout_data *data = __data;
-	struct blk_mq_hw_ctx *hctx = data->hctx;
-	unsigned int tag;
-
-	/* It may not be in flight yet (this is where
-	 * the REQ_ATOMIC_STARTED flag comes in). The requests are
-	 * statically allocated, so we know it's always safe to access the
-	 * memory associated with a bit offset into ->rqs[].
-	 */
-	tag = 0;
-	do {
-		struct request *rq;
-
-		tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
-		if (tag >= hctx->tags->nr_tags)
-			break;
-
-		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
-		if (rq->q != hctx->queue)
-			continue;
-		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-			continue;
-
-		blk_rq_check_expired(rq, data->next, data->next_set);
-	} while (1);
-}
-
-static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
-					unsigned long *next,
-					unsigned int *next_set)
-{
-	struct blk_mq_timeout_data data = {
-		.hctx		= hctx,
-		.next		= next,
-		.next_set	= next_set,
-	};
-
-	/*
-	 * Ask the tagging code to iterate busy requests, so we can
-	 * check them for timeout.
-	 */
-	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
-}
-
static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -598,13 +546,30 @@ static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
return q->mq_ops->timeout(rq);
}
+
+struct blk_mq_timeout_data {
+	unsigned long next;
+	unsigned int next_set;
+};
+
+static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, void *priv, bool reserved)
+{
+	struct blk_mq_timeout_data *data = priv;
+
+	if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		blk_rq_check_expired(rq, &data->next, &data->next_set);
+}
 
-static void blk_mq_rq_timer(unsigned long data)
+static void blk_mq_rq_timer(unsigned long priv)
 {
-	struct request_queue *q = (struct request_queue *) data;
+	struct request_queue *q = (struct request_queue *)priv;
+	struct blk_mq_timeout_data data = {
+		.next		= 0,
+		.next_set	= 0,
+	};
 	struct blk_mq_hw_ctx *hctx;
-	unsigned long next = 0;
-	int i, next_set = 0;
+	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
@@ -614,12 +579,12 @@ static void blk_mq_rq_timer(unsigned long data)
 		 * If not software queues are currently mapped to this
 		 * hardware queue, there's nothing to check
 		 */
 		if (!hctx->nr_ctx || !hctx->tags)
 			continue;
 
-		blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
+		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
 	}
 
-	if (next_set) {
-		next = blk_rq_timeout(round_jiffies_up(next));
-		mod_timer(&q->timeout, next);
+	if (data.next_set) {
+		data.next = blk_rq_timeout(round_jiffies_up(data.next));
+		mod_timer(&q->timeout, data.next);
 	} else {
 		queue_for_each_hw_ctx(q, hctx, i)
 			blk_mq_tag_idle(hctx);
 	}
 }
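A note on the resulting design: the next/next_set pair that used to be
threaded through the call chain as two loose pointers now lives in a single
blk_mq_timeout_data on blk_mq_rq_timer's stack and travels through the
iterator's void *priv argument, so the iteration API stays generic. The
reserved argument is not yet acted on by blk_mq_check_expired, but, as the
commit message notes, it is the hook the next patch needs to let callbacks
see reserved tags as well. The kmalloc being removed sat on the tag side of
the old blk_mq_tag_busy_iter (in blk-mq-tag.c, outside this file-limited
diffstat), which had to build the free_tags bitmap that the removed
blk_mq_timeout_check scanned.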