path: root/include/linux/blkdev.h
author    Jens Axboe <axboe@fb.com>    2016-03-30 10:21:08 -0600
committer Jens Axboe <axboe@fb.com>    2016-11-05 17:09:53 -0600
commit d278d4a8892f13b6a9eb6102b356402f0e062324 (patch)
tree   2d1f9dc740b6d47257404c02292493568c8ee656 /include/linux/blkdev.h
parent 600271d9000027c013c01be87cbb90a5a18c5c3f (diff)
block: add code to track actual device queue depth
For blk-mq, ->nr_requests does track queue depth, at least at init time. But for the older queue paths, it's simply a soft setting. On top of that, it's generally larger than the hardware setting on purpose, to allow requests to back up for merging.

Fill a hole in struct request_queue with a 'queue_depth' member that drivers can set to more closely inform the block layer of the real queue depth.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Jan Kara <jack@suse.cz>
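As a rough illustration (not part of this patch), a driver that knows its real hardware tag count could report it during queue setup along these lines; the 'mydrv_device' structure, its fields, and the function name are hypothetical, while blk_set_queue_depth() is the interface added below:

	/* Hypothetical driver setup path: report the real device depth. */
	static void mydrv_setup_queue(struct mydrv_device *dev)
	{
		struct request_queue *q = dev->queue;

		/*
		 * q->nr_requests remains the (larger) soft limit that lets
		 * requests back up for merging; queue_depth reflects what
		 * the hardware can actually accept at once.
		 */
		blk_set_queue_depth(q, dev->hw_queue_depth);
	}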
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9189a2d..d364be6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -405,6 +405,8 @@ struct request_queue {
 	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int		nr_queues;
 
+	unsigned int		queue_depth;
+
 	/* hw dispatch queues */
 	struct blk_mq_hw_ctx	**queue_hw_ctx;
 	unsigned int		nr_hw_queues;
@@ -777,6 +779,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 	return false;
 }
 
+static inline unsigned int blk_queue_depth(struct request_queue *q)
+{
+	if (q->queue_depth)
+		return q->queue_depth;
+
+	return q->nr_requests;
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -1094,6 +1104,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
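For completeness, a sketch of how block-layer code could consume the new helper; the function name below is illustrative only. blk_queue_depth() prefers the driver-provided value and falls back to q->nr_requests when no driver has set one:

	/* Illustrative consumer of the helper added above. */
	static unsigned int example_depth_budget(struct request_queue *q)
	{
		/* Driver-reported depth if set, otherwise q->nr_requests. */
		unsigned int depth = blk_queue_depth(q);

		/* Allow roughly three quarters of the effective depth. */
		return max(1U, depth * 3 / 4);
	}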