author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 11:25:08 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 11:25:08 -0700
commit    044f1daaaaf7c86bc4fcf433848b7baae236946b (patch)
tree      55a5b94c75cc6e51992ee3b5d7c49878c7ae7760 /block
parent    d557d1b58b3546bab2c5bc2d624c5709840e6b10 (diff)
parent    daaadb3e9453ab89c2e113a2d1df8e19e30944cc (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes and updates from Jens Axboe:
 "Some fixes and followup features/changes that should go in, in this
  merge window. This contains:

   - Two fixes for lightnvm from Javier, fixing problems in the new
     code merged previously in this merge window.

   - A fix from Jan for the backing device changes, fixing an issue in
     NFS that causes a failure to mount on certain setups.

   - A change from Christoph, cleaning up the blk-mq init and exit
     request paths.

   - Remove elevator_change(), which is now unused. From Bart.

   - A fix for queue operation invocation on a dead queue, from Bart.

   - A series fixing up mtip32xx for blk-mq scheduling, removing a
     bandaid we previously had in place for this. From me.

   - A regression fix for this series, fixing a case where we wait on
     workqueue flushing from an invalid (non-blocking) context. From me.

   - A fix/optimization from Ming, ensuring that we don't both quiesce
     and freeze a queue at the same time.

   - A fix from Peter on lock ordering for CPU hotplug. Not a real
     problem right now, but will be once the CPU hotplug rework goes in.

   - A series from Omar, cleaning up our blk-mq debugfs support, and
     adding support for exporting info from schedulers in debugfs as
     well. This is really useful in debugging stalls or livelocks.
     From Omar"

* 'for-linus' of git://git.kernel.dk/linux-block: (28 commits)
  mq-deadline: add debugfs attributes
  kyber: add debugfs attributes
  blk-mq-debugfs: allow schedulers to register debugfs attributes
  blk-mq: untangle debugfs and sysfs
  blk-mq: move debugfs declarations to a separate header file
  blk-mq: Do not invoke queue operations on a dead queue
  blk-mq-debugfs: get rid of a bunch of boilerplate
  blk-mq-debugfs: rename hw queue directories from <n> to hctx<n>
  blk-mq-debugfs: don't open code strstrip()
  blk-mq-debugfs: error on long write to queue "state" file
  blk-mq-debugfs: clean up flag definitions
  blk-mq-debugfs: separate flags with |
  nfs: Fix bdi handling for cloned superblocks
  block/mq: Cure cpu hotplug lock inversion
  lightnvm: fix bad back free on error path
  lightnvm: create cmd before allocating request
  blk-mq: don't use sync workqueue flushing from drivers
  mtip32xx: convert internal commands to regular block infrastructure
  mtip32xx: cleanup internal tag assumptions
  block: don't call blk_mq_quiesce_queue() after queue is frozen
  ...
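The scheduler debugfs support added by this series boils down to a small attribute table per elevator. As a rough sketch of the interface (the "foo" scheduler and its counter are made up for illustration; only struct blk_mq_debugfs_attr and the queue_debugfs_attrs/hctx_debugfs_attrs hooks come from this merge, and the real users are kyber-iosched.c and mq-deadline.c in the diff below), an mq I/O scheduler can export internal state like this:

#ifdef CONFIG_BLK_DEBUG_FS
/* Queue-level attributes receive the request_queue as their data pointer. */
static int foo_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct foo_queue_data *fqd = q->elevator->elevator_data;	/* hypothetical */

	seq_printf(m, "%u\n", fqd->depth);
	return 0;
}

static const struct blk_mq_debugfs_attr foo_queue_debugfs_attrs[] = {
	/* Each entry sets either a .show (and optional .write) or .seq_ops. */
	{"depth", 0400, foo_depth_show},
	{},
};
#endif

The table is wired up via .queue_debugfs_attrs (and .hctx_debugfs_attrs for per-hctx files) in the scheduler's struct elevator_type, guarded by CONFIG_BLK_DEBUG_FS; blk_mq_debugfs_register_sched() then creates a "sched" directory and one file per entry.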
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c           54
-rw-r--r--  block/blk-core.c          8
-rw-r--r--  block/blk-mq-debugfs.c  870
-rw-r--r--  block/blk-mq-debugfs.h   82
-rw-r--r--  block/blk-mq-sched.c     30
-rw-r--r--  block/blk-mq-sysfs.c     10
-rw-r--r--  block/blk-mq.h           28
-rw-r--r--  block/blk-sysfs.c         3
-rw-r--r--  block/elevator.c         16
-rw-r--r--  block/kyber-iosched.c   130
-rw-r--r--  block/mq-deadline.c     123
11 files changed, 781 insertions, 573 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 24886b6..c580b01 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -561,13 +561,9 @@ void blk_cleanup_queue(struct request_queue *q)
* prevent that q->request_fn() gets invoked after draining finished.
*/
blk_freeze_queue(q);
- if (!q->mq_ops) {
- spin_lock_irq(lock);
+ spin_lock_irq(lock);
+ if (!q->mq_ops)
__blk_drain_queue(q, true);
- } else {
- blk_mq_debugfs_unregister_mq(q);
- spin_lock_irq(lock);
- }
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index bcd2a7d..803aed4 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -21,28 +21,9 @@
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
-struct blk_mq_debugfs_attr {
- const char *name;
- umode_t mode;
- const struct file_operations *fops;
-};
-
-static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
- const struct seq_operations *ops)
-{
- struct seq_file *m;
- int ret;
-
- ret = seq_open(file, ops);
- if (!ret) {
- m = file->private_data;
- m->private = inode->i_private;
- }
- return ret;
-}
-
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
const char *const *flag_name, int flag_name_count)
{
@@ -53,7 +34,7 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
if (!(flags & BIT(i)))
continue;
if (sep)
- seq_puts(m, " ");
+ seq_puts(m, "|");
sep = true;
if (i < flag_name_count && flag_name[i])
seq_puts(m, flag_name[i]);
@@ -63,41 +44,43 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
return 0;
}
+#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
- [QUEUE_FLAG_QUEUED] = "QUEUED",
- [QUEUE_FLAG_STOPPED] = "STOPPED",
- [QUEUE_FLAG_SYNCFULL] = "SYNCFULL",
- [QUEUE_FLAG_ASYNCFULL] = "ASYNCFULL",
- [QUEUE_FLAG_DYING] = "DYING",
- [QUEUE_FLAG_BYPASS] = "BYPASS",
- [QUEUE_FLAG_BIDI] = "BIDI",
- [QUEUE_FLAG_NOMERGES] = "NOMERGES",
- [QUEUE_FLAG_SAME_COMP] = "SAME_COMP",
- [QUEUE_FLAG_FAIL_IO] = "FAIL_IO",
- [QUEUE_FLAG_STACKABLE] = "STACKABLE",
- [QUEUE_FLAG_NONROT] = "NONROT",
- [QUEUE_FLAG_IO_STAT] = "IO_STAT",
- [QUEUE_FLAG_DISCARD] = "DISCARD",
- [QUEUE_FLAG_NOXMERGES] = "NOXMERGES",
- [QUEUE_FLAG_ADD_RANDOM] = "ADD_RANDOM",
- [QUEUE_FLAG_SECERASE] = "SECERASE",
- [QUEUE_FLAG_SAME_FORCE] = "SAME_FORCE",
- [QUEUE_FLAG_DEAD] = "DEAD",
- [QUEUE_FLAG_INIT_DONE] = "INIT_DONE",
- [QUEUE_FLAG_NO_SG_MERGE] = "NO_SG_MERGE",
- [QUEUE_FLAG_POLL] = "POLL",
- [QUEUE_FLAG_WC] = "WC",
- [QUEUE_FLAG_FUA] = "FUA",
- [QUEUE_FLAG_FLUSH_NQ] = "FLUSH_NQ",
- [QUEUE_FLAG_DAX] = "DAX",
- [QUEUE_FLAG_STATS] = "STATS",
- [QUEUE_FLAG_POLL_STATS] = "POLL_STATS",
- [QUEUE_FLAG_REGISTERED] = "REGISTERED",
-};
-
-static int blk_queue_flags_show(struct seq_file *m, void *v)
-{
- struct request_queue *q = m->private;
+ QUEUE_FLAG_NAME(QUEUED),
+ QUEUE_FLAG_NAME(STOPPED),
+ QUEUE_FLAG_NAME(SYNCFULL),
+ QUEUE_FLAG_NAME(ASYNCFULL),
+ QUEUE_FLAG_NAME(DYING),
+ QUEUE_FLAG_NAME(BYPASS),
+ QUEUE_FLAG_NAME(BIDI),
+ QUEUE_FLAG_NAME(NOMERGES),
+ QUEUE_FLAG_NAME(SAME_COMP),
+ QUEUE_FLAG_NAME(FAIL_IO),
+ QUEUE_FLAG_NAME(STACKABLE),
+ QUEUE_FLAG_NAME(NONROT),
+ QUEUE_FLAG_NAME(IO_STAT),
+ QUEUE_FLAG_NAME(DISCARD),
+ QUEUE_FLAG_NAME(NOXMERGES),
+ QUEUE_FLAG_NAME(ADD_RANDOM),
+ QUEUE_FLAG_NAME(SECERASE),
+ QUEUE_FLAG_NAME(SAME_FORCE),
+ QUEUE_FLAG_NAME(DEAD),
+ QUEUE_FLAG_NAME(INIT_DONE),
+ QUEUE_FLAG_NAME(NO_SG_MERGE),
+ QUEUE_FLAG_NAME(POLL),
+ QUEUE_FLAG_NAME(WC),
+ QUEUE_FLAG_NAME(FUA),
+ QUEUE_FLAG_NAME(FLUSH_NQ),
+ QUEUE_FLAG_NAME(DAX),
+ QUEUE_FLAG_NAME(STATS),
+ QUEUE_FLAG_NAME(POLL_STATS),
+ QUEUE_FLAG_NAME(REGISTERED),
+};
+#undef QUEUE_FLAG_NAME
+
+static int queue_state_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
ARRAY_SIZE(blk_queue_flag_name));
@@ -105,42 +88,41 @@ static int blk_queue_flags_show(struct seq_file *m, void *v)
return 0;
}
-static ssize_t blk_queue_flags_store(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
+static ssize_t queue_state_write(void *data, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct request_queue *q = file_inode(file)->i_private;
- char op[16] = { }, *s;
+ struct request_queue *q = data;
+ char opbuf[16] = { }, *op;
+
+ /*
+ * The "state" attribute is removed after blk_cleanup_queue() has called
+ * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
+ * triggering a use-after-free.
+ */
+ if (blk_queue_dead(q))
+ return -ENOENT;
- len = min(len, sizeof(op) - 1);
- if (copy_from_user(op, ubuf, len))
+ if (count >= sizeof(opbuf)) {
+ pr_err("%s: operation too long\n", __func__);
+ goto inval;
+ }
+
+ if (copy_from_user(opbuf, buf, count))
return -EFAULT;
- s = op;
- strsep(&s, " \t\n"); /* strip trailing whitespace */
+ op = strstrip(opbuf);
if (strcmp(op, "run") == 0) {
blk_mq_run_hw_queues(q, true);
} else if (strcmp(op, "start") == 0) {
blk_mq_start_stopped_hw_queues(q, true);
} else {
- pr_err("%s: unsupported operation %s. Use either 'run' or 'start'\n",
- __func__, op);
+ pr_err("%s: unsupported operation '%s'\n", __func__, op);
+inval:
+ pr_err("%s: use either 'run' or 'start'\n", __func__);
return -EINVAL;
}
- return len;
-}
-
-static int blk_queue_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, blk_queue_flags_show, inode->i_private);
+ return count;
}
-static const struct file_operations blk_queue_flags_fops = {
- .open = blk_queue_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = blk_queue_flags_store,
-};
-
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
if (stat->nr_samples) {
@@ -151,9 +133,9 @@ static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
}
}
-static int queue_poll_stat_show(struct seq_file *m, void *v)
+static int queue_poll_stat_show(void *data, struct seq_file *m)
{
- struct request_queue *q = m->private;
+ struct request_queue *q = data;
int bucket;
for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
@@ -168,28 +150,19 @@ static int queue_poll_stat_show(struct seq_file *m, void *v)
return 0;
}
-static int queue_poll_stat_open(struct inode *inode, struct file *file)
-{
- return single_open(file, queue_poll_stat_show, inode->i_private);
-}
-
-static const struct file_operations queue_poll_stat_fops = {
- .open = queue_poll_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
+#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
- [BLK_MQ_S_STOPPED] = "STOPPED",
- [BLK_MQ_S_TAG_ACTIVE] = "TAG_ACTIVE",
- [BLK_MQ_S_SCHED_RESTART] = "SCHED_RESTART",
- [BLK_MQ_S_TAG_WAITING] = "TAG_WAITING",
-
+ HCTX_STATE_NAME(STOPPED),
+ HCTX_STATE_NAME(TAG_ACTIVE),
+ HCTX_STATE_NAME(SCHED_RESTART),
+ HCTX_STATE_NAME(TAG_WAITING),
+ HCTX_STATE_NAME(START_ON_RUN),
};
-static int hctx_state_show(struct seq_file *m, void *v)
+#undef HCTX_STATE_NAME
+
+static int hctx_state_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
blk_flags_show(m, hctx->state, hctx_state_name,
ARRAY_SIZE(hctx_state_name));
@@ -197,34 +170,26 @@ static int hctx_state_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_state_show, inode->i_private);
-}
-
-static const struct file_operations hctx_state_fops = {
- .open = hctx_state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
+#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
- [BLK_TAG_ALLOC_FIFO] = "fifo",
- [BLK_TAG_ALLOC_RR] = "rr",
+ BLK_TAG_ALLOC_NAME(FIFO),
+ BLK_TAG_ALLOC_NAME(RR),
};
+#undef BLK_TAG_ALLOC_NAME
+#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
- [ilog2(BLK_MQ_F_SHOULD_MERGE)] = "SHOULD_MERGE",
- [ilog2(BLK_MQ_F_TAG_SHARED)] = "TAG_SHARED",
- [ilog2(BLK_MQ_F_SG_MERGE)] = "SG_MERGE",
- [ilog2(BLK_MQ_F_BLOCKING)] = "BLOCKING",
- [ilog2(BLK_MQ_F_NO_SCHED)] = "NO_SCHED",
+ HCTX_FLAG_NAME(SHOULD_MERGE),
+ HCTX_FLAG_NAME(TAG_SHARED),
+ HCTX_FLAG_NAME(SG_MERGE),
+ HCTX_FLAG_NAME(BLOCKING),
+ HCTX_FLAG_NAME(NO_SCHED),
};
+#undef HCTX_FLAG_NAME
-static int hctx_flags_show(struct seq_file *m, void *v)
+static int hctx_flags_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
seq_puts(m, "alloc_policy=");
@@ -241,76 +206,69 @@ static int hctx_flags_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_flags_show, inode->i_private);
-}
-
-static const struct file_operations hctx_flags_fops = {
- .open = hctx_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
+#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
- [REQ_OP_READ] = "READ",
- [REQ_OP_WRITE] = "WRITE",
- [REQ_OP_FLUSH] = "FLUSH",
- [REQ_OP_DISCARD] = "DISCARD",
- [REQ_OP_ZONE_REPORT] = "ZONE_REPORT",
- [REQ_OP_SECURE_ERASE] = "SECURE_ERASE",
- [REQ_OP_ZONE_RESET] = "ZONE_RESET",
- [REQ_OP_WRITE_SAME] = "WRITE_SAME",
- [REQ_OP_WRITE_ZEROES] = "WRITE_ZEROES",
- [REQ_OP_SCSI_IN] = "SCSI_IN",
- [REQ_OP_SCSI_OUT] = "SCSI_OUT",
- [REQ_OP_DRV_IN] = "DRV_IN",
- [REQ_OP_DRV_OUT] = "DRV_OUT",
-};
-
+ REQ_OP_NAME(READ),
+ REQ_OP_NAME(WRITE),
+ REQ_OP_NAME(FLUSH),
+ REQ_OP_NAME(DISCARD),
+ REQ_OP_NAME(ZONE_REPORT),
+ REQ_OP_NAME(SECURE_ERASE),
+ REQ_OP_NAME(ZONE_RESET),
+ REQ_OP_NAME(WRITE_SAME),
+ REQ_OP_NAME(WRITE_ZEROES),
+ REQ_OP_NAME(SCSI_IN),
+ REQ_OP_NAME(SCSI_OUT),
+ REQ_OP_NAME(DRV_IN),
+ REQ_OP_NAME(DRV_OUT),
+};
+#undef REQ_OP_NAME
+
+#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
- [__REQ_FAILFAST_DEV] = "FAILFAST_DEV",
- [__REQ_FAILFAST_TRANSPORT] = "FAILFAST_TRANSPORT",
- [__REQ_FAILFAST_DRIVER] = "FAILFAST_DRIVER",
- [__REQ_SYNC] = "SYNC",
- [__REQ_META] = "META",
- [__REQ_PRIO] = "PRIO",
- [__REQ_NOMERGE] = "NOMERGE",
- [__REQ_IDLE] = "IDLE",
- [__REQ_INTEGRITY] = "INTEGRITY",
- [__REQ_FUA] = "FUA",
- [__REQ_PREFLUSH] = "PREFLUSH",
- [__REQ_RAHEAD] = "RAHEAD",
- [__REQ_BACKGROUND] = "BACKGROUND",
- [__REQ_NR_BITS] = "NR_BITS",
-};
-
+ CMD_FLAG_NAME(FAILFAST_DEV),
+ CMD_FLAG_NAME(FAILFAST_TRANSPORT),
+ CMD_FLAG_NAME(FAILFAST_DRIVER),
+ CMD_FLAG_NAME(SYNC),
+ CMD_FLAG_NAME(META),
+ CMD_FLAG_NAME(PRIO),
+ CMD_FLAG_NAME(NOMERGE),
+ CMD_FLAG_NAME(IDLE),
+ CMD_FLAG_NAME(INTEGRITY),
+ CMD_FLAG_NAME(FUA),
+ CMD_FLAG_NAME(PREFLUSH),
+ CMD_FLAG_NAME(RAHEAD),
+ CMD_FLAG_NAME(BACKGROUND),
+ CMD_FLAG_NAME(NOUNMAP),
+};
+#undef CMD_FLAG_NAME
+
+#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
- [ilog2((__force u32)RQF_SORTED)] = "SORTED",
- [ilog2((__force u32)RQF_STARTED)] = "STARTED",
- [ilog2((__force u32)RQF_QUEUED)] = "QUEUED",
- [ilog2((__force u32)RQF_SOFTBARRIER)] = "SOFTBARRIER",
- [ilog2((__force u32)RQF_FLUSH_SEQ)] = "FLUSH_SEQ",
- [ilog2((__force u32)RQF_MIXED_MERGE)] = "MIXED_MERGE",
- [ilog2((__force u32)RQF_MQ_INFLIGHT)] = "MQ_INFLIGHT",
- [ilog2((__force u32)RQF_DONTPREP)] = "DONTPREP",
- [ilog2((__force u32)RQF_PREEMPT)] = "PREEMPT",
- [ilog2((__force u32)RQF_COPY_USER)] = "COPY_USER",
- [ilog2((__force u32)RQF_FAILED)] = "FAILED",
- [ilog2((__force u32)RQF_QUIET)] = "QUIET",
- [ilog2((__force u32)RQF_ELVPRIV)] = "ELVPRIV",
- [ilog2((__force u32)RQF_IO_STAT)] = "IO_STAT",
- [ilog2((__force u32)RQF_ALLOCED)] = "ALLOCED",
- [ilog2((__force u32)RQF_PM)] = "PM",
- [ilog2((__force u32)RQF_HASHED)] = "HASHED",
- [ilog2((__force u32)RQF_STATS)] = "STATS",
- [ilog2((__force u32)RQF_SPECIAL_PAYLOAD)] = "SPECIAL_PAYLOAD",
-};
-
-static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
+ RQF_NAME(SORTED),
+ RQF_NAME(STARTED),
+ RQF_NAME(QUEUED),
+ RQF_NAME(SOFTBARRIER),
+ RQF_NAME(FLUSH_SEQ),
+ RQF_NAME(MIXED_MERGE),
+ RQF_NAME(MQ_INFLIGHT),
+ RQF_NAME(DONTPREP),
+ RQF_NAME(PREEMPT),
+ RQF_NAME(COPY_USER),
+ RQF_NAME(FAILED),
+ RQF_NAME(QUIET),
+ RQF_NAME(ELVPRIV),
+ RQF_NAME(IO_STAT),
+ RQF_NAME(ALLOCED),
+ RQF_NAME(PM),
+ RQF_NAME(HASHED),
+ RQF_NAME(STATS),
+ RQF_NAME(SPECIAL_PAYLOAD),
+};
+#undef RQF_NAME
+
+int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
- struct request *rq = list_entry_rq(v);
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
const unsigned int op = rq->cmd_flags & REQ_OP_MASK;
@@ -332,6 +290,13 @@ static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
seq_puts(m, "}\n");
return 0;
}
+EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
+
+int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
+{
+ return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
+}
+EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
__acquires(&hctx->lock)
@@ -364,38 +329,14 @@ static const struct seq_operations hctx_dispatch_seq_ops = {
.show = blk_mq_debugfs_rq_show,
};
-static int hctx_dispatch_open(struct inode *inode, struct file *file)
-{
- return blk_mq_debugfs_seq_open(inode, file, &hctx_dispatch_seq_ops);
-}
-
-static const struct file_operations hctx_dispatch_fops = {
- .open = hctx_dispatch_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static int hctx_ctx_map_show(struct seq_file *m, void *v)
+static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
sbitmap_bitmap_show(&hctx->ctx_map, m);
return 0;
}
-static int hctx_ctx_map_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_ctx_map_show, inode->i_private);
-}
-
-static const struct file_operations hctx_ctx_map_fops = {
- .open = hctx_ctx_map_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void blk_mq_debugfs_tags_show(struct seq_file *m,
struct blk_mq_tags *tags)
{
@@ -413,9 +354,9 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
}
}
-static int hctx_tags_show(struct seq_file *m, void *v)
+static int hctx_tags_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -430,21 +371,9 @@ out:
return res;
}
-static int hctx_tags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_tags_show, inode->i_private);
-}
-
-static const struct file_operations hctx_tags_fops = {
- .open = hctx_tags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
+static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -459,21 +388,9 @@ out:
return res;
}
-static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
+static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
- return single_open(file, hctx_tags_bitmap_show, inode->i_private);
-}
-
-static const struct file_operations hctx_tags_bitmap_fops = {
- .open = hctx_tags_bitmap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_sched_tags_show(struct seq_file *m, void *v)
-{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -488,21 +405,9 @@ out:
return res;
}
-static int hctx_sched_tags_open(struct inode *inode, struct file *file)
+static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
- return single_open(file, hctx_sched_tags_show, inode->i_private);
-}
-
-static const struct file_operations hctx_sched_tags_fops = {
- .open = hctx_sched_tags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
-{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -517,21 +422,9 @@ out:
return res;
}
-static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_sched_tags_bitmap_show, inode->i_private);
-}
-
-static const struct file_operations hctx_sched_tags_bitmap_fops = {
- .open = hctx_sched_tags_bitmap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_io_poll_show(struct seq_file *m, void *v)
+static int hctx_io_poll_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "considered=%lu\n", hctx->poll_considered);
seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
@@ -539,32 +432,18 @@ static int hctx_io_poll_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_io_poll_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_io_poll_show, inode->i_private);
-}
-
-static ssize_t hctx_io_poll_write(struct file *file, const char __user *buf,
+static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
return count;
}
-static const struct file_operations hctx_io_poll_fops = {
- .open = hctx_io_poll_open,
- .read = seq_read,
- .write = hctx_io_poll_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_dispatched_show(struct seq_file *m, void *v)
+static int hctx_dispatched_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
int i;
seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
@@ -579,16 +458,10 @@ static int hctx_dispatched_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_dispatched_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_dispatched_show, inode->i_private);
-}
-
-static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf,
+static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
int i;
for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
@@ -596,96 +469,48 @@ static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations hctx_dispatched_fops = {
- .open = hctx_dispatched_open,
- .read = seq_read,
- .write = hctx_dispatched_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_queued_show(struct seq_file *m, void *v)
+static int hctx_queued_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "%lu\n", hctx->queued);
return 0;
}
-static int hctx_queued_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_queued_show, inode->i_private);
-}
-
-static ssize_t hctx_queued_write(struct file *file, const char __user *buf,
+static ssize_t hctx_queued_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
hctx->queued = 0;
return count;
}
-static const struct file_operations hctx_queued_fops = {
- .open = hctx_queued_open,
- .read = seq_read,
- .write = hctx_queued_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_run_show(struct seq_file *m, void *v)
+static int hctx_run_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "%lu\n", hctx->run);
return 0;
}
-static int hctx_run_open(struct inode *inode, struct file *file)
+static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
+ loff_t *ppos)
{
- return single_open(file, hctx_run_show, inode->i_private);
-}
-
-static ssize_t hctx_run_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
hctx->run = 0;
return count;
}
-static const struct file_operations hctx_run_fops = {
- .open = hctx_run_open,
- .read = seq_read,
- .write = hctx_run_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_active_show(struct seq_file *m, void *v)
+static int hctx_active_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
return 0;
}
-static int hctx_active_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_active_show, inode->i_private);
-}
-
-static const struct file_operations hctx_active_fops = {
- .open = hctx_active_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
__acquires(&ctx->lock)
{
@@ -716,156 +541,192 @@ static const struct seq_operations ctx_rq_list_seq_ops = {
.stop = ctx_rq_list_stop,
.show = blk_mq_debugfs_rq_show,
};
-
-static int ctx_rq_list_open(struct inode *inode, struct file *file)
+static int ctx_dispatched_show(void *data, struct seq_file *m)
{
- return blk_mq_debugfs_seq_open(inode, file, &ctx_rq_list_seq_ops);
-}
-
-static const struct file_operations ctx_rq_list_fops = {
- .open = ctx_rq_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static int ctx_dispatched_show(struct seq_file *m, void *v)
-{
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
return 0;
}
-static int ctx_dispatched_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ctx_dispatched_show, inode->i_private);
-}
-
-static ssize_t ctx_dispatched_write(struct file *file, const char __user *buf,
+static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
return count;
}
-static const struct file_operations ctx_dispatched_fops = {
- .open = ctx_dispatched_open,
- .read = seq_read,
- .write = ctx_dispatched_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ctx_merged_show(struct seq_file *m, void *v)
+static int ctx_merged_show(void *data, struct seq_file *m)
{
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
seq_printf(m, "%lu\n", ctx->rq_merged);
return 0;
}
-static int ctx_merged_open(struct inode *inode, struct file *file)
+static ssize_t ctx_merged_write(void *data, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- return single_open(file, ctx_merged_show, inode->i_private);
-}
-
-static ssize_t ctx_merged_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = file->private_data;
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
ctx->rq_merged = 0;
return count;
}
-static const struct file_operations ctx_merged_fops = {
- .open = ctx_merged_open,
- .read = seq_read,
- .write = ctx_merged_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ctx_completed_show(struct seq_file *m, void *v)
+static int ctx_completed_show(void *data, struct seq_file *m)
{
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
return 0;
}
-static int ctx_completed_open(struct inode *inode, struct file *file)
+static ssize_t ctx_completed_write(void *data, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- return single_open(file, ctx_completed_show, inode->i_private);
+ struct blk_mq_ctx *ctx = data;
+
+ ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
+ return count;
}
-static ssize_t ctx_completed_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static int blk_mq_debugfs_show(struct seq_file *m, void *v)
+{
+ const struct blk_mq_debugfs_attr *attr = m->private;
+ void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;
+
+ return attr->show(data, m);
+}
+
+static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
struct seq_file *m = file->private_data;
- struct blk_mq_ctx *ctx = m->private;
+ const struct blk_mq_debugfs_attr *attr = m->private;
+ void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
- ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
- return count;
+ if (!attr->write)
+ return -EPERM;
+
+ return attr->write(data, buf, count, ppos);
+}
+
+static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
+{
+ const struct blk_mq_debugfs_attr *attr = inode->i_private;
+ void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
+ struct seq_file *m;
+ int ret;
+
+ if (attr->seq_ops) {
+ ret = seq_open(file, attr->seq_ops);
+ if (!ret) {
+ m = file->private_data;
+ m->private = data;
+ }
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!attr->show))
+ return -EPERM;
+
+ return single_open(file, blk_mq_debugfs_show, inode->i_private);
+}
+
+static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
+{
+ const struct blk_mq_debugfs_attr *attr = inode->i_private;
+
+ if (attr->show)
+ return single_release(inode, file);
+ else
+ return seq_release(inode, file);
}
-static const struct file_operations ctx_completed_fops = {
- .open = ctx_completed_open,
+const struct file_operations blk_mq_debugfs_fops = {
+ .open = blk_mq_debugfs_open,
.read = seq_read,
- .write = ctx_completed_write,
+ .write = blk_mq_debugfs_write,
.llseek = seq_lseek,
- .release = single_release,
+ .release = blk_mq_debugfs_release,
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
- {"poll_stat", 0400, &queue_poll_stat_fops},
- {"state", 0600, &blk_queue_flags_fops},
+ {"poll_stat", 0400, queue_poll_stat_show},
+ {"state", 0600, queue_state_show, queue_state_write},
{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
- {"state", 0400, &hctx_state_fops},
- {"flags", 0400, &hctx_flags_fops},
- {"dispatch", 0400, &hctx_dispatch_fops},
- {"ctx_map", 0400, &hctx_ctx_map_fops},
- {"tags", 0400, &hctx_tags_fops},
- {"tags_bitmap", 0400, &hctx_tags_bitmap_fops},
- {"sched_tags", 0400, &hctx_sched_tags_fops},
- {"sched_tags_bitmap", 0400, &hctx_sched_tags_bitmap_fops},
- {"io_poll", 0600, &hctx_io_poll_fops},
- {"dispatched", 0600, &hctx_dispatched_fops},
- {"queued", 0600, &hctx_queued_fops},
- {"run", 0600, &hctx_run_fops},
- {"active", 0400, &hctx_active_fops},
+ {"state", 0400, hctx_state_show},
+ {"flags", 0400, hctx_flags_show},
+ {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
+ {"ctx_map", 0400, hctx_ctx_map_show},
+ {"tags", 0400, hctx_tags_show},
+ {"tags_bitmap", 0400, hctx_tags_bitmap_show},
+ {"sched_tags", 0400, hctx_sched_tags_show},
+ {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
+ {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
+ {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
+ {"queued", 0600, hctx_queued_show, hctx_queued_write},
+ {"run", 0600, hctx_run_show, hctx_run_write},
+ {"active", 0400, hctx_active_show},
{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
- {"rq_list", 0400, &ctx_rq_list_fops},
- {"dispatched", 0600, &ctx_dispatched_fops},
- {"merged", 0600, &ctx_merged_fops},
- {"completed", 0600, &ctx_completed_fops},
+ {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
+ {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
+ {"merged", 0600, ctx_merged_show, ctx_merged_write},
+ {"completed", 0600, ctx_completed_show, ctx_completed_write},
{},
};
+static bool debugfs_create_files(struct dentry *parent, void *data,
+ const struct blk_mq_debugfs_attr *attr)
+{
+ d_inode(parent)->i_private = data;
+
+ for (; attr->name; attr++) {
+ if (!debugfs_create_file(attr->name, attr->mode, parent,
+ (void *)attr, &blk_mq_debugfs_fops))
+ return false;
+ }
+ return true;
+}
+
int blk_mq_debugfs_register(struct request_queue *q)
{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
if (!blk_debugfs_root)
return -ENOENT;
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
blk_debugfs_root);
if (!q->debugfs_dir)
- goto err;
+ return -ENOMEM;
- if (blk_mq_debugfs_register_mq(q))
+ if (!debugfs_create_files(q->debugfs_dir, q,
+ blk_mq_debugfs_queue_attrs))
goto err;
+ /*
+ * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
+ * didn't exist yet (because we don't know what to name the directory
+ * until the queue is registered to a gendisk).
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
+ goto err;
+ if (q->elevator && !hctx->sched_debugfs_dir &&
+ blk_mq_debugfs_register_sched_hctx(q, hctx))
+ goto err;
+ }
+
return 0;
err:
@@ -876,30 +737,18 @@ err:
void blk_mq_debugfs_unregister(struct request_queue *q)
{
debugfs_remove_recursive(q->debugfs_dir);
- q->mq_debugfs_dir = NULL;
+ q->sched_debugfs_dir = NULL;
q->debugfs_dir = NULL;
}
-static bool debugfs_create_files(struct dentry *parent, void *data,
- const struct blk_mq_debugfs_attr *attr)
-{
- for (; attr->name; attr++) {
- if (!debugfs_create_file(attr->name, attr->mode, parent,
- data, attr->fops))
- return false;
- }
- return true;
-}
-
-static int blk_mq_debugfs_register_ctx(struct request_queue *q,
- struct blk_mq_ctx *ctx,
- struct dentry *hctx_dir)
+static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
{
struct dentry *ctx_dir;
char name[20];
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
- ctx_dir = debugfs_create_dir(name, hctx_dir);
+ ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
if (!ctx_dir)
return -ENOMEM;
@@ -909,59 +758,122 @@ static int blk_mq_debugfs_register_ctx(struct request_queue *q,
return 0;
}
-static int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+int blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_ctx *ctx;
- struct dentry *hctx_dir;
char name[20];
int i;
- snprintf(name, sizeof(name), "%u", hctx->queue_num);
- hctx_dir = debugfs_create_dir(name, q->mq_debugfs_dir);
- if (!hctx_dir)
- return -ENOMEM;
+ if (!q->debugfs_dir)
+ return -ENOENT;
- if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
+ snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
+ hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
+ if (!hctx->debugfs_dir)
return -ENOMEM;
+ if (!debugfs_create_files(hctx->debugfs_dir, hctx,
+ blk_mq_debugfs_hctx_attrs))
+ goto err;
+
hctx_for_each_ctx(hctx, ctx, i) {
- if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
+ if (blk_mq_debugfs_register_ctx(hctx, ctx))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ blk_mq_debugfs_unregister_hctx(hctx);
+ return -ENOMEM;
+}
+
+void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
+{
+ debugfs_remove_recursive(hctx->debugfs_dir);
+ hctx->sched_debugfs_dir = NULL;
+ hctx->debugfs_dir = NULL;
+}
+
+int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (blk_mq_debugfs_register_hctx(q, hctx))
return -ENOMEM;
}
return 0;
}
-int blk_mq_debugfs_register_mq(struct request_queue *q)
+void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_debugfs_unregister_hctx(hctx);
+}
+
+int blk_mq_debugfs_register_sched(struct request_queue *q)
+{
+ struct elevator_type *e = q->elevator->type;
+
if (!q->debugfs_dir)
return -ENOENT;
- q->mq_debugfs_dir = debugfs_create_dir("mq", q->debugfs_dir);
- if (!q->mq_debugfs_dir)
- goto err;
+ if (!e->queue_debugfs_attrs)
+ return 0;
- if (!debugfs_create_files(q->mq_debugfs_dir, q, blk_mq_debugfs_queue_attrs))
- goto err;
+ q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
+ if (!q->sched_debugfs_dir)
+ return -ENOMEM;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (blk_mq_debugfs_register_hctx(q, hctx))
- goto err;
- }
+ if (!debugfs_create_files(q->sched_debugfs_dir, q,
+ e->queue_debugfs_attrs))
+ goto err;
return 0;
err:
- blk_mq_debugfs_unregister_mq(q);
+ blk_mq_debugfs_unregister_sched(q);
return -ENOMEM;
}
-void blk_mq_debugfs_unregister_mq(struct request_queue *q)
+void blk_mq_debugfs_unregister_sched(struct request_queue *q)
+{
+ debugfs_remove_recursive(q->sched_debugfs_dir);
+ q->sched_debugfs_dir = NULL;
+}
+
+int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ struct elevator_type *e = q->elevator->type;
+
+ if (!hctx->debugfs_dir)
+ return -ENOENT;
+
+ if (!e->hctx_debugfs_attrs)
+ return 0;
+
+ hctx->sched_debugfs_dir = debugfs_create_dir("sched",
+ hctx->debugfs_dir);
+ if (!hctx->sched_debugfs_dir)
+ return -ENOMEM;
+
+ if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
+ e->hctx_debugfs_attrs))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
- debugfs_remove_recursive(q->mq_debugfs_dir);
- q->mq_debugfs_dir = NULL;
+ debugfs_remove_recursive(hctx->sched_debugfs_dir);
+ hctx->sched_debugfs_dir = NULL;
}
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
new file mode 100644
index 0000000..a182e6f
--- /dev/null
+++ b/block/blk-mq-debugfs.h
@@ -0,0 +1,82 @@
+#ifndef INT_BLK_MQ_DEBUGFS_H
+#define INT_BLK_MQ_DEBUGFS_H
+
+#ifdef CONFIG_BLK_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+struct blk_mq_debugfs_attr {
+ const char *name;
+ umode_t mode;
+ int (*show)(void *, struct seq_file *);
+ ssize_t (*write)(void *, const char __user *, size_t, loff_t *);
+ /* Set either .show or .seq_ops. */
+ const struct seq_operations *seq_ops;
+};
+
+int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
+int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
+
+int blk_mq_debugfs_register(struct request_queue *q);
+void blk_mq_debugfs_unregister(struct request_queue *q);
+int blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
+int blk_mq_debugfs_register_hctxs(struct request_queue *q);
+void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
+
+int blk_mq_debugfs_register_sched(struct request_queue *q);
+void blk_mq_debugfs_unregister_sched(struct request_queue *q);
+int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
+#else
+static inline int blk_mq_debugfs_register(struct request_queue *q)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister(struct request_queue *q)
+{
+}
+
+static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
+{
+}
+
+static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
+{
+}
+
+static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
+{
+}
+
+static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
+{
+}
+#endif
+
+#endif
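
For orientation (not part of the patch): with the hooks above in place, the per-queue debugfs tree ends up looking roughly like the layout below. Directory and file names come from the registration code in blk-mq-debugfs.c; the disk name and the /sys/kernel/debug mount point are the usual defaults and are only illustrative.

/sys/kernel/debug/block/sda/
    poll_stat
    state
    sched/                 /* only if the elevator provides queue_debugfs_attrs */
    hctx0/
        state flags dispatch ctx_map tags tags_bitmap
        sched_tags sched_tags_bitmap io_poll dispatched queued run active
        sched/             /* per-hctx scheduler attributes */
        cpu0/
            rq_list dispatched merged completed
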
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 8b361e1..1f5b692 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -11,6 +11,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"
@@ -82,11 +83,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
if (likely(!data->hctx))
data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
- /*
- * For a reserved tag, allocate a normal request since we might
- * have driver dependencies on the value of the internal tag.
- */
- if (e && !(data->flags & BLK_MQ_REQ_RESERVED)) {
+ if (e) {
data->flags |= BLK_MQ_REQ_INTERNAL;
/*
@@ -476,6 +473,8 @@ int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
}
}
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
+
return 0;
}
@@ -487,6 +486,8 @@ void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
if (!e)
return;
+ blk_mq_debugfs_unregister_sched_hctx(hctx);
+
if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
e->type->ops.mq.exit_hctx(hctx, hctx_idx);
hctx->sched_data = NULL;
@@ -523,8 +524,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
if (ret)
goto err;
- if (e->ops.mq.init_hctx) {
- queue_for_each_hw_ctx(q, hctx, i) {
+ blk_mq_debugfs_register_sched(q);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (e->ops.mq.init_hctx) {
ret = e->ops.mq.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
@@ -533,6 +536,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return ret;
}
}
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
}
return 0;
@@ -548,14 +552,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
struct blk_mq_hw_ctx *hctx;
unsigned int i;
- if (e->type->ops.mq.exit_hctx) {
- queue_for_each_hw_ctx(q, hctx, i) {
- if (hctx->sched_data) {
- e->type->ops.mq.exit_hctx(hctx, i);
- hctx->sched_data = NULL;
- }
+ queue_for_each_hw_ctx(q, hctx, i) {
+ blk_mq_debugfs_unregister_sched_hctx(hctx);
+ if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
+ e->type->ops.mq.exit_hctx(hctx, i);
+ hctx->sched_data = NULL;
}
}
+ blk_mq_debugfs_unregister_sched(q);
if (e->type->ops.mq.exit_sched)
e->type->ops.mq.exit_sched(e);
blk_mq_sched_tags_teardown(q);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ec0afdf..79969c3 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -258,8 +258,6 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
- blk_mq_debugfs_unregister_mq(q);
-
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
kobject_put(&dev->kobj);
@@ -318,8 +316,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
kobject_uevent(&q->mq_kobj, KOBJ_ADD);
- blk_mq_debugfs_register(q);
-
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
@@ -335,8 +331,6 @@ unreg:
while (--i >= 0)
blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
- blk_mq_debugfs_unregister_mq(q);
-
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
kobject_put(&dev->kobj);
@@ -364,8 +358,6 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
if (!q->mq_sysfs_init_done)
goto unlock;
- blk_mq_debugfs_unregister_mq(q);
-
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
@@ -382,8 +374,6 @@ int blk_mq_sysfs_register(struct request_queue *q)
if (!q->mq_sysfs_init_done)
goto unlock;
- blk_mq_debugfs_register_mq(q);
-
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bf90684..5d4ce7e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -31,6 +31,7 @@
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
@@ -41,6 +42,7 @@ static LIST_HEAD(all_q_list);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
@@ -166,7 +168,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
unsigned int i;
bool rcu = false;
- blk_mq_stop_hw_queues(q);
+ __blk_mq_stop_hw_queues(q, true);
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -1218,20 +1220,34 @@ bool blk_mq_queue_stopped(struct request_queue *q)
}
EXPORT_SYMBOL(blk_mq_queue_stopped);
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
{
- cancel_delayed_work_sync(&hctx->run_work);
+ if (sync)
+ cancel_delayed_work_sync(&hctx->run_work);
+ else
+ cancel_delayed_work(&hctx->run_work);
+
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ __blk_mq_stop_hw_queue(hctx, false);
+}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
-void blk_mq_stop_hw_queues(struct request_queue *q)
+void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
{
struct blk_mq_hw_ctx *hctx;
int i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_stop_hw_queue(hctx);
+ __blk_mq_stop_hw_queue(hctx, sync);
+}
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+ __blk_mq_stop_hw_queues(q, false);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);
@@ -1655,8 +1671,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
if (!rq)
continue;
- set->ops->exit_request(set->driver_data, rq,
- hctx_idx, i);
+ set->ops->exit_request(set, rq, hctx_idx);
tags->static_rqs[i] = NULL;
}
}
@@ -1787,8 +1802,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
tags->static_rqs[i] = rq;
if (set->ops->init_request) {
- if (set->ops->init_request(set->driver_data,
- rq, hctx_idx, i,
+ if (set->ops->init_request(set, rq, hctx_idx,
node)) {
tags->static_rqs[i] = NULL;
goto fail;
@@ -1849,14 +1863,12 @@ static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
- unsigned flush_start_tag = set->queue_depth;
+ blk_mq_debugfs_unregister_hctx(hctx);
blk_mq_tag_idle(hctx);
if (set->ops->exit_request)
- set->ops->exit_request(set->driver_data,
- hctx->fq->flush_rq, hctx_idx,
- flush_start_tag + hctx_idx);
+ set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
@@ -1889,7 +1901,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
int node;
- unsigned flush_start_tag = set->queue_depth;
node = hctx->numa_node;
if (node == NUMA_NO_NODE)
@@ -1933,14 +1944,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
goto sched_exit_hctx;
if (set->ops->init_request &&
- set->ops->init_request(set->driver_data,
- hctx->fq->flush_rq, hctx_idx,
- flush_start_tag + hctx_idx, node))
+ set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
+ node))
goto free_fq;
if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(&hctx->queue_rq_srcu);
+ blk_mq_debugfs_register_hctx(q, hctx);
+
return 0;
free_fq:
@@ -2329,15 +2341,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
- get_online_cpus();
mutex_lock(&all_q_mutex);
+ get_online_cpus();
list_add_tail(&q->all_q_node, &all_q_list);
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q, cpu_online_mask);
- mutex_unlock(&all_q_mutex);
put_online_cpus();
+ mutex_unlock(&all_q_mutex);
if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
int ret;
@@ -2378,6 +2390,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
{
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
+ blk_mq_debugfs_unregister_hctxs(q);
blk_mq_sysfs_unregister(q);
/*
@@ -2389,6 +2402,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
blk_mq_map_swqueue(q, online_mask);
blk_mq_sysfs_register(q);
+ blk_mq_debugfs_register_hctxs(q);
}
/*
@@ -2617,7 +2631,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return -EINVAL;
blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
@@ -2643,7 +2656,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
q->nr_requests = nr;
blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
return ret;
}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 2814a14..cc67b48 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -83,34 +83,6 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
-/*
- * debugfs helpers
- */
-#ifdef CONFIG_BLK_DEBUG_FS
-int blk_mq_debugfs_register(struct request_queue *q);
-void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_mq(struct request_queue *q);
-void blk_mq_debugfs_unregister_mq(struct request_queue *q);
-#else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
-{
- return 0;
-}
-
-static inline void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-}
-
-static inline int blk_mq_debugfs_register_mq(struct request_queue *q)
-{
- return 0;
-}
-
-static inline void blk_mq_debugfs_unregister_mq(struct request_queue *q)
-{
-}
-#endif
-
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
void blk_mq_release(struct request_queue *q);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3f37813..504fee9 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-wbt.h"
struct queue_sysfs_entry {
@@ -889,6 +890,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
__blk_mq_register_dev(dev, q);
+ blk_mq_debugfs_register(q);
+
kobject_uevent(&q->kobj, KOBJ_ADD);
wbt_enable_default(q);
diff --git a/block/elevator.c b/block/elevator.c
index bf11e70..ab726a5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -950,7 +950,6 @@ static int elevator_switch_mq(struct request_queue *q,
int ret;
blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
if (q->elevator) {
if (q->elevator->registered)
@@ -978,9 +977,7 @@ static int elevator_switch_mq(struct request_queue *q,
out:
blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
return ret;
-
}
/*
@@ -1088,19 +1085,6 @@ static int __elevator_change(struct request_queue *q, const char *name)
return elevator_switch(q, e);
}
-int elevator_change(struct request_queue *q, const char *name)
-{
- int ret;
-
- /* Protect q->elevator from elevator_init() */
- mutex_lock(&q->sysfs_lock);
- ret = __elevator_change(q, name);
- mutex_unlock(&q->sysfs_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(elevator_change);
-
static inline bool elv_support_iosched(struct request_queue *q)
{
if (q->mq_ops && q->tag_set && (q->tag_set->flags &
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 3b0090b..b9faabc 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -26,6 +26,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
@@ -683,6 +684,131 @@ static struct elv_fs_entry kyber_sched_attrs[] = {
};
#undef KYBER_LAT_ATTR
+#ifdef CONFIG_BLK_DEBUG_FS
+#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \
+static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
+{ \
+ struct request_queue *q = data; \
+ struct kyber_queue_data *kqd = q->elevator->elevator_data; \
+ \
+ sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
+ return 0; \
+} \
+ \
+static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
+ __acquires(&khd->lock) \
+{ \
+ struct blk_mq_hw_ctx *hctx = m->private; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+ spin_lock(&khd->lock); \
+ return seq_list_start(&khd->rqs[domain], *pos); \
+} \
+ \
+static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \
+ loff_t *pos) \
+{ \
+ struct blk_mq_hw_ctx *hctx = m->private; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+ return seq_list_next(v, &khd->rqs[domain], pos); \
+} \
+ \
+static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \
+ __releases(&khd->lock) \
+{ \
+ struct blk_mq_hw_ctx *hctx = m->private; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+ spin_unlock(&khd->lock); \
+} \
+ \
+static const struct seq_operations kyber_##name##_rqs_seq_ops = { \
+ .start = kyber_##name##_rqs_start, \
+ .next = kyber_##name##_rqs_next, \
+ .stop = kyber_##name##_rqs_stop, \
+ .show = blk_mq_debugfs_rq_show, \
+}; \
+ \
+static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
+{ \
+ struct blk_mq_hw_ctx *hctx = data; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ wait_queue_t *wait = &khd->domain_wait[domain]; \
+ \
+ seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
+ return 0; \
+}
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
+#undef KYBER_DEBUGFS_DOMAIN_ATTRS
+
+static int kyber_async_depth_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct kyber_queue_data *kqd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", kqd->async_depth);
+ return 0;
+}
+
+static int kyber_cur_domain_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+ struct kyber_hctx_data *khd = hctx->sched_data;
+
+ switch (khd->cur_domain) {
+ case KYBER_READ:
+ seq_puts(m, "READ\n");
+ break;
+ case KYBER_SYNC_WRITE:
+ seq_puts(m, "SYNC_WRITE\n");
+ break;
+ case KYBER_OTHER:
+ seq_puts(m, "OTHER\n");
+ break;
+ default:
+ seq_printf(m, "%u\n", khd->cur_domain);
+ break;
+ }
+ return 0;
+}
+
+static int kyber_batching_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+ struct kyber_hctx_data *khd = hctx->sched_data;
+
+ seq_printf(m, "%u\n", khd->batching);
+ return 0;
+}
+
+#define KYBER_QUEUE_DOMAIN_ATTRS(name) \
+ {#name "_tokens", 0400, kyber_##name##_tokens_show}
+static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
+ KYBER_QUEUE_DOMAIN_ATTRS(read),
+ KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
+ KYBER_QUEUE_DOMAIN_ATTRS(other),
+ {"async_depth", 0400, kyber_async_depth_show},
+ {},
+};
+#undef KYBER_QUEUE_DOMAIN_ATTRS
+
+#define KYBER_HCTX_DOMAIN_ATTRS(name) \
+ {#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \
+ {#name "_waiting", 0400, kyber_##name##_waiting_show}
+static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
+ KYBER_HCTX_DOMAIN_ATTRS(read),
+ KYBER_HCTX_DOMAIN_ATTRS(sync_write),
+ KYBER_HCTX_DOMAIN_ATTRS(other),
+ {"cur_domain", 0400, kyber_cur_domain_show},
+ {"batching", 0400, kyber_batching_show},
+ {},
+};
+#undef KYBER_HCTX_DOMAIN_ATTRS
+#endif
+
static struct elevator_type kyber_sched = {
.ops.mq = {
.init_sched = kyber_init_sched,
@@ -696,6 +822,10 @@ static struct elevator_type kyber_sched = {
.has_work = kyber_has_work,
},
.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .queue_debugfs_attrs = kyber_queue_debugfs_attrs,
+ .hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
+#endif
.elevator_attrs = kyber_sched_attrs,
.elevator_name = "kyber",
.elevator_owner = THIS_MODULE,
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 23612163..1b964a3 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -19,6 +19,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
@@ -517,6 +518,125 @@ static struct elv_fs_entry deadline_attrs[] = {
__ATTR_NULL
};
+#ifdef CONFIG_BLK_DEBUG_FS
+#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \
+static void *deadline_##name##_fifo_start(struct seq_file *m, \
+ loff_t *pos) \
+ __acquires(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ spin_lock(&dd->lock); \
+ return seq_list_start(&dd->fifo_list[ddir], *pos); \
+} \
+ \
+static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
+ loff_t *pos) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ return seq_list_next(v, &dd->fifo_list[ddir], pos); \
+} \
+ \
+static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
+ __releases(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ spin_unlock(&dd->lock); \
+} \
+ \
+static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
+ .start = deadline_##name##_fifo_start, \
+ .next = deadline_##name##_fifo_next, \
+ .stop = deadline_##name##_fifo_stop, \
+ .show = blk_mq_debugfs_rq_show, \
+}; \
+ \
+static int deadline_##name##_next_rq_show(void *data, \
+ struct seq_file *m) \
+{ \
+ struct request_queue *q = data; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ struct request *rq = dd->next_rq[ddir]; \
+ \
+ if (rq) \
+ __blk_mq_debugfs_rq_show(m, rq); \
+ return 0; \
+}
+DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
+DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
+#undef DEADLINE_DEBUGFS_DDIR_ATTRS
+
+static int deadline_batching_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", dd->batching);
+ return 0;
+}
+
+static int deadline_starved_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", dd->starved);
+ return 0;
+}
+
+static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
+ __acquires(&dd->lock)
+{
+ struct request_queue *q = m->private;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ spin_lock(&dd->lock);
+ return seq_list_start(&dd->dispatch, *pos);
+}
+
+static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct request_queue *q = m->private;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ return seq_list_next(v, &dd->dispatch, pos);
+}
+
+static void deadline_dispatch_stop(struct seq_file *m, void *v)
+ __releases(&dd->lock)
+{
+ struct request_queue *q = m->private;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ spin_unlock(&dd->lock);
+}
+
+static const struct seq_operations deadline_dispatch_seq_ops = {
+ .start = deadline_dispatch_start,
+ .next = deadline_dispatch_next,
+ .stop = deadline_dispatch_stop,
+ .show = blk_mq_debugfs_rq_show,
+};
+
+#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
+ {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
+ {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
+static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
+ DEADLINE_QUEUE_DDIR_ATTRS(read),
+ DEADLINE_QUEUE_DDIR_ATTRS(write),
+ {"batching", 0400, deadline_batching_show},
+ {"starved", 0400, deadline_starved_show},
+ {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
+ {},
+};
+#undef DEADLINE_QUEUE_DDIR_ATTRS
+#endif
+
static struct elevator_type mq_deadline = {
.ops.mq = {
.insert_requests = dd_insert_requests,
@@ -533,6 +653,9 @@ static struct elevator_type mq_deadline = {
},
.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
+#endif
.elevator_attrs = deadline_attrs,
.elevator_name = "mq-deadline",
.elevator_owner = THIS_MODULE,