author	Jay Freyensee <james_p_freyensee@linux.intel.com>	2016-08-17 15:00:27 -0700
committer	Sagi Grimberg <sagi@grimberg.me>	2016-08-18 09:58:06 +0300
commit	c5af8654c422cfdd8480be3a244748e18cace6c5 (patch)
tree	d75ba0cd1e6a02851b2592758e90986a5fc09f94 /drivers/nvme
parent	f994d9dc28bc27353acde2caaf718222d92a3e24 (diff)
nvme-rdma: fix sqsize/hsqsize per spec
Per the NVMe-over-Fabrics 1.0 spec, sqsize is represented as a 0-based value. Also per spec, the RDMA binding values shall be set to sqsize, which makes hsqsize a 0-based value as well. Thus, the sqsize during NVMf connect() is now:

[root@fedora23-fabrics-host1 for-48]# dmesg
[ 318.720645] nvme_fabrics: nvmf_connect_admin_queue(): sqsize for admin queue: 31
[ 318.720884] nvme nvme0: creating 16 I/O queues.
[ 318.810114] nvme_fabrics: nvmf_connect_io_queue(): sqsize for i/o queue: 127

Finally, the current interpretation implies hrqsize is 1's-based, so set it appropriately.

Reported-by: Daniel Verkamp <daniel.verkamp@intel.com>
Signed-off-by: Jay Freyensee <james_p_freyensee@linux.intel.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
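For illustration only, a minimal userspace sketch of the size accounting this patch establishes. The harness itself is hypothetical; queue_size, sqsize, hrqsize, and hsqsize mirror the driver fields, and the printed values match the dmesg output above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* opts->queue_size: the 1's-based depth requested at connect time */
	uint16_t queue_size = 128;
	/* ctrl->ctrl.sqsize: 0's-based per the fabrics spec */
	uint16_t sqsize = queue_size - 1;
	/* RDMA CM private data: hsqsize is 0's-based, hrqsize stays 1's-based */
	uint16_t hsqsize = sqsize;
	uint16_t hrqsize = queue_size;

	printf("sqsize for i/o queue: %u\n", sqsize);         /* 127, as in dmesg */
	printf("hsqsize=%u hrqsize=%u\n", hsqsize, hrqsize);  /* 127 and 128 */
	return 0;
}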
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/rdma.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d44809e..c133256 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -645,7 +645,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+ ret = nvme_rdma_init_queue(ctrl, i,
+ ctrl->ctrl.opts->queue_size);
if (ret) {
dev_info(ctrl->ctrl.device,
"failed to initialize i/o queue: %d\n", ret);
@@ -1286,8 +1287,13 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
} else {
+	/*
+	 * The current interpretation of the fabrics spec is
+	 * that hrqsize should be at minimum sqsize+1, i.e. a
+	 * 1's-based representation of sqsize.
+	 */
priv.hrqsize = cpu_to_le16(queue->queue_size);
- priv.hsqsize = cpu_to_le16(queue->queue_size);
+ priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
}
ret = rdma_connect(queue->cm_id, &param);
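To make the branch above concrete, a hedged sketch of the values the two cases place in the RDMA CM private data. show_private_data() is a hypothetical helper (the real code fills the connect request and calls rdma_connect()), assuming NVMF_AQ_DEPTH is 32 as in this tree:

#include <stdio.h>
#include <stdint.h>

#define NVMF_AQ_DEPTH 32	/* admin queue depth, assumed 32 as in this tree */

/* Hypothetical helper: prints what the hunk above encodes per queue type. */
static void show_private_data(int is_admin_queue, uint16_t queue_size)
{
	uint16_t hrqsize, hsqsize;

	if (is_admin_queue) {
		hrqsize = NVMF_AQ_DEPTH;	/* 1's-based */
		hsqsize = NVMF_AQ_DEPTH - 1;	/* 0's-based */
	} else {
		hrqsize = queue_size;		/* 1's-based, i.e. sqsize + 1 */
		hsqsize = queue_size - 1;	/* 0's-based sqsize */
	}
	printf("hrqsize=%u hsqsize=%u\n", hrqsize, hsqsize);
}

int main(void)
{
	show_private_data(1, 0);	/* admin queue: 32 / 31 */
	show_private_data(0, 128);	/* i/o queue:  128 / 127 */
	return 0;
}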
@@ -1825,7 +1831,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
ctrl->tag_set.ops = &nvme_rdma_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
ctrl->tag_set.numa_node = NUMA_NO_NODE;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1923,7 +1929,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
spin_lock_init(&ctrl->lock);
ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
- ctrl->ctrl.sqsize = opts->queue_size;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ret = -ENOMEM;