Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/pci.c      22
-rw-r--r--   drivers/nvme/host/rdma.c     42
-rw-r--r--   drivers/nvme/target/rdma.c    5
3 files changed, 39 insertions, 30 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 11874af..198245f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -109,6 +109,7 @@ struct nvme_dev {
/* host memory buffer support: */
u64 host_mem_size;
u32 nr_host_mem_descs;
+ dma_addr_t host_mem_descs_dma;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
};
@@ -1568,16 +1569,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
- size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+ u64 dma_addr = dev->host_mem_descs_dma;
struct nvme_command c;
- u64 dma_addr;
int ret;
- dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, dma_addr))
- return -ENOMEM;
-
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1594,7 +1589,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
"failed to set host mem (err %d, flags %#x).\n",
ret, bits);
}
- dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
return ret;
}
@@ -1612,7 +1606,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
kfree(dev->host_mem_desc_bufs);
dev->host_mem_desc_bufs = NULL;
- kfree(dev->host_mem_descs);
+ dma_free_coherent(dev->dev,
+ dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+ dev->host_mem_descs, dev->host_mem_descs_dma);
dev->host_mem_descs = NULL;
}
@@ -1620,6 +1616,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
struct nvme_host_mem_buf_desc *descs;
u32 chunk_size, max_entries, len;
+ dma_addr_t descs_dma;
int i = 0;
void **bufs;
u64 size = 0, tmp;
@@ -1630,7 +1627,8 @@ retry:
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
max_entries = tmp;
- descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+ descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
if (!descs)
goto out;
@@ -1664,6 +1662,7 @@ retry:
dev->nr_host_mem_descs = i;
dev->host_mem_size = size;
dev->host_mem_descs = descs;
+ dev->host_mem_descs_dma = descs_dma;
dev->host_mem_desc_bufs = bufs;
return 0;
@@ -1677,7 +1676,8 @@ out_free_bufs:
kfree(bufs);
out_free_descs:
- kfree(descs);
+ dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+ descs_dma);
out:
/* try a smaller chunk size if we failed early */
if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
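The pci.c hunks above switch the host memory buffer descriptor list from a per-call dma_map_single()/dma_unmap_single() pair to a single coherent allocation whose bus address is stored in dev->host_mem_descs_dma and handed to the controller in Set Features. Below is a minimal sketch of that allocate/free pairing; the hmb_state struct and helper names are hypothetical stand-ins for the relevant nvme_dev fields, and the patch itself uses dma_zalloc_coherent(), the zeroing variant of this era, rather than plain dma_alloc_coherent().

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/nvme.h>

struct hmb_state {
	struct nvme_host_mem_buf_desc *descs;	/* descriptor list */
	dma_addr_t descs_dma;			/* bus address given to the controller */
	u32 nr_descs;
};

static int hmb_alloc_descs(struct device *dev, struct hmb_state *st,
		u32 max_entries)
{
	/*
	 * Coherent memory: the controller may read the descriptor list at
	 * any time while the host memory buffer is enabled, so no
	 * per-command map/unmap is needed.
	 */
	st->descs = dma_alloc_coherent(dev, max_entries * sizeof(*st->descs),
				       &st->descs_dma, GFP_KERNEL);
	if (!st->descs)
		return -ENOMEM;
	st->nr_descs = max_entries;
	return 0;
}

static void hmb_free_descs(struct device *dev, struct hmb_state *st)
{
	/* Free with the same size and handle used at allocation time. */
	dma_free_coherent(dev, st->nr_descs * sizeof(*st->descs),
			  st->descs, st->descs_dma);
	st->descs = NULL;
}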
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 6a76826..5898300 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
+#include <linux/blk-mq-rdma.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -464,14 +465,10 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
ibdev = queue->device->dev;
/*
- * The admin queue is barely used once the controller is live, so don't
- * bother to spread it out.
+ * Spread I/O queue completion vectors according to their queue index.
+ * Admin queues can always go on completion vector 0.
*/
- if (idx == 0)
- comp_vector = 0;
- else
- comp_vector = idx % ibdev->num_comp_vectors;
-
+ comp_vector = idx == 0 ? idx : idx - 1;
/* +1 for ib_stop_cq */
queue->ib_cq = ib_alloc_cq(ibdev, queue,
@@ -632,10 +629,20 @@ out_stop_queues:
static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ struct ib_device *ibdev = ctrl->device->dev;
unsigned int nr_io_queues;
int i, ret;
nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+
+ /*
+ * we map queues according to the device irq vectors for
+ * optimal locality so we don't need more queues than
+ * completion vectors.
+ */
+ nr_io_queues = min_t(unsigned int, nr_io_queues,
+ ibdev->num_comp_vectors);
+
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret)
return ret;
@@ -793,7 +800,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_cleanup_queue;
ctrl->ctrl.max_hw_sectors =
- (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+ (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
error = nvme_init_identify(&ctrl->ctrl);
if (error)
@@ -1111,7 +1118,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
int nr;
- nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+ /*
+ * Align the MR to a 4K page size to match the ctrl page size and
+ * the block virtual boundary.
+ */
+ nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
if (unlikely(nr < count)) {
if (nr < 0)
return nr;
@@ -1689,6 +1700,13 @@ static void nvme_rdma_complete_rq(struct request *rq)
nvme_complete_rq(rq);
}
+static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_rdma_ctrl *ctrl = set->driver_data;
+
+ return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0);
+}
+
static const struct blk_mq_ops nvme_rdma_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
@@ -1697,6 +1715,7 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
.init_hctx = nvme_rdma_init_hctx,
.poll = nvme_rdma_poll,
.timeout = nvme_rdma_timeout,
+ .map_queues = nvme_rdma_map_queues,
};
static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
@@ -1959,10 +1978,6 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
.create_ctrl = nvme_rdma_create_ctrl,
};
-static void nvme_rdma_add_one(struct ib_device *ib_device)
-{
-}
-
static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
struct nvme_rdma_ctrl *ctrl;
@@ -1984,7 +1999,6 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
static struct ib_client nvme_rdma_ib_client = {
.name = "nvme_rdma",
- .add = nvme_rdma_add_one,
.remove = nvme_rdma_remove_one
};
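The host/rdma.c hunks cap nr_io_queues at the device's num_comp_vectors and register a .map_queues callback via blk_mq_rdma_map_queues(), so blk-mq hardware queue N ends up on the CPUs whose IRQ affinity matches completion vector N; that is also why an I/O queue with nvme queue index idx now takes comp_vector = idx - 1 (index 0 is the admin queue, which stays on vector 0). The following is a rough sketch of what such an affinity-based mapping does, modeled on blk_mq_rdma_map_queues() but not the block layer's exact code; the function name is hypothetical, and it assumes the blk_mq_tag_set of this kernel generation with a flat mq_map[] cpu-to-queue array.

#include <linux/blk-mq.h>
#include <linux/cpumask.h>
#include <rdma/ib_verbs.h>

/*
 * Map each hardware queue to the CPUs sharing its completion vector's
 * IRQ affinity; fall back to the default spread if the device does not
 * expose per-vector affinity masks.
 */
static int sketch_rdma_map_queues(struct blk_mq_tag_set *set,
		struct ib_device *dev, int first_vec)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < set->nr_hw_queues; queue++) {
		mask = ib_get_vector_affinity(dev, first_vec + queue);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			set->mq_map[cpu] = queue;
	}

	return 0;

fallback:
	return blk_mq_map_queues(set);
}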
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 56a4cba..76d2bb7 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1510,10 +1510,6 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = {
.delete_ctrl = nvmet_rdma_delete_ctrl,
};
-static void nvmet_rdma_add_one(struct ib_device *ib_device)
-{
-}
-
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
struct nvmet_rdma_queue *queue;
@@ -1534,7 +1530,6 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
static struct ib_client nvmet_rdma_ib_client = {
.name = "nvmet_rdma",
- .add = nvmet_rdma_add_one,
.remove = nvmet_rdma_remove_one
};