author    | Sagi Grimberg <sagig@mellanox.com>       | 2014-12-02 16:57:41 +0200
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2014-12-12 23:32:30 -0800
commit    | 6f0fae3d7797172f5c30ada80e815122fdf55609 (patch)
tree      | 6fb3625df1c2a678bcfd300b442d0b1d05e9ec39 /drivers/infiniband
parent    | 4a295bae7e72d870354d337fe42360b0e4441e57 (diff)
iser-target: Use single CQ for TX and RX
Using separate TX and RX CQs attached to the same completion vector can
create a throttling effect, since their completions end up being
processed serially by a work-queue. Use a single CQ instead: it behaves
better in interrupt processing, yields simpler code, and lets us get
rid of the now-redundant isert_rx_wq.

Next, we can remove the atomic post_send_buf_count from the I/O path.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
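
For orientation before reading the diff: the completion path this patch
converges on can be condensed as below. This is an editorial sketch
distilled from the diff that follows (error handling and flush-error
classification omitted), not additional driver code:

/*
 * Condensed from the patch below: the interrupt callback only queues
 * work; a single work item then drains the one CQ, dispatches each
 * completion, and finally re-arms the CQ for the next interrupt.
 */
static void
isert_cq_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->work);
}

static void
isert_cq_work(struct work_struct *work)
{
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc wc;

	while (ib_poll_cq(comp->cq, 1, &wc) == 1)
		isert_handle_wc(&wc);	/* RX vs TX decided per completion */

	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}

Keeping the interrupt callback down to a single queue_work() serializes
all completion processing for a vector in one place, which is exactly
what makes the second work-queue redundant.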
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/ulp/isert/ib_isert.c | 193
-rw-r--r-- | drivers/infiniband/ulp/isert/ib_isert.h |  14
2 files changed, 83 insertions(+), 124 deletions(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d4a2083..0dc6287 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -35,10 +35,10 @@
 #define ISERT_MAX_CONN		8
 #define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
 #define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
+#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN)
 
 static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
 static struct workqueue_struct *isert_release_wq;
 
@@ -124,8 +124,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
 	attr.event_handler = isert_qp_event_callback;
 	attr.qp_context = isert_conn;
-	attr.send_cq = comp->tx_cq;
-	attr.recv_cq = comp->rx_cq;
+	attr.send_cq = comp->cq;
+	attr.recv_cq = comp->cq;
 	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
 	/*
@@ -237,10 +237,8 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
 
 static int
 isert_create_device_ib_res(struct isert_device *device)
@@ -248,15 +246,14 @@ isert_create_device_ib_res(struct isert_device *device)
 	struct ib_device *ib_dev = device->ib_device;
 	struct ib_device_attr *dev_attr;
 	int ret = 0, i;
-	int max_rx_cqe, max_tx_cqe;
+	int max_cqe;
 
 	dev_attr = &device->dev_attr;
 	ret = isert_query_device(ib_dev, dev_attr);
 	if (ret)
 		return ret;
 
-	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
-	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -293,35 +290,19 @@ isert_create_device_ib_res(struct isert_device *device)
 		struct isert_comp *comp = &device->comps[i];
 
 		comp->device = device;
-		INIT_WORK(&comp->rx_work, isert_cq_rx_work);
-		comp->rx_cq = ib_create_cq(device->ib_device,
-					   isert_cq_rx_callback,
-					   isert_cq_event_callback,
-					   (void *)comp,
-					   max_rx_cqe, i);
-		if (IS_ERR(comp->rx_cq)) {
-			ret = PTR_ERR(comp->rx_cq);
-			comp->rx_cq = NULL;
+		INIT_WORK(&comp->work, isert_cq_work);
+		comp->cq = ib_create_cq(device->ib_device,
+					isert_cq_callback,
+					isert_cq_event_callback,
+					(void *)comp,
+					max_cqe, i);
+		if (IS_ERR(comp->cq)) {
+			ret = PTR_ERR(comp->cq);
+			comp->cq = NULL;
 			goto out_cq;
 		}
 
-		INIT_WORK(&comp->tx_work, isert_cq_tx_work);
-		comp->tx_cq = ib_create_cq(device->ib_device,
-					   isert_cq_tx_callback,
-					   isert_cq_event_callback,
-					   (void *)comp,
-					   max_tx_cqe, i);
-		if (IS_ERR(comp->tx_cq)) {
-			ret = PTR_ERR(comp->tx_cq);
-			comp->tx_cq = NULL;
-			goto out_cq;
-		}
-
-		ret = ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP);
-		if (ret)
-			goto out_cq;
-
-		ret = ib_req_notify_cq(comp->tx_cq, IB_CQ_NEXT_COMP);
+		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
 		if (ret)
 			goto out_cq;
 	}
@@ -332,13 +313,9 @@ out_cq:
 	for (i = 0; i < device->comps_used; i++) {
 		struct isert_comp *comp = &device->comps[i];
 
-		if (comp->rx_cq) {
-			cancel_work_sync(&comp->rx_work);
-			ib_destroy_cq(comp->rx_cq);
-		}
-		if (comp->tx_cq) {
-			cancel_work_sync(&comp->tx_work);
-			ib_destroy_cq(comp->tx_cq);
+		if (comp->cq) {
+			cancel_work_sync(&comp->work);
+			ib_destroy_cq(comp->cq);
 		}
 	}
 	kfree(device->comps);
@@ -356,12 +333,9 @@ isert_free_device_ib_res(struct isert_device *device)
 	for (i = 0; i < device->comps_used; i++) {
 		struct isert_comp *comp = &device->comps[i];
 
-		cancel_work_sync(&comp->rx_work);
-		cancel_work_sync(&comp->tx_work);
-		ib_destroy_cq(comp->rx_cq);
-		ib_destroy_cq(comp->tx_cq);
-		comp->rx_cq = NULL;
-		comp->tx_cq = NULL;
+		cancel_work_sync(&comp->work);
+		ib_destroy_cq(comp->cq);
+		comp->cq = NULL;
 	}
 	kfree(device->comps);
 }
@@ -2013,14 +1987,39 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
 	}
 }
 
+/**
+ * is_isert_tx_desc() - Indicate if the completion wr_id
+ *     is a TX descriptor or not.
+ * @isert_conn: iser connection
+ * @wr_id: completion WR identifier
+ *
+ * Since we cannot rely on wc opcode in FLUSH errors
+ * we must work around it by checking if the wr_id address
+ * falls in the iser connection rx_descs buffer. If so
+ * it is an RX descriptor, otherwize it is a TX.
+ */
+static inline bool
+is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
+{
+	void *start = isert_conn->conn_rx_descs;
+	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
+
+	if (wr_id >= start && wr_id < start + len)
+		return false;
+
+	return true;
+}
+
 static void
-isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx)
+isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
 {
-	if (tx) {
+	if (is_isert_tx_desc(isert_conn, (void *)wc->wr_id)) {
 		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 		struct isert_cmd *isert_cmd;
+		struct iser_tx_desc *desc;
 
-		isert_cmd = ((struct iser_tx_desc *)desc)->isert_cmd;
+		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+		isert_cmd = desc->isert_cmd;
 		if (!isert_cmd)
 			isert_unmap_tx_desc(desc, ib_dev);
 		else
@@ -2049,80 +2048,52 @@
 }
 
 static void
-isert_cq_tx_work(struct work_struct *work)
+isert_handle_wc(struct ib_wc *wc)
 {
-	struct isert_comp *comp = container_of(work, struct isert_comp,
-					       tx_work);
-	struct ib_cq *cq = comp->tx_cq;
 	struct isert_conn *isert_conn;
 	struct iser_tx_desc *tx_desc;
-	struct ib_wc wc;
-
-	while (ib_poll_cq(cq, 1, &wc) == 1) {
-		isert_conn = wc.qp->qp_context;
-		tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id;
+	struct iser_rx_desc *rx_desc;
 
-		if (wc.status == IB_WC_SUCCESS) {
-			isert_send_completion(tx_desc, isert_conn);
+	isert_conn = wc->qp->qp_context;
+	if (likely(wc->status == IB_WC_SUCCESS)) {
+		if (wc->opcode == IB_WC_RECV) {
+			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
+			isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
 		} else {
-			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-			pr_debug("TX wc.status: 0x%08x\n", wc.status);
-			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-
-			if (wc.wr_id != ISER_FASTREG_LI_WRID)
-				isert_cq_comp_err(tx_desc, isert_conn, true);
+			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+			isert_send_completion(tx_desc, isert_conn);
 		}
-	}
-
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_tx_callback(struct ib_cq *cq, void *context)
-{
-	struct isert_comp *comp = context;
+	} else {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			pr_err("wr id %llx status %d vend_err %x\n",
+			       wc->wr_id, wc->status, wc->vendor_err);
+		else
+			pr_debug("flush error: wr id %llx\n", wc->wr_id);
 
-	queue_work(isert_comp_wq, &comp->tx_work);
+		if (wc->wr_id != ISER_FASTREG_LI_WRID)
+			isert_cq_comp_err(isert_conn, wc);
+	}
 }
 
 static void
-isert_cq_rx_work(struct work_struct *work)
+isert_cq_work(struct work_struct *work)
 {
 	struct isert_comp *comp = container_of(work, struct isert_comp,
-					       rx_work);
-	struct ib_cq *cq = comp->rx_cq;
-	struct isert_conn *isert_conn;
-	struct iser_rx_desc *rx_desc;
+					       work);
 	struct ib_wc wc;
-	u32 xfer_len;
-
-	while (ib_poll_cq(cq, 1, &wc) == 1) {
-		isert_conn = wc.qp->qp_context;
-		rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id;
 
-		if (wc.status == IB_WC_SUCCESS) {
-			xfer_len = wc.byte_len;
-			isert_rx_completion(rx_desc, isert_conn, xfer_len);
-		} else {
-			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-			if (wc.status != IB_WC_WR_FLUSH_ERR) {
-				pr_debug("RX wc.status: 0x%08x\n", wc.status);
-				pr_debug("RX wc.vendor_err: 0x%08x\n",
-					 wc.vendor_err);
-			}
-			isert_cq_comp_err(rx_desc, isert_conn, false);
-		}
-	}
+	while (ib_poll_cq(comp->cq, 1, &wc) == 1)
+		isert_handle_wc(&wc);
 
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
 }
 
 static void
-isert_cq_rx_callback(struct ib_cq *cq, void *context)
+isert_cq_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_comp *comp = context;
 
-	queue_work(isert_rx_wq, &comp->rx_work);
+	queue_work(isert_comp_wq, &comp->work);
 }
 
 static int
@@ -3363,17 +3334,11 @@ static int __init isert_init(void)
 {
 	int ret;
 
-	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
-	if (!isert_rx_wq) {
-		pr_err("Unable to allocate isert_rx_wq\n");
-		return -ENOMEM;
-	}
-
 	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
 	if (!isert_comp_wq) {
 		pr_err("Unable to allocate isert_comp_wq\n");
 		ret = -ENOMEM;
-		goto destroy_rx_wq;
+		return -ENOMEM;
 	}
 
 	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
@@ -3391,8 +3356,7 @@ static int __init isert_init(void)
 
 destroy_comp_wq:
 	destroy_workqueue(isert_comp_wq);
-destroy_rx_wq:
-	destroy_workqueue(isert_rx_wq);
+
 	return ret;
 }
 
@@ -3401,7 +3365,6 @@ static void __exit isert_exit(void)
 {
 	flush_scheduled_work();
 	destroy_workqueue(isert_release_wq);
 	destroy_workqueue(isert_comp_wq);
-	destroy_workqueue(isert_rx_wq);
 	iscsit_unregister_transport(&iser_target_transport);
 	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
 }
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 3f93cb0..5c1a31e 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -163,20 +163,16 @@ struct isert_conn {
  * struct isert_comp - iSER completion context
  *
  * @device:     pointer to device handle
- * @rx_cq:      RX completion queue
- * @tx_cq:      TX completion queue
+ * @cq:         completion queue
 * @active_qps: Number of active QPs attached
 *              to completion context
- * @rx_work:    RX work handle
- * @tx_work:    TX work handle
+ * @work:       completion work handle
 */
 struct isert_comp {
-	struct isert_device *device;
-	struct ib_cq		*rx_cq;
-	struct ib_cq		*tx_cq;
+	struct isert_device	*device;
+	struct ib_cq		*cq;
 	int			active_qps;
-	struct work_struct	rx_work;
-	struct work_struct	tx_work;
+	struct work_struct	 work;
 };
 
 struct isert_device {
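
The subtlest piece of the patch is is_isert_tx_desc(): once TX and RX
share one CQ, a flushed completion's wc->opcode is undefined, so the
driver classifies descriptors by where wr_id points instead. The
standalone sketch below reproduces that pointer-range test in plain
userspace C so it can be compiled and run; the descriptor size and
queue depth are illustrative stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's types (illustrative sizes only). */
struct iser_rx_desc { char data[512]; };
#define ISERT_QP_MAX_RECV_DTOS 128

/* RX descriptors live in one contiguous array, as in the driver. */
static struct iser_rx_desc rx_descs[ISERT_QP_MAX_RECV_DTOS];

/* Same test as is_isert_tx_desc(): a wr_id pointing into rx_descs is
 * an RX descriptor; any other address must be a TX descriptor. */
static bool is_tx_desc(uintptr_t wr_id)
{
	uintptr_t start = (uintptr_t)rx_descs;
	uintptr_t end = start + sizeof(rx_descs);

	return !(wr_id >= start && wr_id < end);
}

int main(void)
{
	struct iser_rx_desc *rx = &rx_descs[5];
	int other;	/* stands in for a TX descriptor's address */

	printf("rx_descs[5] -> TX? %d\n", is_tx_desc((uintptr_t)rx));     /* 0 */
	printf("other addr  -> TX? %d\n", is_tx_desc((uintptr_t)&other)); /* 1 */
	return 0;
}

The test is only sound because conn_rx_descs is a single contiguous
allocation of ISERT_QP_MAX_RECV_DTOS descriptors, which is how the
driver allocates it.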