Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c      | 356
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h      |  44
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  | 209
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c     |  77
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c      |  28
5 files changed, 380 insertions, 334 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 356fac6..5a1cf25 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -71,6 +71,10 @@
#include "iscsi_iser.h"
+static struct scsi_host_template iscsi_iser_sht;
+static struct iscsi_transport iscsi_iser_transport;
+static struct scsi_transport_template *iscsi_iser_scsi_transport;
+
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
@@ -91,7 +95,6 @@ iscsi_iser_recv(struct iscsi_conn *conn,
struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
{
int rc = 0;
- uint32_t ret_itt;
int datalen;
int ahslen;
@@ -107,12 +110,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
/* read AHS */
ahslen = hdr->hlength * 4;
- /* verify itt (itt encoding: age+cid+itt) */
- rc = iscsi_verify_itt(conn, hdr, &ret_itt);
-
- if (!rc)
- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
-
+ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
goto error;
@@ -123,25 +121,33 @@ error:
/**
- * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_iser_task_init - Initialize task
+ * @task: iscsi task
*
- **/
+ * Initialize the task for the scsi command or mgmt command.
+ */
static int
-iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+iscsi_iser_task_init(struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
- struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+
+ /* mgmt task */
+ if (!task->sc) {
+ iser_task->desc.data = task->data;
+ return 0;
+ }
- iser_ctask->command_sent = 0;
- iser_ctask->iser_conn = iser_conn;
- iser_ctask_rdma_init(iser_ctask);
+ iser_task->command_sent = 0;
+ iser_task->iser_conn = iser_conn;
+ iser_task_rdma_init(iser_task);
return 0;
}
/**
- * iscsi_mtask_xmit - xmit management(immediate) task
+ * iscsi_iser_mtask_xmit - xmit management(immediate) task
* @conn: iscsi connection
- * @mtask: task management task
+ * @task: task management task
*
* Notes:
* The function can return -EAGAIN in which case caller must
@@ -150,20 +156,19 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
*
**/
static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
- struct iscsi_mgmt_task *mtask)
+iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
int error = 0;
- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+ debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
- error = iser_send_control(conn, mtask);
+ error = iser_send_control(conn, task);
- /* since iser xmits control with zero copy, mtasks can not be recycled
+ /* since iser xmits control with zero copy, tasks can not be recycled
* right after sending them.
* The recycling scheme is based on whether a response is expected
- * - if yes, the mtask is recycled at iscsi_complete_pdu
- * - if no, the mtask is recycled at iser_snd_completion
+ * - if yes, the task is recycled at iscsi_complete_pdu
+ * - if no, the task is recycled at iser_snd_completion
*/
if (error && error != -ENOBUFS)
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -172,97 +177,86 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
}
static int
-iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
struct iscsi_data hdr;
int error = 0;
/* Send data-out PDUs while there's still unsolicited data to send */
- while (ctask->unsol_count > 0) {
- iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ while (task->unsol_count > 0) {
+ iscsi_prep_unsolicit_data_pdu(task, &hdr);
debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
- hdr.itt, ctask->data_count);
+ hdr.itt, task->data_count);
/* the buffer description has been passed with the command */
/* Send the command */
- error = iser_send_data_out(conn, ctask, &hdr);
+ error = iser_send_data_out(conn, task, &hdr);
if (error) {
- ctask->unsol_datasn--;
- goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ task->unsol_datasn--;
+ goto iscsi_iser_task_xmit_unsol_data_exit;
}
- ctask->unsol_count -= ctask->data_count;
+ task->unsol_count -= task->data_count;
debug_scsi("Need to send %d more as data-out PDUs\n",
- ctask->unsol_count);
+ task->unsol_count);
}
-iscsi_iser_ctask_xmit_unsol_data_exit:
+iscsi_iser_task_xmit_unsol_data_exit:
return error;
}
static int
-iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask)
+iscsi_iser_task_xmit(struct iscsi_task *task)
{
- struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_iser_task *iser_task = task->dd_data;
int error = 0;
- if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
- BUG_ON(scsi_bufflen(ctask->sc) == 0);
+ if (!task->sc)
+ return iscsi_iser_mtask_xmit(conn, task);
+
+ if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+ BUG_ON(scsi_bufflen(task->sc) == 0);
debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
- ctask->itt, scsi_bufflen(ctask->sc),
- ctask->imm_count, ctask->unsol_count);
+ task->itt, scsi_bufflen(task->sc),
+ task->imm_count, task->unsol_count);
}
- debug_scsi("ctask deq [cid %d itt 0x%x]\n",
- conn->id, ctask->itt);
+ debug_scsi("task deq [cid %d itt 0x%x]\n",
+ conn->id, task->itt);
/* Send the cmd PDU */
- if (!iser_ctask->command_sent) {
- error = iser_send_command(conn, ctask);
+ if (!iser_task->command_sent) {
+ error = iser_send_command(conn, task);
if (error)
- goto iscsi_iser_ctask_xmit_exit;
- iser_ctask->command_sent = 1;
+ goto iscsi_iser_task_xmit_exit;
+ iser_task->command_sent = 1;
}
/* Send unsolicited data-out PDU(s) if necessary */
- if (ctask->unsol_count)
- error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+ if (task->unsol_count)
+ error = iscsi_iser_task_xmit_unsol_data(conn, task);
- iscsi_iser_ctask_xmit_exit:
+ iscsi_iser_task_xmit_exit:
if (error && error != -ENOBUFS)
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return error;
}
static void
-iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
{
- struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
- if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
- iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
- iser_ctask_rdma_finalize(iser_ctask);
- }
-}
-
-static struct iser_conn *
-iscsi_iser_ib_conn_lookup(__u64 ep_handle)
-{
- struct iser_conn *ib_conn;
- struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+ /* mgmt tasks do not need special cleanup */
+ if (!task->sc)
+ return;
- mutex_lock(&ig.connlist_mutex);
- list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
- if (ib_conn == uib_conn) {
- mutex_unlock(&ig.connlist_mutex);
- return ib_conn;
- }
+ if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+ iser_task->status = ISER_TASK_STATUS_COMPLETED;
+ iser_task_rdma_finalize(iser_task);
}
- mutex_unlock(&ig.connlist_mutex);
- iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
- return NULL;
}
static struct iscsi_cls_conn *
@@ -272,7 +266,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
struct iscsi_cls_conn *cls_conn;
struct iscsi_iser_conn *iser_conn;
- cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
@@ -283,21 +277,11 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
*/
conn->max_recv_dlength = 128;
- iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
- if (!iser_conn)
- goto conn_alloc_fail;
-
- /* currently this is the only field which need to be initiated */
- rwlock_init(&iser_conn->lock);
-
+ iser_conn = conn->dd_data;
conn->dd_data = iser_conn;
iser_conn->iscsi_conn = conn;
return cls_conn;
-
-conn_alloc_fail:
- iscsi_conn_teardown(cls_conn);
- return NULL;
}
static void
@@ -305,11 +289,18 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = iser_conn->ib_conn;
iscsi_conn_teardown(cls_conn);
- if (iser_conn->ib_conn)
- iser_conn->ib_conn->iser_conn = NULL;
- kfree(iser_conn);
+ /*
+ * Userspace will normally call the stop callback and
+ * already have freed the ib_conn, but if it goofed up then
+ * we free it here.
+ */
+ if (ib_conn) {
+ ib_conn->iser_conn = NULL;
+ iser_conn_put(ib_conn);
+ }
}
static int
@@ -320,6 +311,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_iser_conn *iser_conn;
struct iser_conn *ib_conn;
+ struct iscsi_endpoint *ep;
int error;
error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
@@ -328,12 +320,14 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
/* the transport ep handle comes from user space so it must be
* verified against the global ib connections list */
- ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
- if (!ib_conn) {
+ ep = iscsi_lookup_endpoint(transport_eph);
+ if (!ep) {
iser_err("can't bind eph %llx\n",
(unsigned long long)transport_eph);
return -EINVAL;
}
+ ib_conn = ep->dd_data;
+
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
@@ -341,10 +335,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
iser_conn = conn->dd_data;
ib_conn->iser_conn = iser_conn;
iser_conn->ib_conn = ib_conn;
+ iser_conn_get(ib_conn);
+ return 0;
+}
- conn->recv_lock = &iser_conn->lock;
+static void
+iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = iser_conn->ib_conn;
- return 0;
+ /*
+ * Userspace may have goofed up and not bound the connection or
+ * might have only partially setup the connection.
+ */
+ if (ib_conn) {
+ iscsi_conn_stop(cls_conn, flag);
+ /*
+ * There is no unbind event so the stop callback
+ * must release the ref from the bind.
+ */
+ iser_conn_put(ib_conn);
+ }
+ iser_conn->ib_conn = NULL;
}
static int
@@ -360,55 +374,75 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
return iscsi_conn_start(cls_conn);
}
-static struct iscsi_transport iscsi_iser_transport;
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+ iscsi_host_remove(shost);
+ iscsi_host_free(shost);
+}
static struct iscsi_cls_session *
-iscsi_iser_session_create(struct iscsi_transport *iscsit,
- struct scsi_transport_template *scsit,
- uint16_t cmds_max, uint16_t qdepth,
- uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_iser_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn, uint32_t *hostno)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
+ struct Scsi_Host *shost;
int i;
- uint32_t hn;
- struct iscsi_cmd_task *ctask;
- struct iscsi_mgmt_task *mtask;
- struct iscsi_iser_cmd_task *iser_ctask;
- struct iser_desc *desc;
+ struct iscsi_task *task;
+ struct iscsi_iser_task *iser_task;
+ struct iser_conn *ib_conn;
+
+ shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+ if (!shost)
+ return NULL;
+ shost->transportt = iscsi_iser_scsi_transport;
+ shost->max_lun = iscsi_max_lun;
+ shost->max_id = 0;
+ shost->max_channel = 0;
+ shost->max_cmd_len = 16;
+
+ /*
+ * older userspace tools (before 2.0-870) did not pass us
+ * the leading conn's ep so this will be NULL;
+ */
+ if (ep)
+ ib_conn = ep->dd_data;
+
+ if (iscsi_host_add(shost,
+ ep ? ib_conn->device->ib_device->dma_device : NULL))
+ goto free_host;
+ *hostno = shost->host_no;
/*
* we do not support setting can_queue cmd_per_lun from userspace yet
* because we preallocate so many resources
*/
- cls_session = iscsi_session_setup(iscsit, scsit,
+ cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
ISCSI_DEF_XMIT_CMDS_MAX,
- ISCSI_MAX_CMD_PER_LUN,
- sizeof(struct iscsi_iser_cmd_task),
- sizeof(struct iser_desc),
- initial_cmdsn, &hn);
+ sizeof(struct iscsi_iser_task),
+ initial_cmdsn, 0);
if (!cls_session)
- return NULL;
-
- *hostno = hn;
- session = class_to_transport_session(cls_session);
+ goto remove_host;
+ session = cls_session->dd_data;
+ shost->can_queue = session->scsi_cmds_max;
/* libiscsi setup itts, data and pool so just set desc fields */
for (i = 0; i < session->cmds_max; i++) {
- ctask = session->cmds[i];
- iser_ctask = ctask->dd_data;
- ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
- ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
- }
-
- for (i = 0; i < session->mgmtpool_max; i++) {
- mtask = session->mgmt_cmds[i];
- desc = mtask->dd_data;
- mtask->hdr = &desc->iscsi_header;
- desc->data = mtask->data;
+ task = session->cmds[i];
+ iser_task = task->dd_data;
+ task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+ task->hdr_max = sizeof(iser_task->desc.iscsi_header);
}
-
return cls_session;
+
+remove_host:
+ iscsi_host_remove(shost);
+free_host:
+ iscsi_host_free(shost);
+ return NULL;
}
static int
@@ -481,34 +515,37 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->custom[3].value = conn->fmr_unalign_cnt;
}
-static int
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
- __u64 *ep_handle)
+static struct iscsi_endpoint *
+iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
{
int err;
struct iser_conn *ib_conn;
+ struct iscsi_endpoint *ep;
- err = iser_conn_init(&ib_conn);
- if (err)
- goto out;
+ ep = iscsi_create_endpoint(sizeof(*ib_conn));
+ if (!ep)
+ return ERR_PTR(-ENOMEM);
- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
- if (!err)
- *ep_handle = (__u64)(unsigned long)ib_conn;
+ ib_conn = ep->dd_data;
+ ib_conn->ep = ep;
+ iser_conn_init(ib_conn);
-out:
- return err;
+ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+ non_blocking);
+ if (err) {
+ iscsi_destroy_endpoint(ep);
+ return ERR_PTR(err);
+ }
+ return ep;
}
static int
-iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
- struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ struct iser_conn *ib_conn;
int rc;
- if (!ib_conn)
- return -EINVAL;
-
+ ib_conn = ep->dd_data;
rc = wait_event_interruptible_timeout(ib_conn->wait,
ib_conn->state == ISER_CONN_UP,
msecs_to_jiffies(timeout_ms));
@@ -530,13 +567,21 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
}
static void
-iscsi_iser_ep_disconnect(__u64 ep_handle)
+iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
struct iser_conn *ib_conn;
- ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
- if (!ib_conn)
- return;
+ ib_conn = ep->dd_data;
+ if (ib_conn->iser_conn)
+ /*
+ * Must suspend xmit path if the ep is bound to the
+ * iscsi_conn, so we know we are not accessing the ib_conn
+ * when we free it.
+ *
+ * This may not be bound if the ep poll failed.
+ */
+ iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+
iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
iser_conn_terminate(ib_conn);
@@ -547,7 +592,6 @@ static struct scsi_host_template iscsi_iser_sht = {
.name = "iSCSI Initiator over iSER, v." DRV_VER,
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
.max_sectors = 1024,
.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
@@ -581,17 +625,14 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_USERNAME | ISCSI_PASSWORD |
ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ ISCSI_PING_TMO | ISCSI_RECV_TMO |
+ ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
.host_param_mask = ISCSI_HOST_HWADDRESS |
ISCSI_HOST_NETDEV_NAME |
ISCSI_HOST_INITIATOR_NAME,
- .host_template = &iscsi_iser_sht,
- .conndata_size = sizeof(struct iscsi_conn),
- .max_lun = ISCSI_ISER_MAX_LUN,
- .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
/* session management */
.create_session = iscsi_iser_session_create,
- .destroy_session = iscsi_session_teardown,
+ .destroy_session = iscsi_iser_session_destroy,
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
@@ -600,17 +641,16 @@ static struct iscsi_transport iscsi_iser_transport = {
.get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
- .stop_conn = iscsi_conn_stop,
+ .stop_conn = iscsi_iser_conn_stop,
/* iscsi host params */
.get_host_param = iscsi_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_iser_conn_get_stats,
- .init_cmd_task = iscsi_iser_cmd_init,
- .xmit_cmd_task = iscsi_iser_ctask_xmit,
- .xmit_mgmt_task = iscsi_iser_mtask_xmit,
- .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ .init_task = iscsi_iser_task_init,
+ .xmit_task = iscsi_iser_task_xmit,
+ .cleanup_task = iscsi_iser_cleanup_task,
/* recovery */
.session_recovery_timedout = iscsi_session_recovery_timedout,
@@ -630,8 +670,6 @@ static int __init iser_init(void)
return -EINVAL;
}
- iscsi_iser_transport.max_lun = iscsi_max_lun;
-
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
@@ -647,7 +685,9 @@ static int __init iser_init(void)
mutex_init(&ig.connlist_mutex);
INIT_LIST_HEAD(&ig.connlist);
- if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iscsi_iser_scsi_transport = iscsi_register_transport(
+ &iscsi_iser_transport);
+ if (!iscsi_iser_scsi_transport) {
iser_err("iscsi_register_transport failed\n");
err = -EINVAL;
goto register_transport_failure;
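
Note on the file above: the connect path now allocates the iser_conn inside the iscsi_endpoint's dd_data area (iscsi_create_endpoint(sizeof(*ib_conn))) instead of kzalloc'ing it and tracking it on a private global list, and bind later recovers it through iscsi_lookup_endpoint(). Below is a minimal userland C sketch of that trailing-allocation pattern; the names are illustrative stand-ins, not the kernel structs or API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
	unsigned long long handle;	/* opaque handle handed to userspace */
	char dd_data[];			/* transport-private area (here: the iser conn) */
};

struct iser_conn_model {		/* stand-in for struct iser_conn */
	struct endpoint *ep;
	int state;
};

static struct endpoint *create_endpoint(size_t dd_size)
{
	struct endpoint *ep = calloc(1, sizeof(*ep) + dd_size);

	if (ep)
		ep->handle = (unsigned long long)(uintptr_t)ep;
	return ep;
}

int main(void)
{
	struct endpoint *ep = create_endpoint(sizeof(struct iser_conn_model));
	struct iser_conn_model *ib_conn;

	if (!ep)
		return 1;
	ib_conn = (struct iser_conn_model *)ep->dd_data;	/* the ep->dd_data trick in the patch */
	ib_conn->ep = ep;
	printf("ib_conn %p lives inside ep %p\n", (void *)ib_conn, (void *)ep);
	free(ep);
	return 0;
}
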
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0e10703..81a8262 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -94,7 +94,6 @@
/* support upto 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
#define ISCSI_ISER_MAX_LUN 256
-#define ISCSI_ISER_MAX_CMD_LEN 16
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
@@ -172,7 +171,8 @@ struct iser_data_buf {
/* fwd declarations */
struct iser_device;
struct iscsi_iser_conn;
-struct iscsi_iser_cmd_task;
+struct iscsi_iser_task;
+struct iscsi_endpoint;
struct iser_mem_reg {
u32 lkey;
@@ -196,7 +196,7 @@ struct iser_regd_buf {
#define MAX_REGD_BUF_VECTOR_LEN 2
struct iser_dto {
- struct iscsi_iser_cmd_task *ctask;
+ struct iscsi_iser_task *task;
struct iser_conn *ib_conn;
int notify_enable;
@@ -240,7 +240,9 @@ struct iser_device {
struct iser_conn {
struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+ struct iscsi_endpoint *ep;
enum iser_ib_conn_state state; /* rdma connection state */
+ atomic_t refcount;
spinlock_t lock; /* used for state changes */
struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */
@@ -259,11 +261,9 @@ struct iser_conn {
struct iscsi_iser_conn {
struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
struct iser_conn *ib_conn; /* iSER IB conn */
-
- rwlock_t lock;
};
-struct iscsi_iser_cmd_task {
+struct iscsi_iser_task {
struct iser_desc desc;
struct iscsi_iser_conn *iser_conn;
enum iser_task_status status;
@@ -296,22 +296,26 @@ extern int iser_debug_level;
/* allocate connection resources needed for rdma functionality */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
-int iser_send_control(struct iscsi_conn *conn,
- struct iscsi_mgmt_task *mtask);
+int iser_send_control(struct iscsi_conn *conn,
+ struct iscsi_task *task);
-int iser_send_command(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask);
+int iser_send_command(struct iscsi_conn *conn,
+ struct iscsi_task *task);
-int iser_send_data_out(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask,
- struct iscsi_data *hdr);
+int iser_send_data_out(struct iscsi_conn *conn,
+ struct iscsi_task *task,
+ struct iscsi_data *hdr);
void iscsi_iser_recv(struct iscsi_conn *conn,
struct iscsi_hdr *hdr,
char *rx_data,
int rx_data_len);
-int iser_conn_init(struct iser_conn **ib_conn);
+void iser_conn_init(struct iser_conn *ib_conn);
+
+void iser_conn_get(struct iser_conn *ib_conn);
+
+void iser_conn_put(struct iser_conn *ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
@@ -320,9 +324,9 @@ void iser_rcv_completion(struct iser_desc *desc,
void iser_snd_completion(struct iser_desc *desc);
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_init(struct iscsi_iser_task *task);
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_dto_buffs_release(struct iser_dto *dto);
@@ -332,10 +336,10 @@ void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction);
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+int iser_reg_rdma_mem(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
@@ -355,10 +359,10 @@ int iser_post_send(struct iser_desc *tx_desc);
int iser_conn_state_comp(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp);
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
#endif
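
The header above folds the old management-task descriptor into the unified struct iscsi_iser_task, whose first member is the iser_desc, and libiscsi places that dd_data area directly behind struct iscsi_task; that layout is what the "this arithmetic is legal by libiscsi dd_data allocation" comment in iser_snd_completion() (in iser_initiator.c below) relies on. A small userland model of the layout, for illustration only, with stand-in types rather than the kernel structs:

#include <stdio.h>
#include <stdlib.h>

struct task {				/* stand-in for struct iscsi_task */
	int itt;
	char dd_data[];			/* per-transport data sits right after the task */
};

struct iser_task_model {		/* stand-in for struct iscsi_iser_task */
	char desc[32];			/* stand-in for struct iser_desc, the first member */
};

int main(void)
{
	struct task *task = calloc(1, sizeof(*task) + sizeof(struct iser_task_model));
	struct iser_task_model *iser_task;
	void *desc;

	if (!task)
		return 1;
	task->itt = 42;
	iser_task = (struct iser_task_model *)task->dd_data;
	desc = iser_task->desc;

	/* step back from the descriptor to the owning task, as iser_snd_completion() does */
	struct task *owner = (struct task *)((char *)desc - sizeof(struct task));
	printf("recovered itt %d\n", owner->itt);
	free(task);
	return 0;
}
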
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 31ad498..cdd2831 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -64,46 +64,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
- * iser_ctask->data[ISER_DIR_IN].data_len
+ * iser_task->data[ISER_DIR_IN].data_len
*/
-static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+static int iser_prepare_read_cmd(struct iscsi_task *task,
unsigned int edtl)
{
- struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_regd_buf *regd_buf;
int err;
- struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
- struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+ struct iser_hdr *hdr = &iser_task->desc.iser_header;
+ struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
- err = iser_dma_map_task_data(iser_ctask,
+ err = iser_dma_map_task_data(iser_task,
buf_in,
ISER_DIR_IN,
DMA_FROM_DEVICE);
if (err)
return err;
- if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
iser_err("Total data length: %ld, less than EDTL: "
"%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
- iser_ctask->data[ISER_DIR_IN].data_len, edtl,
- ctask->itt, iser_ctask->iser_conn);
+ iser_task->data[ISER_DIR_IN].data_len, edtl,
+ task->itt, iser_task->iser_conn);
return -EINVAL;
}
- err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
}
- regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
hdr->flags |= ISER_RSV;
hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
hdr->read_va = cpu_to_be64(regd_buf->reg.va);
iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
- ctask->itt, regd_buf->reg.rkey,
+ task->itt, regd_buf->reg.rkey,
(unsigned long long)regd_buf->reg.va);
return 0;
@@ -111,43 +111,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
- * ctask->data[ISER_DIR_OUT].data_len
+ * task->data[ISER_DIR_OUT].data_len
*/
static int
-iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int imm_sz,
unsigned int unsol_sz,
unsigned int edtl)
{
- struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_regd_buf *regd_buf;
int err;
- struct iser_dto *send_dto = &iser_ctask->desc.dto;
- struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
- struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+ struct iser_dto *send_dto = &iser_task->desc.dto;
+ struct iser_hdr *hdr = &iser_task->desc.iser_header;
+ struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
- err = iser_dma_map_task_data(iser_ctask,
+ err = iser_dma_map_task_data(iser_task,
buf_out,
ISER_DIR_OUT,
DMA_TO_DEVICE);
if (err)
return err;
- if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Total data length: %ld, less than EDTL: %d, "
"in WRITE cmd BHS itt: %d, conn: 0x%p\n",
- iser_ctask->data[ISER_DIR_OUT].data_len,
- edtl, ctask->itt, ctask->conn);
+ iser_task->data[ISER_DIR_OUT].data_len,
+ edtl, task->itt, task->conn);
return -EINVAL;
}
- err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
}
- regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
@@ -156,13 +156,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
- ctask->itt, regd_buf->reg.rkey,
+ task->itt, regd_buf->reg.rkey,
(unsigned long long)regd_buf->reg.va, unsol_sz);
}
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
- ctask->itt, imm_sz);
+ task->itt, imm_sz);
iser_dto_add_regd_buff(send_dto,
regd_buf,
0,
@@ -314,38 +314,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
/**
* iser_send_command - send command PDU
*/
-int iser_send_command(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask)
+int iser_send_command(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_dto *send_dto = NULL;
unsigned long edtl;
int err = 0;
struct iser_data_buf *data_buf;
- struct iscsi_cmd *hdr = ctask->hdr;
- struct scsi_cmnd *sc = ctask->sc;
+ struct iscsi_cmd *hdr = task->hdr;
+ struct scsi_cmnd *sc = task->sc;
if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
return -EPERM;
}
- if (iser_check_xmit(conn, ctask))
+ if (iser_check_xmit(conn, task))
return -ENOBUFS;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
- iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
- send_dto = &iser_ctask->desc.dto;
- send_dto->ctask = iser_ctask;
- iser_create_send_desc(iser_conn, &iser_ctask->desc);
+ iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+ send_dto = &iser_task->desc.dto;
+ send_dto->task = iser_task;
+ iser_create_send_desc(iser_conn, &iser_task->desc);
if (hdr->flags & ISCSI_FLAG_CMD_READ)
- data_buf = &iser_ctask->data[ISER_DIR_IN];
+ data_buf = &iser_task->data[ISER_DIR_IN];
else
- data_buf = &iser_ctask->data[ISER_DIR_OUT];
+ data_buf = &iser_task->data[ISER_DIR_OUT];
if (scsi_sg_count(sc)) { /* using a scatter list */
data_buf->buf = scsi_sglist(sc);
@@ -355,15 +355,15 @@ int iser_send_command(struct iscsi_conn *conn,
data_buf->data_len = scsi_bufflen(sc);
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
- err = iser_prepare_read_cmd(ctask, edtl);
+ err = iser_prepare_read_cmd(task, edtl);
if (err)
goto send_command_error;
}
if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
- err = iser_prepare_write_cmd(ctask,
- ctask->imm_count,
- ctask->imm_count +
- ctask->unsol_count,
+ err = iser_prepare_write_cmd(task,
+ task->imm_count,
+ task->imm_count +
+ task->unsol_count,
edtl);
if (err)
goto send_command_error;
@@ -378,27 +378,27 @@ int iser_send_command(struct iscsi_conn *conn,
goto send_command_error;
}
- iser_ctask->status = ISER_TASK_STATUS_STARTED;
+ iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_ctask->desc);
+ err = iser_post_send(&iser_task->desc);
if (!err)
return 0;
send_command_error:
iser_dto_buffs_release(send_dto);
- iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
return err;
}
/**
* iser_send_data_out - send data out PDU
*/
-int iser_send_data_out(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask,
+int iser_send_data_out(struct iscsi_conn *conn,
+ struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_desc *tx_desc = NULL;
struct iser_dto *send_dto = NULL;
unsigned long buf_offset;
@@ -411,7 +411,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
return -EPERM;
}
- if (iser_check_xmit(conn, ctask))
+ if (iser_check_xmit(conn, task))
return -ENOBUFS;
itt = (__force uint32_t)hdr->itt;
@@ -432,7 +432,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
/* build the tx desc regd header and add it to the tx desc dto */
send_dto = &tx_desc->dto;
- send_dto->ctask = iser_ctask;
+ send_dto->task = iser_task;
iser_create_send_desc(iser_conn, tx_desc);
iser_reg_single(iser_conn->ib_conn->device,
@@ -440,15 +440,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
/* all data was registered for RDMA, we can use the lkey */
iser_dto_add_regd_buff(send_dto,
- &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ &iser_task->rdma_regd[ISER_DIR_OUT],
buf_offset,
data_seg_len);
- if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Offset:%ld & DSL:%ld in Data-Out "
"inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
- iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
goto send_data_out_error;
}
@@ -468,10 +468,11 @@ send_data_out_error:
}
int iser_send_control(struct iscsi_conn *conn,
- struct iscsi_mgmt_task *mtask)
+ struct iscsi_task *task)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_desc *mdesc = mtask->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_desc *mdesc = &iser_task->desc;
struct iser_dto *send_dto = NULL;
unsigned long data_seg_len;
int err = 0;
@@ -483,27 +484,27 @@ int iser_send_control(struct iscsi_conn *conn,
return -EPERM;
}
- if (iser_check_xmit(conn,mtask))
+ if (iser_check_xmit(conn, task))
return -ENOBUFS;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
send_dto = &mdesc->dto;
- send_dto->ctask = NULL;
+ send_dto->task = NULL;
iser_create_send_desc(iser_conn, mdesc);
device = iser_conn->ib_conn->device;
iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
- data_seg_len = ntoh24(mtask->hdr->dlength);
+ data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
regd_buf = &mdesc->data_regd_buf;
memset(regd_buf, 0, sizeof(struct iser_regd_buf));
regd_buf->device = device;
- regd_buf->virt_addr = mtask->data;
- regd_buf->data_size = mtask->data_count;
+ regd_buf->virt_addr = task->data;
+ regd_buf->data_size = task->data_count;
iser_reg_single(device, regd_buf,
DMA_TO_DEVICE);
iser_dto_add_regd_buff(send_dto, regd_buf,
@@ -533,15 +534,13 @@ send_control_error:
void iser_rcv_completion(struct iser_desc *rx_desc,
unsigned long dto_xfer_len)
{
- struct iser_dto *dto = &rx_desc->dto;
+ struct iser_dto *dto = &rx_desc->dto;
struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
- struct iscsi_session *session = conn->iscsi_conn->session;
- struct iscsi_cmd_task *ctask;
- struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_task *task;
+ struct iscsi_iser_task *iser_task;
struct iscsi_hdr *hdr;
char *rx_data = NULL;
int rx_data_len = 0;
- unsigned int itt;
unsigned char opcode;
hdr = &rx_desc->iscsi_header;
@@ -557,19 +556,24 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
opcode = hdr->opcode & ISCSI_OPCODE_MASK;
if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
- itt = get_itt(hdr->itt); /* mask out cid and age bits */
- if (!(itt < session->cmds_max))
+ spin_lock(&conn->iscsi_conn->session->lock);
+ task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+ if (task)
+ __iscsi_get_task(task);
+ spin_unlock(&conn->iscsi_conn->session->lock);
+
+ if (!task)
iser_err("itt can't be matched to task!!! "
- "conn %p opcode %d cmds_max %d itt %d\n",
- conn->iscsi_conn,opcode,session->cmds_max,itt);
- /* use the mapping given with the cmds array indexed by itt */
- ctask = (struct iscsi_cmd_task *)session->cmds[itt];
- iser_ctask = ctask->dd_data;
- iser_dbg("itt %d ctask %p\n",itt,ctask);
- iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
- iser_ctask_rdma_finalize(iser_ctask);
+ "conn %p opcode %d itt %d\n",
+ conn->iscsi_conn, opcode, hdr->itt);
+ else {
+ iser_task = task->dd_data;
+ iser_dbg("itt %d task %p\n",hdr->itt, task);
+ iser_task->status = ISER_TASK_STATUS_COMPLETED;
+ iser_task_rdma_finalize(iser_task);
+ iscsi_put_task(task);
+ }
}
-
iser_dto_buffs_release(dto);
iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
@@ -590,7 +594,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
struct iser_conn *ib_conn = dto->ib_conn;
struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
struct iscsi_conn *conn = iser_conn->iscsi_conn;
- struct iscsi_mgmt_task *mtask;
+ struct iscsi_task *task;
int resume_tx = 0;
iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
@@ -613,36 +617,31 @@ void iser_snd_completion(struct iser_desc *tx_desc)
if (tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
- mtask = (void *) ((long)(void *)tx_desc -
- sizeof(struct iscsi_mgmt_task));
- if (mtask->hdr->itt == RESERVED_ITT) {
- struct iscsi_session *session = conn->session;
-
- spin_lock(&conn->session->lock);
- iscsi_free_mgmt_task(conn, mtask);
- spin_unlock(&session->lock);
- }
+ task = (void *) ((long)(void *)tx_desc -
+ sizeof(struct iscsi_task));
+ if (task->hdr->itt == RESERVED_ITT)
+ iscsi_put_task(task);
}
}
-void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
- iser_ctask->status = ISER_TASK_STATUS_INIT;
+ iser_task->status = ISER_TASK_STATUS_INIT;
- iser_ctask->dir[ISER_DIR_IN] = 0;
- iser_ctask->dir[ISER_DIR_OUT] = 0;
+ iser_task->dir[ISER_DIR_IN] = 0;
+ iser_task->dir[ISER_DIR_OUT] = 0;
- iser_ctask->data[ISER_DIR_IN].data_len = 0;
- iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+ iser_task->data[ISER_DIR_IN].data_len = 0;
+ iser_task->data[ISER_DIR_OUT].data_len = 0;
- memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
sizeof(struct iser_regd_buf));
- memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
sizeof(struct iser_regd_buf));
}
-void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
int deferred;
int is_rdma_aligned = 1;
@@ -651,17 +650,17 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
*/
- if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
is_rdma_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
}
- if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
is_rdma_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
}
- if (iser_ctask->dir[ISER_DIR_IN]) {
- regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ if (iser_task->dir[ISER_DIR_IN]) {
+ regd = &iser_task->rdma_regd[ISER_DIR_IN];
deferred = iser_regd_buff_release(regd);
if (deferred) {
iser_err("%d references remain for BUF-IN rdma reg\n",
@@ -669,8 +668,8 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
}
}
- if (iser_ctask->dir[ISER_DIR_OUT]) {
- regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ if (iser_task->dir[ISER_DIR_OUT]) {
+ regd = &iser_task->rdma_regd[ISER_DIR_OUT];
deferred = iser_regd_buff_release(regd);
if (deferred) {
iser_err("%d references remain for BUF-OUT rdma reg\n",
@@ -680,7 +679,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
/* if the data was unaligned, it was already unmapped and then copied */
if (is_rdma_aligned)
- iser_dma_unmap_task_data(iser_ctask);
+ iser_dma_unmap_task_data(iser_task);
}
void iser_dto_buffs_release(struct iser_dto *dto)
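
With the itt-indexing into session->cmds[] removed, the receive completion above resolves the task through iscsi_itt_to_ctask() under the session lock and pins it with __iscsi_get_task() before finalizing the RDMA resources, then drops the reference with iscsi_put_task(). A rough userland model of that lookup-and-reference pattern, with a pthread mutex and C11 atomics standing in for the kernel primitives (illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct task {
	atomic_int refcount;
	int itt;
};

static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;
static struct task *cmds[1];			/* stand-in for session->cmds[] */

static struct task *get_task(int itt)
{
	struct task *t = NULL;

	pthread_mutex_lock(&session_lock);
	if (cmds[0] && cmds[0]->itt == itt) {
		t = cmds[0];
		atomic_fetch_add(&t->refcount, 1);	/* models __iscsi_get_task() */
	}
	pthread_mutex_unlock(&session_lock);
	return t;
}

static void put_task(struct task *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1)	/* models iscsi_put_task() */
		printf("last reference to itt %d dropped, task can be freed\n", t->itt);
}

int main(void)
{
	struct task t = { .refcount = 1, .itt = 7 };
	struct task *found;

	cmds[0] = &t;
	found = get_task(7);		/* lookup + get under the session lock */
	if (found) {
		/* ... finalize RDMA resources here ... */
		put_task(found);	/* matches the reference taken above */
	}
	put_task(&t);			/* drop the initial reference */
	return 0;
}
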
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 81e49cb..b9453d0 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -99,13 +99,13 @@ void iser_reg_single(struct iser_device *device,
/**
* iser_start_rdma_unaligned_sg
*/
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
int dma_nents;
struct ib_device *dev;
char *mem = NULL;
- struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ struct iser_data_buf *data = &iser_task->data[cmd_dir];
unsigned long cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
@@ -138,37 +138,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
}
}
- sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
- iser_ctask->data_copy[cmd_dir].buf =
- &iser_ctask->data_copy[cmd_dir].sg_single;
- iser_ctask->data_copy[cmd_dir].size = 1;
+ sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+ iser_task->data_copy[cmd_dir].buf =
+ &iser_task->data_copy[cmd_dir].sg_single;
+ iser_task->data_copy[cmd_dir].size = 1;
- iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+ iser_task->data_copy[cmd_dir].copy_buf = mem;
- dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dev = iser_task->iser_conn->ib_conn->device->ib_device;
dma_nents = ib_dma_map_sg(dev,
- &iser_ctask->data_copy[cmd_dir].sg_single,
+ &iser_task->data_copy[cmd_dir].sg_single,
1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
BUG_ON(dma_nents == 0);
- iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
return 0;
}
/**
* iser_finalize_rdma_unaligned_sg
*/
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct ib_device *dev;
struct iser_data_buf *mem_copy;
unsigned long cmd_data_len;
- dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
- mem_copy = &iser_ctask->data_copy[cmd_dir];
+ dev = iser_task->iser_conn->ib_conn->device->ib_device;
+ mem_copy = &iser_task->data_copy[cmd_dir];
ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
@@ -184,8 +184,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
/* copy back read RDMA to unaligned sg */
mem = mem_copy->copy_buf;
- sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
- sg_size = iser_ctask->data[ISER_DIR_IN].size;
+ sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+ sg_size = iser_task->data[ISER_DIR_IN].size;
p = mem;
for_each_sg(sgl, sg, sg_size, i) {
@@ -198,7 +198,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
}
}
- cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+ cmd_data_len = iser_task->data[cmd_dir].data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
free_pages((unsigned long)mem_copy->copy_buf,
@@ -376,15 +376,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
}
}
-int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
- struct iser_data_buf *data,
- enum iser_data_dir iser_dir,
- enum dma_data_direction dma_dir)
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
{
struct ib_device *dev;
- iser_ctask->dir[iser_dir] = 1;
- dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ iser_task->dir[iser_dir] = 1;
+ dev = iser_task->iser_conn->ib_conn->device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
@@ -394,20 +394,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
return 0;
}
-void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
struct ib_device *dev;
struct iser_data_buf *data;
- dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dev = iser_task->iser_conn->ib_conn->device->ib_device;
- if (iser_ctask->dir[ISER_DIR_IN]) {
- data = &iser_ctask->data[ISER_DIR_IN];
+ if (iser_task->dir[ISER_DIR_IN]) {
+ data = &iser_task->data[ISER_DIR_IN];
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}
- if (iser_ctask->dir[ISER_DIR_OUT]) {
- data = &iser_ctask->data[ISER_DIR_OUT];
+ if (iser_task->dir[ISER_DIR_OUT]) {
+ data = &iser_task->data[ISER_DIR_OUT];
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
}
}
@@ -418,21 +418,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
*
* returns 0 on success, errno code on failure
*/
-int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
- struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
- struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+ struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
- struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf;
int aligned_len;
int err;
int i;
struct scatterlist *sg;
- regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+ regd_buf = &iser_task->rdma_regd[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
@@ -442,13 +442,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
- iser_dma_unmap_task_data(iser_ctask);
+ iser_dma_unmap_task_data(iser_task);
/* allocate copy buf, if we are writing, copy the */
/* unaligned scatterlist, dma map the copy */
- if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
return -ENOMEM;
- mem = &iser_ctask->data_copy[cmd_dir];
+ mem = &iser_task->data_copy[cmd_dir];
}
/* if there a single dma entry, FMR is not needed */
@@ -472,8 +472,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
if (err) {
iser_data_buf_dump(mem, ibdev);
- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
- ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+ mem->dma_nents,
+ ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
ib_conn->page_vec->data_size, ib_conn->page_vec->length,
ib_conn->page_vec->offset);
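
For reference, the unaligned-scatterlist path these renames touch works roughly by copying the scattered buffers into one contiguous bounce buffer before registration (iser_start_rdma_unaligned_sg) and, for reads, copying the RDMA result back to the original scatterlist afterwards (iser_finalize_rdma_unaligned_sg). A toy userland sketch of that gather/scatter step, illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct chunk { char *buf; size_t len; };	/* stand-in for a scatterlist entry */

/* gather the scattered chunks into one contiguous copy buffer ("start") */
static char *gather(const struct chunk *sg, int n, size_t total)
{
	char *copy = malloc(total), *p = copy;

	for (int i = 0; copy && i < n; i++) {
		memcpy(p, sg[i].buf, sg[i].len);
		p += sg[i].len;
	}
	return copy;
}

/* scatter the (RDMA-read) copy buffer back into the original chunks ("finalize") */
static void scatter(const char *copy, struct chunk *sg, int n)
{
	const char *p = copy;

	for (int i = 0; i < n; i++) {
		memcpy(sg[i].buf, p, sg[i].len);
		p += sg[i].len;
	}
}

int main(void)
{
	char a[4] = "abc", b[4] = "def";
	struct chunk sg[] = { { a, 3 }, { b, 3 } };
	char *copy = gather(sg, 2, 6);

	if (!copy)
		return 1;
	memcpy(copy, "XYZUVW", 6);	/* pretend the RDMA read landed here */
	scatter(copy, sg, 2);
	printf("%.3s %.3s\n", a, b);	/* XYZ UVW */
	free(copy);
	return 0;
}
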
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 77cabee..3a917c1 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -323,7 +323,18 @@ static void iser_conn_release(struct iser_conn *ib_conn)
iser_device_try_release(device);
if (ib_conn->iser_conn)
ib_conn->iser_conn->ib_conn = NULL;
- kfree(ib_conn);
+ iscsi_destroy_endpoint(ib_conn->ep);
+}
+
+void iser_conn_get(struct iser_conn *ib_conn)
+{
+ atomic_inc(&ib_conn->refcount);
+}
+
+void iser_conn_put(struct iser_conn *ib_conn)
+{
+ if (atomic_dec_and_test(&ib_conn->refcount))
+ iser_conn_release(ib_conn);
}
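
iser_conn_get()/iser_conn_put() above give the connection a plain atomic reference count: iser_conn_init() starts it at 1, bind takes an extra reference, and the final put (from stop, terminate or conn_destroy) runs iser_conn_release(). A minimal userland model of that pattern using C11 atomics in place of the kernel's atomic_t, illustrative only:

#include <stdatomic.h>
#include <stdio.h>

struct conn {
	atomic_int refcount;
};

static void conn_get(struct conn *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

static void conn_put(struct conn *c)
{
	/* models atomic_dec_and_test(): release when the count hits zero */
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		printf("last put: releasing connection resources\n");
}

int main(void)
{
	struct conn c = { .refcount = 1 };	/* iser_conn_init() starts at 1 */

	conn_get(&c);	/* taken at bind time */
	conn_put(&c);	/* dropped by the stop callback */
	conn_put(&c);	/* final put (disconnect/terminate) triggers release */
	return 0;
}
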
/**
@@ -347,7 +358,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
wait_event_interruptible(ib_conn->wait,
ib_conn->state == ISER_CONN_DOWN);
- iser_conn_release(ib_conn);
+ iser_conn_put(ib_conn);
}
static void iser_connect_error(struct rdma_cm_id *cma_id)
@@ -481,24 +492,15 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
return ret;
}
-int iser_conn_init(struct iser_conn **ibconn)
+void iser_conn_init(struct iser_conn *ib_conn)
{
- struct iser_conn *ib_conn;
-
- ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
- if (!ib_conn) {
- iser_err("can't alloc memory for struct iser_conn\n");
- return -ENOMEM;
- }
ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
+ atomic_set(&ib_conn->refcount, 1);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
-
- *ibconn = ib_conn;
- return 0;
}
/**