author	Quinn Tran <quinn.tran@cavium.com>	2017-12-28 12:33:24 -0800
committer	Martin K. Petersen <martin.petersen@oracle.com>	2018-01-03 23:41:06 -0500
commit	9cd883f07a54e5301d51e259acd250bb035996be (patch)
tree	372af187aaf712d28ebfeab37c551fc325802a09 /drivers/scsi/qla2xxx/qla_os.c
parent	94d83e3641765e08076efc93632eab579c0397e2 (diff)
scsi: qla2xxx: Fix session cleanup for N2N
When the connection type is N_Port to N_Port (point-to-point), there is a possibility that the initiator will not send a PLOGI request and will send PRLI directly. In an N2N connection, the port with the higher port name sends the PLOGI, but it is not allowed to send PRLI when it is in target mode; only the initiator is allowed to send PRLI. The current driver code deletes the old session only when it receives a PLOGI request, so if no PLOGI request arrives we never delete the old session or create a new one. Add a check for the N2N, PRLI-receive-only case and trigger the cleanup there. In this case the cleanup requires aborting individual commands instead of using an implicit logout as a broad-stroke flush.

Signed-off-by: Krishna Kant <krishna.kant@purestorage.com>
Signed-off-by: Alexei Potashnik <alexei@purestorage.com>
Signed-off-by: Quinn Tran <quinn.tran@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
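For context on the PRLI decode the patch adds below: PRLI service-parameter word 3 (wd3_lo) carries the peer's role bits, and the new code uses BIT_4 (target function) to set the port type and BIT_7 for confirmed-completion support. The following is a minimal user-space C sketch of that decoding, assuming the same bit positions the diff uses; the helper name prli_peer_role() and the surrounding scaffolding are illustrative, not qla2xxx code:

#include <stdint.h>
#include <stdio.h>

#define BIT_4 (1u << 4)	/* target function bit in wd3_lo */
#define BIT_7 (1u << 7)	/* confirmed completion allowed bit */

enum port_type { FCT_INITIATOR, FCT_TARGET };

/* Hypothetical helper mirroring the branch in the diff:
 * BIT_4 clear => initiator, BIT_4 set => target. */
static enum port_type prli_peer_role(uint16_t wd3_lo)
{
	return (wd3_lo & BIT_4) ? FCT_TARGET : FCT_INITIATOR;
}

int main(void)
{
	uint16_t wd3_lo = BIT_4 | BIT_7;	/* example PRLI payload word */

	printf("peer is %s\n",
	    prli_peer_role(wd3_lo) == FCT_TARGET ? "target" : "initiator");
	printf("confirmed completion %ssupported\n",
	    (wd3_lo & BIT_7) ? "" : "not ");
	return 0;
}

With wd3_lo = BIT_4 | BIT_7 the sketch reports a target peer with confirmed completion supported, matching the assignments the hunk below makes on the fcport.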
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c	41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index aca7afc..38d6d12 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4761,6 +4761,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
 	uint8_t free_fcport = 0;
 
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "%s %d %8phC enter\n",
+	    __func__, __LINE__, e->u.new_sess.port_name);
+
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
 	if (fcport) {
@@ -4822,7 +4826,31 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
 	if (fcport) {
+		if (N2N_TOPO(vha->hw))
+			fcport->flags &= ~FCF_FABRIC_DEVICE;
+
 		if (pla) {
+			if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
+				u16 wd3_lo;
+
+				fcport->fw_login_state = DSC_LS_PRLI_PEND;
+				fcport->local = 0;
+				fcport->loop_id =
+					le16_to_cpu(
+					    pla->iocb.u.isp24.nport_handle);
+				fcport->fw_login_state = DSC_LS_PRLI_PEND;
+				wd3_lo =
+					le16_to_cpu(
+					    pla->iocb.u.isp24.u.prli.wd3_lo);
+
+				if (wd3_lo & BIT_7)
+					fcport->conf_compl_supported = 1;
+
+				if ((wd3_lo & BIT_4) == 0)
+					fcport->port_type = FCT_INITIATOR;
+				else
+					fcport->port_type = FCT_TARGET;
+			}
 			qlt_plogi_ack_unref(vha, pla);
 		} else {
 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -4985,14 +5013,13 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
 	struct event_arg ea;
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
-	/*
-	 * If the port is not ONLINE then try to login
-	 * to it if we haven't run out of retries.
-	 */
+		/*
+		 * If the port is not ONLINE then try to login
+		 * to it if we haven't run out of retries.
+		 */
 		if (atomic_read(&fcport->state) != FCS_ONLINE &&
 		    fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
-
-			if (fcport->flags & FCF_FABRIC_DEVICE) {
+			if (vha->hw->current_topology != ISP_CFG_NL) {
 				ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
 				    "%s %8phC DS %d LS %d\n", __func__,
 				    fcport->port_name, fcport->disc_state,
@@ -5001,7 +5028,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
 				ea.event = FCME_RELOGIN;
 				ea.fcport = fcport;
 				qla2x00_fcport_event_handler(vha, &ea);
-			} else {
+			} else if (vha->hw->current_topology == ISP_CFG_NL) {
 				fcport->login_retry--;
 				status = qla2x00_local_device_login(vha,
 				    fcport);
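The relogin hunks above change the dispatch from the per-port FCF_FABRIC_DEVICE flag to the HBA topology, so N2N ports (whose fabric flag the patch now clears in qla24xx_create_new_sess()) still take the event-driven relogin path. A standalone C sketch of that control flow, with simplified enum values and stub functions standing in for the driver's structures and FCME_RELOGIN machinery; none of these stand-ins are real qla2xxx interfaces:

#include <stdio.h>

enum topology { ISP_CFG_NL, ISP_CFG_N, ISP_CFG_FL, ISP_CFG_F };

static void queue_relogin_event(void)	/* stands in for FCME_RELOGIN */
{
	printf("FCME_RELOGIN queued for the discovery state machine\n");
}

static void local_device_login(void)	/* stands in for the NL-loop path */
{
	printf("direct local-loop login attempt\n");
}

static void relogin_dispatch(enum topology topo)
{
	if (topo != ISP_CFG_NL)
		queue_relogin_event();	/* fabric and N2N ports */
	else
		local_device_login();	/* arbitrated-loop-only ports */
}

int main(void)
{
	relogin_dispatch(ISP_CFG_N);	/* N2N: event-driven relogin */
	relogin_dispatch(ISP_CFG_NL);	/* loop-only: direct login */
	return 0;
}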