author      James Smart <jsmart2021@gmail.com>               2018-05-24 21:08:58 -0700
committer   Martin K. Petersen <martin.petersen@oracle.com>  2018-05-28 22:40:33 -0400
commit      4d5e789a2eb111d7f9e032d0ebaecb465a2eca8f
tree        2cd682f0d56a385908cca3285dd3021e1189344e /drivers/scsi/lpfc
parent      dc19e3b4a80e0bb1e5f080473fffa0ac8c0694a6
scsi: lpfc: correct oversubscription of nvme io requests for an adapter
Under large configurations, the driver would start to log message 6065 -
NVME out of buffers (exchanges).
The driver was using the ndlp cmd_qdepth value when determining the maximum
outstanding IOs for an adapter. This value defaults to 65536, which exceeds
the maximum exchange count supported by an adapter. The ndlp cmd_qdepth
default has no relevance here: the outstanding IO count should be capped at
the adapter's max exchange count, with IO requests beyond that level bounced
back with an EBUSY status so that the block layer retries them.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--   drivers/scsi/lpfc/lpfc_attr.c       |  7
-rw-r--r--   drivers/scsi/lpfc/lpfc_nportdisc.c  |  6
-rw-r--r--   drivers/scsi/lpfc/lpfc_nvme.c       | 23
3 files changed, 32 insertions, 4 deletions
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a66c4fb..729d343 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -297,6 +297,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
 
 		spin_lock_irq(shost->host_lock);
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+				phba->brd_no,
+				phba->sli4_hba.max_cfg_param.max_xri,
+				phba->sli4_hba.nvme_xri_max,
+				phba->sli4_hba.scsi_xri_max,
+				lpfc_sli4_get_els_iocb_cnt(phba));
 
 		/* Port state is only one of two values for now. */
 		if (localport->port_id)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e790c0b..1a80397 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1982,6 +1982,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			if (bf_get_be32(prli_disc, nvpr))
 				ndlp->nlp_type |= NLP_NVME_DISCOVERY;
 
+			/* This node is an NVME target. Adjust the command
+			 * queue depth on this node to not exceed the available
+			 * xris.
+			 */
+			ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
+
 			/*
 			 * If prli_fba is set, the Target supports FirstBurst.
 			 * If prli_fb_sz is 0, the FirstBurst size is unlimited,
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index f5f90d1..288dd3c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -973,9 +973,22 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 
 	/* Sanity check on return of outstanding command */
 	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
+		if (!lpfc_ncmd) {
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_NODE | LOG_NVME_IOERR,
+					 "6071 Null lpfc_ncmd pointer. No "
+					 "release, skip completion\n");
+			return;
+		}
+
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6071 Completion pointers bad on wqe %p.\n",
-				 wcqe);
+				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
+				 "nvmeCmd %p nrport %p\n",
+				 lpfc_ncmd, lpfc_ncmd->nvmeCmd,
+				 lpfc_ncmd->nrport);
+
+		/* Release the lpfc_ncmd regardless of the missing elements. */
+		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 		return;
 	}
 	nCmd = lpfc_ncmd->nvmeCmd;
@@ -1537,8 +1550,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	    !expedite) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6174 Fail IO, ndlp qdepth exceeded: "
-				 "idx %d DID %x\n",
-				 lpfc_queue_info->index, ndlp->nlp_DID);
+				 "idx %d DID %x pend %d qdepth %d\n",
+				 lpfc_queue_info->index, ndlp->nlp_DID,
+				 atomic_read(&ndlp->cmd_pending),
+				 ndlp->cmd_qdepth);
 		atomic_inc(&lport->xmt_fcp_qdepth);
 		ret = -EBUSY;
 		goto out_fail;
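For illustration, below is a minimal, self-contained user-space sketch of the capping
behavior described in the commit message and diff above. It is not the lpfc driver
code: the struct layouts, the cap_node_qdepth()/submit_io() helpers and the main()
harness are invented for this example; only the field names cmd_qdepth, cmd_pending
and nvme_xri_max mirror those used in the patch.

    #include <stdio.h>
    #include <errno.h>

    struct fake_adapter {
    	int nvme_xri_max;          /* exchanges (XRIs) available for NVME IO */
    };

    struct fake_node {
    	int cmd_qdepth;            /* max outstanding IOs allowed on this node */
    	int cmd_pending;           /* IOs currently outstanding */
    };

    /* PRLI completion: cap the node queue depth at the adapter's NVME
     * exchange count instead of leaving the 65536 default in place. */
    static void cap_node_qdepth(struct fake_node *ndlp,
    			    const struct fake_adapter *phba)
    {
    	ndlp->cmd_qdepth = phba->nvme_xri_max;
    }

    /* Submit path: once the cap is reached, bounce the request with -EBUSY
     * so the block layer requeues and retries it. */
    static int submit_io(struct fake_node *ndlp)
    {
    	if (ndlp->cmd_pending >= ndlp->cmd_qdepth)
    		return -EBUSY;
    	ndlp->cmd_pending++;
    	return 0;
    }

    int main(void)
    {
    	struct fake_adapter phba = { .nvme_xri_max = 4 };
    	struct fake_node ndlp = { .cmd_qdepth = 65536, .cmd_pending = 0 };
    	int i;

    	cap_node_qdepth(&ndlp, &phba);

    	for (i = 0; i < 6; i++)
    		printf("IO %d: %s\n", i,
    		       submit_io(&ndlp) ? "-EBUSY (left for retry)" : "issued");
    	return 0;
    }

Running the sketch shows the first nvme_xri_max submissions succeeding and the rest
bouncing with -EBUSY, which is the behavior the block layer relies on to requeue and
retry rather than the driver running the adapter out of exchanges.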