path: root/drivers/scsi/lpfc
author     Jiri Kosina <jkosina@suse.cz>   2011-04-26 10:22:15 +0200
committer  Jiri Kosina <jkosina@suse.cz>   2011-04-26 10:22:59 +0200
commit     07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree       0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /drivers/scsi/lpfc
parent     9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent     cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/Makefile          |    8
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            |   27
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       |   16
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c        |   38
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h       |    6
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c    |    8
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        |    7
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    |   16
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h         |   15
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        |  113
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       |   61
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c       |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h         |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       |   53
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h       |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        |  539
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h       |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h    |    2
19 files changed, 623 insertions(+), 296 deletions(-)
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index ad05d6e..88928f0 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+# * Copyright (C) 2004-2011 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * *
@@ -19,10 +19,8 @@
# *******************************************************************/
######################################################################
-ifneq ($(GCOV),)
- EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage
- EXTRA_CFLAGS += -O0
-endif
+ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
+ccflags-$(GCOV) += -O0
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b64c6da..60e98a62 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -539,6 +539,8 @@ struct lpfc_hba {
(struct lpfc_hba *, uint32_t);
int (*lpfc_hba_down_link)
(struct lpfc_hba *, uint32_t);
+ int (*lpfc_selective_reset)
+ (struct lpfc_hba *);
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
@@ -895,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
return;
}
-static inline void
+static inline int
+lpfc_readl(void __iomem *addr, uint32_t *data)
+{
+ uint32_t temp;
+ temp = readl(addr);
+ if (temp == 0xffffffff)
+ return -EIO;
+ *data = temp;
+ return 0;
+}
+
+static inline int
lpfc_sli_read_hs(struct lpfc_hba *phba)
{
/*
@@ -904,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
*/
phba->sli.slistat.err_attn_event++;
- /* Save status info */
- phba->work_hs = readl(phba->HSregaddr);
- phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
- phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+ /* Save status info and check for unplug error */
+ if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
+ lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
+ lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
+ return -EIO;
+ }
/* Clear chip Host Attention error bit */
writel(HA_ERATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
phba->pport->stopped = 1;
- return;
+ return 0;
}
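
The lpfc_readl() helper added above is the core of this series: a register read that returns all ones is treated as a PCI hot-unplug and reported as -EIO instead of being consumed as register state. As a rough, hypothetical illustration of the caller pattern applied throughout the files below (names follow the driver, but this fragment is not a verbatim hunk from the patch):

	uint32_t creg_val;

	/* Replace a bare readl() of a host register with lpfc_readl() and
	 * unwind (drop locks, return an error) when the adapter has been
	 * hot-unplugged and reads return 0xffffffff. */
	if (lpfc_readl(phba->HCregaddr, &creg_val)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
	writel(creg_val, phba->HCregaddr);
	readl(phba->HCregaddr);	/* flush posted write */
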
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e7c020d..17d7893 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -685,7 +685,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
* -EIO reset not configured or error posting the event
* zero for success
**/
-static int
+int
lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
@@ -746,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
int status = -EINVAL;
if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
- status = lpfc_selective_reset(phba);
+ status = phba->lpfc_selective_reset(phba);
if (status == 0)
return strlen(buf);
@@ -1224,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
if (val & ENABLE_FCP_RING_POLLING) {
if ((val & DISABLE_FCP_RING_INT) &&
!(old_val & DISABLE_FCP_RING_INT)) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -1242,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
spin_unlock_irq(&phba->hbalock);
del_timer(&phba->fcp_poll_timer);
spin_lock_irq(&phba->hbalock);
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -4509,7 +4515,7 @@ static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
* Description:
* This function is called by the transport after the @fc_vport's symbolic name
* has been changed. This function re-registers the symbolic name with the
- * switch to propogate the change into the fabric if the vport is active.
+ * switch to propagate the change into the fabric if the vport is active.
**/
static void
lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 04fef03..3811ea9 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
dd_data->context_un.iocb.bmp = bmp;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -EIO ;
+ goto free_cmdiocbq;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
dd_data->context_un.iocb.ndlp = ndlp;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -EIO;
+ goto linkdown_err;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
else
rc = -EIO;
+linkdown_err:
pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
@@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
dd_data->context_un.iocb.ndlp = ndlp;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+ rc = -IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -1929,7 +1939,7 @@ out:
* @rxxri: Receive exchange id
* @len: Number of data bytes
*
- * This function allocates and posts a data buffer of sufficient size to recieve
+ * This function allocates and posts a data buffer of sufficient size to receive
* an unsolicted CT command.
**/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
@@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
- size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- from, size);
- job->reply->result = 0;
+ if (job) {
+ size = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ from, size);
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ job->job_done(job);
+ }
dd_data->context_un.mbox.set_job = NULL;
- job->dd_data = NULL;
- job->job_done(job);
/* need to hold the lock until we call job done to hold off
* the timeout handler returning to the midlayer while
* we are stillprocessing the job
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 3d40023..f0b332f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -254,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
uint32_t);
void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
-void lpfc_reset_barrier(struct lpfc_hba * phba);
+int lpfc_selective_reset(struct lpfc_hba *);
+void lpfc_reset_barrier(struct lpfc_hba *);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a753581..3d96774 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -908,7 +908,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
__func__, _dump_buf_data);
debug->buffer = _dump_buf_data;
@@ -938,7 +938,7 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
__func__, _dump_buf_dif, file->f_dentry->d_name.name);
debug->buffer = _dump_buf_dif;
@@ -2158,7 +2158,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_dir(name, phba->hba_debugfs_root);
if (!vport->vport_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0417 Cant create debugfs\n");
+ "0417 Can't create debugfs\n");
goto debug_failed;
}
atomic_inc(&phba->debugfs_vport_count);
@@ -2211,7 +2211,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport, &lpfc_debugfs_op_nodelist);
if (!vport->debug_nodelist) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cant create debugfs nodelist\n");
+ "0409 Can't create debugfs nodelist\n");
goto debug_failed;
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8e28edf..d34b69f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 0;
/* Read the HBA Host Attention Register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
if (!(ha_copy & HA_LATT))
return 0;
@@ -101,7 +102,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
phba->pport->port_state);
/* CLEAR_LA should re-enable link attention events and
- * we should then imediately take a LATT event. The
+ * we should then immediately take a LATT event. The
* LATT processing should call lpfc_linkdown() which
* will cleanup any left over in-progress discovery
* events.
@@ -1598,7 +1599,7 @@ out:
* This routine is the completion callback function for issuing the Port
* Login (PLOGI) command. For PLOGI completion, there must be an active
* ndlp on the vport node list that matches the remote node ID from the
- * PLOGI reponse IOCB. If such ndlp does not exist, the PLOGI is simply
+ * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
* ignored and command IOCB released. The PLOGI response IOCB status is
* checked for error conditons. If there is error status reported, PLOGI
* retry shall be attempted by invoking the lpfc_els_retry() routine.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 154c715..3014983 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -739,7 +739,7 @@ lpfc_do_work(void *p)
/*
* This is only called to handle FC worker events. Since this a rare
- * occurance, we allocate a struct lpfc_work_evt structure here instead of
+ * occurrence, we allocate a struct lpfc_work_evt structure here instead of
* embedding it in the IOCB.
*/
int
@@ -1348,7 +1348,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
int rc;
spin_lock_irq(&phba->hbalock);
- /* If the FCF is not availabe do nothing. */
+ /* If the FCF is not available do nothing. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
@@ -1538,7 +1538,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
/*
* If user did not specify any addressing mode, or if the
- * prefered addressing mode specified by user is not supported
+ * preferred addressing mode specified by user is not supported
* by FCF, allow fabric to pick the addressing mode.
*/
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
@@ -1553,7 +1553,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
FCFCNCT_AM_SPMA) ?
LPFC_FCF_SPMA : LPFC_FCF_FPMA;
/*
- * If the user specified a prefered address mode, use the
+ * If the user specified a preferred address mode, use the
* addr mode only if FCF support the addr_mode.
*/
else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
@@ -3117,7 +3117,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* back at reg login state so this
* mbox needs to be ignored becase
* there is another reg login in
- * proccess.
+ * process.
*/
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
@@ -4477,7 +4477,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if ((vport->fc_flag & FC_RSCN_MODE) &&
!(vport->fc_flag & FC_NDISC_ACTIVE)) {
if (lpfc_rscn_payload_check(vport, did)) {
- /* If we've already recieved a PLOGI from this NPort
+ /* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
if (ndlp->nlp_flag & NLP_RCV_PLOGI)
@@ -4493,7 +4493,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
} else
ndlp = NULL;
} else {
- /* If we've already recieved a PLOGI from this NPort,
+ /* If we've already received a PLOGI from this NPort,
* or we are already in the process of discovery on it,
* we don't need to try to discover it again.
*/
@@ -5756,7 +5756,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
* @size: Size of the data buffer.
* @rec_type: Record type to be searched.
*
- * This function searches config region data to find the begining
+ * This function searches config region data to find the beginning
* of the record specified by record_type. If record found, this
* function return pointer to the record else return NULL.
*/
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 94ae37c..95f11ed 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1344,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */
#define HS_FFER1 0x80000000 /* Bit 31 */
#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */
#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */
-
+#define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */
/* Host Control Register */
#define HC_REG_OFFSET 12 /* Byte offset from register base address */
@@ -1713,6 +1713,17 @@ struct lpfc_pde6 {
#define pde6_apptagval_WORD word2
};
+struct lpfc_pde7 {
+ uint32_t word0;
+#define pde7_type_SHIFT 24
+#define pde7_type_MASK 0x000000ff
+#define pde7_type_WORD word0
+#define pde7_rsvd0_SHIFT 0
+#define pde7_rsvd0_MASK 0x00ffffff
+#define pde7_rsvd0_WORD word0
+ uint32_t addrHigh;
+ uint32_t addrLow;
+};
/* Structure for MB Command LOAD_SM and DOWN_LOAD */
@@ -3621,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */
ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
- struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */
+ struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */
uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
} un;
union {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c7178d6..8433ac0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -215,7 +215,7 @@ struct lpfc_sli4_flags {
#define lpfc_fip_flag_WORD word0
};
-struct sli4_bls_acc {
+struct sli4_bls_rsp {
uint32_t word0_rsvd; /* Word0 must be reserved */
uint32_t word1;
#define lpfc_abts_orig_SHIFT 0
@@ -231,6 +231,16 @@ struct sli4_bls_acc {
#define lpfc_abts_oxid_MASK 0x0000FFFF
#define lpfc_abts_oxid_WORD word2
uint32_t word3;
+#define lpfc_vndr_code_SHIFT 0
+#define lpfc_vndr_code_MASK 0x000000FF
+#define lpfc_vndr_code_WORD word3
+#define lpfc_rsn_expln_SHIFT 8
+#define lpfc_rsn_expln_MASK 0x000000FF
+#define lpfc_rsn_expln_WORD word3
+#define lpfc_rsn_code_SHIFT 16
+#define lpfc_rsn_code_MASK 0x000000FF
+#define lpfc_rsn_code_WORD word3
+
uint32_t word4;
uint32_t word5_rsvd; /* Word5 must be reserved */
};
@@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr {
union lpfc_sli4_cfg_shdr {
struct {
uint32_t word6;
-#define lpfc_mbox_hdr_opcode_SHIFT 0
-#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
-#define lpfc_mbox_hdr_opcode_WORD word6
-#define lpfc_mbox_hdr_subsystem_SHIFT 8
-#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
-#define lpfc_mbox_hdr_subsystem_WORD word6
-#define lpfc_mbox_hdr_port_number_SHIFT 16
-#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
-#define lpfc_mbox_hdr_port_number_WORD word6
-#define lpfc_mbox_hdr_domain_SHIFT 24
-#define lpfc_mbox_hdr_domain_MASK 0x000000FF
-#define lpfc_mbox_hdr_domain_WORD word6
+#define lpfc_mbox_hdr_opcode_SHIFT 0
+#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD word6
+#define lpfc_mbox_hdr_subsystem_SHIFT 8
+#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD word6
+#define lpfc_mbox_hdr_port_number_SHIFT 16
+#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD word6
+#define lpfc_mbox_hdr_domain_SHIFT 24
+#define lpfc_mbox_hdr_domain_MASK 0x000000FF
+#define lpfc_mbox_hdr_domain_WORD word6
uint32_t timeout;
uint32_t request_length;
- uint32_t reserved9;
+ uint32_t word9;
+#define lpfc_mbox_hdr_version_SHIFT 0
+#define lpfc_mbox_hdr_version_MASK 0x000000FF
+#define lpfc_mbox_hdr_version_WORD word9
+#define LPFC_Q_CREATE_VERSION_2 2
+#define LPFC_Q_CREATE_VERSION_1 1
+#define LPFC_Q_CREATE_VERSION_0 0
} request;
struct {
uint32_t word6;
@@ -917,9 +933,12 @@ struct cq_context {
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
uint32_t word1;
-#define lpfc_cq_eq_id_SHIFT 22
+#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF
#define lpfc_cq_eq_id_WORD word1
+#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
+#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
+#define lpfc_cq_eq_id_2_WORD word1
uint32_t reserved0;
uint32_t reserved1;
};
@@ -929,6 +948,9 @@ struct lpfc_mbx_cq_create {
union {
struct {
uint32_t word0;
+#define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */
+#define lpfc_mbx_cq_create_page_size_MASK 0x000000FF
+#define lpfc_mbx_cq_create_page_size_WORD word0
#define lpfc_mbx_cq_create_num_pages_SHIFT 0
#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_num_pages_WORD word0
@@ -969,7 +991,7 @@ struct wq_context {
struct lpfc_mbx_wq_create {
struct mbox_header header;
union {
- struct {
+ struct { /* Version 0 Request */
uint32_t word0;
#define lpfc_mbx_wq_create_num_pages_SHIFT 0
#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
@@ -979,6 +1001,23 @@ struct lpfc_mbx_wq_create {
#define lpfc_mbx_wq_create_cq_id_WORD word0
struct dma_address page[LPFC_MAX_WQ_PAGE];
} request;
+ struct { /* Version 1 Request */
+ uint32_t word0; /* Word 0 is the same as in v0 */
+ uint32_t word1;
+#define lpfc_mbx_wq_create_page_size_SHIFT 0
+#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF
+#define lpfc_mbx_wq_create_page_size_WORD word1
+#define lpfc_mbx_wq_create_wqe_size_SHIFT 8
+#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F
+#define lpfc_mbx_wq_create_wqe_size_WORD word1
+#define LPFC_WQ_WQE_SIZE_64 0x5
+#define LPFC_WQ_WQE_SIZE_128 0x6
+#define lpfc_mbx_wq_create_wqe_count_SHIFT 16
+#define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_wqe_count_WORD word1
+ uint32_t word2;
+ struct dma_address page[LPFC_MAX_WQ_PAGE-1];
+ } request_1;
struct {
uint32_t word0;
#define lpfc_mbx_wq_create_q_id_SHIFT 0
@@ -1007,13 +1046,22 @@ struct lpfc_mbx_wq_destroy {
#define LPFC_DATA_BUF_SIZE 2048
struct rq_context {
uint32_t word0;
-#define lpfc_rq_context_rq_size_SHIFT 16
-#define lpfc_rq_context_rq_size_MASK 0x0000000F
-#define lpfc_rq_context_rq_size_WORD word0
+#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */
+#define lpfc_rq_context_rqe_count_MASK 0x0000000F
+#define lpfc_rq_context_rqe_count_WORD word0
#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
+#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */
+#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF
+#define lpfc_rq_context_rqe_count_1_WORD word0
+#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
+#define lpfc_rq_context_rqe_size_MASK 0x0000000F
+#define lpfc_rq_context_rqe_size_WORD word0
+#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
+#define lpfc_rq_context_page_size_MASK 0x000000FF
+#define lpfc_rq_context_page_size_WORD word0
uint32_t reserved1;
uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
@@ -1022,7 +1070,7 @@ struct rq_context {
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
#define lpfc_rq_context_buf_size_WORD word2
- uint32_t reserved3;
+ uint32_t buffer_size; /* Version 1 Only */
};
struct lpfc_mbx_rq_create {
@@ -1062,16 +1110,16 @@ struct lpfc_mbx_rq_destroy {
struct mq_context {
uint32_t word0;
-#define lpfc_mq_context_cq_id_SHIFT 22
+#define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_mq_context_cq_id_MASK 0x000003FF
#define lpfc_mq_context_cq_id_WORD word0
-#define lpfc_mq_context_count_SHIFT 16
-#define lpfc_mq_context_count_MASK 0x0000000F
-#define lpfc_mq_context_count_WORD word0
-#define LPFC_MQ_CNT_16 0x5
-#define LPFC_MQ_CNT_32 0x6
-#define LPFC_MQ_CNT_64 0x7
-#define LPFC_MQ_CNT_128 0x8
+#define lpfc_mq_context_ring_size_SHIFT 16
+#define lpfc_mq_context_ring_size_MASK 0x0000000F
+#define lpfc_mq_context_ring_size_WORD word0
+#define LPFC_MQ_RING_SIZE_16 0x5
+#define LPFC_MQ_RING_SIZE_32 0x6
+#define LPFC_MQ_RING_SIZE_64 0x7
+#define LPFC_MQ_RING_SIZE_128 0x8
uint32_t word1;
#define lpfc_mq_context_valid_SHIFT 31
#define lpfc_mq_context_valid_MASK 0x00000001
@@ -1105,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext {
union {
struct {
uint32_t word0;
-#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
-#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
-#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+#define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */
+#define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_cq_id_WORD word0
uint32_t async_evt_bmap;
#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
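
The _SHIFT/_MASK/_WORD triplets added in lpfc_hw4.h are consumed through the driver's bf_set()/bf_get() accessors. The standalone sketch below uses simplified stand-in macros and a demo structure (not the driver's own definitions) to show the packing scheme these definitions assume:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's bf_set()/bf_get() helpers. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* Demo context mirroring the version-2 CQ EQ id field (low 16 bits of word1). */
struct demo_cq_context {
	uint32_t word1;
#define demo_cq_eq_id_2_SHIFT	0
#define demo_cq_eq_id_2_MASK	0x0000FFFF
#define demo_cq_eq_id_2_WORD	word1
};

int main(void)
{
	struct demo_cq_context ctx = { 0 };

	bf_set(demo_cq_eq_id_2, &ctx, 0x01ab);	/* pack an EQ id */
	printf("word1=0x%08x eq_id=0x%04x\n",
	       ctx.word1, bf_get(demo_cq_eq_id_2, &ctx));
	return 0;
}
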
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 35665cfb..505f884 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->hba_flag &= ~HBA_ERATT_HANDLED;
/* Enable appropriate host interrupts */
- status = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &status)) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EIO;
+ }
status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
if (psli->num_rings > 0)
status |= HC_R0INT_ENA;
@@ -1222,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
/* Wait for the ER1 bit to clear.*/
while (phba->work_hs & HS_FFER1) {
msleep(100);
- phba->work_hs = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
+ phba->work_hs = UNPLUG_ERR ;
+ break;
+ }
/* If driver is unloading let the worker thread continue */
if (phba->pport->load_flag & FC_UNLOADING) {
phba->work_hs = 0;
@@ -4460,7 +4466,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
}
/**
- * lpfc_init_api_table_setup - Set up init api fucntion jump table
+ * lpfc_init_api_table_setup - Set up init api function jump table
* @phba: The hba struct for which this call is being executed.
* @dev_grp: The HBA PCI-Device group number.
*
@@ -4474,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
phba->lpfc_hba_init_link = lpfc_hba_init_link;
phba->lpfc_hba_down_link = lpfc_hba_down_link;
+ phba->lpfc_selective_reset = lpfc_selective_reset;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -4843,7 +4850,7 @@ out_free_mem:
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
int
@@ -5385,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
int i, port_error = 0;
uint32_t if_type;
+ memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
+ memset(&reg_data, 0, sizeof(reg_data));
if (!phba->sli4_hba.PSMPHRregaddr)
return -ENODEV;
/* Wait up to 30 seconds for the SLI Port POST done and ready */
for (i = 0; i < 3000; i++) {
- portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr);
- if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) {
+ if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr_reg.word0) ||
+ (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
/* Port has a fatal POST error, break out */
port_error = -ENODEV;
break;
@@ -5472,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
break;
case LPFC_SLI_INTF_IF_TYPE_2:
/* Final checks. The port status should be clean. */
- reg_data.word0 =
- readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
- if (bf_get(lpfc_sliport_status_err, &reg_data)) {
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &reg_data.word0) ||
+ bf_get(lpfc_sliport_status_err, &reg_data)) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.
ERR1regaddr);
@@ -5720,7 +5730,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
static int
@@ -5825,7 +5835,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
static int
@@ -5884,7 +5894,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
static int
@@ -6179,7 +6189,7 @@ out_error:
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
static void
@@ -6243,7 +6253,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
int
@@ -6488,7 +6498,7 @@ out_error:
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
void
@@ -6533,7 +6543,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
**/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
@@ -6694,7 +6704,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
*
* Return codes
* 0 - successful
- * -ENOMEM - No availble memory
+ * -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
int
@@ -6760,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
* the loop again.
*/
for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
- reg_data.word0 =
- readl(phba->sli4_hba.u.if_type2.
- STATUSregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.
+ STATUSregaddr, &reg_data.word0)) {
+ rc = -ENODEV;
+ break;
+ }
if (bf_get(lpfc_sliport_status_rdy, &reg_data))
break;
if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
@@ -6783,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
}
/* Detect any port errors. */
- reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
- STATUSregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &reg_data.word0)) {
+ rc = -ENODEV;
+ break;
+ }
if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
(rdy_chk >= 1000)) {
phba->work_status[0] = readl(
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index dba32df..fbab973 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1834,7 +1834,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* @fcf_index: index to fcf table.
*
* This routine routine allocates and constructs non-embedded mailbox command
- * for reading a FCF table entry refered by @fcf_index.
+ * for reading a FCF table entry referred by @fcf_index.
*
* Return: pointer to the mailbox command constructed if successful, otherwise
* NULL.
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index f3cfbe2..f2b1bbc 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -50,7 +50,7 @@
* and subcategory. The event type must come first.
* The subcategory further defines the data that follows in the rest
* of the payload. Each category will have its own unique header plus
- * any addtional data unique to the subcategory.
+ * any additional data unique to the subcategory.
* The payload sent via the fc transport is one-way driver->application.
*/
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 52b3515..0d92d42 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -658,7 +658,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
/**
- * lpfc_release_rpi - Release a RPI by issueing unreg_login mailbox cmd.
+ * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
* @phba : Pointer to lpfc_hba structure.
* @vport: Pointer to lpfc_vport structure.
* @rpi : rpi to be release.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bf34178..fe7cc84 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -577,7 +577,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->un.fcpi64.bdl.addrHigh = 0;
iocb->ulpBdeCount = 0;
iocb->ulpLe = 0;
- /* fill in responce BDE */
+ /* fill in response BDE */
iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
BUFF_TYPE_BDE_64;
iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
@@ -1217,10 +1217,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
(2 * sizeof(struct ulp_bde64)));
data_bde->addrHigh = putPaddrHigh(physaddr);
data_bde->addrLow = putPaddrLow(physaddr);
- /* ebde count includes the responce bde and data bpl */
+ /* ebde count includes the response bde and data bpl */
iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
} else {
- /* ebde count includes the responce bde and data bdes */
+ /* ebde count includes the response bde and data bdes */
iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
}
} else {
@@ -1514,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgpe = NULL; /* s/g prot entry */
struct lpfc_pde5 *pde5 = NULL;
struct lpfc_pde6 *pde6 = NULL;
- struct ulp_bde64 *prot_bde = NULL;
+ struct lpfc_pde7 *pde7 = NULL;
dma_addr_t dataphysaddr, protphysaddr;
unsigned short curr_data = 0, curr_prot = 0;
- unsigned int split_offset, protgroup_len;
+ unsigned int split_offset;
+ unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
unsigned int remainder, subtotal;
int status;
@@ -1585,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bpl++;
/* setup the first BDE that points to protection buffer */
- prot_bde = (struct ulp_bde64 *) bpl;
- protphysaddr = sg_dma_address(sgpe);
- prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
- prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
- protgroup_len = sg_dma_len(sgpe);
+ protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+ protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
/* must be integer multiple of the DIF block length */
BUG_ON(protgroup_len % 8);
+ pde7 = (struct lpfc_pde7 *) bpl;
+ memset(pde7, 0, sizeof(struct lpfc_pde7));
+ bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
+
+ pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
+ pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
+
protgrp_blks = protgroup_len / 8;
protgrp_bytes = protgrp_blks * blksize;
- prot_bde->tus.f.bdeSize = protgroup_len;
- prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
- prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
+ /* check if this pde is crossing the 4K boundary; if so split */
+ if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
+ protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
+ protgroup_offset += protgroup_remainder;
+ protgrp_blks = protgroup_remainder / 8;
+ protgrp_bytes = protgroup_remainder * blksize;
+ } else {
+ protgroup_offset = 0;
+ curr_prot++;
+ }
- curr_prot++;
num_bde++;
/* setup BDE's for data blocks associated with DIF data */
@@ -1653,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
+ if (protgroup_offset) {
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ bpl++;
+ continue;
+ }
+
/* are we done ? */
if (curr_prot == protcnt) {
alldone = 1;
@@ -1675,6 +1693,7 @@ out:
return num_bde;
}
+
/*
* Given a SCSI command that supports DIF, determine composition of protection
* groups involved in setting up buffer lists
@@ -2361,7 +2380,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/*
* The cmnd->underflow is the minimum number of bytes that must
- * be transfered for this command. Provided a sense condition
+ * be transferred for this command. Provided a sense condition
* is not present, make sure the actual amount transferred is at
* least the underflow value or fail.
*/
@@ -2854,7 +2873,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
}
/**
- * lpfc_scsi_api_table_setup - Set up scsi api fucntion jump table
+ * lpfc_scsi_api_table_setup - Set up scsi api function jump table
* @phba: The hba struct for which this call is being executed.
* @dev_grp: The HBA PCI-Device group number.
*
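
The new PDE7 handling in lpfc_bg_setup_bpl_prot() above splits a protection-group segment that would cross a 4 KiB boundary, carrying the remainder into the next descriptor via protgroup_offset. A standalone arithmetic sketch of that split test (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Return the number of bytes left for the next descriptor after describing
 * as much of the segment as fits before the next 4 KiB boundary. */
static uint32_t split_at_4k(uint32_t addr_low, uint32_t len, uint32_t *used)
{
	uint32_t off = addr_low & 0xfff;

	if (off + len > 0x1000) {	/* segment would cross a 4 KiB page */
		*used = 0x1000 - off;	/* describe up to the boundary now */
		return len - *used;	/* remainder carried forward */
	}
	*used = len;
	return 0;
}

int main(void)
{
	uint32_t used;
	uint32_t rem = split_at_4k(0xf80, 0x200, &used);

	printf("used=0x%x remainder=0x%x\n", used, rem);	/* 0x80, 0x180 */
	return 0;
}
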
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 5932273..ce645b2 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -130,7 +130,7 @@ struct lpfc_scsi_buf {
dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
/*
- * data and dma_handle are the kernel virutal and bus address of the
+ * data and dma_handle are the kernel virtual and bus address of the
* dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
* gather bde list that supports the sg_tablesize value.
*/
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2ee0374..dacabbe 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2817,7 +2817,7 @@ void lpfc_poll_eratt(unsigned long ptr)
* This function is called from the interrupt context when there is a ring
* event for the fcp ring. The caller does not hold any lock.
* The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
+ * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
* LE bit set. The function will call the completion handler of the command iocb
* if the response iocb indicates a completion for a command iocb or it is
* an abort completion. The function will call lpfc_sli_process_unsol_iocb
@@ -3477,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
int retval = 0;
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return 1;
/*
* Check status register every 100ms for 5 retries, then every
@@ -3502,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status)) {
+ retval = 1;
+ break;
+ }
}
/* Check to see if any errors occurred during init */
@@ -3584,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
volatile uint32_t mbox;
- uint32_t hc_copy;
+ uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@@ -3601,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
resp_buf = phba->MBslimaddr;
/* Disable the error attention */
- hc_copy = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &hc_copy))
+ return;
writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
phba->link_flag |= LS_IGNORE_ERATT;
- if (readl(phba->HAregaddr) & HA_ERATT) {
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return;
+ if (ha_copy & HA_ERATT) {
/* Clear Chip error bit */
writel(HA_ERATT, phba->HAregaddr);
phba->pport->stopped = 1;
@@ -3620,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
mbox_buf = phba->MBslimaddr;
writel(mbox, mbox_buf);
- for (i = 0;
- readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
- mdelay(1);
-
- if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
+ for (i = 0; i < 50; i++) {
+ if (lpfc_readl((resp_buf + 1), &resp_data))
+ return;
+ if (resp_data != ~(BARRIER_TEST_PATTERN))
+ mdelay(1);
+ else
+ break;
+ }
+ resp_data = 0;
+ if (lpfc_readl((resp_buf + 1), &resp_data))
+ return;
+ if (resp_data != ~(BARRIER_TEST_PATTERN)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
phba->pport->stopped)
goto restore_hc;
@@ -3633,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
}
((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
- for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
- mdelay(1);
+ resp_data = 0;
+ for (i = 0; i < 500; i++) {
+ if (lpfc_readl(resp_buf, &resp_data))
+ return;
+ if (resp_data != mbox)
+ mdelay(1);
+ else
+ break;
+ }
clear_errat:
- while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
- mdelay(1);
+ while (++i < 500) {
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return;
+ if (!(ha_copy & HA_ERATT))
+ mdelay(1);
+ else
+ break;
+ }
if (readl(phba->HAregaddr) & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
@@ -3686,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
/* Disable the error attention */
spin_lock_irq(&phba->hbalock);
- status = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &status)) {
+ spin_unlock_irq(&phba->hbalock);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return 1;
+ }
status &= ~HC_ERINT_ENA;
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -3720,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
* 3 seconds we still set HBA_ERROR state because the status of the
* board is now undefined.
*/
- ha_copy = readl(phba->HAregaddr);
-
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
mdelay(100);
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return 1;
}
del_timer_sync(&psli->mbox_tmo);
@@ -4018,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
uint32_t status, i = 0;
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return -EIO;
/* Check status register to see what current state is */
i = 0;
@@ -4073,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
- status = readl(phba->HSregaddr);
+ if (lpfc_readl(phba->HSregaddr, &status))
+ return -EIO;
}
/* Check to see if any errors occurred during init */
@@ -5083,7 +5117,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
/* Setting state unknown so lpfc_sli_abort_iocb_ring
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
- * it to fail all oustanding SCSI IO.
+ * it to fail all outstanding SCSI IO.
*/
spin_lock_irq(&phba->pport->work_port_lock);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
@@ -5136,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
MAILBOX_t *mb;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
- uint32_t ha_copy;
+ uint32_t ha_copy, hc_copy;
int i;
unsigned long timeout;
unsigned long drvr_flag = 0;
@@ -5202,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
- !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
- spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
+ !(hc_copy & HC_MBINT_ENA)) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2528 Mailbox command x%x cannot "
"issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
- goto out_not_finished;
+ goto out_not_finished;
+ }
}
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -5408,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
word0 = le32_to_cpu(word0);
} else {
/* First read mbox status word */
- word0 = readl(phba->MBslimaddr);
+ if (lpfc_readl(phba->MBslimaddr, &word0)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
}
/* Read the HBA Host Attention Register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
mb->mbxCommand) *
1000) + jiffies;
@@ -5463,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
word0 = readl(phba->MBslimaddr);
}
/* Read the HBA Host Attention Register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ goto out_not_finished;
+ }
}
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
@@ -5983,7 +6031,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
}
/**
- * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
* @phba: The hba struct for which this call is being executed.
* @dev_grp: The HBA PCI-Device group number.
*
@@ -6263,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
- sgl->word2 = cpu_to_le32(sgl->word2);
/* swap the size field back to the cpu so we
* can assign it to the sgl.
*/
@@ -6283,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
bf_set(lpfc_sli4_sge_offset, sgl, offset);
offset += bde.tus.f.bdeSize;
}
+ sgl->word2 = cpu_to_le32(sgl->word2);
bpl++;
sgl++;
}
@@ -6528,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
sizeof(struct ulp_bde64);
for (i = 0; i < numBdes; i++) {
- if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- break;
bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ break;
xmit_len += bde.tus.f.bdeSize;
}
/* word3 iocb=IO_TAG wqe=request_payload_len */
@@ -6620,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
xritag = 0;
break;
case CMD_XMIT_BLS_RSP64_CX:
- /* As BLS ABTS-ACC WQE is very different from other WQEs,
+ /* As BLS ABTS RSP WQE is very different from other WQEs,
* we re-construct this WQE here based on information in
* iocbq from scratch.
*/
memset(wqe, 0, sizeof(union lpfc_wqe));
/* OX_ID is invariable to who sent ABTS to CT exchange */
bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
- if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
+ bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
+ if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
LPFC_ABTS_UNSOL_INT) {
/* ABTS sent by initiator to CT exchange, the
* RX_ID field will be filled with the newly
@@ -6642,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* RX_ID from ABTS.
*/
bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
+ bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
}
bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
@@ -6653,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_NONE);
/* Overwrite the pre-set comnd type with OTHER_COMMAND */
command_type = OTHER_COMMAND;
+ if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
+ bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
+ bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
+ bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
+ }
+
break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
@@ -6701,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
+ piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
sglq = NULL;
else {
if (pring->txq_cnt) {
@@ -6789,7 +6847,7 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
}
/**
- * lpfc_sli_api_table_setup - Set up sli api fucntion jump table
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
* @phba: The hba struct for which this call is being executed.
* @dev_grp: The HBA PCI-Device group number.
*
@@ -7463,7 +7521,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *mp, *next_mp;
struct list_head *slp = &pring->postbufq;
- /* Search postbufq, from the begining, looking for a match on tag */
+ /* Search postbufq, from the beginning, looking for a match on tag */
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
if (mp->buffer_tag == tag) {
@@ -7507,7 +7565,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *mp, *next_mp;
struct list_head *slp = &pring->postbufq;
- /* Search postbufq, from the begining, looking for a match on phys */
+ /* Search postbufq, from the beginning, looking for a match on phys */
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
if (mp->phys == phys) {
@@ -8194,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
piocb->iocb_flag &= ~LPFC_IO_WAKE;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val))
+ return IOCB_ERROR;
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -8236,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
}
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- creg_val = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &creg_val))
+ return IOCB_ERROR;
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -8378,7 +8438,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
* for possible error attention events. The caller must hold the hostlock
* with spin_lock_irq().
*
- * This fucntion returns 1 when there is Error Attention in the Host Attention
+ * This function returns 1 when there is Error Attention in the Host Attention
* Register and returns 0 otherwise.
**/
static int
@@ -8387,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
uint32_t ha_copy;
/* Read chip Host Attention (HA) register */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ goto unplug_err;
+
if (ha_copy & HA_ERATT) {
/* Read host status register to retrieve error event */
- lpfc_sli_read_hs(phba);
+ if (lpfc_sli_read_hs(phba))
+ goto unplug_err;
/* Check if there is a deferred error condition is active */
if ((HS_FFER1 & phba->work_hs) &&
@@ -8409,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
return 1;
}
return 0;
+
+unplug_err:
+ /* Set the driver HS work bitmap */
+ phba->work_hs |= UNPLUG_ERR;
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
}
/**
@@ -8419,7 +8491,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
* for possible error attention events. The caller must hold the hostlock
* with spin_lock_irq().
*
- * This fucntion returns 1 when there is Error Attention in the Host Attention
+ * This function returns 1 when there is Error Attention in the Host Attention
* Register and returns 0 otherwise.
**/
static int
@@ -8436,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
case LPFC_SLI_INTF_IF_TYPE_0:
- uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
- uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
+ &uerr_sta_lo) ||
+ lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
+ &uerr_sta_hi)) {
+ phba->work_hs |= UNPLUG_ERR;
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
(~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8456,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
}
break;
case LPFC_SLI_INTF_IF_TYPE_2:
- portstat_reg.word0 =
- readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
- portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0) ||
+ lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr)){
+ phba->work_hs |= UNPLUG_ERR;
+ phba->work_ha |= HA_ERATT;
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ return 1;
+ }
if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
phba->work_status[0] =
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -8496,7 +8581,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
* This function is called from timer soft interrupt context to check HBA's
* error attention register bit for error attention events.
*
- * This fucntion returns 1 when there is Error Attention in the Host Attention
+ * This function returns 1 when there is Error Attention in the Host Attention
* Register and returns 0 otherwise.
**/
int
@@ -8639,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
/* Need to read HA REG for slow-path events */
spin_lock_irqsave(&phba->hbalock, iflag);
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ goto unplug_error;
/* If somebody is waiting to handle an eratt don't process it
* here. The brdkill function will do this.
*/
@@ -8665,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
}
/* Clear up only attention source related to slow-path */
- hc_copy = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &hc_copy))
+ goto unplug_error;
+
writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
HC_LAINT_ENA | HC_ERINT_ENA),
phba->HCregaddr);
@@ -8688,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
*/
spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
- control = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &control))
+ goto unplug_error;
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -8708,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
status >>= (4*LPFC_ELS_RING);
if (status & HA_RXMASK) {
spin_lock_irqsave(&phba->hbalock, iflag);
- control = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &control))
+ goto unplug_error;
lpfc_debugfs_slow_ring_trc(phba,
"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
@@ -8741,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
}
spin_lock_irqsave(&phba->hbalock, iflag);
if (work_ha_copy & HA_ERATT) {
- lpfc_sli_read_hs(phba);
+ if (lpfc_sli_read_hs(phba))
+ goto unplug_error;
/*
* Check if there is a deferred error condition
* is active
@@ -8872,6 +8963,9 @@ send_current_mbox:
lpfc_worker_wake_up(phba);
}
return IRQ_HANDLED;
+unplug_error:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return IRQ_HANDLED;
} /* lpfc_sli_sp_intr_handler */
@@ -8919,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
if (lpfc_intr_state_check(phba))
return IRQ_NONE;
/* Need to read HA REG for FCP ring and other ring events */
- ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &ha_copy))
+ return IRQ_HANDLED;
/* Clear up only attention source related to fast-path */
spin_lock_irqsave(&phba->hbalock, iflag);
/*
@@ -9004,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
spin_lock(&phba->hbalock);
- phba->ha_copy = readl(phba->HAregaddr);
+ if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_HANDLED;
+ }
+
if (unlikely(!phba->ha_copy)) {
spin_unlock(&phba->hbalock);
return IRQ_NONE;
@@ -9026,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
}
/* Clear attention sources except link and error attentions */
- hc_copy = readl(phba->HCregaddr);
+ if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_HANDLED;
+ }
writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
phba->HCregaddr);
@@ -9582,7 +9684,7 @@ out:
* @cq: Pointer to the completion queue.
* @wcqe: Pointer to a completion queue entry.
*
- * This routine process a slow-path work-queue or recieve queue completion queue
+ * This routine process a slow-path work-queue or receive queue completion queue
* entry.
*
* Return: true if work posted to worker thread, otherwise false.
@@ -10403,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
-
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -10413,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
LPFC_MBOX_OPCODE_CQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
cq_create = &mbox->u.mqe.un.cq_create;
+ shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
cq->page_count);
bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
- bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.cqv);
+ if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+ eq->queue_id);
+ } else {
+ bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+ eq->queue_id);
+ }
switch (cq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10449,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -10515,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
switch (mq->entry_count) {
case 16:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_16);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_32);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_64);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
- LPFC_MQ_CNT_128);
+ bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+ LPFC_MQ_RING_SIZE_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10586,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
length, LPFC_SLI4_MBX_EMBED);
mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
bf_set(lpfc_mbx_mq_create_ext_num_pages,
&mq_create_ext->u.request, mq->page_count);
bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
@@ -10598,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
&mq_create_ext->u.request, 1);
bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
&mq_create_ext->u.request, 1);
- bf_set(lpfc_mq_context_cq_id,
- &mq_create_ext->u.request.context, cq->queue_id);
bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.mqv);
+ if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+ bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+ cq->queue_id);
+ else
+ bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+ cq->queue_id);
switch (mq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10610,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
return -EINVAL;
/* otherwise default to smallest count (drop through) */
case 16:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_16);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_32);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_64);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
- LPFC_MQ_CNT_128);
+ bf_set(lpfc_mq_context_ring_size,
+ &mq_create_ext->u.request.context,
+ LPFC_MQ_RING_SIZE_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10634,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
- shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
&mq_create_ext->u.response);
if (rc != MBX_SUCCESS) {
@@ -10711,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ struct dma_address *page;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -10724,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
wq_create = &mbox->u.mqe.un.wq_create;
+ shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
wq->page_count);
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.wqv);
+ if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+ wq->entry_count);
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_64);
+ break;
+ case 128:
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ break;
+ }
+ bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ } else {
+ page = wq_create->u.request.page;
+ }
list_for_each_entry(dmabuf, &wq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
- wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
- putPaddrLow(dmabuf->phys);
- wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
- putPaddrHigh(dmabuf->phys);
+ page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+ page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -10815,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
rq_create = &mbox->u.mqe.un.rq_create;
- switch (hrq->entry_count) {
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2535 Unsupported RQ count. (%d)\n",
- hrq->entry_count);
- if (hrq->entry_count < 512)
- return -EINVAL;
- /* otherwise default to smallest count (drop through) */
- case 512:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_512);
- break;
- case 1024:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_1024);
- break;
- case 2048:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_2048);
- break;
- case 4096:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_4096);
- break;
+ shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.rqv);
+ if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_rq_context_rqe_count_1,
+ &rq_create->u.request.context,
+ hrq->entry_count);
+ rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+ } else {
+ switch (hrq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2535 Unsupported RQ count. (%d)\n",
+ hrq->entry_count);
+ if (hrq->entry_count < 512)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_HDR_BUF_SIZE);
}
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id);
bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
hrq->page_count);
- bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
- LPFC_HDR_BUF_SIZE);
list_for_each_entry(dmabuf, &hrq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
@@ -10855,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -10881,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
- switch (drq->entry_count) {
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2536 Unsupported RQ count. (%d)\n",
- drq->entry_count);
- if (drq->entry_count < 512)
- return -EINVAL;
- /* otherwise default to smallest count (drop through) */
- case 512:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_512);
- break;
- case 1024:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_1024);
- break;
- case 2048:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_2048);
- break;
- case 4096:
- bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
- LPFC_RQ_RING_SIZE_4096);
- break;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ phba->sli4_hba.pc_sli4_params.rqv);
+ if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+ bf_set(lpfc_rq_context_rqe_count_1,
+ &rq_create->u.request.context,
+ hrq->entry_count);
+ rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+ } else {
+ switch (drq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2536 Unsupported RQ count. (%d)\n",
+ drq->entry_count);
+ if (drq->entry_count < 512)
+ return -EINVAL;
+ /* otherwise default to smallest count (drop through) */
+ case 512:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_512);
+ break;
+ case 1024:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_1024);
+ break;
+ case 2048:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_2048);
+ break;
+ case 4096:
+ bf_set(lpfc_rq_context_rqe_count,
+ &rq_create->u.request.context,
+ LPFC_RQ_RING_SIZE_4096);
+ break;
+ }
+ bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+ LPFC_DATA_BUF_SIZE);
}
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id);
bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
drq->page_count);
- bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
- LPFC_DATA_BUF_SIZE);
list_for_each_entry(dmabuf, &drq->page_list, list) {
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
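
The RQ create hunks above (and, analogously, the CQ/WQ/MQ ones) follow one pattern: advertise the queue-create version the port reports, then either program the entry count directly (version 1) or translate it into a fixed ring-size encoding (version 0). A minimal sketch of the version-0 translation, with hypothetical encodings standing in for the LPFC_RQ_RING_SIZE_* values:

/* Sketch only: maps an RQ entry count to a ring-size encoding the way the
 * version-0 branch above does; the encodings here are hypothetical stand-ins.
 */
#include <stdio.h>

enum demo_rq_ring_size {
	DEMO_RQ_RING_SIZE_512,
	DEMO_RQ_RING_SIZE_1024,
	DEMO_RQ_RING_SIZE_2048,
	DEMO_RQ_RING_SIZE_4096,
};

static int demo_rq_ring_size(unsigned int entry_count,
			     enum demo_rq_ring_size *out)
{
	switch (entry_count) {
	case 512:  *out = DEMO_RQ_RING_SIZE_512;  return 0;
	case 1024: *out = DEMO_RQ_RING_SIZE_1024; return 0;
	case 2048: *out = DEMO_RQ_RING_SIZE_2048; return 0;
	case 4096: *out = DEMO_RQ_RING_SIZE_4096; return 0;
	default:
		/* unsupported count: reject if too small, otherwise fall
		 * back to the smallest supported size, as the driver does */
		if (entry_count < 512)
			return -1;
		*out = DEMO_RQ_RING_SIZE_512;
		return 0;
	}
}

int main(void)
{
	enum demo_rq_ring_size sz;

	printf("%d\n", demo_rq_ring_size(2048, &sz) ? -1 : (int)sz);
	printf("%d\n", demo_rq_ring_size(100, &sz)  ? -1 : (int)sz);
	return 0;
}
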
@@ -11580,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
static char *rctl_names[] = FC_RCTL_NAMES_INIT;
char *type_names[] = FC_TYPE_NAMES_INIT;
struct fc_vft_header *fc_vft_hdr;
+ uint32_t *header = (uint32_t *) fc_hdr;
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
@@ -11628,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
default:
goto drop;
}
+
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2538 Received frame rctl:%s type:%s\n",
+ "2538 Received frame rctl:%s type:%s "
+ "Frame Data:%08x %08x %08x %08x %08x %08x\n",
rctl_names[fc_hdr->fh_r_ctl],
- type_names[fc_hdr->fh_type]);
+ type_names[fc_hdr->fh_type],
+ be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+ be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+ be32_to_cpu(header[4]), be32_to_cpu(header[5]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -11928,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
}
/**
- * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
+ * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
* @phba: Pointer to HBA context object.
* @cmd_iocbq: pointer to the command iocbq structure.
* @rsp_iocbq: pointer to the response iocbq structure.
*
- * This function handles the sequence abort accept iocb command complete
+ * This function handles the sequence abort response iocb command complete
* event. It properly releases the memory allocated to the sequence abort
* accept iocb.
**/
static void
-lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
@@ -11947,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_seq_abort_acc - Accept sequence abort
+ * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
* @phba: Pointer to HBA context object.
* @fc_hdr: pointer to a FC frame header.
*
- * This function sends a basic accept to a previous unsol sequence abort
+ * This function sends a basic response to a previous unsol sequence abort
* event after aborting the sequence handling.
**/
static void
-lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
struct fc_frame_header *fc_hdr)
{
struct lpfc_iocbq *ctiocb = NULL;
@@ -11963,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
uint16_t oxid, rxid;
uint32_t sid, fctl;
IOCB_t *icmd;
+ int rc;
if (!lpfc_is_link_up(phba))
return;
@@ -11983,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
+ phba->sli4_hba.max_cfg_param.xri_base))
lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
- /* Allocate buffer for acc iocb */
+ /* Allocate buffer for rsp iocb */
ctiocb = lpfc_sli_get_iocbq(phba);
if (!ctiocb)
return;
@@ -12008,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
- ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
+ ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->sli4_xritag = NO_XRI;
+
+ /* If the oxid maps to the FCP XRI range or if it is out of range,
+ * send a BLS_RJT. The driver no longer has that exchange.
+ * Override the IOCB for a BA_RJT.
+ */
+ if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
+ phba->sli4_hba.max_cfg_param.xri_base) ||
+ oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
+ phba->sli4_hba.max_cfg_param.xri_base)) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
if (fctl & FC_FC_EX_CTX) {
/* ABTS sent by responder to CT exchange, construction
* of BA_ACC will use OX_ID from ABTS for the XRI_TAG
* field and RX_ID from ABTS for RX_ID field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
- ctiocb->sli4_xritag = oxid;
+ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
* XRI_TAG and RX_ID fields.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
- ctiocb->sli4_xritag = NO_XRI;
+ bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
}
- bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
+ bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
- /* Xmit CT abts accept on exchange <xid> */
+ /* Xmit CT abts response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
- CMD_XMIT_BLS_RSP64_CX, phba->link_state);
- lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+ "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2925 Failed to issue CT ABTS RSP x%x on "
+ "xri x%x, Data x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ phba->link_state);
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ }
}
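
As a standalone illustration of the response-selection rule added in the hunk above (sketch only; the enum and function names are hypothetical): an incoming ABTS whose OX_ID falls outside the XRI range the driver still owns, or outside its ELS XRIs, is answered with a BA_RJT (invalid XID, unable to perform), otherwise a BA_ACC is built as before.

/* Sketch of the OX_ID range check used to pick BA_ACC vs. BA_RJT.
 * Hypothetical names; the condition mirrors the one in the patch above.
 */
#include <stdint.h>
#include <stdio.h>

enum demo_bls_rsp { DEMO_BA_ACC, DEMO_BA_RJT };

static enum demo_bls_rsp demo_pick_bls_rsp(uint32_t oxid,
					   uint32_t xri_base,
					   uint32_t max_xri,
					   uint32_t els_iocb_cnt)
{
	/* Reject when the OX_ID is beyond the configured XRI range or
	 * beyond the ELS XRIs reserved for this kind of exchange.
	 */
	if (oxid > (max_xri + xri_base) ||
	    oxid > (els_iocb_cnt + xri_base))
		return DEMO_BA_RJT;	/* reason: unable, invalid XID */
	return DEMO_BA_ACC;
}

int main(void)
{
	printf("%d\n", demo_pick_bls_rsp(0x10, 0, 1024, 64));	/* BA_ACC */
	printf("%d\n", demo_pick_bls_rsp(0x500, 0, 1024, 64));	/* BA_RJT */
	return 0;
}
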
/**
@@ -12081,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
/* Send basic accept (BA_ACC) to the abort requester */
- lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
+ lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
}
/**
@@ -12772,7 +12971,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
* record and processing it one at a time starting from the @fcf_index
* for initial FCF discovery or fast FCF failover rediscovery.
*
- * Return 0 if the mailbox command is submitted sucessfully, none 0
+ * Return 0 if the mailbox command is submitted successfully, none 0
* otherwise.
**/
int
@@ -12833,7 +13032,7 @@ fail_fcf_scan:
* This routine is invoked to read an FCF record indicated by @fcf_index
* and to use it for FLOGI roundrobin FCF failover.
*
- * Return 0 if the mailbox command is submitted sucessfully, none 0
+ * Return 0 if the mailbox command is submitted successfully, none 0
* otherwise.
**/
int
@@ -12879,7 +13078,7 @@ fail_fcf_read:
* This routine is invoked to read an FCF record indicated by @fcf_index to
* determine whether it's eligible for FLOGI roundrobin failover list.
*
- * Return 0 if the mailbox command is submitted sucessfully, none 0
+ * Return 0 if the mailbox command is submitted successfully, none 0
* otherwise.
**/
int
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 595056b..1a3cbf8 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0a4d376..2404d1d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.21"
+#define LPFC_DRIVER_VERSION "8.3.22"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"