-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c |  6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c   |  1
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  |  5
4 files changed, 18 insertions, 6 deletions
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ccce14..76f8bd5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		    phba->sysfs_mbox.mbox == NULL ) {
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(host->host_lock);
-			return -EINVAL;
+			return -EAGAIN;
 		}
 	}
@@ -1008,7 +1008,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		if (rc != MBX_SUCCESS) {
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(host->host_lock);
-			return -ENODEV;
+			return  (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
 		}
 		phba->sysfs_mbox.state = SMBOX_READING;
 	}
@@ -1017,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		printk(KERN_WARNING "mbox_read: Bad State\n");
 		sysfs_mbox_idle(phba);
 		spin_unlock_irq(host->host_lock);
-		return -EINVAL;
+		return -EAGAIN;
 	}
 
 	memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
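
The three lpfc_attr.c hunks above share one theme: transient conditions (a sysfs mbox state mismatch, a mailbox command timeout) now report retryable errors rather than hard failures. A minimal sketch of the resulting mapping follows; sysfs_mbox_errno(), state_valid, and rc are hypothetical stand-ins for the state checks and mailbox return code in the real functions, while the MBX_* values are the driver's own mailbox return codes:

	/*
	 * Hypothetical helper summarizing the error mapping after this
	 * patch; it does not exist in the driver.
	 */
	static int sysfs_mbox_errno(int state_valid, int rc)
	{
		if (!state_valid)
			return -EAGAIN;	/* transient: caller may retry */
		if (rc == MBX_TIMEOUT)
			return -ETIME;	/* mailbox command timed out */
		if (rc != MBX_SUCCESS)
			return -ENODEV;	/* hard failure: adapter unusable */
		return 0;
	}
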
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index b65ee57..cab2d9d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
 	}
 
 ct_unsol_event_exit_piocbq:
+	list_del(&head);
 	if (pmbuf) {
 		list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
 			lpfc_mbuf_free(phba, matp->virt, matp->phys);
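
The one-line fix above addresses a stack-local list head: lpfc_ct_unsol_event() splices head into a chain of iocbs, and without the list_del(&head) at the exit label the chain would still point at dead stack memory after the function returns. A generic sketch of the pattern, assuming only the standard <linux/list.h> primitives (walk_chain and incoming are illustrative names, not the driver's):

	#include <linux/list.h>

	static void walk_chain(struct list_head *incoming)
	{
		LIST_HEAD(head);		/* list head on the stack */

		list_add(&head, incoming);	/* splice ourselves into the chain */

		/* ... iterate the chain through 'head' ... */

		list_del(&head);		/* unlink before 'head' goes out of scope */
	}
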
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ef47b82..16dc8c8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1616,7 +1616,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_free_iocbq;
 	}
 
-	/* We can rely on a queue depth attribute only after SLI HBA setup */
+	/*
+	 * Set initial can_queue value since 0 is no longer supported and
+	 * scsi_add_host will fail. This will be adjusted later based on the
+	 * max xri value determined in hba setup.
+	 */
 	host->can_queue = phba->cfg_hba_queue_depth - 10;
 
 	/* Tell the midlayer we support 16 byte commands */
@@ -1656,6 +1660,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_free_irq;
 	}
 
+	/*
+	 * hba setup may have changed the hba_queue_depth so we need to adjust
+	 * the value of can_queue.
+	 */
+	host->can_queue = phba->cfg_hba_queue_depth - 10;
+
 	lpfc_discovery_wait(phba);
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
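
Taken together, the two lpfc_init.c hunks apply one rule at two points in the probe path: can_queue must be nonzero before scsi_add_host() (which now rejects zero), and it must be recomputed once SLI HBA setup has settled cfg_hba_queue_depth from the adapter's max XRI count. A condensed sketch of that ordering, with the intervening probe steps elided and `out` a stand-in error label, not one from the driver:

	/* Provisional depth: scsi_add_host() fails if can_queue == 0. */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out;			/* stand-in error label */

	/* ... irq setup and lpfc_sli_hba_setup(), which may shrink
	 *     cfg_hba_queue_depth to match the max XRI ... */

	/* Re-derive can_queue now that the real depth is known. */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

The fixed headroom of 10 presumably keeps a few exchange slots back for the driver's own non-midlayer traffic; the diff does not say so, so treat that reading as an assumption.
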
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fbf108c..9802ee8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
 			kfree(old_arr);
 			return iotag;
 		}
-	}
+	} else
+		spin_unlock_irq(phba->host->host_lock);
 
 	lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
 			"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -1399,11 +1400,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
 							 next_iocb,
 							 &saveq->list,
 							 list) {
+					list_del(&rspiocbp->list);
 					lpfc_sli_release_iocbq(phba,
 							       rspiocbp);
 				}
 			}
-
 			lpfc_sli_release_iocbq(phba, saveq);
 		}
 	}
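
The final hunk pairs with the iotag change: releasing an iocb puts it back on the driver's free pool by linking its `list` member (inferred from the usual lpfc free-list behavior, not shown in this diff), so each chained response iocb must first be unlinked from saveq->list or the same list_head would sit on two lists at once. Restated from the patched lines, same names as the diff, surrounding function elided:

	/* Unlink each chained response iocb before returning it to the pool. */
	list_for_each_entry_safe(rspiocbp, next_iocb, &saveq->list, list) {
		list_del(&rspiocbp->list);		/* off saveq's chain first */
		lpfc_sli_release_iocbq(phba, rspiocbp);	/* then back to the free pool */
	}
	lpfc_sli_release_iocbq(phba, saveq);		/* finally release the head iocb */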