Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
 drivers/scsi/qla2xxx/qla_iocb.c | 358 ++++++++++++++++++++++++-----------
 1 file changed, 214 insertions(+), 144 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a4..5bedc9d 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@
#include <scsi/scsi_tcq.h>
-static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
-static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
+static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
+ struct rsp_que *rsp);
+static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +31,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
/* Set transfer direction */
if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
- sp->fcport->ha->qla_stats.output_bytes +=
+ sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
- sp->fcport->ha->qla_stats.input_bytes +=
+ sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
return (cflags);
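
Note: this hunk reroutes the per-direction byte counters from the old per-host field (sp->fcport->ha) to the statistics block reached through the new shared-hardware pointer (sp->fcport->vha->hw). A minimal user-space model of the same accounting, with hypothetical types and flag values standing in for srb_t, struct qla_hw_data and the driver's CF_* control flags:

	#include <stdint.h>
	#include <stddef.h>

	enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

	struct hw_stats { uint64_t output_bytes, input_bytes; };

	#define CF_WRITE 0x40	/* assumed values; the driver uses BIT_6/BIT_5 */
	#define CF_READ  0x20

	/* Model of qla2x00_get_cmd_direction(): pick the control flag
	 * and account the transfer length against the shared hw stats. */
	static uint16_t get_cmd_direction(enum dma_dir dir, size_t len,
					  struct hw_stats *stats)
	{
		uint16_t cflags = 0;

		if (dir == DMA_TO_DEVICE) {
			cflags = CF_WRITE;
			stats->output_bytes += len;
		} else if (dir == DMA_FROM_DEVICE) {
			cflags = CF_READ;
			stats->input_bytes += len;
		}
		return cflags;
	}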
@@ -91,20 +92,19 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
* Returns a pointer to the Continuation Type 0 IOCB packet.
*/
static inline cont_entry_t *
-qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
{
cont_entry_t *cont_pkt;
-
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else {
- ha->request_ring_ptr++;
+ req->ring_ptr++;
}
- cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
+ cont_pkt = (cont_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
    __constant_cpu_to_le32(CONTINUE_TYPE);
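
Note: both continuation-IOCB helpers now advance the ring cursor on the per-queue req_que instead of the host structure; the wrap logic itself is unchanged. A self-contained model of that cursor advance (simplified types, assuming a contiguous array of fixed-size IOCB slots):

	#include <stdint.h>

	struct entry { uint32_t words[16]; };	/* one 64-byte IOCB slot */

	struct ring {
		struct entry *ring;	/* base of the request ring */
		struct entry *ring_ptr;	/* current producer slot */
		uint16_t ring_index;	/* producer index */
		uint16_t length;	/* number of entries in the ring */
	};

	/* Advance the producer cursor one slot, wrapping at the end;
	 * mirrors the ring_index/ring_ptr bookkeeping above. */
	static struct entry *ring_next(struct ring *q)
	{
		q->ring_index++;
		if (q->ring_index == q->length) {
			q->ring_index = 0;
			q->ring_ptr = q->ring;
		} else {
			q->ring_ptr++;
		}
		return q->ring_ptr;
	}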
@@ -120,20 +120,20 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
{
cont_a64_entry_t *cont_pkt;
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else {
- ha->request_ring_ptr++;
+ req->ring_ptr++;
}
- cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
@@ -155,10 +155,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct req_que *req;
cmd = sp->cmd;
@@ -172,7 +173,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- ha = sp->ha;
+ vha = sp->vha;
+ req = sp->que;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -190,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
* Seven DSDs are available in the Continuation
* Type 0 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+ cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
avail_dsds = 7;
}
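
Note: in the 32-bit IOCB builder, avail_dsds counts the data-segment descriptors (address/length pairs) left in the current packet; when it hits zero, a Continuation Type 0 IOCB is drawn from the ring, now via the per-queue req. A sketch of that refill loop, reusing struct ring and ring_next() from the sketch above (segment addresses and lengths are placeholders, not real DMA state):

	/* Refill pattern from qla2x00_build_scsi_iocbs_32(): three DSDs
	 * fit in the command IOCB, then seven per Continuation Type 0. */
	static void build_dsds_32(struct ring *q, uint32_t *cur_dsd, int nseg)
	{
		uint16_t avail_dsds = 3;	/* slots left in the command entry */

		for (int i = 0; i < nseg; i++) {
			if (avail_dsds == 0) {
				struct entry *cont_pkt = ring_next(q);
				cur_dsd = &cont_pkt->words[1];	/* first DSD slot */
				avail_dsds = 7;
			}
			*cur_dsd++ = (uint32_t)i;	/* segment address (placeholder) */
			*cur_dsd++ = 4096;		/* segment length (placeholder) */
			avail_dsds--;
		}
	}

The 64-bit variant below follows the same pattern with three-word DSDs and five slots per Continuation Type 1 IOCB.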
@@ -214,10 +216,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct req_que *req;
cmd = sp->cmd;
@@ -231,7 +234,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- ha = sp->ha;
+ vha = sp->vha;
+ req = sp->que;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -250,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -274,7 +278,7 @@ qla2x00_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
uint32_t index;
@@ -284,33 +288,39 @@ qla2x00_start_scsi(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct device_reg_2xxx __iomem *reg;
+ struct qla_hw_data *ha;
+ struct req_que *req;
+ struct rsp_que *rsp;
/* Setup device pointers. */
ret = 0;
- ha = sp->ha;
+ vha = sp->vha;
+ ha = vha->hw;
reg = &ha->iobase->isp;
cmd = sp->cmd;
+ req = ha->req_q_map[0];
+ rsp = ha->rsp_q_map[0];
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
/* Send marker if required */
- if (ha->marker_needed != 0) {
- if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+ != QLA_SUCCESS)
return (QLA_FUNCTION_FAILED);
- }
- ha->marker_needed = 0;
+ vha->marker_needed = 0;
}
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Check for room in outstanding command list. */
- handle = ha->current_outstanding_cmd;
+ handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
- if (!ha->outstanding_cmds[handle])
+ if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
@@ -329,25 +339,26 @@ qla2x00_start_scsi(srb_t *sp)
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
- if (ha->req_q_cnt < (req_cnt + 2)) {
+ if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
- if (ha->req_ring_index < cnt)
- ha->req_q_cnt = cnt - ha->req_ring_index;
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- ha->req_q_cnt = ha->request_q_length -
- (ha->req_ring_index - cnt);
+ req->cnt = req->length -
+ (req->ring_index - cnt);
}
- if (ha->req_q_cnt < (req_cnt + 2))
+ if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet */
- ha->current_outstanding_cmd = handle;
- ha->outstanding_cmds[handle] = sp;
- sp->ha = ha;
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->vha = vha;
+ sp->que = req;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
- ha->req_q_cnt -= req_cnt;
+ req->cnt -= req_cnt;
- cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
+ cmd_pkt = (cmd_entry_t *)req->ring_ptr;
cmd_pkt->handle = handle;
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
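
Note: before queuing, the driver recomputes the free-entry count from the hardware's consumer (out) pointer: if the producer index is behind the consumer, the gap is simply cnt - ring_index; otherwise the free space wraps around the end of the ring. A standalone model of that computation:

	#include <stdint.h>

	/* Free request entries given the producer index, the consumer
	 * index read back from hardware, and the total ring length.
	 * Mirrors the req->cnt recomputation in qla2x00_start_scsi(). */
	static uint16_t ring_free_entries(uint16_t ring_index, uint16_t out,
					  uint16_t length)
	{
		if (ring_index < out)
			return out - ring_index;
		return length - (ring_index - out);
	}

The callers still require the free count to exceed req_cnt + 2 before queuing, keeping a small guard band so the producer index never overruns the consumer.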
@@ -373,23 +384,23 @@ qla2x00_start_scsi(srb_t *sp)
wmb();
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else
- ha->request_ring_ptr++;
+ req->ring_ptr++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */
- if (ha->flags.process_response_queue &&
- ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
- qla2x00_process_response_queue(ha);
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla2x00_process_response_queue(rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return (QLA_SUCCESS);
@@ -415,18 +426,20 @@ queuing_error:
* Returns non-zero if a failure occurred, else zero.
*/
int
-__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
- uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp, uint16_t loop_id,
+ uint16_t lun, uint8_t type)
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
- mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
+ mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
if (mrk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
- __func__, ha->host_no));
+ __func__, base_vha->host_no));
return (QLA_FUNCTION_FAILED);
}
@@ -440,7 +453,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
mrk24->lun[1] = LSB(lun);
mrk24->lun[2] = MSB(lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
- mrk24->vp_index = ha->vp_idx;
+ mrk24->vp_index = vha->vp_idx;
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
@@ -448,22 +461,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
}
wmb();
- qla2x00_isp_cmd(pha);
+ qla2x00_isp_cmd(vha, req);
return (QLA_SUCCESS);
}
int
-qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
- uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
+ uint8_t type)
{
int ret;
unsigned long flags = 0;
- scsi_qla_host_t *pha = to_qla_parent(ha);
- spin_lock_irqsave(&pha->hardware_lock, flags);
- ret = __qla2x00_marker(ha, loop_id, lun, type);
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
return (ret);
}
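
Note: the double-underscore/wrapper split survives the rework, but the lock moves: previously the parent host's hardware_lock was found through to_qla_parent(ha); now it is vha->hw->hardware_lock, since every vport shares the one qla_hw_data. A user-space model of the pattern, with a pthread mutex standing in for spin_lock_irqsave and hypothetical names throughout:

	#include <pthread.h>

	struct hw { pthread_mutex_t hardware_lock; };

	/* Unlocked worker: caller must hold hardware_lock, mirroring
	 * __qla2x00_marker(). */
	static int __send_marker(struct hw *hw)
	{
		/* ... allocate a request packet and post the marker IOCB ... */
		return 0;
	}

	/* Locked wrapper, mirroring qla2x00_marker(): take the shared
	 * hardware lock, call the unlocked worker, release. */
	static int send_marker(struct hw *hw)
	{
		int ret;

		pthread_mutex_lock(&hw->hardware_lock);
		ret = __send_marker(hw);
		pthread_mutex_unlock(&hw->hardware_lock);
		return ret;
	}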
@@ -477,9 +490,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
* Returns NULL if function failed, else, a pointer to the request packet.
*/
static request_t *
-qla2x00_req_pkt(scsi_qla_host_t *ha)
+qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp)
{
- device_reg_t __iomem *reg = ha->iobase;
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
request_t *pkt = NULL;
uint16_t cnt;
uint32_t *dword_ptr;
@@ -488,24 +503,29 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
/* Wait 1 second for slot. */
for (timer = HZ; timer; timer--) {
- if ((req_cnt + 2) >= ha->req_q_cnt) {
+ if ((req_cnt + 2) >= req->cnt) {
/* Calculate number of free request entries. */
- if (IS_FWI2_CAPABLE(ha))
- cnt = (uint16_t)RD_REG_DWORD(
- &reg->isp24.req_q_out);
- else
- cnt = qla2x00_debounce_register(
- ISP_REQ_Q_OUT(ha, &reg->isp));
- if (ha->req_ring_index < cnt)
- ha->req_q_cnt = cnt - ha->req_ring_index;
+ if (ha->mqenable)
+ cnt = (uint16_t)
+ RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ else {
+ if (IS_FWI2_CAPABLE(ha))
+ cnt = (uint16_t)RD_REG_DWORD(
+ &reg->isp24.req_q_out);
+ else
+ cnt = qla2x00_debounce_register(
+ ISP_REQ_Q_OUT(ha, &reg->isp));
+ }
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- ha->req_q_cnt = ha->request_q_length -
- (ha->req_ring_index - cnt);
+ req->cnt = req->length -
+ (req->ring_index - cnt);
}
/* If room for request in request ring. */
- if ((req_cnt + 2) < ha->req_q_cnt) {
- ha->req_q_cnt--;
- pkt = ha->request_ring_ptr;
+ if ((req_cnt + 2) < req->cnt) {
+ req->cnt--;
+ pkt = req->ring_ptr;
/* Zero out packet. */
dword_ptr = (uint32_t *)pkt;
@@ -513,7 +533,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
*dword_ptr++ = 0;
/* Set system defined field. */
- pkt->sys_define = (uint8_t)ha->req_ring_index;
+ pkt->sys_define = (uint8_t)req->ring_index;
/* Set entry count. */
pkt->entry_count = 1;
@@ -522,15 +542,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
}
/* Release ring specific lock */
- spin_unlock(&ha->hardware_lock);
+ spin_unlock_irq(&ha->hardware_lock);
udelay(2); /* 2 us */
/* Check for pending interrupts. */
/* During init we issue marker directly */
- if (!ha->marker_needed && !ha->flags.init_done)
- qla2x00_poll(ha);
-
+ if (!vha->marker_needed && !vha->flags.init_done)
+ qla2x00_poll(rsp);
spin_lock_irq(&ha->hardware_lock);
}
if (!pkt) {
@@ -547,29 +566,38 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
* Note: The caller must hold the hardware lock before calling this routine.
*/
static void
-qla2x00_isp_cmd(scsi_qla_host_t *ha)
+qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
- device_reg_t __iomem *reg = ha->iobase;
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
DEBUG5(printk("%s(): IOCB data:\n", __func__));
DEBUG5(qla2x00_dump_buffer(
- (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
+ (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else
- ha->request_ring_ptr++;
+ req->ring_ptr++;
/* Set chip new ring index. */
- if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
- } else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+	} else {
+ if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
}
}
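
Note: qla2x00_isp_cmd() now selects among three doorbells: the per-queue isp25mq register (with a read of HCCR from the base I/O space to force PCI posting), the FWI2 (ISP24xx) register, or the legacy ISP2xxx register. A condensed standalone model of the dispatch, with MMIO reduced to volatile accesses over assumed mappings:

	#include <stdint.h>

	struct chip {
		int mqenable;			/* multi-queue enabled */
		int fwi2;			/* ISP24xx-and-later interface */
		volatile uint32_t *mq_req_q_in;	 /* per-queue doorbell */
		volatile uint32_t *req_q_in;	 /* legacy 24xx doorbell */
		volatile uint16_t *req_q_in_2xxx;/* ISP2xxx doorbell */
		volatile uint32_t *hccr;	 /* read forces PCI posting */
	};

	static void ring_doorbell(struct chip *c, uint16_t ring_index)
	{
		if (c->mqenable) {
			*c->mq_req_q_in = ring_index;
			(void)*c->hccr;		/* flush the posted write */
		} else if (c->fwi2) {
			*c->req_q_in = ring_index;
			(void)*c->req_q_in;
		} else {
			*c->req_q_in_2xxx = ring_index;
			(void)*c->req_q_in_2xxx;
		}
	}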
@@ -610,10 +638,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct req_que *req;
cmd = sp->cmd;
@@ -627,18 +656,19 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
return;
}
- ha = sp->ha;
+ vha = sp->vha;
+ req = sp->que;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
- sp->fcport->ha->qla_stats.output_bytes +=
+ sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
- sp->fcport->ha->qla_stats.input_bytes +=
+ sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
@@ -658,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -683,8 +713,6 @@ qla24xx_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
- scsi_qla_host_t *ha, *pha;
- struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
uint32_t index;
uint32_t handle;
@@ -692,35 +720,45 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
- struct device_reg_24xx __iomem *reg;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t que_id;
/* Setup device pointers. */
ret = 0;
- ha = sp->ha;
- pha = to_qla_parent(ha);
- reg = &ha->iobase->isp24;
- cmd = sp->cmd;
+ que_id = vha->req_ques[0];
+
+ req = ha->req_q_map[que_id];
+ sp->que = req;
+
+ if (req->rsp)
+ rsp = req->rsp;
+ else
+ rsp = ha->rsp_q_map[que_id];
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
/* Send marker if required */
- if (ha->marker_needed != 0) {
- if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+ != QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
- }
- ha->marker_needed = 0;
+ vha->marker_needed = 0;
}
/* Acquire ring specific lock */
- spin_lock_irqsave(&pha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
/* Check for room in outstanding command list. */
- handle = ha->current_outstanding_cmd;
+ handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
- if (!ha->outstanding_cmds[handle])
+ if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
@@ -738,25 +776,26 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(tot_dsds);
- if (ha->req_q_cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
- if (ha->req_ring_index < cnt)
- ha->req_q_cnt = cnt - ha->req_ring_index;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = ha->isp_ops->rd_req_reg(ha, req->id);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- ha->req_q_cnt = ha->request_q_length -
- (ha->req_ring_index - cnt);
+ req->cnt = req->length -
+ (req->ring_index - cnt);
}
- if (ha->req_q_cnt < (req_cnt + 2))
+ if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet. */
- ha->current_outstanding_cmd = handle;
- ha->outstanding_cmds[handle] = sp;
- sp->ha = ha;
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->vha = vha;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
- ha->req_q_cnt -= req_cnt;
+ req->cnt -= req_cnt;
- cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
cmd_pkt->handle = handle;
/* Zero out remaining portion of packet. */
@@ -789,32 +828,63 @@ qla24xx_start_scsi(srb_t *sp)
wmb();
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else
- ha->request_ring_ptr++;
+ req->ring_ptr++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
- RD_REG_DWORD_RELAXED(&reg->req_q_in); /* PCI Posting. */
+ ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
/* Manage unprocessed RIO/ZIO commands in response queue. */
- if (ha->flags.process_response_queue &&
- ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(ha);
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(rsp);
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
}
+
+uint16_t
+qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+ device_reg_t __iomem *reg = (void *) ha->iobase;
+ return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
+}
+
+uint16_t
+qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+ device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+ return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
+}
+
+void
+qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+ device_reg_t __iomem *reg = (void *) ha->iobase;
+ WRT_REG_DWORD(&reg->isp24.req_q_in, index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+}
+
+void
+qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+ device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
+ RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
+}
+
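
Note: these four new accessors let qla24xx_start_scsi() ring the right request-queue doorbell through ha->isp_ops->rd_req_reg()/wrt_req_reg() instead of hard-coding the 24xx register block. A speculative sketch of how an ops table could wire them up; the field names are taken from the calls in this patch, but the actual table definitions live elsewhere in the driver and are not part of this diff:

	#include <stdint.h>

	struct qla_hw_data;	/* opaque here; defined in qla_def.h */

	/* Hypothetical excerpt of the per-ISP ops table. */
	struct isp_operations {
		uint16_t (*rd_req_reg)(struct qla_hw_data *, uint16_t);
		void (*wrt_req_reg)(struct qla_hw_data *, uint16_t, uint16_t);
		/* ... many other hooks ... */
	};

	static const struct isp_operations qla24xx_isp_ops_sketch = {
		.rd_req_reg	= qla24xx_rd_req_reg,
		.wrt_req_reg	= qla24xx_wrt_req_reg,
	};

	static const struct isp_operations qla25xx_isp_ops_sketch = {
		.rd_req_reg	= qla25xx_rd_req_reg,
		.wrt_req_reg	= qla25xx_wrt_req_reg,
	};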