author     jkim <jkim@FreeBSD.org>    2017-09-20 17:49:57 +0000
committer  jkim <jkim@FreeBSD.org>    2017-09-20 17:49:57 +0000
commit     9e69fe29d5069e577750b864a82ae7fe7673fe6d (patch)
tree       79cfbcbbe30a4d2bce75303d7fb7bd32664ceecf
parent     820d4258d09a7af3fb661ac50e8136cc5a180941 (diff)
MFC: r309284-r309294 (kadesai)
r309294  Upgrade the driver version to 06.712.04.00-fbsd.
r309293  Add code to re-fire IOCTL commands after OCR.
r309292  Unblock the SYNCHRONIZE_CACHE command, i.e. stop blocking SYNCHRONIZE_CACHE in the driver and instead pass it through to the firmware on all Gen3 controllers.
r309291  Wait for the AEN task to be completed (if queued) before resetting the controller, and return from the AEN thread without processing the event if a controller reset is in progress.
r309290  Add task management support to the driver. High-level description: if a SCSI IO times out, then before initiating OCR the driver now tries to send a target reset to the particular target whose IO timed out. If that also fails, the driver initiates OCR (see the sketch after this message).
r309289  Process outstanding reply descriptors from all the reply descriptor post queues before initiating OCR.
r309288  Clean up the reference to the AEN command when aborting the AEN succeeds, since the command is gone; this is done by setting sc->aen_cmd = NULL after a successful abort.
r309287  Update controller properties (re-read the OCR capability bit) when MR_EVT_CTRL_PROP_CHANGED is received.
r309286  Add sanity checks in the IO and IOCTL paths so commands are not processed further when the controller is in HW_CRITICAL_ERROR.
r309285  Use a variable to indicate Gen3 controllers and remove all PCI-ID based checks for Gen3 controllers.
r309284  High-level description of the new solution: free the MFI and MPT commands from the same context, either from the process context (from where the MFI-MPT pass-through was issued) or from the ISR context. Do not split the freeing of the MFI and MPT commands, because that creates a race condition that can corrupt the MFI/MPT command lists (see the sketch after the file list below).
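As a rough illustration of the r309290 recovery flow described above, here is a minimal user-space C sketch. It is not driver code: recovery_state, try_target_reset() and recovery_step() are hypothetical stand-ins for the driver's softc fields, mrsas_reset_targets() and the OCR-thread logic, and the stand-in target reset always succeeds. The point is only the ordering: a target reset is attempted first for timed-out SCSI IOs, and a full controller reset (OCR) is initiated only on a FW fault, a DCMD timeout, or a failed target reset.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical, simplified state; the real driver keeps these as
 * mrsas_softc fields and reads the FW state from a scratch-pad register. */
struct recovery_state {
	bool fw_fault;             /* firmware reported a FAULT state       */
	bool dcmd_timeout;         /* a DCMD (management command) timed out */
	int  timed_out_targets;    /* targets with timed-out SCSI IOs       */
};

/* Stand-in for mrsas_reset_targets(); returns true when every target
 * reset completed and no IOs are still pending to those targets. */
static bool try_target_reset(struct recovery_state *rs)
{
	printf("sending TARGET_RESET for %d timed-out target(s)\n",
	    rs->timed_out_targets);
	rs->timed_out_targets = 0;
	return true;               /* assume the TM requests succeeded */
}

/* Decision order sketched from the r309290 description. */
static void recovery_step(struct recovery_state *rs)
{
	bool tm_failed = false;

	if (rs->timed_out_targets > 0)
		tm_failed = !try_target_reset(rs);

	if (rs->fw_fault || rs->dcmd_timeout || tm_failed)
		printf("initiating full controller reset (OCR)\n");
	else
		printf("no controller reset needed\n");
}

int main(void)
{
	struct recovery_state rs = { .timed_out_targets = 1 };

	recovery_step(&rs);
	return (0);
}

Built as an ordinary C99 program, this prints the target-reset message and then "no controller reset needed" for the sample state; the real decision is made in mrsas_ocr_thread() in the diff below.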
-rw-r--r--  sys/dev/mrsas/mrsas.c      322
-rw-r--r--  sys/dev/mrsas/mrsas.h      124
-rw-r--r--  sys/dev/mrsas/mrsas_cam.c  397
-rw-r--r--  sys/dev/mrsas/mrsas_fp.c    29
4 files changed, 670 insertions, 202 deletions
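The r309284 change (visible in mrsas_release_mfi_cmd() in the diff below) can be summarized with a similar user-space sketch. The types and the release_mfi_cmd() helper here are hypothetical, simplified stand-ins for struct mrsas_mfi_cmd, struct mrsas_mpt_cmd and the driver's command pools; the sketch only shows the idea that the MPT frame attached to an MFI command is freed in the same context as the MFI command itself, instead of being freed separately from another context.

#include <stdio.h>

/* Hypothetical, simplified commands; the real driver uses struct
 * mrsas_mfi_cmd / struct mrsas_mpt_cmd and TAILQ-based free pools. */
struct mpt_cmd {
	int smid;
	int in_use;
};

struct mfi_cmd {
	int mpt_smid;              /* 0 means no MPT frame is attached */
};

static struct mpt_cmd mpt_pool[8];

/* Release an MFI command and, in the same context, the MPT command that
 * was allocated for it.  Never splitting these two frees across contexts
 * is the point of r309284: the split created a race on the free lists. */
static void release_mfi_cmd(struct mfi_cmd *mfi)
{
	if (mfi->mpt_smid != 0) {
		struct mpt_cmd *mpt = &mpt_pool[mfi->mpt_smid - 1];

		mpt->in_use = 0;
		printf("released MPT cmd (SMID %d) together with its MFI cmd\n",
		    mpt->smid);
	}
	mfi->mpt_smid = 0;
	printf("released MFI cmd\n");
}

int main(void)
{
	struct mfi_cmd mfi = { .mpt_smid = 3 };

	mpt_pool[2].smid = 3;
	mpt_pool[2].in_use = 1;
	release_mfi_cmd(&mfi);
	return (0);
}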
diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
index 1873958..d5e8349 100644
--- a/sys/dev/mrsas/mrsas.c
+++ b/sys/dev/mrsas/mrsas.c
@@ -110,6 +110,7 @@ int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
+int mrsas_reset_targets(struct mrsas_softc *sc);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd);
@@ -153,7 +154,6 @@ extern void mrsas_cam_detach(struct mrsas_softc *sc);
extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
-extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
@@ -307,28 +307,11 @@ mrsas_enable_intr(struct mrsas_softc *sc)
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
- u_int32_t status, fw_status, fw_state;
+ u_int32_t status;
/* Read received interrupt */
status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
- /*
- * If FW state change interrupt is received, write to it again to
- * clear
- */
- if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
- fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
- outbound_scratch_pad));
- fw_state = fw_status & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT) {
- device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
- if (sc->ocr_thread_active)
- wakeup(&sc->ocr_chan);
- }
- mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
- mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
- return (1);
- }
/* Not our interrupt, so just return */
if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
return (0);
@@ -449,6 +432,11 @@ mrsas_setup_sysctl(struct mrsas_softc *sc)
OID_AUTO, "reset_in_progress", CTLFLAG_RD,
&sc->reset_in_progress, 0, "ocr in progress status");
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "block_sync_cache", CTLFLAG_RW,
+ &sc->block_sync_cache, 0,
+ "Block SYNC CACHE at driver. <default: 0, send it to FW>");
+
}
/*
@@ -468,6 +456,7 @@ mrsas_get_tunables(struct mrsas_softc *sc)
sc->mrsas_fw_fault_check_delay = 1;
sc->reset_count = 0;
sc->reset_in_progress = 0;
+ sc->block_sync_cache = 0;
/*
* Grab the global variables.
@@ -674,16 +663,15 @@ mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
sc->aen_cmd);
if (ret_val) {
- printf("mrsas: Failed to abort "
- "previous AEN command\n");
+ printf("mrsas: Failed to abort previous AEN command\n");
return ret_val;
- }
+ } else
+ sc->aen_cmd = NULL;
}
}
cmd = mrsas_get_mfi_cmd(sc);
-
if (!cmd)
- return -ENOMEM;
+ return ENOMEM;
dcmd = &cmd->frame->dcmd;
@@ -835,6 +823,15 @@ mrsas_attach(device_t dev)
sc->mrsas_dev = dev;
sc->device_id = pci_get_device(dev);
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24) ||
+ (sc->device_id == MRSAS_CUTLASS_52) ||
+ (sc->device_id == MRSAS_CUTLASS_53)) {
+ sc->mrsas_gen3_ctrl = 1;
+ }
+
mrsas_get_tunables(sc);
/*
@@ -875,6 +872,7 @@ mrsas_attach(device_t dev)
TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
mrsas_atomic_set(&sc->fw_outstanding, 0);
+ mrsas_atomic_set(&sc->target_reset_outstanding, 0);
sc->io_cmds_highwater = 0;
@@ -953,8 +951,7 @@ mrsas_ich_startup(void *arg)
/*
* Intialize a counting Semaphore to take care no. of concurrent IOCTLs
*/
- sema_init(&sc->ioctl_count_sema,
- MRSAS_MAX_MFI_CMDS - 5,
+ sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
IOCTL_SEMA_DESCRIPTION);
/* Create a /dev entry for mrsas controller. */
@@ -1070,7 +1067,7 @@ mrsas_detach(device_t dev)
mtx_destroy(&sc->raidmap_lock);
/* Wait for all the semaphores to be released */
- while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
+ while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
pause("mr_shutdown", hz);
/* Destroy the counting semaphore created for Ioctl */
@@ -1354,9 +1351,11 @@ mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
if (!sc)
return ENOENT;
- if (sc->remove_in_progress) {
+ if (sc->remove_in_progress ||
+ (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
mrsas_dprint(sc, MRSAS_INFO,
- "Driver remove or shutdown called.\n");
+ "Either driver remove or shutdown called or "
+ "HW is in unrecoverable critical error state.\n");
return ENOENT;
}
mtx_lock_spin(&sc->ioctl_lock);
@@ -1548,7 +1547,10 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
PLD_LOAD_BALANCE_INFO lbinfo;
u_int32_t device_id;
int threshold_reply_count = 0;
-
+#if TM_DEBUG
+ MR_TASK_MANAGE_REQUEST *mr_tm_req;
+ MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
+#endif
/* If we have a hardware error, not need to continue */
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
@@ -1575,6 +1577,16 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
extStatus = scsi_io_req->RaidContext.exStatus;
switch (scsi_io_req->Function) {
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+#if TM_DEBUG
+ mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
+ mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
+ &mr_tm_req->TmRequest;
+ device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
+ "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
+#endif
+ wakeup_one((void *)&sc->ocr_chan);
+ break;
case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
lbinfo = &sc->load_balance_info[device_id];
@@ -1592,9 +1604,16 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
break;
case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
- mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
- cmd_mpt->flags = 0;
- mrsas_release_mpt_cmd(cmd_mpt);
+ /*
+ * Make sure NOT TO release the mfi command from the called
+ * function's context if it is fired with issue_polled call.
+ * And also make sure that the issue_polled call should only be
+ * used if INTERRUPT IS DISABLED.
+ */
+ if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
+ mrsas_release_mfi_cmd(cmd_mfi);
+ else
+ mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
break;
}
@@ -1629,12 +1648,7 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
*/
if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
if (sc->msix_enable) {
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53))
+ if (sc->mrsas_gen3_ctrl)
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
@@ -1655,12 +1669,7 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
/* Clear response interrupt */
if (sc->msix_enable) {
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
@@ -2435,12 +2444,21 @@ mrsas_ioc_init(struct mrsas_softc *sc)
u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
bus_addr_t phys_addr;
int i, retcode = 0;
+ u_int32_t scratch_pad_2;
/* Allocate memory for the IOC INIT command */
if (mrsas_alloc_ioc_cmd(sc)) {
device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
return (1);
}
+
+ if (!sc->block_sync_cache) {
+ scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad_2));
+ sc->fw_sync_cache_support = (scratch_pad_2 &
+ MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+ }
+
IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
@@ -2458,12 +2476,7 @@ mrsas_ioc_init(struct mrsas_softc *sc)
init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
/* driver support Extended MSIX */
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
init_frame->driver_operations.
mfi_capabilities.support_additional_msix = 1;
}
@@ -2585,7 +2598,7 @@ mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
cmd->index = i + 1;
cmd->ccb_ptr = NULL;
- callout_init(&cmd->cm_callout, 0);
+ callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
cmd->sc = sc;
cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
@@ -2780,6 +2793,7 @@ mrsas_ocr_thread(void *arg)
{
struct mrsas_softc *sc;
u_int32_t fw_status, fw_state;
+ u_int8_t tm_target_reset_failed = 0;
sc = (struct mrsas_softc *)arg;
@@ -2802,20 +2816,66 @@ mrsas_ocr_thread(void *arg)
fw_status = mrsas_read_reg(sc,
offsetof(mrsas_reg_set, outbound_scratch_pad));
fw_state = fw_status & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
- device_printf(sc->mrsas_dev, "%s started due to %s!\n",
- sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
- sc->do_timedout_reset ? "IO Timeout" :
- "FW fault detected");
- mtx_lock_spin(&sc->ioctl_lock);
- sc->reset_in_progress = 1;
- sc->reset_count++;
- mtx_unlock_spin(&sc->ioctl_lock);
+ if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
+ mrsas_atomic_read(&sc->target_reset_outstanding)) {
+
+ /* First, freeze further IOs to come to the SIM */
mrsas_xpt_freeze(sc);
- mrsas_reset_ctrl(sc, sc->do_timedout_reset);
- mrsas_xpt_release(sc);
- sc->reset_in_progress = 0;
- sc->do_timedout_reset = 0;
+
+ /* If this is an IO timeout then go for target reset */
+ if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
+ device_printf(sc->mrsas_dev, "Initiating Target RESET "
+ "because of SCSI IO timeout!\n");
+
+ /* Let the remaining IOs to complete */
+ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
+ "mrsas_reset_targets", 5 * hz);
+
+ /* Try to reset the target device */
+ if (mrsas_reset_targets(sc) == FAIL)
+ tm_target_reset_failed = 1;
+ }
+
+ /* If this is a DCMD timeout or FW fault,
+ * then go for controller reset
+ */
+ if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
+ (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
+ if (tm_target_reset_failed)
+					device_printf(sc->mrsas_dev, "Initiating OCR because of "
+					    "TM FAILURE!\n");
+ else
+					device_printf(sc->mrsas_dev, "Initiating OCR "
+ "because of %s!\n", sc->do_timedout_reset ?
+ "DCMD IO Timeout" : "FW fault");
+
+ mtx_lock_spin(&sc->ioctl_lock);
+ sc->reset_in_progress = 1;
+ mtx_unlock_spin(&sc->ioctl_lock);
+ sc->reset_count++;
+
+ /*
+ * Wait for the AEN task to be completed if it is running.
+ */
+ mtx_unlock(&sc->sim_lock);
+ taskqueue_drain(sc->ev_tq, &sc->ev_task);
+ mtx_lock(&sc->sim_lock);
+
+ taskqueue_block(sc->ev_tq);
+ /* Try to reset the controller */
+ mrsas_reset_ctrl(sc, sc->do_timedout_reset);
+
+ sc->do_timedout_reset = 0;
+ sc->reset_in_progress = 0;
+ tm_target_reset_failed = 0;
+ mrsas_atomic_set(&sc->target_reset_outstanding, 0);
+ memset(sc->target_reset_pool, 0,
+ sizeof(sc->target_reset_pool));
+ taskqueue_unblock(sc->ev_tq);
+ }
+
+ /* Now allow IOs to come to the SIM */
+ mrsas_xpt_release(sc);
}
}
mtx_unlock(&sc->sim_lock);
@@ -2867,6 +2927,7 @@ mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
struct mrsas_mfi_cmd *mfi_cmd;
struct mrsas_mpt_cmd *mpt_cmd;
union mrsas_evt_class_locale class_locale;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
device_printf(sc->mrsas_dev,
@@ -2994,13 +3055,25 @@ mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
mpt_cmd = sc->mpt_cmd_list[j];
if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
- mrsas_release_mfi_cmd(mfi_cmd);
- mrsas_release_mpt_cmd(mpt_cmd);
+ /* If not an IOCTL then release the command else re-fire */
+ if (!mfi_cmd->sync_cmd) {
+ mrsas_release_mfi_cmd(mfi_cmd);
+ } else {
+ req_desc = mrsas_get_request_desc(sc,
+ mfi_cmd->cmd_id.context.smid - 1);
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Re-fire command DCMD opcode 0x%x index %d\n ",
+ mfi_cmd->frame->dcmd.opcode, j);
+ if (!req_desc)
+ device_printf(sc->mrsas_dev,
+ "Cannot build MPT cmd.\n");
+ else
+ mrsas_fire_cmd(sc, req_desc->addr.u.low,
+ req_desc->addr.u.high);
+ }
}
}
- sc->aen_cmd = NULL;
-
/* Reset load balance info */
memset(sc->load_balance_info, 0,
sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
@@ -3015,17 +3088,6 @@ mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
megasas_setup_jbod_map(sc);
- memset(sc->pd_list, 0,
- MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
- if (mrsas_get_pd_list(sc) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
- "Will get the latest PD LIST after OCR on event.\n");
- }
- memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
- if (mrsas_get_ld_list(sc) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
- "Will get the latest LD LIST after OCR on event.\n");
- }
mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
mrsas_enable_intr(sc);
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
@@ -3035,6 +3097,7 @@ mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
+ mtx_unlock(&sc->sim_lock);
if (mrsas_register_aen(sc, sc->last_seq_num,
class_locale.word)) {
device_printf(sc->mrsas_dev,
@@ -3044,6 +3107,8 @@ mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
"or the controller does not support AEN.\n"
"Please contact to the SUPPORT TEAM if the problem persists\n");
}
+ mtx_lock(&sc->sim_lock);
+
/* Adapter reset completed successfully */
device_printf(sc->mrsas_dev, "Reset successful\n");
retval = SUCCESS;
@@ -3140,6 +3205,11 @@ mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
if (fw_state == MFI_STATE_FAULT) {
mrsas_dprint(sc, MRSAS_OCR,
"Found FW in FAULT state, will reset adapter.\n");
+ count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
+ mtx_unlock(&sc->sim_lock);
+ for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
+ mrsas_complete_cmd(sc, MSIxIndex);
+ mtx_lock(&sc->sim_lock);
retval = 1;
goto out;
}
@@ -3157,8 +3227,10 @@ mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
"commands to complete\n", i, outstanding);
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
+ mtx_unlock(&sc->sim_lock);
for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
mrsas_complete_cmd(sc, MSIxIndex);
+ mtx_lock(&sc->sim_lock);
}
DELAY(1000 * 1000);
}
@@ -3177,17 +3249,33 @@ out:
* mrsas_release_mfi_cmd: Return a cmd to free command pool
* input: Command packet for return to free cmd pool
*
- * This function returns the MFI command to the command list.
+ * This function returns the MFI & MPT command to the command list.
*/
void
-mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
+mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
- struct mrsas_softc *sc = cmd->sc;
+ struct mrsas_softc *sc = cmd_mfi->sc;
+ struct mrsas_mpt_cmd *cmd_mpt;
+
mtx_lock(&sc->mfi_cmd_pool_lock);
- cmd->ccb_ptr = NULL;
- cmd->cmd_id.frame_count = 0;
- TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
+ /*
+	 * Release the mpt command (if at all it is allocated and
+	 * associated with the mfi command).
+ */
+ if (cmd_mfi->cmd_id.context.smid) {
+ mtx_lock(&sc->mpt_cmd_pool_lock);
+ /* Get the mpt cmd from mfi cmd frame's smid value */
+ cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
+ cmd_mpt->flags = 0;
+ cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+ TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
+ }
+ /* Release the mfi command */
+ cmd_mfi->ccb_ptr = NULL;
+ cmd_mfi->cmd_id.frame_count = 0;
+ TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
mtx_unlock(&sc->mfi_cmd_pool_lock);
return;
@@ -3236,7 +3324,11 @@ mrsas_get_ctrl_info(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
- retcode = mrsas_issue_polled(sc, cmd);
+ if (!sc->mask_interrupts)
+ retcode = mrsas_issue_blocked_cmd(sc, cmd);
+ else
+ retcode = mrsas_issue_polled(sc, cmd);
+
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
else
@@ -3247,13 +3339,16 @@ mrsas_get_ctrl_info(struct mrsas_softc *sc)
sc->use_seqnum_jbod_fp =
sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
+ sc->disableOnlineCtrlReset =
+ sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
dcmd_timeout:
mrsas_free_ctlr_info_cmd(sc);
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
- else
+
+ if (!sc->mask_interrupts)
mrsas_release_mfi_cmd(cmd);
return (retcode);
@@ -3496,12 +3591,7 @@ mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cm
io_req = mpt_cmd->io_request;
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
sgl_ptr_end += sc->max_sge_in_main_msg - 1;
@@ -3869,8 +3959,6 @@ megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
dcmd_timeout:
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
- else
- mrsas_release_mfi_cmd(cmd);
return (retcode);
}
@@ -3947,8 +4035,6 @@ mrsas_get_ld_map_info(struct mrsas_softc *sc)
retcode = mrsas_issue_polled(sc, cmd);
if (retcode == ETIMEDOUT)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
- else
- mrsas_release_mfi_cmd(cmd);
return (retcode);
}
@@ -3975,9 +4061,8 @@ mrsas_sync_map_info(struct mrsas_softc *sc)
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
- device_printf(sc->mrsas_dev,
- "Cannot alloc for sync map info cmd\n");
- return 1;
+ device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
+ return ENOMEM;
}
map = sc->ld_drv_map[sc->map_id & 1];
num_lds = map->raidMap.ldCount;
@@ -4077,7 +4162,11 @@ mrsas_get_pd_list(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
- retcode = mrsas_issue_polled(sc, cmd);
+ if (!sc->mask_interrupts)
+ retcode = mrsas_issue_blocked_cmd(sc, cmd);
+ else
+ retcode = mrsas_issue_polled(sc, cmd);
+
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
@@ -4108,7 +4197,8 @@ dcmd_timeout:
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
- else
+
+ if (!sc->mask_interrupts)
mrsas_release_mfi_cmd(cmd);
return (retcode);
@@ -4170,7 +4260,11 @@ mrsas_get_ld_list(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
dcmd->pad_0 = 0;
- retcode = mrsas_issue_polled(sc, cmd);
+ if (!sc->mask_interrupts)
+ retcode = mrsas_issue_blocked_cmd(sc, cmd);
+ else
+ retcode = mrsas_issue_polled(sc, cmd);
+
if (retcode == ETIMEDOUT)
goto dcmd_timeout;
@@ -4196,7 +4290,7 @@ dcmd_timeout:
if (do_ocr)
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
- else
+ if (!sc->mask_interrupts)
mrsas_release_mfi_cmd(cmd);
return (retcode);
@@ -4360,6 +4454,11 @@ mrsas_aen_handler(struct mrsas_softc *sc)
printf("invalid instance!\n");
return;
}
+ if (sc->remove_in_progress || sc->reset_in_progress) {
+ device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
+ __func__, __LINE__);
+ return;
+ }
if (sc->evt_detail_mem) {
switch (sc->evt_detail_mem->code) {
case MR_EVT_PD_INSERTED:
@@ -4368,7 +4467,6 @@ mrsas_aen_handler(struct mrsas_softc *sc)
mrsas_bus_scan_sim(sc, sc->sim_1);
else
goto skip_register_aen;
- doscan = 0;
break;
case MR_EVT_PD_REMOVED:
fail_aen = mrsas_get_pd_list(sc);
@@ -4376,13 +4474,11 @@ mrsas_aen_handler(struct mrsas_softc *sc)
mrsas_bus_scan_sim(sc, sc->sim_1);
else
goto skip_register_aen;
- doscan = 0;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
mrsas_bus_scan_sim(sc, sc->sim_0);
- doscan = 0;
break;
case MR_EVT_LD_CREATED:
fail_aen = mrsas_get_ld_list(sc);
@@ -4390,15 +4486,18 @@ mrsas_aen_handler(struct mrsas_softc *sc)
mrsas_bus_scan_sim(sc, sc->sim_0);
else
goto skip_register_aen;
- doscan = 0;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
doscan = 1;
break;
+ case MR_EVT_CTRL_PROP_CHANGED:
+ fail_aen = mrsas_get_ctrl_info(sc);
+ if (fail_aen)
+ goto skip_register_aen;
+ break;
default:
- doscan = 0;
break;
}
} else {
@@ -4474,8 +4573,7 @@ mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
sc->aen_cmd = NULL;
mrsas_release_mfi_cmd(cmd);
- if (!sc->remove_in_progress)
- taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
+ taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
return;
}
diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h
index 2c2a341..672b177 100644
--- a/sys/dev/mrsas/mrsas.h
+++ b/sys/dev/mrsas/mrsas.h
@@ -106,7 +106,7 @@ __FBSDID("$FreeBSD$");
*/
#define BYTE_ALIGNMENT 1
#define MRSAS_MAX_NAME_LENGTH 32
-#define MRSAS_VERSION "06.709.07.00-fbsd"
+#define MRSAS_VERSION "06.712.04.00-fbsd"
#define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF
#define MRSAS_DEFAULT_TIMEOUT 0x14 /* Temporarily set */
#define DONE 0
@@ -205,7 +205,9 @@ typedef struct _RAID_CONTEXT {
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
-#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
+#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
@@ -314,6 +316,91 @@ typedef union {
} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
+/****************************************************************************
+ * * SCSI Task Management messages
+ * ****************************************************************************/
+
+/*SCSI Task Management Request Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
+ u_int16_t DevHandle; /*0x00 */
+ u_int8_t ChainOffset; /*0x02 */
+ u_int8_t Function; /*0x03 */
+ u_int8_t Reserved1; /*0x04 */
+ u_int8_t TaskType; /*0x05 */
+ u_int8_t Reserved2; /*0x06 */
+ u_int8_t MsgFlags; /*0x07 */
+ u_int8_t VP_ID; /*0x08 */
+ u_int8_t VF_ID; /*0x09 */
+ u_int16_t Reserved3; /*0x0A */
+ u_int8_t LUN[8]; /*0x0C */
+ u_int32_t Reserved4[7]; /*0x14 */
+ u_int16_t TaskMID; /*0x30 */
+ u_int16_t Reserved5; /*0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST;
+
+/*SCSI Task Management Reply Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
+ u_int16_t DevHandle; /*0x00 */
+ u_int8_t MsgLength; /*0x02 */
+ u_int8_t Function; /*0x03 */
+ u_int8_t ResponseCode; /*0x04 */
+ u_int8_t TaskType; /*0x05 */
+ u_int8_t Reserved1; /*0x06 */
+ u_int8_t MsgFlags; /*0x07 */
+ u_int8_t VP_ID; /*0x08 */
+ u_int8_t VF_ID; /*0x09 */
+ u_int16_t Reserved2; /*0x0A */
+ u_int16_t Reserved3; /*0x0C */
+ u_int16_t IOCStatus; /*0x0E */
+ u_int32_t IOCLogInfo; /*0x10 */
+ u_int32_t TerminationCount; /*0x14 */
+ u_int32_t ResponseInfo; /*0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY;
+
+typedef struct _MR_TM_REQUEST {
+ char request[128];
+} MR_TM_REQUEST;
+
+typedef struct _MR_TM_REPLY {
+ char reply[128];
+} MR_TM_REPLY;
+
+/* SCSI Task Management Request Message */
+typedef struct _MR_TASK_MANAGE_REQUEST {
+ /*To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
+ MR_TM_REQUEST TmRequest;
+ union {
+ struct {
+ u_int32_t isTMForLD:1;
+ u_int32_t isTMForPD:1;
+ u_int32_t reserved1:30;
+ u_int32_t reserved2;
+ } tmReqFlags;
+ MR_TM_REPLY TMReply;
+ } uTmReqReply;
+} MR_TASK_MANAGE_REQUEST;
+
+/* TaskType values */
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/* ResponseCode values */
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
/*
* RAID SCSI IO Request Message Total SGE count will be one less than
* _MPI2_SCSI_IO_REQUEST
@@ -584,7 +671,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
-
+#define MR_DCMD_PD_MFI_TASK_MGMT 0x0200e100
#define MRSAS_MAX_PD_CHANNELS 1
#define MRSAS_MAX_LD_CHANNELS 1
@@ -599,7 +686,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
#define VD_EXT_DEBUG 0
-
+#define TM_DEBUG 1
/*******************************************************************
* RAID map related structures
@@ -659,7 +746,8 @@ typedef struct _MR_LD_RAID {
u_int32_t fpWriteAcrossStripe:1;
u_int32_t fpReadAcrossStripe:1;
u_int32_t fpNonRWCapable:1;
- u_int32_t reserved4:7;
+ u_int32_t tmCapable:1;
+ u_int32_t reserved4:6;
} capability;
u_int32_t reserved6;
u_int64_t size;
@@ -876,7 +964,11 @@ struct IO_REQUEST_INFO {
struct MR_PD_CFG_SEQ {
u_int16_t seqNum;
u_int16_t devHandle;
- u_int8_t reserved[4];
+ struct {
+ u_int8_t tmCapable:1;
+ u_int8_t reserved:7;
+ } capability;
+ u_int8_t reserved[3];
} __packed;
struct MR_PD_CFG_SEQ_NUM_SYNC {
@@ -1242,7 +1334,6 @@ enum MR_EVT_ARGS {
MR_EVT_ARGS_GENERIC,
};
-
/*
* Thunderbolt (and later) Defines
*/
@@ -1256,7 +1347,8 @@ enum MR_EVT_ARGS {
#define HOST_DIAG_WRITE_ENABLE 0x80
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MRSAS_TBOLT_MAX_RESET_TRIES 3
-#define MRSAS_MAX_MFI_CMDS 32
+#define MRSAS_MAX_MFI_CMDS 16
+#define MRSAS_MAX_IOCTL_CMDS 3
/*
* Invader Defines
@@ -1395,6 +1487,7 @@ struct mrsas_mpt_cmd {
union ccb *ccb_ptr;
struct callout cm_callout;
struct mrsas_softc *sc;
+ boolean_t tmCapable;
TAILQ_ENTRY(mrsas_mpt_cmd) next;
};
@@ -1448,6 +1541,7 @@ enum MR_PD_QUERY_TYPE {
#define MR_EVT_LD_DELETED 0x008b
#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_PROP_CHANGED 0x012f
#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
enum MR_PD_STATE {
@@ -1990,6 +2084,11 @@ struct mrsas_ctrl_info {
#define MR_MAX_MSIX_REG_ARRAY 16
/*
+ * SYNC CACHE offset define
+ */
+#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+
+/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
* lower number to the mid layer because it can issue a few internal commands
@@ -2470,8 +2569,7 @@ struct mrsas_irq_context {
enum MEGASAS_OCR_REASON {
FW_FAULT_OCR = 0,
- SCSIIO_TIMEOUT_OCR = 1,
- MFI_DCMD_TIMEOUT_OCR = 2,
+ MFI_DCMD_TIMEOUT_OCR = 1,
};
/* Controller management info added to support Linux Emulator */
@@ -2746,6 +2844,11 @@ struct mrsas_softc {
u_int8_t do_timedout_reset;
u_int32_t reset_in_progress;
u_int32_t reset_count;
+ u_int32_t block_sync_cache;
+ u_int8_t fw_sync_cache_support;
+ mrsas_atomic_t target_reset_outstanding;
+#define MRSAS_MAX_TM_TARGETS (MRSAS_MAX_PD + MRSAS_MAX_LD_IDS)
+ struct mrsas_mpt_cmd *target_reset_pool[MRSAS_MAX_TM_TARGETS];
bus_dma_tag_t jbodmap_tag[2];
bus_dmamap_t jbodmap_dmamap[2];
@@ -2794,6 +2897,7 @@ struct mrsas_softc {
LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
+ u_int8_t mrsas_gen3_ctrl;
u_int8_t secure_jbod_support;
u_int8_t use_seqnum_jbod_fp;
u_int8_t max256vdSupport;
diff --git a/sys/dev/mrsas/mrsas_cam.c b/sys/dev/mrsas/mrsas_cam.c
index 8b90bb7..f1f516c 100644
--- a/sys/dev/mrsas/mrsas_cam.c
+++ b/sys/dev/mrsas/mrsas_cam.c
@@ -95,6 +95,11 @@ static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
static void mrsas_cam_poll(struct cam_sim *sim);
static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
static void mrsas_scsiio_timeout(void *data);
+static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id);
+static void mrsas_tm_response_code(struct mrsas_softc *sc,
+ MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply);
+static int mrsas_issue_tm(struct mrsas_softc *sc,
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
int nseg, int error);
@@ -105,6 +110,10 @@ struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
+extern void
+mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
+ u_int8_t extStatus);
+extern int mrsas_reset_targets(struct mrsas_softc *sc);
extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
extern u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map,
@@ -125,6 +134,9 @@ extern u_int8_t
megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
u_int64_t block, u_int32_t count);
extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
+extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
+extern void mrsas_disable_intr(struct mrsas_softc *sc);
+extern void mrsas_enable_intr(struct mrsas_softc *sc);
/*
@@ -260,6 +272,17 @@ mrsas_action(struct cam_sim *sim, union ccb *ccb)
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
u_int32_t device_id;
+ /*
+ * Check if the system going down
+ * or the adapter is in unrecoverable critical error
+ */
+ if (sc->remove_in_progress ||
+ (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
+ ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
+ xpt_done(ccb);
+ return;
+ }
+
switch (ccb->ccb_h.func_code) {
case XPT_SCSI_IO:
{
@@ -375,6 +398,10 @@ mrsas_scsiio_timeout(void *data)
{
struct mrsas_mpt_cmd *cmd;
struct mrsas_softc *sc;
+ u_int32_t target_id;
+
+ if (!data)
+ return;
cmd = (struct mrsas_mpt_cmd *)data;
sc = cmd->sc;
@@ -383,6 +410,7 @@ mrsas_scsiio_timeout(void *data)
printf("command timeout with NULL ccb\n");
return;
}
+
/*
* Below callout is dummy entry so that it will be cancelled from
* mrsas_cmd_done(). Now Controller will go to OCR/Kill Adapter based
@@ -390,15 +418,25 @@ mrsas_scsiio_timeout(void *data)
* context.
*/
#if (__FreeBSD_version >= 1000510)
- callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0,
+ callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
mrsas_scsiio_timeout, cmd, 0);
#else
- callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
+ callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
mrsas_scsiio_timeout, cmd);
#endif
- sc->do_timedout_reset = SCSIIO_TIMEOUT_OCR;
- if (sc->ocr_thread_active)
- wakeup(&sc->ocr_chan);
+
+ if (cmd->ccb_ptr->cpi.bus_id == 0)
+ target_id = cmd->ccb_ptr->ccb_h.target_id;
+ else
+ target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));
+
+ /* Save the cmd to be processed for TM, if it is not there in the array */
+ if (sc->target_reset_pool[target_id] == NULL) {
+ sc->target_reset_pool[target_id] = cmd;
+ mrsas_atomic_inc(&sc->target_reset_outstanding);
+ }
+
+ return;
}
/*
@@ -421,7 +459,8 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u_int8_t cmd_type;
- if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
+ if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
+ (!sc->fw_sync_cache_support)) {
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
return (0);
@@ -585,10 +624,10 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
* Start timer for IO timeout. Default timeout value is 90 second.
*/
#if (__FreeBSD_version >= 1000510)
- callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0,
+ callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
mrsas_scsiio_timeout, cmd, 0);
#else
- callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
+ callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
mrsas_scsiio_timeout, cmd);
#endif
mrsas_atomic_inc(&sc->fw_outstanding);
@@ -679,7 +718,7 @@ mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
mtx_lock(&sc->mpt_cmd_pool_lock);
cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
- TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
+ TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
mtx_unlock(&sc->mpt_cmd_pool_lock);
return;
@@ -777,8 +816,9 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
struct ccb_scsiio *csio = &(ccb->csio);
struct IO_REQUEST_INFO io_info;
MR_DRV_RAID_MAP_ALL *map_ptr;
+ MR_LD_RAID *raid;
u_int8_t fp_possible;
- u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
+ u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
u_int32_t datalength = 0;
start_lba_lo = 0;
@@ -857,8 +897,8 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
- if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES_EXT) ||
- (!sc->fast_path_io)) {
+ ld = MR_TargetIdToLdGet(device_id, map_ptr);
+ if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) {
io_request->RaidContext.regLockFlags = 0;
fp_possible = 0;
} else {
@@ -866,6 +906,10 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
fp_possible = io_info.fpOkForIo;
}
+ raid = MR_LdRaidGet(ld, map_ptr);
+ /* Store the TM capability value in cmd */
+ cmd->tmCapable = raid->capability.tmCapable;
+
cmd->request_desc->SCSIIO.MSIxIndex =
sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
@@ -875,14 +919,9 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
start_lba_lo, ld_block_size);
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
@@ -911,12 +950,7 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
@@ -947,12 +981,20 @@ mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
- u_int32_t device_id;
+ u_int32_t device_id, ld;
+ MR_DRV_RAID_MAP_ALL *map_ptr;
+ MR_LD_RAID *raid;
MRSAS_RAID_SCSI_IO_REQUEST *io_request;
io_request = cmd->io_request;
device_id = ccb_h->target_id;
+ map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
+ ld = MR_TargetIdToLdGet(device_id, map_ptr);
+ raid = MR_LdRaidGet(ld, map_ptr);
+ /* Store the TM capability value in cmd */
+ cmd->tmCapable = raid->capability.tmCapable;
+
/* FW path for LD Non-RW (SCSI management commands) */
io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
io_request->DevHandle = device_id;
@@ -1002,8 +1044,6 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
MRSAS_RAID_SCSI_IO_REQUEST *io_request;
struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
- pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
-
io_request = cmd->io_request;
device_id = ccb_h->target_id;
local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
@@ -1017,6 +1057,8 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
if (sc->use_seqnum_jbod_fp &&
sc->pd_list[device_id].driveType == 0x00) {
//printf("Using Drv seq num\n");
+ pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
+ cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
io_request->RaidContext.VirtualDiskTgtId = device_id + 255;
io_request->RaidContext.configSeqNum = pd_sync->seq[device_id].seqNum;
io_request->DevHandle = pd_sync->seq[device_id].devHandle;
@@ -1065,7 +1107,7 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
@@ -1192,12 +1234,7 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
io_request = cmd->io_request;
sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
sgl_ptr_end += sc->max_sge_in_main_msg - 1;
@@ -1208,12 +1245,7 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
sgl_ptr->Address = segs[i].ds_addr;
sgl_ptr->Length = segs[i].ds_len;
sgl_ptr->Flags = 0;
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
if (i == nseg - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
}
@@ -1223,12 +1255,7 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
(nseg > sc->max_sge_in_main_msg)) {
pMpi25IeeeSgeChain64_t sg_chain;
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53)) {
+ if (sc->mrsas_gen3_ctrl) {
if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
!= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
cmd->io_request->ChainOffset = sc->chain_offset_io_request;
@@ -1237,12 +1264,7 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
} else
cmd->io_request->ChainOffset = sc->chain_offset_io_request;
sg_chain = sgl_ptr;
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53))
+ if (sc->mrsas_gen3_ctrl)
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
@@ -1295,9 +1317,10 @@ mrsas_xpt_release(struct mrsas_softc *sc)
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
- callout_stop(&cmd->cm_callout);
mrsas_unmap_request(sc, cmd);
+
mtx_lock(&sc->sim_lock);
+ callout_stop(&cmd->cm_callout);
xpt_done(cmd->ccb_ptr);
cmd->ccb_ptr = NULL;
mtx_unlock(&sc->sim_lock);
@@ -1395,3 +1418,269 @@ mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
return (0);
}
+
+/*
+ * mrsas_track_scsiio: Track IOs for a given target in the mpt_cmd_list
+ * input: Adapter instance soft state
+ * Target ID of target
+ * Bus ID of the target
+ *
+ * This function checks for any pending IO in the whole mpt_cmd_list pool
+ * with the bus_id and target_id passed in arguments. If some IO is found
+ * that means target reset is not successfully completed.
+ *
+ * Returns FAIL if IOs pending to the target device, else return SUCCESS
+ */
+static int
+mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
+{
+ int i;
+ struct mrsas_mpt_cmd *mpt_cmd = NULL;
+
+ for (i = 0 ; i < sc->max_fw_cmds; i++) {
+ mpt_cmd = sc->mpt_cmd_list[i];
+
+ /*
+ * Check if the target_id and bus_id is same as the timeout IO
+ */
+ if (mpt_cmd->ccb_ptr) {
+ /* bus_id = 1 denotes a VD */
+ if (bus_id == 1)
+ tgt_id = (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1));
+
+ if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
+ mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
+ device_printf(sc->mrsas_dev,
+ "IO commands pending to target id %d\n", tgt_id);
+ return FAIL;
+ }
+ }
+ }
+
+ return SUCCESS;
+}
+
+#if TM_DEBUG
+/*
+ * mrsas_tm_response_code: Prints TM response code received from FW
+ * input: Adapter instance soft state
+ * MPI reply returned from firmware
+ *
+ * Returns nothing.
+ */
+static void
+mrsas_tm_response_code(struct mrsas_softc *sc,
+ MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
+{
+ char *desc;
+
+ switch (mpi_reply->ResponseCode) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case 0xA:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
+ mpi_reply->ResponseCode, desc);
+ device_printf(sc->mrsas_dev,
+ "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
+ "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
+ mpi_reply->TerminationCount, mpi_reply->DevHandle,
+ mpi_reply->Function, mpi_reply->TaskType,
+ mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
+}
+#endif
+
+/*
+ * mrsas_issue_tm: Fires the TM command to FW and waits for completion
+ * input: Adapter instance soft state
+ *                              request descriptor compiled by mrsas_reset_targets
+ *
+ * Returns FAIL if TM command TIMEDOUT from FW else SUCCESS.
+ */
+static int
+mrsas_issue_tm(struct mrsas_softc *sc,
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
+{
+ int sleep_stat;
+
+ mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+ sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep", 50*hz);
+
+ if (sleep_stat == EWOULDBLOCK) {
+ device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
+ return FAIL;
+ }
+
+ return SUCCESS;
+}
+
+/*
+ * mrsas_reset_targets : Gathers info to fire a target reset command
+ * input: Adapter instance soft state
+ *
+ * This function compiles data for a target reset command to be fired to the FW
+ * and then traverse the target_reset_pool to see targets with TIMEDOUT IOs.
+ *
+ * Returns SUCCESS or FAIL
+ */
+int mrsas_reset_targets(struct mrsas_softc *sc)
+{
+ struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
+ struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
+ MR_TASK_MANAGE_REQUEST *mr_request;
+ MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ int retCode = FAIL, count, i, outstanding;
+ u_int32_t MSIxIndex, bus_id;
+ target_id_t tgt_id;
+#if TM_DEBUG
+ MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
+#endif
+
+ outstanding = mrsas_atomic_read(&sc->fw_outstanding);
+
+ if (!outstanding) {
+ device_printf(sc->mrsas_dev, "NO IOs pending...\n");
+ mrsas_atomic_set(&sc->target_reset_outstanding, 0);
+ retCode = SUCCESS;
+ goto return_status;
+ } else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
+ device_printf(sc->mrsas_dev, "Controller is not operational\n");
+ goto return_status;
+ } else {
+ /* Some more error checks will be added in future */
+ }
+
+ /* Get an mpt frame and an index to fire the TM cmd */
+ tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
+ if (!tm_mpt_cmd) {
+ retCode = FAIL;
+ goto return_status;
+ }
+
+ req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
+ if (!req_desc) {
+ device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
+ retCode = FAIL;
+ goto release_mpt;
+ }
+ memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
+
+ req_desc->HighPriority.SMID = tm_mpt_cmd->index;
+ req_desc->HighPriority.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ req_desc->HighPriority.MSIxIndex = 0;
+ req_desc->HighPriority.LMID = 0;
+ req_desc->HighPriority.Reserved1 = 0;
+ tm_mpt_cmd->request_desc = req_desc;
+
+ mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
+ memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));
+
+ tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
+ tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ tm_mpi_request->TaskMID = 0; /* smid task */
+ tm_mpi_request->LUN[1] = 0;
+
+ /* Traverse the tm_mpt pool to get valid entries */
+ for (i = 0 ; i < MRSAS_MAX_TM_TARGETS; i++) {
+ if(!sc->target_reset_pool[i]) {
+ continue;
+ } else {
+ tgt_mpt_cmd = sc->target_reset_pool[i];
+ }
+
+ tgt_id = i;
+
+ /* See if the target is tm capable or NOT */
+ if (!tgt_mpt_cmd->tmCapable) {
+ device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
+ "CAM target:%d\n", tgt_id);
+
+ retCode = FAIL;
+ goto release_mpt;
+ }
+
+ tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;
+
+ if (i < (MRSAS_MAX_PD - 1)) {
+ mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
+ bus_id = 0;
+ } else {
+ mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
+ bus_id = 1;
+ }
+
+ device_printf(sc->mrsas_dev, "TM will be fired for "
+ "CAM target:%d and bus_id %d\n", tgt_id, bus_id);
+
+ sc->ocr_chan = (void *)&tm_mpt_cmd;
+ retCode = mrsas_issue_tm(sc, req_desc);
+ if (retCode == FAIL)
+ goto release_mpt;
+
+#if TM_DEBUG
+ mpi_reply =
+ (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
+ mrsas_tm_response_code(sc, mpi_reply);
+#endif
+ mrsas_atomic_dec(&sc->target_reset_outstanding);
+ sc->target_reset_pool[i] = NULL;
+
+ /* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
+ mrsas_disable_intr(sc);
+ /* Wait for 1 second to complete parallel ISR calling same
+ * mrsas_complete_cmd()
+ */
+ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
+ 1 * hz);
+ count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
+ mtx_unlock(&sc->sim_lock);
+ for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
+ mrsas_complete_cmd(sc, MSIxIndex);
+ mtx_lock(&sc->sim_lock);
+ retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
+ mrsas_enable_intr(sc);
+
+ if (retCode == FAIL)
+ goto release_mpt;
+ }
+
+ device_printf(sc->mrsas_dev, "Number of targets outstanding "
+ "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));
+
+release_mpt:
+ mrsas_release_mpt_cmd(tm_mpt_cmd);
+return_status:
+ device_printf(sc->mrsas_dev, "target reset %s!!\n",
+ (retCode == SUCCESS) ? "SUCCESS" : "FAIL");
+
+ return retCode;
+}
+
diff --git a/sys/dev/mrsas/mrsas_fp.c b/sys/dev/mrsas/mrsas_fp.c
index f83d52b..e3e4e2a 100644
--- a/sys/dev/mrsas/mrsas_fp.c
+++ b/sys/dev/mrsas/mrsas_fp.c
@@ -747,15 +747,6 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripR
u_int64_t *pdBlock = &io_info->pdBlock;
u_int16_t *pDevHandle = &io_info->devHandle;
u_int32_t logArm, rowMod, armQ, arm;
- u_int8_t do_invader = 0;
-
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53))
- do_invader = 1;
/* Get row and span from io_info for Uneven Span IO. */
row = io_info->start_row;
@@ -782,7 +773,7 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripR
*pDevHandle = MR_PdDevHandleGet(pd, map);
else {
*pDevHandle = MR_PD_INVALID;
- if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ if ((raid->level >= 5) && ((!sc->mrsas_gen3_ctrl) || (sc->mrsas_gen3_ctrl &&
raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
@@ -965,12 +956,7 @@ MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
regSize += stripSize;
}
pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53))
+ if (sc->mrsas_gen3_ctrl)
pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
else
pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
@@ -1453,15 +1439,6 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
u_int64_t *pdBlock = &io_info->pdBlock;
u_int16_t *pDevHandle = &io_info->devHandle;
u_int32_t rowMod, armQ, arm, logArm;
- u_int8_t do_invader = 0;
-
- if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY) ||
- (sc->device_id == MRSAS_INTRUDER) ||
- (sc->device_id == MRSAS_INTRUDER_24) ||
- (sc->device_id == MRSAS_CUTLASS_52) ||
- (sc->device_id == MRSAS_CUTLASS_53))
- do_invader = 1;
row = mega_div64_32(stripRow, raid->rowDataSize);
@@ -1501,7 +1478,7 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
*pDevHandle = MR_PdDevHandleGet(pd, map);
else {
*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
- if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ if ((raid->level >= 5) && ((!sc->mrsas_gen3_ctrl) || (sc->mrsas_gen3_ctrl &&
raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {