-rw-r--r--  sys/conf/files           |    2
-rw-r--r--  sys/dev/mfi/mfi.c        | 1347
-rw-r--r--  sys/dev/mfi/mfi_cam.c    |    6
-rw-r--r--  sys/dev/mfi/mfi_disk.c   |   21
-rw-r--r--  sys/dev/mfi/mfi_ioctl.h  |    7
-rw-r--r--  sys/dev/mfi/mfi_linux.c  |    1
-rw-r--r--  sys/dev/mfi/mfi_pci.c    |   50
-rw-r--r--  sys/dev/mfi/mfi_syspd.c  |  294
-rw-r--r--  sys/dev/mfi/mfi_tbolt.c  | 1410
-rw-r--r--  sys/dev/mfi/mfireg.h     |  654
-rw-r--r--  sys/dev/mfi/mfivar.h     |  209
-rw-r--r--  sys/modules/mfi/Makefile |    2
12 files changed, 3798 insertions, 205 deletions
diff --git a/sys/conf/files b/sys/conf/files
index 1f2bd0b..beae781 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1410,6 +1410,8 @@ dev/mfi/mfi.c optional mfi
dev/mfi/mfi_debug.c optional mfi
dev/mfi/mfi_pci.c optional mfi pci
dev/mfi/mfi_disk.c optional mfi
+dev/mfi/mfi_syspd.c optional mfi
+dev/mfi/mfi_tbolt.c optional mfi
dev/mfi/mfi_linux.c optional mfi compat_linux
dev/mfi/mfi_cam.c optional mfip scbus
dev/mii/acphy.c optional miibus | acphy
diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c
index 8a3d277..e70d667 100644
--- a/sys/dev/mfi/mfi.c
+++ b/sys/dev/mfi/mfi.c
@@ -79,10 +79,11 @@ __FBSDID("$FreeBSD$");
#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
+#include <sys/interrupt.h>
+#include <sys/priority.h>
static int mfi_alloc_commands(struct mfi_softc *);
static int mfi_comms_init(struct mfi_softc *);
-static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
static int mfi_get_controller_info(struct mfi_softc *);
static int mfi_get_log_state(struct mfi_softc *,
struct mfi_evt_log_state **);
@@ -93,16 +94,18 @@ static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void mfi_startup(void *arg);
static void mfi_intr(void *arg);
static void mfi_ldprobe(struct mfi_softc *sc);
+static void mfi_syspdprobe(struct mfi_softc *sc);
static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void mfi_aen_complete(struct mfi_command *);
-static int mfi_aen_setup(struct mfi_softc *, uint32_t);
static int mfi_add_ld(struct mfi_softc *sc, int);
static void mfi_add_ld_complete(struct mfi_command *);
+static int mfi_add_sys_pd(struct mfi_softc *sc, int);
+static void mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command * mfi_bio_command(struct mfi_softc *);
static void mfi_bio_complete(struct mfi_command *);
-static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
+static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
+static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
-static void mfi_complete(struct mfi_softc *, struct mfi_command *);
static int mfi_abort(struct mfi_softc *, struct mfi_command *);
static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
static void mfi_timeout(void *);
@@ -110,12 +113,17 @@ static int mfi_user_command(struct mfi_softc *,
struct mfi_ioc_passthru *);
static void mfi_enable_intr_xscale(struct mfi_softc *sc);
static void mfi_enable_intr_ppc(struct mfi_softc *sc);
-static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
-static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
+static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
+static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
-static void mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
-static void mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
+static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,uint32_t frame_cnt);
+static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,uint32_t frame_cnt);
+static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
+static void mfi_config_unlock(struct mfi_softc *sc, int locked);
+static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
+static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
+static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
@@ -152,6 +160,7 @@ static struct cdevsw mfi_cdevsw = {
MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
+struct mfi_skinny_dma_info mfi_skinny;
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
@@ -162,12 +171,17 @@ mfi_enable_intr_xscale(struct mfi_softc *sc)
static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
- MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
if (sc->mfi_flags & MFI_FLAGS_1078) {
+ MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
- } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
+ }
+ else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
+ MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
}
+ else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
+ MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
+ }
}
static int32_t
@@ -205,35 +219,51 @@ mfi_check_clear_intr_ppc(struct mfi_softc *sc)
if (!(status & MFI_1078_RM)) {
return 1;
}
- } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
+ }
+ else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
if (!(status & MFI_GEN2_RM)) {
return 1;
}
}
-
- MFI_WRITE4(sc, MFI_ODCR0, status);
+ else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
+ if (!(status & MFI_SKINNY_RM)) {
+ return 1;
+ }
+ }
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY)
+ MFI_WRITE4(sc, MFI_OSTS, status);
+ else
+ MFI_WRITE4(sc, MFI_ODCR0, status);
return 0;
}
static void
-mfi_issue_cmd_xscale(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
+mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
}
static void
-mfi_issue_cmd_ppc(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt)
+mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
- MFI_WRITE4(sc, MFI_IQP, (bus_add |frame_cnt <<1)|1 );
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
+ MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
+ MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
+ } else {
+ MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
+ }
}
-static int
+int
mfi_transition_firmware(struct mfi_softc *sc)
{
uint32_t fw_state, cur_state;
int max_wait, i;
+ uint32_t cur_abs_reg_val = 0;
+ uint32_t prev_abs_reg_val = 0;
- fw_state = sc->mfi_read_fw_status(sc)& MFI_FWSTATE_MASK;
+ cur_abs_reg_val = sc->mfi_read_fw_status(sc);
+ fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
while (fw_state != MFI_FWSTATE_READY) {
if (bootverbose)
device_printf(sc->mfi_dev, "Waiting for firmware to "
@@ -244,25 +274,41 @@ mfi_transition_firmware(struct mfi_softc *sc)
device_printf(sc->mfi_dev, "Firmware fault\n");
return (ENXIO);
case MFI_FWSTATE_WAIT_HANDSHAKE:
- MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
- max_wait = 2;
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
+ MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
+ else
+ MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
+ max_wait = MFI_RESET_WAIT_TIME;
break;
case MFI_FWSTATE_OPERATIONAL:
- MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
- max_wait = 10;
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
+ //MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_READY);
+ MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
+ else
+ MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
+ max_wait = MFI_RESET_WAIT_TIME;
break;
case MFI_FWSTATE_UNDEFINED:
case MFI_FWSTATE_BB_INIT:
- max_wait = 2;
+ max_wait = MFI_RESET_WAIT_TIME;
+ break;
+ case MFI_FWSTATE_FW_INIT_2:
+ max_wait = MFI_RESET_WAIT_TIME;
break;
case MFI_FWSTATE_FW_INIT:
- case MFI_FWSTATE_DEVICE_SCAN:
case MFI_FWSTATE_FLUSH_CACHE:
- max_wait = 20;
+ max_wait = MFI_RESET_WAIT_TIME;
+ break;
+ case MFI_FWSTATE_DEVICE_SCAN:
+ max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
+ prev_abs_reg_val = cur_abs_reg_val;
break;
case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
- MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
- max_wait = 10;
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
+ MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
+ else
+ MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
+ max_wait = MFI_RESET_WAIT_TIME;
break;
default:
device_printf(sc->mfi_dev,"Unknown firmware state %#x\n",
@@ -270,12 +316,20 @@ mfi_transition_firmware(struct mfi_softc *sc)
return (ENXIO);
}
for (i = 0; i < (max_wait * 10); i++) {
- fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
+
+ cur_abs_reg_val = sc->mfi_read_fw_status(sc);
+ fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
if (fw_state == cur_state)
DELAY(100000);
else
break;
}
+ if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
+ /* Check the device scanning progress */
+ if (prev_abs_reg_val != cur_abs_reg_val) {
+ continue;
+ }
+ }
if (fw_state == cur_state) {
device_printf(sc->mfi_dev, "Firmware stuck in state "
"%#x\n", fw_state);
@@ -286,26 +340,31 @@ mfi_transition_firmware(struct mfi_softc *sc)
}
static void
-mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
- uint32_t *addr;
+ bus_addr_t *addr;
addr = arg;
*addr = segs[0].ds_addr;
}
+
int
mfi_attach(struct mfi_softc *sc)
{
uint32_t status;
int error, commsz, framessz, sensesz;
int frames, unit, max_fw_sge;
+ uint32_t tb_mem_size = 0;
+ if(sc == NULL)
+ return EINVAL;
- device_printf(sc->mfi_dev, "Megaraid SAS driver Ver 3.00 \n");
+ device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",MEGASAS_VERSION);
mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
sx_init(&sc->mfi_config_lock, "MFI config");
TAILQ_INIT(&sc->mfi_ld_tqh);
+ TAILQ_INIT(&sc->mfi_syspd_tqh);
TAILQ_INIT(&sc->mfi_aen_pids);
TAILQ_INIT(&sc->mfi_cam_ccbq);
@@ -314,15 +373,32 @@ mfi_attach(struct mfi_softc *sc)
mfi_initq_busy(sc);
mfi_initq_bio(sc);
+ //atomic_set(&sc->fw_reset_no_pci_access, 0);
+ sc->adpreset = 0;
+ sc->last_seq_num = 0;
+ sc->disableOnlineCtrlReset = 1;
+ sc->issuepend_done = 1;
+ sc->hw_crit_error = 0;
+
if (sc->mfi_flags & MFI_FLAGS_1064R) {
sc->mfi_enable_intr = mfi_enable_intr_xscale;
sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
}
+ else if(sc->mfi_flags & MFI_FLAGS_TBOLT) {
+ sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
+ sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
+ sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
+ sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
+ sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
+ sc->mfi_adp_reset = mfi_tbolt_adp_reset;
+ sc->mfi_tbolt = 1;
+ TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
+ }
else {
sc->mfi_enable_intr = mfi_enable_intr_ppc;
- sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
+ sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
}
@@ -334,6 +410,32 @@ mfi_attach(struct mfi_softc *sc)
"error %d\n", error);
return (ENXIO);
}
+ //
+
+ //Start: LSIP200113393
+ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
+ 1, /* msegments */
+ MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->verbuf_h_dmat)) {
+ device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
+ BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
+ device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
+ bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
+ sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t), mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
+ //End: LSIP200113393
/*
* Get information needed for sizing the contiguous memory for the
@@ -347,6 +449,94 @@ mfi_attach(struct mfi_softc *sc)
max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
+ /* ThunderBolt Support get the contiguous memory */
+
+ if(sc->mfi_flags & MFI_FLAGS_TBOLT) {
+ mfi_tbolt_init_globals(sc);
+ device_printf(sc->mfi_dev,"MaxCmd = %x MaxSgl = %x state = %x \n",
+ sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
+ tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
+
+ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ tb_mem_size, /* maxsize */
+ 1, /* msegments */
+ tb_mem_size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mfi_tb_dmat)) {
+ device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
+ BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
+ device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->request_message_pool, tb_mem_size);
+ bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
+ sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
+
+ /* For ThunderBolt memory init */
+ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
+ 0x100, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MFI_FRAME_SIZE, /* maxsize */
+ 1, /* msegments */
+ MFI_FRAME_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mfi_tb_init_dmat)) {
+ device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
+ BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
+ device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
+ bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
+ sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb, &sc->mfi_tb_init_busaddr, 0);
+ if(mfi_tbolt_init_desc_pool(sc, sc->request_message_pool, tb_mem_size)) {
+ device_printf(sc->mfi_dev,"Thunderbolt pool preparation error\n");
+ return 0;
+ }
+
+ /*
+ Allocate DMA memory mapping for MPI2 IOC Init descriptor,
+ we are taking it different from what we have allocated for Request
+ and reply descriptors to avoid confusion later
+ */
+ tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
+ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ tb_mem_size, /* maxsize */
+ 1, /* msegments */
+ tb_mem_size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mfi_tb_ioc_init_dmat)) {
+ device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat, (void **)&sc->mfi_tb_ioc_init_desc,
+ BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
+ device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
+ bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
+ sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_ioc_init_busaddr, 0);
+ }
/*
* Create the dma tag for data buffers. Used both for block I/O
* and for various internal data queries.
@@ -396,8 +586,7 @@ mfi_attach(struct mfi_softc *sc)
}
bzero(sc->mfi_comms, commsz);
bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
- sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
-
+ sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
/*
* Allocate DMA memory for the command frames. Keep them in the
* lower 4GB for efficiency. Calculate the size of the commands at
@@ -414,6 +603,8 @@ mfi_attach(struct mfi_softc *sc)
} else {
sc->mfi_sge_size = sizeof(struct mfi_sg32);
}
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY)
+ sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
@@ -438,8 +629,7 @@ mfi_attach(struct mfi_softc *sc)
}
bzero(sc->mfi_frames, framessz);
bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
- sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
-
+ sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
/*
* Allocate DMA memory for the frame sense data. Keep them in the
* lower 4GB for efficiency
@@ -465,40 +655,63 @@ mfi_attach(struct mfi_softc *sc)
return (ENOMEM);
}
bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
- sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
-
+ sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
if ((error = mfi_alloc_commands(sc)) != 0)
return (error);
- if ((error = mfi_comms_init(sc)) != 0)
- return (error);
+ /* Before moving the FW to operational state, check whether
+ * host memory is required by the FW or not
+ */
- if ((error = mfi_get_controller_info(sc)) != 0)
- return (error);
+ /* ThunderBolt MFI_IOC2 INIT */
+ if(sc->mfi_flags & MFI_FLAGS_TBOLT)
+ {
+ sc->mfi_disable_intr(sc);
+ if((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
+ {
+ device_printf(sc->mfi_dev,"TB Init has failed with error %d\n",error);
+ return error;
+ }
- mtx_lock(&sc->mfi_io_lock);
- if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
- mtx_unlock(&sc->mfi_io_lock);
- return (error);
- }
- mtx_unlock(&sc->mfi_io_lock);
+ if((error = mfi_tbolt_alloc_cmd(sc)) != 0)
+ return error;
+ sc->mfi_irq_rid = 0;
+ if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
+ &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
+ device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
+ return (EINVAL);
+ }
+ if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
+ NULL, mfi_intr_tbolt, sc, &sc->mfi_intr)) {
+ device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
+ return (EINVAL);
+ }
+ sc->mfi_enable_intr(sc);
+ sc->map_id = 0;
+ }
+ else
+ {
+
+ if ((error = mfi_comms_init(sc)) != 0)
+ return (error);
- /*
- * Set up the interrupt handler. XXX This should happen in
- * mfi_pci.c
- */
- sc->mfi_irq_rid = 0;
- if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
- &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
- device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
- return (EINVAL);
- }
- if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
- NULL, mfi_intr, sc, &sc->mfi_intr)) {
- device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
- return (EINVAL);
+ sc->mfi_irq_rid = 0;
+ if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
+ &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
+ device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
+ return (EINVAL);
+ }
+ if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
+ NULL, mfi_intr, sc, &sc->mfi_intr)) {
+ device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
+ return (EINVAL);
+ }
+ sc->mfi_enable_intr(sc);
}
-
+ if ((error = mfi_get_controller_info(sc)) != 0)
+ return (error);
+ sc->disableOnlineCtrlReset = 0;
+
/* Register a config hook to probe the bus for arrays */
sc->mfi_ich.ich_func = mfi_startup;
sc->mfi_ich.ich_arg = sc;
@@ -507,6 +720,10 @@ mfi_attach(struct mfi_softc *sc)
"hook\n");
return (EINVAL);
}
+ if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
+ mtx_unlock(&sc->mfi_io_lock);
+ return (error);
+ }
/*
* Register a shutdown handler.
@@ -548,6 +765,8 @@ mfi_attach(struct mfi_softc *sc)
return (0);
}
+
+
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
@@ -578,8 +797,11 @@ mfi_alloc_commands(struct mfi_softc *sc)
cm->cm_sc = sc;
cm->cm_index = i;
if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
- &cm->cm_dmamap) == 0)
+ &cm->cm_dmamap) == 0) {
+ mtx_lock(&sc->mfi_io_lock);
mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
+ }
else
break;
sc->mfi_total_cmds++;
@@ -594,6 +816,8 @@ mfi_release_command(struct mfi_command *cm)
struct mfi_frame_header *hdr;
uint32_t *hdr_data;
+ mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
+
/*
* Zero out the important fields of the frame, but make sure the
* context field is preserved. For efficiency, handle the fields
@@ -618,6 +842,7 @@ mfi_release_command(struct mfi_command *cm)
cm->cm_data = NULL;
cm->cm_sg = 0;
cm->cm_total_frame_size = 0;
+ cm->retry_for_fw_reset = 0;
mfi_enqueue_free(cm);
}
@@ -629,13 +854,19 @@ mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode
struct mfi_command *cm;
struct mfi_dcmd_frame *dcmd;
void *buf = NULL;
-
+ uint32_t context = 0;
+
mtx_assert(&sc->mfi_io_lock, MA_OWNED);
-
+
cm = mfi_dequeue_free(sc);
if (cm == NULL)
return (EBUSY);
+ /* Zero out the MFI frame */
+ context = cm->cm_frame->header.context;
+ bzero(cm->cm_frame,sizeof (union mfi_frame));
+ cm->cm_frame->header.context = context;
+
if ((bufsize > 0) && (bufp != NULL)) {
if (*bufp == NULL) {
buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
@@ -655,6 +886,7 @@ mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode
dcmd->header.timeout = 0;
dcmd->header.flags = 0;
dcmd->header.data_len = bufsize;
+ dcmd->header.scsi_status = 0;
dcmd->opcode = opcode;
cm->cm_sg = &dcmd->sgl;
cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
@@ -676,11 +908,17 @@ mfi_comms_init(struct mfi_softc *sc)
struct mfi_init_frame *init;
struct mfi_init_qinfo *qinfo;
int error;
+ uint32_t context = 0;
mtx_lock(&sc->mfi_io_lock);
if ((cm = mfi_dequeue_free(sc)) == NULL)
return (EBUSY);
+ /* Zero out the MFI frame */
+ context = cm->cm_frame->header.context;
+ bzero(cm->cm_frame,sizeof (union mfi_frame));
+ cm->cm_frame->header.context = context;
+
/*
* Abuse the SG list area of the frame to hold the init_qinfo
* object;
@@ -741,10 +979,13 @@ mfi_get_controller_info(struct mfi_softc *sc)
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
- max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
+
+ //max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
+ max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
max_sectors_2 = ci->max_request_size;
sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
-
+ sc->disableOnlineCtrlReset = ci->properties.OnOffProperties.disableOnlineCtrlReset;
+
out:
if (ci)
free(ci, M_MFIBUF);
@@ -760,6 +1001,7 @@ mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
struct mfi_command *cm = NULL;
int error;
+ mtx_lock(&sc->mfi_io_lock);
error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
(void **)log_state, sizeof(**log_state));
if (error)
@@ -778,11 +1020,12 @@ mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
out:
if (cm)
mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
return (error);
}
-static int
+int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
struct mfi_evt_log_state *log_state = NULL;
@@ -817,7 +1060,7 @@ mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
return 0;
}
-static int
+int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{
@@ -840,7 +1083,6 @@ mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
return (cm->cm_error);
}
-
void
mfi_free(struct mfi_softc *sc)
{
@@ -889,7 +1131,70 @@ mfi_free(struct mfi_softc *sc)
sc->mfi_comms_dmamap);
if (sc->mfi_comms_dmat != NULL)
bus_dma_tag_destroy(sc->mfi_comms_dmat);
-
+
+ /* ThunderBolt contiguous memory free here */
+
+ if(sc->mfi_flags & MFI_FLAGS_TBOLT)
+ {
+ if (sc->mfi_tb_busaddr != 0)
+ bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
+ if (sc->request_message_pool != NULL)
+ bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
+ sc->mfi_tb_dmamap);
+ if (sc->mfi_tb_dmat != NULL)
+ bus_dma_tag_destroy(sc->mfi_tb_dmat);
+
+ /* Version buffer memory free */
+ // Start LSIP200113393
+ if (sc->verbuf_h_busaddr != 0)
+ bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
+ if (sc->verbuf != NULL)
+ bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
+ sc->verbuf_h_dmamap);
+ if (sc->verbuf_h_dmat != NULL)
+ bus_dma_tag_destroy(sc->verbuf_h_dmat);
+
+ // End LSIP200113393
+ /* ThunderBolt INIT packet memory free */
+ if (sc->mfi_tb_init_busaddr != 0)
+ bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
+ if (sc->mfi_tb_init != NULL)
+ bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
+ sc->mfi_tb_init_dmamap);
+ if (sc->mfi_tb_init_dmat != NULL)
+ bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
+
+ /* ThunderBolt IOC Init Desc memory free here */
+
+ if (sc->mfi_tb_ioc_init_busaddr != 0)
+ bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap);
+ if (sc->mfi_tb_ioc_init_desc != NULL)
+ bus_dmamem_free(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_desc, sc->mfi_tb_ioc_init_dmamap);
+ if (sc->mfi_tb_ioc_init_dmat != NULL)
+ bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
+
+ for(int i=0; i < sc->mfi_max_fw_cmds; i++)
+ {
+ if (sc->mfi_cmd_pool_tbolt != NULL)
+ {
+ if (sc->mfi_cmd_pool_tbolt[i] != NULL)
+ {
+ free(sc->mfi_cmd_pool_tbolt[i], M_MFIBUF);
+ sc->mfi_cmd_pool_tbolt[i] = NULL;
+ }
+ }
+ }
+ if (sc->mfi_cmd_pool_tbolt != NULL)
+ {
+ free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
+ sc->mfi_cmd_pool_tbolt = NULL;
+ }
+ if (sc->request_desc_pool != NULL)
+ {
+ free(sc->request_desc_pool, M_MFIBUF);
+ sc->request_desc_pool = NULL;
+ }
+ }
if (sc->mfi_buffer_dmat != NULL)
bus_dma_tag_destroy(sc->mfi_buffer_dmat);
if (sc->mfi_parent_dmat != NULL)
@@ -912,10 +1217,12 @@ mfi_startup(void *arg)
config_intrhook_disestablish(&sc->mfi_ich);
- sc->mfi_enable_intr(sc);
+ //sc->mfi_enable_intr(sc);
sx_xlock(&sc->mfi_config_lock);
mtx_lock(&sc->mfi_io_lock);
mfi_ldprobe(sc);
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY)
+ mfi_syspdprobe(sc);
mtx_unlock(&sc->mfi_io_lock);
sx_xunlock(&sc->mfi_config_lock);
}
@@ -976,6 +1283,9 @@ mfi_shutdown(struct mfi_softc *sc)
if (sc->mfi_aen_cm != NULL)
mfi_abort(sc, sc->mfi_aen_cm);
+ if (sc->map_update_cmd != NULL)
+ mfi_abort(sc, sc->map_update_cmd);
+
dcmd = &cm->cm_frame->dcmd;
dcmd->header.flags = MFI_FRAME_DIR_NONE;
cm->cm_flags = MFI_CMD_POLLED;
@@ -986,9 +1296,80 @@ mfi_shutdown(struct mfi_softc *sc)
}
mfi_release_command(cm);
+ sc->shutdown_issued = 1;
mtx_unlock(&sc->mfi_io_lock);
return (error);
}
+static void
+mfi_syspdprobe(struct mfi_softc *sc)
+{
+ struct mfi_frame_header *hdr;
+ struct mfi_command *cm = NULL;
+ struct mfi_pd_list *pdlist = NULL;
+ struct mfi_system_pd *syspd;
+ int error, i;
+
+ sx_assert(&sc->mfi_config_lock,SA_XLOCKED);
+ mtx_assert(&sc->mfi_io_lock,MA_OWNED);
+ /* Add SYSTEM PD's */
+ error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
+ (void **)&pdlist, sizeof(*pdlist));
+ if (error){
+ device_printf(sc->mfi_dev,"Error while forming SYSTEM PD list\n");
+ goto out;
+ }
+
+ cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
+ cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+ cm->cm_frame->dcmd.mbox[1] = 0;
+ if (mfi_mapcmd(sc, cm) != 0) {
+ device_printf(sc->mfi_dev, "Failed to get syspd device listing\n");
+ goto out;
+ }
+ bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
+ hdr = &cm->cm_frame->header;
+ if (hdr->cmd_status != MFI_STAT_OK) {
+ device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n",
+ hdr->cmd_status);
+ goto out;
+ }
+ for (i=0;i<pdlist->count;i++) {
+ if(pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id)
+ goto skip_sys_pd_add;
+ /* Get each PD and add it to the system */
+ if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
+ TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
+ if (syspd->pd_id == pdlist->addr[i].device_id)
+ goto skip_sys_pd_add;
+ }
+ }
+ mfi_add_sys_pd(sc,pdlist->addr[i].device_id);
+ skip_sys_pd_add:;
+
+ }
+ /* Delete SYSPD's whose state has been changed */
+ if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
+ TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
+ for (i=0;i<pdlist->count;i++) {
+ if (syspd->pd_id == pdlist->addr[i].device_id)
+ goto skip_sys_pd_delete;
+ }
+ mtx_lock(&Giant);
+ device_delete_child(sc->mfi_dev,syspd->pd_dev);
+ mtx_unlock(&Giant);
+skip_sys_pd_delete:;
+ }
+ }
+out:
+ if (pdlist)
+ free(pdlist, M_MFIBUF);
+ if (cm)
+ mfi_release_command(cm);
+
+ return;
+}
static void
mfi_ldprobe(struct mfi_softc *sc)
@@ -1082,8 +1463,79 @@ format_class(int8_t class)
}
static void
-mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
+mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail,uint8_t probe_sys_pd)
{
+ struct mfi_system_pd *syspd = NULL;
+ switch (detail->arg_type) {
+ case MR_EVT_ARGS_NONE:
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+ if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
+ device_printf(sc->mfi_dev,"HostBus scan raised\n");
+ if (probe_sys_pd) {
+ /* Probe for new SYSPD's and Delete invalid SYSPD's */
+ sx_xlock(&sc->mfi_config_lock);
+ mtx_lock(&sc->mfi_io_lock);
+ mfi_syspdprobe(sc);
+ mtx_unlock(&sc->mfi_io_lock);
+ sx_xunlock(&sc->mfi_config_lock);
+ }
+ }
+ break;
+ case MR_EVT_ARGS_LD_STATE:
+ /* During load time driver reads all the events starting from the one that
+ * has been logged after shutdown. Avoid these old events.
+ */
+ if (!TAILQ_EMPTY(&sc->mfi_ld_tqh)) {
+ if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
+ /* Remove the LD */
+ struct mfi_disk *ld = NULL;
+ TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
+ if (ld->ld_id == detail->args.ld_state.ld.target_id)
+ break;
+ }
+ /*
+ Fix: for kernel panics when SSCD is removed
+ KASSERT(ld != NULL, ("volume dissappeared"));
+ */
+ if(ld != NULL)
+ {
+ mtx_lock(&Giant);
+ device_delete_child(sc->mfi_dev, ld->ld_dev);
+ mtx_unlock(&Giant);
+ }
+ }
+ }
+ break;
+ case MR_EVT_ARGS_PD:
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_PD_INSERTED 0x005b
+ if (detail->code == MR_EVT_PD_REMOVED) {
+ if (probe_sys_pd) {
+ /* If the removed device is a SYSPD then delete it */
+ if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
+ TAILQ_FOREACH(syspd,&sc->mfi_syspd_tqh,pd_link) {
+ if (syspd->pd_id == detail->args.pd.device_id) {
+ mtx_lock(&Giant);
+ device_delete_child(sc->mfi_dev,syspd->pd_dev);
+ mtx_unlock(&Giant);
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (detail->code == MR_EVT_PD_INSERTED) {
+ if (probe_sys_pd) {
+ /* Probe for new SYSPD's */
+ sx_xlock(&sc->mfi_config_lock);
+ mtx_lock(&sc->mfi_io_lock);
+ mfi_syspdprobe(sc);
+ mtx_unlock(&sc->mfi_io_lock);
+ sx_xunlock(&sc->mfi_config_lock);
+ }
+ }
+ break;
+ }
device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
format_timestamp(detail->time), detail->evt_class.members.locale,
@@ -1113,12 +1565,16 @@ mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
< current_aen.members.evt_class)
current_aen.members.evt_class =
prior_aen.members.evt_class;
+ mtx_lock(&sc->mfi_io_lock);
mfi_abort(sc, sc->mfi_aen_cm);
+ mtx_unlock(&sc->mfi_io_lock);
}
}
+ mtx_lock(&sc->mfi_io_lock);
error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
(void **)&ed, sizeof(*ed));
+ mtx_unlock(&sc->mfi_io_lock);
if (error) {
goto out;
}
@@ -1128,11 +1584,14 @@ mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
((uint32_t *)&dcmd->mbox)[1] = locale;
cm->cm_flags = MFI_CMD_DATAIN;
cm->cm_complete = mfi_aen_complete;
-
+
+ sc->last_seq_num = seq;
sc->mfi_aen_cm = cm;
+ mtx_lock(&sc->mfi_io_lock);
mfi_enqueue_ready(cm);
mfi_startio(sc);
+ mtx_unlock(&sc->mfi_io_lock);
out:
return (error);
@@ -1148,6 +1607,8 @@ mfi_aen_complete(struct mfi_command *cm)
int seq = 0, aborted = 0;
sc = cm->cm_sc;
+ mtx_assert(&sc->mfi_io_lock, MA_OWNED);
+
hdr = &cm->cm_frame->header;
if (sc->mfi_aen_cm == NULL)
@@ -1168,7 +1629,9 @@ mfi_aen_complete(struct mfi_command *cm)
* XXX If this function is too expensive or is recursive, then
* events should be put onto a queue and processed later.
*/
- mfi_decode_evt(sc, detail);
+ mtx_unlock(&sc->mfi_io_lock);
+ mfi_decode_evt(sc, detail,1);
+ mtx_lock(&sc->mfi_io_lock);
seq = detail->seq + 1;
TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
@@ -1187,7 +1650,9 @@ mfi_aen_complete(struct mfi_command *cm)
/* set it up again so the driver can catch more events */
if (!aborted) {
+ mtx_unlock(&sc->mfi_io_lock);
mfi_aen_setup(sc, seq);
+ mtx_lock(&sc->mfi_io_lock);
}
}
@@ -1213,10 +1678,13 @@ mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
return (ENOMEM);
for (seq = start_seq;;) {
+ mtx_lock(&sc->mfi_io_lock);
if ((cm = mfi_dequeue_free(sc)) == NULL) {
free(el, M_MFIBUF);
+ mtx_unlock(&sc->mfi_io_lock);
return (EBUSY);
}
+ mtx_unlock(&sc->mfi_io_lock);
dcmd = &cm->cm_frame->dcmd;
bzero(dcmd->mbox, MFI_MBOX_SIZE);
@@ -1232,29 +1700,38 @@ mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
cm->cm_data = el;
cm->cm_len = size;
+ mtx_lock(&sc->mfi_io_lock);
if ((error = mfi_mapcmd(sc, cm)) != 0) {
device_printf(sc->mfi_dev,
"Failed to get controller entries\n");
mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
break;
}
+ mtx_unlock(&sc->mfi_io_lock);
bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
+ mtx_lock(&sc->mfi_io_lock);
mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
break;
}
if (dcmd->header.cmd_status != MFI_STAT_OK) {
device_printf(sc->mfi_dev,
"Error %d fetching controller entries\n",
dcmd->header.cmd_status);
+ mtx_lock(&sc->mfi_io_lock);
mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
break;
}
+ mtx_lock(&sc->mfi_io_lock);
mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
for (i = 0; i < el->count; i++) {
/*
@@ -1270,7 +1747,7 @@ mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
else if (el->event[i].seq < start_seq)
break;
}
- mfi_decode_evt(sc, &el->event[i]);
+ mfi_decode_evt(sc, &el->event[i], 0);
}
seq = el->event[el->count - 1].seq + 1;
}
@@ -1307,8 +1784,14 @@ mfi_add_ld(struct mfi_softc *sc, int id)
free(ld_info, M_MFIBUF);
return (0);
}
-
- mfi_add_ld_complete(cm);
+ if (ld_info->ld_config.params.isSSCD != 1)
+ mfi_add_ld_complete(cm);
+ else
+ {
+ mfi_release_command(cm);
+ if(ld_info) /* SSCD drives ld_info free here */
+ free(ld_info, M_MFIBUF);
+ }
return (0);
}
@@ -1348,22 +1831,186 @@ mfi_add_ld_complete(struct mfi_command *cm)
mtx_lock(&sc->mfi_io_lock);
}
+static int mfi_add_sys_pd(struct mfi_softc *sc,int id)
+{
+ struct mfi_command *cm;
+ struct mfi_dcmd_frame *dcmd = NULL;
+ struct mfi_pd_info *pd_info = NULL;
+ int error;
+
+ mtx_assert(&sc->mfi_io_lock,MA_OWNED);
+
+ error = mfi_dcmd_command(sc,&cm,MFI_DCMD_PD_GET_INFO,
+ (void **)&pd_info, sizeof(*pd_info));
+ if (error) {
+ device_printf(sc->mfi_dev,
+ "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",error);
+ if (pd_info)
+ free(pd_info,M_MFIBUF);
+ return (error);
+ }
+ cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
+ dcmd = &cm->cm_frame->dcmd;
+ dcmd->mbox[0]=id;
+ dcmd->header.scsi_status = 0;
+ dcmd->header.pad0 = 0;
+ if (mfi_mapcmd(sc,cm) != 0) {
+ device_printf(sc->mfi_dev,
+ "Failed to get physical drive info %d\n",id);
+ free(pd_info,M_MFIBUF);
+ return (0);
+ }
+ bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->mfi_buffer_dmat,cm->cm_dmamap);
+ mfi_add_sys_pd_complete(cm);
+ return (0);
+}
+
+static void
+mfi_add_sys_pd_complete(struct mfi_command *cm)
+{
+ struct mfi_frame_header *hdr;
+ struct mfi_pd_info *pd_info;
+ struct mfi_softc *sc;
+ device_t child;
+
+ sc = cm->cm_sc;
+ hdr = &cm->cm_frame->header;
+ pd_info = cm->cm_private;
+
+ if (hdr->cmd_status != MFI_STAT_OK) {
+ free(pd_info, M_MFIBUF);
+ mfi_release_command(cm);
+ return;
+ }
+ if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
+ device_printf(sc->mfi_dev,"PD=%x is not SYSTEM PD\n",
+ pd_info->ref.v.device_id);
+ free(pd_info, M_MFIBUF);
+ mfi_release_command(cm);
+ return;
+ }
+ mfi_release_command(cm);
+
+ mtx_unlock(&sc->mfi_io_lock);
+ mtx_lock(&Giant);
+ if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
+ device_printf(sc->mfi_dev, "Failed to add system pd\n");
+ free(pd_info, M_MFIBUF);
+ mtx_unlock(&Giant);
+ mtx_lock(&sc->mfi_io_lock);
+ return;
+ }
+
+ device_set_ivars(child, pd_info);
+ device_set_desc(child, "MFI System PD");
+ bus_generic_attach(sc->mfi_dev);
+ mtx_unlock(&Giant);
+ mtx_lock(&sc->mfi_io_lock);
+}
static struct mfi_command *
mfi_bio_command(struct mfi_softc *sc)
{
- struct mfi_io_frame *io;
- struct mfi_command *cm;
struct bio *bio;
- int flags, blkcount;
+ struct mfi_command *cm = NULL;
- if ((cm = mfi_dequeue_free(sc)) == NULL)
+ /*reserving two commands to avoid starvation for IOCTL*/
+ if(sc->mfi_qstat[MFIQ_FREE].q_length < 2){
return (NULL);
-
+ }
if ((bio = mfi_dequeue_bio(sc)) == NULL) {
- mfi_release_command(cm);
return (NULL);
}
+ if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
+ cm = mfi_build_ldio(sc,bio);
+ } else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
+ cm = mfi_build_syspdio(sc,bio);
+ }
+ if (!cm)
+ mfi_enqueue_bio(sc,bio);
+ return cm;
+}
+static struct mfi_command *
+mfi_build_syspdio(struct mfi_softc *sc,struct bio *bio)
+{
+ struct mfi_command *cm;
+ struct mfi_pass_frame *pass;
+ int flags = 0,blkcount = 0;
+ uint32_t context = 0;
+
+ if ((cm = mfi_dequeue_free(sc)) == NULL)
+ return (NULL);
+
+ /* Zero out the MFI frame */
+ context = cm->cm_frame->header.context;
+ bzero(cm->cm_frame,sizeof(union mfi_frame));
+ cm->cm_frame->header.context = context;
+ pass = &cm->cm_frame->pass;
+ bzero(pass->cdb,16);
+ pass->header.cmd = MFI_CMD_PD_SCSI_IO;
+ switch (bio->bio_cmd & 0x03) {
+ case BIO_READ:
+#define SCSI_READ 0x28
+ pass->cdb[0] = SCSI_READ;
+ flags = MFI_CMD_DATAIN;
+ break;
+ case BIO_WRITE:
+#define SCSI_WRITE 0x2a
+ pass->cdb[0] = SCSI_WRITE;
+ flags = MFI_CMD_DATAOUT;
+ break;
+ default:
+ panic("Invalid bio command");
+ }
+
+ /* Cheat with the sector length to avoid a non-constant division */
+ blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
+ /* Fill the LBA and Transfer length in CDB */
+ pass->cdb[2] = (bio->bio_pblkno & 0xff000000) >> 24;
+ pass->cdb[3] = (bio->bio_pblkno & 0x00ff0000) >> 16;
+ pass->cdb[4] = (bio->bio_pblkno & 0x0000ff00) >> 8;
+ pass->cdb[5] = bio->bio_pblkno & 0x000000ff;
+ pass->cdb[7] = (blkcount & 0xff00) >> 8;
+ pass->cdb[8] = (blkcount & 0x00ff);
+ pass->header.target_id = (uintptr_t)bio->bio_driver1;
+ pass->header.timeout = 0;
+ pass->header.flags = 0;
+ pass->header.scsi_status = 0;
+ pass->header.sense_len = MFI_SENSE_LEN;
+ pass->header.data_len = bio->bio_bcount;
+ pass->header.cdb_len = 10;
+ #if defined(__amd64__)
+ pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
+ pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
+ #else
+ pass->sense_addr_lo = cm->cm_sense_busaddr;
+ pass->sense_addr_hi = 0;
+ #endif
+ cm->cm_complete = mfi_bio_complete;
+ cm->cm_private = bio;
+ cm->cm_data = bio->bio_data;
+ cm->cm_len = bio->bio_bcount;
+ cm->cm_sg = &pass->sgl;
+ cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
+ cm->cm_flags = flags;
+ return (cm);
+}
+static struct mfi_command *
+mfi_build_ldio(struct mfi_softc *sc,struct bio *bio)
+{
+ struct mfi_io_frame *io;
+ struct mfi_command *cm;
+ int flags, blkcount;
+ uint32_t context = 0;
+
+ if ((cm = mfi_dequeue_free(sc)) == NULL)
+ return (NULL);
+
+ /* Zero out the MFI frame */
+ context = cm->cm_frame->header.context;
+ bzero(cm->cm_frame,sizeof(union mfi_frame));
+ cm->cm_frame->header.context = context;
io = &cm->cm_frame->io;
switch (bio->bio_cmd & 0x03) {
case BIO_READ:
@@ -1383,10 +2030,16 @@ mfi_bio_command(struct mfi_softc *sc)
io->header.target_id = (uintptr_t)bio->bio_driver1;
io->header.timeout = 0;
io->header.flags = 0;
+ io->header.scsi_status = 0;
io->header.sense_len = MFI_SENSE_LEN;
io->header.data_len = blkcount;
+ #if defined(__amd64__)
+ io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
+ io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000 ) >> 32;
+ #else
io->sense_addr_lo = cm->cm_sense_busaddr;
io->sense_addr_hi = 0;
+ #endif
io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
io->lba_lo = bio->bio_pblkno & 0xffffffff;
cm->cm_complete = mfi_bio_complete;
@@ -1459,14 +2112,14 @@ mfi_startio(struct mfi_softc *sc)
}
}
-static int
+int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
int error, polled;
mtx_assert(&sc->mfi_io_lock, MA_OWNED);
- if (cm->cm_data != NULL) {
+ if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
@@ -1475,7 +2128,10 @@ mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
return (0);
}
} else {
- error = mfi_send_frame(sc, cm);
+ if(sc->MFA_enabled)
+ error = mfi_tbolt_send_frame(sc, cm);
+ else
+ error = mfi_send_frame(sc, cm);
}
return (error);
@@ -1489,6 +2145,8 @@ mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
union mfi_sgl *sgl;
struct mfi_softc *sc;
int i, j, first, dir;
+ int sgl_mapped = 0;
+ int sge_size = 0;
cm = (struct mfi_command *)arg;
sc = cm->cm_sc;
@@ -1501,34 +2159,54 @@ mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
mfi_complete(sc, cm);
return;
}
-
- j = 0;
- if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
- first = cm->cm_stp_len;
- if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
- sgl->sg32[j].addr = segs[0].ds_addr;
- sgl->sg32[j++].len = first;
- } else {
- sgl->sg64[j].addr = segs[0].ds_addr;
- sgl->sg64[j++].len = first;
- }
- } else
- first = 0;
- if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
+ /* Use IEEE sgl only for IO's on a SKINNY controller
+ * For other commands on a SKINNY controller use either
+ * sg32 or sg64 based on the sizeof(bus_addr_t).
+ * Also calculate the total frame size based on the type
+ * of SGL used.
+ */
+ if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
+ (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
+ (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
+ (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
for (i = 0; i < nsegs; i++) {
- sgl->sg32[j].addr = segs[i].ds_addr + first;
- sgl->sg32[j++].len = segs[i].ds_len - first;
- first = 0;
+ sgl->sg_skinny[i].addr = segs[i].ds_addr;
+ sgl->sg_skinny[i].len = segs[i].ds_len;
+ sgl->sg_skinny[i].flag = 0;
}
+ hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
+ sgl_mapped = 1;
+ sge_size = sizeof(struct mfi_sg_skinny);
} else {
- for (i = 0; i < nsegs; i++) {
- sgl->sg64[j].addr = segs[i].ds_addr + first;
- sgl->sg64[j++].len = segs[i].ds_len - first;
+ j = 0;
+ if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
+ first = cm->cm_stp_len;
+ if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
+ sgl->sg32[j].addr = segs[0].ds_addr;
+ sgl->sg32[j++].len = first;
+ } else {
+ sgl->sg64[j].addr = segs[0].ds_addr;
+ sgl->sg64[j++].len = first;
+ }
+ } else
first = 0;
+ if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
+ for (i = 0; i < nsegs; i++) {
+ sgl->sg32[j].addr = segs[i].ds_addr + first;
+ sgl->sg32[j++].len = segs[i].ds_len - first;
+ first = 0;
+ }
+ } else {
+ for (i = 0; i < nsegs; i++) {
+ sgl->sg64[j].addr = segs[i].ds_addr + first;
+ sgl->sg64[j++].len = segs[i].ds_len - first;
+ first = 0;
+ }
+ hdr->flags |= MFI_FRAME_SGL64;
}
- hdr->flags |= MFI_FRAME_SGL64;
+ hdr->sg_count = j;
}
- hdr->sg_count = j;
+ hdr->sg_count = nsegs;
dir = 0;
if (cm->cm_flags & MFI_CMD_DATAIN) {
@@ -1539,8 +2217,6 @@ mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
dir |= BUS_DMASYNC_PREWRITE;
hdr->flags |= MFI_FRAME_DIR_WRITE;
}
- if (cm->cm_frame->header.cmd == MFI_CMD_STP)
- dir |= BUS_DMASYNC_PREWRITE;
bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
cm->cm_flags |= MFI_CMD_MAPPED;
@@ -1550,10 +2226,13 @@ mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
* least 1 frame, so don't compensate for the modulo of the
* following division.
*/
- cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
+ cm->cm_total_frame_size += (sge_size * nsegs);
cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
- mfi_send_frame(sc, cm);
+ if(sc->MFA_enabled)
+ mfi_tbolt_send_frame(sc, cm);
+ else
+ mfi_send_frame(sc, cm);
return;
}
@@ -1611,7 +2290,8 @@ mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
return (0);
}
-static void
+
+void
mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
{
int dir;
@@ -1643,6 +2323,7 @@ mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
struct mfi_command *cm;
struct mfi_abort_frame *abort;
int i = 0;
+ uint32_t context = 0;
mtx_assert(&sc->mfi_io_lock, MA_OWNED);
@@ -1650,16 +2331,28 @@ mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
return (EBUSY);
}
+ /* Zero out the MFI frame */
+ context = cm->cm_frame->header.context;
+ bzero(cm->cm_frame,sizeof (union mfi_frame));
+ cm->cm_frame->header.context = context;
+
abort = &cm->cm_frame->abort;
abort->header.cmd = MFI_CMD_ABORT;
abort->header.flags = 0;
+ abort->header.scsi_status = 0;
abort->abort_context = cm_abort->cm_frame->header.context;
+ #if defined(__amd64__)
+ abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr & 0xFFFFFFFF;
+ abort->abort_mfi_addr_hi = (cm_abort->cm_frame_busaddr & 0xFFFFFFFF00000000 ) >> 32 ;
+ #else
abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
abort->abort_mfi_addr_hi = 0;
+ #endif
cm->cm_data = NULL;
cm->cm_flags = MFI_CMD_POLLED;
- sc->mfi_aen_cm->cm_aen_abort = 1;
+ if(sc->mfi_aen_cm)
+ sc->mfi_aen_cm->cm_aen_abort = 1;
mfi_mapcmd(sc, cm);
mfi_release_command(cm);
@@ -1677,19 +2370,31 @@ mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
struct mfi_command *cm;
struct mfi_io_frame *io;
int error;
+ uint32_t context = 0;
if ((cm = mfi_dequeue_free(sc)) == NULL)
return (EBUSY);
+ /* Zero out the MFI frame */
+ context = cm->cm_frame->header.context;
+ bzero(cm->cm_frame,sizeof (union mfi_frame));
+ cm->cm_frame->header.context = context;
+
io = &cm->cm_frame->io;
io->header.cmd = MFI_CMD_LD_WRITE;
io->header.target_id = id;
io->header.timeout = 0;
io->header.flags = 0;
+ io->header.scsi_status = 0;
io->header.sense_len = MFI_SENSE_LEN;
io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
+ #if defined(__amd64__)
+ io->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
+ io->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
+ #else
io->sense_addr_lo = cm->cm_sense_busaddr;
io->sense_addr_hi = 0;
+ #endif
io->lba_hi = (lba & 0xffffffff00000000) >> 32;
io->lba_lo = lba & 0xffffffff;
cm->cm_data = virt;
@@ -1707,6 +2412,57 @@ mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
return (error);
}
+int
+mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
+{
+ struct mfi_command *cm;
+ struct mfi_pass_frame *pass;
+ int error;
+ int blkcount = 0;
+
+ if ((cm = mfi_dequeue_free(sc)) == NULL)
+ return (EBUSY);
+
+ pass = &cm->cm_frame->pass;
+ bzero(pass->cdb,16);
+ pass->header.cmd = MFI_CMD_PD_SCSI_IO;
+ pass->cdb[0] = SCSI_WRITE;
+ pass->cdb[2] = (lba & 0xff000000) >> 24;
+ pass->cdb[3] = (lba & 0x00ff0000) >> 16;
+ pass->cdb[4] = (lba & 0x0000ff00) >> 8;
+ pass->cdb[5] = (lba & 0x000000ff);
+ blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
+ pass->cdb[7] = (blkcount & 0xff00) >> 8;
+ pass->cdb[8] = (blkcount & 0x00ff);
+ pass->header.target_id = id;
+ pass->header.timeout = 0;
+ pass->header.flags = 0;
+ pass->header.scsi_status = 0;
+ pass->header.sense_len = MFI_SENSE_LEN;
+ pass->header.data_len = len;
+ pass->header.cdb_len = 10;
+ #if defined(__amd64__)
+ pass->sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
+ pass->sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
+ #else
+ pass->sense_addr_lo = cm->cm_sense_busaddr;
+ pass->sense_addr_hi = 0;
+ #endif
+ cm->cm_data = virt;
+ cm->cm_len = len;
+ cm->cm_sg = &pass->sgl;
+ cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
+ cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
+
+ error = mfi_mapcmd(sc, cm);
+ bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
+ mfi_release_command(cm);
+
+ return (error);
+}
+
static int
mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
@@ -1778,6 +2534,9 @@ mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
struct mfi_disk *ld, *ld2;
int error;
+ struct mfi_system_pd *syspd = NULL;
+ uint16_t syspd_id;
+ uint16_t *mbox;
mtx_assert(&sc->mfi_io_lock, MA_OWNED);
error = 0;
@@ -1806,6 +2565,22 @@ mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
}
}
break;
+ case MFI_DCMD_PD_STATE_SET:
+ mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
+ syspd_id = mbox[0];
+ if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
+ if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
+ TAILQ_FOREACH(syspd,&sc->mfi_syspd_tqh,pd_link) {
+ if(syspd->pd_id == syspd_id)
+ break;
+ }
+ }
+ }
+ else
+ break;
+ if(syspd)
+ error = mfi_syspd_disable(syspd);
+ break;
default:
break;
}
@@ -1817,6 +2592,9 @@ static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
struct mfi_disk *ld, *ldn;
+ struct mfi_system_pd *syspd = NULL;
+ uint16_t syspd_id;
+ uint16_t *mbox;
switch (cm->cm_frame->dcmd.opcode) {
case MFI_DCMD_LD_DELETE:
@@ -1854,10 +2632,161 @@ mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
case MFI_DCMD_CFG_FOREIGN_IMPORT:
mfi_ldprobe(sc);
break;
+ case MFI_DCMD_PD_STATE_SET:
+ mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
+ syspd_id = mbox[0];
+ if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
+ if (!TAILQ_EMPTY(&sc->mfi_syspd_tqh)) {
+ TAILQ_FOREACH(syspd,&sc->mfi_syspd_tqh,pd_link) {
+ if(syspd->pd_id == syspd_id)
+ break;
+ }
+ }
+ }
+ else
+ break;
+ /* If the transition fails then enable the syspd again */
+ if(syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
+ mfi_syspd_enable(syspd);
+ break;
+ }
+}
+
+static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
+{
+ struct mfi_config_data *conf_data=(struct mfi_config_data *)cm->cm_data;
+ struct mfi_command *ld_cm = NULL;
+ struct mfi_ld_info *ld_info = NULL;
+ int error = 0;
+
+ if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
+ (conf_data->ld[0].params.isSSCD == 1)){
+ error = 1;
+ }else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE){
+ error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
+ (void **)&ld_info, sizeof(*ld_info));
+ if (error){
+ device_printf(sc->mfi_dev,"Failed to allocate"
+ "MFI_DCMD_LD_GET_INFO %d", error);
+ if (ld_info)
+ free(ld_info, M_MFIBUF);
+ return 0;
+ }
+ ld_cm->cm_flags = MFI_CMD_DATAIN;
+ ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
+ ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
+ if (mfi_wait_command(sc, ld_cm) != 0){
+ device_printf(sc->mfi_dev,"failed to get log drv\n");
+ mfi_release_command(ld_cm);
+ free(ld_info, M_MFIBUF);
+ return 0;
+ }
+
+ if(ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK){
+ free(ld_info, M_MFIBUF);
+ mfi_release_command(ld_cm);
+ return 0;
+ }
+ else
+ ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
+
+ if (ld_info->ld_config.params.isSSCD == 1)
+ error = 1;
+
+ mfi_release_command(ld_cm);
+ free(ld_info, M_MFIBUF);
+
}
+ return error;
}
static int
+mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
+{
+ uint8_t i;
+ struct mfi_ioc_packet *ioc;
+ ioc = (struct mfi_ioc_packet *)arg;
+ int sge_size, error;
+ struct megasas_sge *kern_sge;
+
+ memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
+ kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
+ cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
+
+ if (sizeof(bus_addr_t) == 8) {
+ cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
+ cm->cm_extra_frames = 2;
+ sge_size = sizeof(struct mfi_sg64);
+ } else {
+ cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
+ sge_size = sizeof(struct mfi_sg32);
+ }
+
+ cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
+ for (i = 0; i < ioc->mfi_sge_count; i++)
+ {
+ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ ioc->mfi_sgl[i].iov_len, /* maxsize */
+ 2, /* nsegments */
+ ioc->mfi_sgl[i].iov_len, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mfi_kbuff_arr_dmat[i])) {
+ device_printf(sc->mfi_dev, "Cannot allocate mfi_kbuff_arr_dmat tag\n");
+ return (ENOMEM);
+ }
+
+ if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i], (void **)&sc->kbuff_arr[i],
+ BUS_DMA_NOWAIT, &sc->mfi_kbuff_arr_dmamap[i])) {
+ device_printf(sc->mfi_dev, "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
+ return (ENOMEM);
+ }
+
+ bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i], sc->mfi_kbuff_arr_dmamap[i],
+ sc->kbuff_arr[i], ioc->mfi_sgl[i].iov_len, mfi_addr_cb, &sc->mfi_kbuff_arr_busaddr[i], 0);
+
+ if (!sc->kbuff_arr[i]) {
+ device_printf(sc->mfi_dev,"Could not allocate memory for kbuff_arr"
+ " info\n");
+ return -1;
+ }
+ kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
+ kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
+
+ if (sizeof(bus_addr_t) == 8) {
+ cm->cm_frame->stp.sgl.sg64[i].addr = kern_sge[i].phys_addr;
+ cm->cm_frame->stp.sgl.sg64[i].len = ioc->mfi_sgl[i].iov_len;
+ } else {
+ cm->cm_frame->stp.sgl.sg32[i].addr = kern_sge[i].phys_addr;
+ cm->cm_frame->stp.sgl.sg32[i].len = ioc->mfi_sgl[i].iov_len;
+ }
+
+ error = copyin(ioc->mfi_sgl[i].iov_base,
+ sc->kbuff_arr[i],
+ ioc->mfi_sgl[i].iov_len);
+ if (error != 0)
+ {
+ device_printf(sc->mfi_dev,
+ "Copy in failed\n");
+ return error;
+ }
+ }
+
+ cm->cm_flags |=MFI_CMD_MAPPED;
+ return 0;
+}
+
+#ifdef __amd64__
+#define PTRIN(p) ((void *)(uintptr_t)(p))
+#else
+#define PTRIN(p) (p)
+#endif
+
+static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
struct mfi_command *cm;
@@ -1942,21 +2871,30 @@ mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td
#endif
struct mfi_ioc_aen *aen;
struct mfi_command *cm = NULL;
- uint32_t context;
+ uint32_t context = 0;
union mfi_sense_ptr sense_ptr;
- uint8_t *data = NULL, *temp, *addr;
+ uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
size_t len;
- int i;
+ int i, res;
struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
#ifdef __amd64__
struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
struct mfi_ioc_passthru iop_swab;
#endif
int error, locked;
-
+ union mfi_sgl *sgl;
sc = dev->si_drv1;
error = 0;
+ if (sc->adpreset)
+ return EBUSY;
+
+ if (sc->hw_crit_error)
+ return EBUSY;
+
+ if (sc->issuepend_done == 0)
+ return EBUSY;
+
switch (cmd) {
case MFIIO_STATS:
ms = (union mfi_statrequest *)arg;
@@ -2024,15 +2962,19 @@ mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td
* will clobber some data
*/
context = cm->cm_frame->header.context;
+ cm->cm_frame->header.context = cm->cm_index;
bcopy(ioc->mfi_frame.raw, cm->cm_frame,
- 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
+ 2 * MEGAMFI_FRAME_SIZE);
cm->cm_total_frame_size = (sizeof(union mfi_sgl)
* ioc->mfi_sge_count) + ioc->mfi_sgl_off;
+ cm->cm_frame->header.scsi_status = 0;
+ cm->cm_frame->header.pad0 = 0;
if (ioc->mfi_sge_count) {
cm->cm_sg =
(union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
}
+ sgl = cm->cm_sg;
cm->cm_flags = 0;
if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
cm->cm_flags |= MFI_CMD_DATAIN;
@@ -2072,31 +3014,37 @@ mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td
/* restore header context */
cm->cm_frame->header.context = context;
- temp = data;
- if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
- (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
- for (i = 0; i < ioc->mfi_sge_count; i++) {
+ if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
+ res = mfi_stp_cmd(sc,cm,arg);
+ if(res != 0)
+ goto out;
+ } else {
+ temp = data;
+ if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
+ (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
+ for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef __amd64__
- if (cmd == MFI_CMD) {
+ if (cmd == MFI_CMD) {
#endif
- /* Native */
- addr = ioc->mfi_sgl[i].iov_base;
- len = ioc->mfi_sgl[i].iov_len;
+ /* Native */
+ addr = ioc->mfi_sgl[i].iov_base;
+ len = ioc->mfi_sgl[i].iov_len;
#ifdef __amd64__
- } else {
- /* 32bit on 64bit */
- ioc32 = (struct mfi_ioc_packet32 *)ioc;
- addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
- len = ioc32->mfi_sgl[i].iov_len;
- }
+ } else {
+ /* 32bit on 64bit */
+ ioc32 = (struct mfi_ioc_packet32 *)ioc;
+ addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
+ len = ioc32->mfi_sgl[i].iov_len;
+ }
#endif
- error = copyin(addr, temp, len);
- if (error != 0) {
- device_printf(sc->mfi_dev,
- "Copy in failed\n");
- goto out;
+ error = copyin(addr, temp, len);
+ if (error != 0) {
+ device_printf(sc->mfi_dev,
+ "Copy in failed\n");
+ goto out;
+ }
+ temp = &temp[len];
}
- temp = &temp[len];
}
}
@@ -2104,52 +3052,61 @@ mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td
locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
+ #if defined(__amd64__)
+ cm->cm_frame->pass.sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
+			cm->cm_frame->pass.sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
+ #else
cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
cm->cm_frame->pass.sense_addr_hi = 0;
+ #endif
}
-
mtx_lock(&sc->mfi_io_lock);
- error = mfi_check_command_pre(sc, cm);
- if (error) {
- mtx_unlock(&sc->mfi_io_lock);
- goto out;
+		skip_pre_post = mfi_check_for_sscd(sc, cm);
+		if (!skip_pre_post) {
+ error = mfi_check_command_pre(sc, cm);
+ if (error) {
+ mtx_unlock(&sc->mfi_io_lock);
+ goto out;
+ }
}
-
if ((error = mfi_wait_command(sc, cm)) != 0) {
device_printf(sc->mfi_dev,
"Controller polled failed\n");
mtx_unlock(&sc->mfi_io_lock);
goto out;
}
-
- mfi_check_command_post(sc, cm);
+		if (!skip_pre_post) {
+ mfi_check_command_post(sc, cm);
+ }
mtx_unlock(&sc->mfi_io_lock);
- temp = data;
- if ((cm->cm_flags & MFI_CMD_DATAIN) ||
- (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
- for (i = 0; i < ioc->mfi_sge_count; i++) {
+ if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
+ temp = data;
+ if ((cm->cm_flags & MFI_CMD_DATAIN) ||
+ (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
+ for (i = 0; i < ioc->mfi_sge_count; i++) {
#ifdef __amd64__
- if (cmd == MFI_CMD) {
+ if (cmd == MFI_CMD) {
#endif
- /* Native */
- addr = ioc->mfi_sgl[i].iov_base;
- len = ioc->mfi_sgl[i].iov_len;
+ /* Native */
+ addr = ioc->mfi_sgl[i].iov_base;
+ len = ioc->mfi_sgl[i].iov_len;
#ifdef __amd64__
- } else {
- /* 32bit on 64bit */
- ioc32 = (struct mfi_ioc_packet32 *)ioc;
- addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
- len = ioc32->mfi_sgl[i].iov_len;
- }
+ } else {
+ /* 32bit on 64bit */
+ ioc32 = (struct mfi_ioc_packet32 *)ioc;
+ addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
+ len = ioc32->mfi_sgl[i].iov_len;
+ }
#endif
- error = copyout(temp, addr, len);
- if (error != 0) {
- device_printf(sc->mfi_dev,
- "Copy out failed\n");
- goto out;
+ error = copyout(temp, addr, len);
+ if (error != 0) {
+ device_printf(sc->mfi_dev,
+ "Copy out failed\n");
+ goto out;
+ }
+ temp = &temp[len];
}
- temp = &temp[len];
}
}
@@ -2180,6 +3137,21 @@ out:
mfi_config_unlock(sc, locked);
if (data)
free(data, M_MFIBUF);
+	if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
+		for (i = 0; i < 2; i++) {
+			if (sc->kbuff_arr[i]) {
+				if (sc->mfi_kbuff_arr_busaddr[i] != 0)
+					bus_dmamap_unload(
+					    sc->mfi_kbuff_arr_dmat[i],
+					    sc->mfi_kbuff_arr_dmamap[i]);
+				bus_dmamem_free(sc->mfi_kbuff_arr_dmat[i],
+				    sc->kbuff_arr[i],
+				    sc->mfi_kbuff_arr_dmamap[i]);
+				if (sc->mfi_kbuff_arr_dmat[i] != NULL)
+					bus_dma_tag_destroy(
+					    sc->mfi_kbuff_arr_dmat[i]);
+			}
+		}
+	}
if (cm) {
mtx_lock(&sc->mfi_io_lock);
mfi_release_command(cm);
@@ -2269,7 +3241,7 @@ mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct
struct mfi_command *cm = NULL;
struct mfi_aen *mfi_aen_entry;
union mfi_sense_ptr sense_ptr;
- uint32_t context;
+ uint32_t context = 0;
uint8_t *data = NULL, *temp;
int i;
int error, locked;
@@ -2304,6 +3276,8 @@ mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct
2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
cm->cm_total_frame_size = (sizeof(union mfi_sgl)
* l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
+ cm->cm_frame->header.scsi_status = 0;
+ cm->cm_frame->header.pad0 = 0;
if (l_ioc.lioc_sge_count)
cm->cm_sg =
(union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
@@ -2347,10 +3321,15 @@ mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct
locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
+ #if defined(__amd64__)
+ cm->cm_frame->pass.sense_addr_lo = (cm->cm_sense_busaddr & 0xFFFFFFFF);
+ cm->cm_frame->pass.sense_addr_hi = (cm->cm_sense_busaddr & 0xFFFFFFFF00000000) >> 32;
+ #else
cm->cm_frame->pass.sense_addr_lo = cm->cm_sense_busaddr;
cm->cm_frame->pass.sense_addr_hi = 0;
+ #endif
}
-
+
mtx_lock(&sc->mfi_io_lock);
error = mfi_check_command_pre(sc, cm);
if (error) {
@@ -2542,17 +3521,31 @@ mfi_timeout(void *data)
int timedout = 0;
deadline = time_uptime - MFI_CMD_TIMEOUT;
+	if (sc->adpreset == 0) {
+		if (!mfi_tbolt_reset(sc)) {
+			callout_reset(&sc->mfi_watchdog_callout,
+			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
+			return;
+		}
+	}
mtx_lock(&sc->mfi_io_lock);
TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
if (sc->mfi_aen_cm == cm)
continue;
if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
- device_printf(sc->mfi_dev,
- "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
- (int)(time_uptime - cm->cm_timestamp));
- MFI_PRINT_CMD(cm);
- MFI_VALIDATE_CMD(sc, cm);
- timedout++;
+			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
+				cm->cm_timestamp = time_uptime;
+			} else {
+				device_printf(sc->mfi_dev,
+				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
+				    cm, (int)(time_uptime - cm->cm_timestamp));
+				MFI_PRINT_CMD(cm);
+				MFI_VALIDATE_CMD(sc, cm);
+				timedout++;
+			}
}
}
diff --git a/sys/dev/mfi/mfi_cam.c b/sys/dev/mfi/mfi_cam.c
index c49daff..b9ed879 100644
--- a/sys/dev/mfi/mfi_cam.c
+++ b/sys/dev/mfi/mfi_cam.c
@@ -269,12 +269,18 @@ mfip_start(void *data)
struct mfip_softc *sc;
struct mfi_pass_frame *pt;
struct mfi_command *cm;
+ uint32_t context = 0;
sc = ccbh->ccb_mfip_ptr;
if ((cm = mfi_dequeue_free(sc->mfi_sc)) == NULL)
return (NULL);
+ /* Zero out the MFI frame */
+ context = cm->cm_frame->header.context;
+	bzero(cm->cm_frame, sizeof(union mfi_frame));
+ cm->cm_frame->header.context = context;
+
pt = &cm->cm_frame->pass;
pt->header.cmd = MFI_CMD_PD_SCSI_IO;
pt->header.cmd_status = 0;
diff --git a/sys/dev/mfi/mfi_disk.c b/sys/dev/mfi/mfi_disk.c
index 4594ca2..b9d44a3 100644
--- a/sys/dev/mfi/mfi_disk.c
+++ b/sys/dev/mfi/mfi_disk.c
@@ -223,7 +223,7 @@ mfi_disk_disable(struct mfi_disk *sc)
if (sc->ld_flags & MFI_DISK_FLAGS_OPEN) {
if (sc->ld_controller->mfi_delete_busy_volumes)
return (0);
- device_printf(sc->ld_dev, "Unable to delete busy device\n");
+ device_printf(sc->ld_dev, "Unable to delete busy ld device\n");
return (EBUSY);
}
sc->ld_flags |= MFI_DISK_FLAGS_DISABLED;
@@ -245,6 +245,7 @@ mfi_disk_strategy(struct bio *bio)
struct mfi_softc *controller;
sc = bio->bio_disk->d_drv1;
+ controller = sc->ld_controller;
if (sc == NULL) {
bio->bio_error = EINVAL;
@@ -254,8 +255,24 @@ mfi_disk_strategy(struct bio *bio)
return;
}
- controller = sc->ld_controller;
+	if (controller->adpreset || controller->hw_crit_error ||
+	    controller->issuepend_done == 0) {
+		bio->bio_error = EBUSY;
+		bio->bio_flags |= BIO_ERROR;
+		bio->bio_resid = bio->bio_bcount;
+		biodone(bio);
+		return;
+	}
+
bio->bio_driver1 = (void *)(uintptr_t)sc->ld_id;
+ /* Mark it as LD IO */
+ bio->bio_driver2 = (void *)MFI_LD_IO;
mtx_lock(&controller->mfi_io_lock);
mfi_enqueue_bio(controller, bio);
mfi_startio(controller);
diff --git a/sys/dev/mfi/mfi_ioctl.h b/sys/dev/mfi/mfi_ioctl.h
index 48e9c7f..cba44ed 100644
--- a/sys/dev/mfi/mfi_ioctl.h
+++ b/sys/dev/mfi/mfi_ioctl.h
@@ -28,6 +28,7 @@
__FBSDID("$FreeBSD$");
#include <dev/mfi/mfireg.h>
+#include <machine/bus.h>
#if defined(__amd64__) /* Assume amd64 wants 32 bit Linux */
struct iovec32 {
@@ -36,6 +37,12 @@ struct iovec32 {
};
#endif
+struct megasas_sge
+{
+ bus_addr_t phys_addr;
+ uint32_t length;
+};
+
#define MFIQ_FREE 0
#define MFIQ_BIO 1
#define MFIQ_READY 2
diff --git a/sys/dev/mfi/mfi_linux.c b/sys/dev/mfi/mfi_linux.c
index 12135ff..3328a66 100644
--- a/sys/dev/mfi/mfi_linux.c
+++ b/sys/dev/mfi/mfi_linux.c
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/module.h>
#include <sys/file.h>
#include <sys/proc.h>
+#include <machine/bus.h>
#if defined(__amd64__) /* Assume amd64 wants 32 bit Linux */
#include <machine/../linux32/linux.h>
diff --git a/sys/dev/mfi/mfi_pci.c b/sys/dev/mfi/mfi_pci.c
index 685aa0b..06a7eb0 100644
--- a/sys/dev/mfi/mfi_pci.c
+++ b/sys/dev/mfi/mfi_pci.c
@@ -115,14 +115,20 @@ struct mfi_ident {
int flags;
const char *desc;
} mfi_identifiers[] = {
+	{0x1000, 0x005B, 0xffff, 0xffff, MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT, "ThunderBolt"},
+	{0x1000, 0x005B, 0x8086, 0x9265, MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT, "Intel (R) RAID Controller RS25DB080"},
+	{0x1000, 0x005B, 0x8086, 0x9285, MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT, "Intel (R) RAID Controller RS25NB008"},
{0x1000, 0x0060, 0x1028, 0xffff, MFI_FLAGS_1078, "Dell PERC 6"},
{0x1000, 0x0060, 0xffff, 0xffff, MFI_FLAGS_1078, "LSI MegaSAS 1078"},
+ {0x1000, 0x0071, 0xffff, 0xffff, MFI_FLAGS_SKINNY, "Drake Skinny"},
+ {0x1000, 0x0073, 0xffff, 0xffff, MFI_FLAGS_SKINNY, "Drake Skinny"},
{0x1000, 0x0078, 0xffff, 0xffff, MFI_FLAGS_GEN2, "LSI MegaSAS Gen2"},
{0x1000, 0x0079, 0x1028, 0x1f15, MFI_FLAGS_GEN2, "Dell PERC H800 Adapter"},
{0x1000, 0x0079, 0x1028, 0x1f16, MFI_FLAGS_GEN2, "Dell PERC H700 Adapter"},
{0x1000, 0x0079, 0x1028, 0x1f17, MFI_FLAGS_GEN2, "Dell PERC H700 Integrated"},
{0x1000, 0x0079, 0x1028, 0x1f18, MFI_FLAGS_GEN2, "Dell PERC H700 Modular"},
{0x1000, 0x0079, 0x1028, 0x1f19, MFI_FLAGS_GEN2, "Dell PERC H700"},
+ {0x1000, 0x0079, 0x1028, 0x1f1a, MFI_FLAGS_GEN2, "Dell PERC H800 Proto Adapter"},
{0x1000, 0x0079, 0x1028, 0x1f1b, MFI_FLAGS_GEN2, "Dell PERC H800"},
{0x1000, 0x0079, 0x1028, 0xffff, MFI_FLAGS_GEN2, "Dell PERC Gen2"},
{0x1000, 0x0079, 0xffff, 0xffff, MFI_FLAGS_GEN2, "LSI MegaSAS Gen2"},
@@ -196,8 +202,11 @@ mfi_pci_attach(device_t dev)
(sc->mfi_flags & MFI_FLAGS_1078)) {
/* 1068/1078: Memory mapped BAR is at offset 0x10 */
sc->mfi_regs_rid = PCIR_BAR(0);
- } else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
- /* GEN2: Memory mapped BAR is at offset 0x14 */
+	} else if ((sc->mfi_flags & MFI_FLAGS_GEN2) ||
+	    (sc->mfi_flags & MFI_FLAGS_SKINNY) ||
+	    (sc->mfi_flags & MFI_FLAGS_TBOLT)) {
+		/* Gen2/Skinny/ThunderBolt: Memory mapped BAR is at offset 0x14 */
sc->mfi_regs_rid = PCIR_BAR(1);
}
if ((sc->mfi_regs_resource = bus_alloc_resource_any(sc->mfi_dev,
@@ -240,8 +249,10 @@ static int
mfi_pci_detach(device_t dev)
{
struct mfi_softc *sc;
- struct mfi_disk *ld;
- int error;
+ //struct mfi_disk *ld;
+ //struct mfi_system_pd *syspd = NULL;
+ int error, devcount, i;
+ device_t *devlist;
sc = device_get_softc(dev);
@@ -254,14 +265,41 @@ mfi_pci_detach(device_t dev)
}
sc->mfi_detaching = 1;
mtx_unlock(&sc->mfi_io_lock);
-
- while ((ld = TAILQ_FIRST(&sc->mfi_ld_tqh)) != NULL) {
+	/*
+	 * Delete all child devices explicitly to avoid kernel corruption
+	 * when the driver is unloaded.
+	 */
+	if ((error = device_get_children(sc->mfi_dev, &devlist, &devcount)) != 0) {
+		sx_xunlock(&sc->mfi_config_lock);
+		return error;
+	}
+	for (i = 0; i < devcount; i++)
+		device_delete_child(sc->mfi_dev, devlist[i]);
+	free(devlist, M_TEMP);
+	/*
+	 * The per-volume cleanup below was not reached reliably across
+	 * repeated driver load/unload cycles, leaving stale device entries
+	 * that eventually caused a crash; the explicit child deletion above
+	 * replaces it.
+	 */
+
+ /*while ((ld = TAILQ_FIRST(&sc->mfi_ld_tqh)) != NULL) {
if ((error = device_delete_child(dev, ld->ld_dev)) != 0) {
sc->mfi_detaching = 0;
sx_xunlock(&sc->mfi_config_lock);
return (error);
}
}
+
+ if(!TAILQ_EMPTY(&sc->mfi_syspd_tqh))
+ while ((syspd = TAILQ_FIRST(&sc->mfi_syspd_tqh)) != NULL) {
+ if ((error = device_delete_child(dev,syspd->pd_dev)) != 0) {
+ sc->mfi_detaching = 0;
+ sx_xunlock(&sc->mfi_config_lock);
+ return (error);
+ }
+ }*/
sx_xunlock(&sc->mfi_config_lock);
EVENTHANDLER_DEREGISTER(shutdown_final, sc->mfi_eh);
diff --git a/sys/dev/mfi/mfi_syspd.c b/sys/dev/mfi/mfi_syspd.c
new file mode 100644
index 0000000..b7343c5
--- /dev/null
+++ b/sys/dev/mfi/mfi_syspd.c
@@ -0,0 +1,294 @@
+/*-
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Copyright 1994-2009 The FreeBSD Project.
+ * All rights reserved.
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/dev/mfi/mfi_pddisk.c,v 1.2.2.6 2007/08/24 17:29:18 jhb Exp $");
+
+#include "opt_mfi.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/uio.h>
+
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+#include <geom/geom_disk.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/md_var.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+
+#include <dev/mfi/mfireg.h>
+#include <dev/mfi/mfi_ioctl.h>
+#include <dev/mfi/mfivar.h>
+
+static int mfi_syspd_probe(device_t dev);
+static int mfi_syspd_attach(device_t dev);
+static int mfi_syspd_detach(device_t dev);
+
+static disk_open_t mfi_syspd_open;
+static disk_close_t mfi_syspd_close;
+static disk_strategy_t mfi_syspd_strategy;
+static dumper_t mfi_syspd_dump;
+
+static devclass_t mfi_syspd_devclass;
+
+static device_method_t mfi_syspd_methods[] = {
+ DEVMETHOD(device_probe, mfi_syspd_probe),
+ DEVMETHOD(device_attach, mfi_syspd_attach),
+ DEVMETHOD(device_detach, mfi_syspd_detach),
+ { 0, 0 }
+};
+
+static driver_t mfi_syspd_driver = {
+ "mfisyspd",
+ mfi_syspd_methods,
+ sizeof(struct mfi_system_pd)
+};
+
+DRIVER_MODULE(mfisyspd, mfi, mfi_syspd_driver, mfi_syspd_devclass, 0, 0);
+
+static int
+mfi_syspd_probe(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+mfi_syspd_attach(device_t dev)
+{
+ struct mfi_system_pd *sc;
+ struct mfi_pd_info *pd_info;
+ uint64_t sectors;
+ uint32_t secsize;
+
+ sc = device_get_softc(dev);
+ pd_info = device_get_ivars(dev);
+
+ sc->pd_dev = dev;
+ sc->pd_id = pd_info->ref.v.device_id;
+ sc->pd_unit = device_get_unit(dev);
+ sc->pd_info = pd_info;
+ sc->pd_controller = device_get_softc(device_get_parent(dev));
+ sc->pd_flags = 0;
+
+ sectors = pd_info->raw_size;
+ secsize = MFI_SECTOR_LEN;
+ mtx_lock(&sc->pd_controller->mfi_io_lock);
+ TAILQ_INSERT_TAIL(&sc->pd_controller->mfi_syspd_tqh, sc, pd_link);
+ mtx_unlock(&sc->pd_controller->mfi_io_lock);
+ device_printf(dev, "%juMB (%ju sectors) SYSPD volume\n",
+ sectors / (1024 * 1024 / secsize), sectors);
+ sc->pd_disk = disk_alloc();
+ sc->pd_disk->d_drv1 = sc;
+ sc->pd_disk->d_maxsize = sc->pd_controller->mfi_max_io * secsize;
+ sc->pd_disk->d_name = "mfisyspd";
+ sc->pd_disk->d_open = mfi_syspd_open;
+ sc->pd_disk->d_close = mfi_syspd_close;
+ sc->pd_disk->d_strategy = mfi_syspd_strategy;
+ sc->pd_disk->d_dump = mfi_syspd_dump;
+ sc->pd_disk->d_unit = sc->pd_unit;
+ sc->pd_disk->d_sectorsize = secsize;
+ sc->pd_disk->d_mediasize = sectors * secsize;
+ if (sc->pd_disk->d_mediasize >= (1 * 1024 * 1024)) {
+ sc->pd_disk->d_fwheads = 255;
+ sc->pd_disk->d_fwsectors = 63;
+ } else {
+ sc->pd_disk->d_fwheads = 64;
+ sc->pd_disk->d_fwsectors = 32;
+ }
+ disk_create(sc->pd_disk, DISK_VERSION);
+
+	device_printf(dev, "SYSPD volume attached\n");
+ return (0);
+}
+
+static int
+mfi_syspd_detach(device_t dev)
+{
+ struct mfi_system_pd *sc;
+
+ sc = device_get_softc(dev);
+ device_printf(dev, "Detaching syspd\n");
+ mtx_lock(&sc->pd_controller->mfi_io_lock);
+ if (((sc->pd_disk->d_flags & DISKFLAG_OPEN) ||
+ (sc->pd_flags & MFI_DISK_FLAGS_OPEN)) &&
+ (sc->pd_controller->mfi_keep_deleted_volumes ||
+ sc->pd_controller->mfi_detaching)) {
+ mtx_unlock(&sc->pd_controller->mfi_io_lock);
+		device_printf(dev, "Can't detach syspd\n");
+ return (EBUSY);
+ }
+ mtx_unlock(&sc->pd_controller->mfi_io_lock);
+
+ disk_destroy(sc->pd_disk);
+ mtx_lock(&sc->pd_controller->mfi_io_lock);
+ TAILQ_REMOVE(&sc->pd_controller->mfi_syspd_tqh, sc, pd_link);
+ mtx_unlock(&sc->pd_controller->mfi_io_lock);
+ free(sc->pd_info, M_MFIBUF);
+ return (0);
+}
+
+static int
+mfi_syspd_open(struct disk *dp)
+{
+ struct mfi_system_pd *sc;
+ int error;
+
+ sc = dp->d_drv1;
+ mtx_lock(&sc->pd_controller->mfi_io_lock);
+ if (sc->pd_flags & MFI_DISK_FLAGS_DISABLED)
+ error = ENXIO;
+ else {
+ sc->pd_flags |= MFI_DISK_FLAGS_OPEN;
+ error = 0;
+ }
+ mtx_unlock(&sc->pd_controller->mfi_io_lock);
+ return (error);
+}
+
+static int
+mfi_syspd_close(struct disk *dp)
+{
+ struct mfi_system_pd *sc;
+
+ sc = dp->d_drv1;
+ mtx_lock(&sc->pd_controller->mfi_io_lock);
+ sc->pd_flags &= ~MFI_DISK_FLAGS_OPEN;
+ mtx_unlock(&sc->pd_controller->mfi_io_lock);
+
+ return (0);
+}
+
+int
+mfi_syspd_disable(struct mfi_system_pd *sc)
+{
+
+	device_printf(sc->pd_dev, "syspd disable\n");
+ mtx_assert(&sc->pd_controller->mfi_io_lock, MA_OWNED);
+ if (sc->pd_flags & MFI_DISK_FLAGS_OPEN) {
+ if (sc->pd_controller->mfi_delete_busy_volumes)
+ return (0);
+ device_printf(sc->pd_dev, "Unable to delete busy syspd device\n");
+ return (EBUSY);
+ }
+ sc->pd_flags |= MFI_DISK_FLAGS_DISABLED;
+ return (0);
+}
+
+void
+mfi_syspd_enable(struct mfi_system_pd *sc)
+{
+
+	device_printf(sc->pd_dev, "syspd enable\n");
+ mtx_assert(&sc->pd_controller->mfi_io_lock, MA_OWNED);
+ sc->pd_flags &= ~MFI_DISK_FLAGS_DISABLED;
+}
+
+static void
+mfi_syspd_strategy(struct bio *bio)
+{
+ struct mfi_system_pd *sc;
+ struct mfi_softc *controller;
+
+ sc = bio->bio_disk->d_drv1;
+
+ if (sc == NULL) {
+ bio->bio_error = EINVAL;
+ bio->bio_flags |= BIO_ERROR;
+ bio->bio_resid = bio->bio_bcount;
+ biodone(bio);
+ return;
+ }
+
+ controller = sc->pd_controller;
+ bio->bio_driver1 = (void *)(uintptr_t)sc->pd_id;
+ /* Mark it as system PD IO */
+ bio->bio_driver2 = (void *)MFI_SYS_PD_IO;
+ mtx_lock(&controller->mfi_io_lock);
+ mfi_enqueue_bio(controller, bio);
+ mfi_startio(controller);
+ mtx_unlock(&controller->mfi_io_lock);
+ return;
+}
+
+#if 0
+void
+mfi_disk_complete(struct bio *bio)
+{
+ struct mfi_system_pd *sc;
+ struct mfi_frame_header *hdr;
+
+ sc = bio->bio_disk->d_drv1;
+ hdr = bio->bio_driver1;
+
+ if (bio->bio_flags & BIO_ERROR) {
+ if (bio->bio_error == 0)
+ bio->bio_error = EIO;
+ disk_err(bio, "hard error", -1, 1);
+ } else {
+ bio->bio_resid = 0;
+ }
+ biodone(bio);
+}
+#endif
+static int
+mfi_syspd_dump(void *arg, void *virt, vm_offset_t phys, off_t offset, size_t len)
+{
+
+ struct mfi_system_pd *sc;
+ struct mfi_softc *parent_sc;
+ struct disk *dp;
+ int error;
+
+ dp = arg;
+ sc = dp->d_drv1;
+ parent_sc = sc->pd_controller;
+
+ if (len > 0) {
+ if ((error = mfi_dump_syspd_blocks(parent_sc, sc->pd_id, offset /
+ MFI_SECTOR_LEN, virt, len)) != 0)
+ return (error);
+ } else {
+ /* mfi_sync_cache(parent_sc, sc->ld_id); */
+ }
+ return (0);
+}
diff --git a/sys/dev/mfi/mfi_tbolt.c b/sys/dev/mfi/mfi_tbolt.c
new file mode 100644
index 0000000..7ccac3a
--- /dev/null
+++ b/sys/dev/mfi/mfi_tbolt.c
@@ -0,0 +1,1410 @@
+
+ /*-
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Copyright 1994-2009 The FreeBSD Project.
+ * All rights reserved.
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/dev/mfi/mfi_tbolt.c,v 1.00 2010/06/30 16:00:00 Bharat Gusain Exp $");
+
+#include "opt_mfi.h"
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/bio.h>
+#include <sys/ioccom.h>
+#include <sys/eventhandler.h>
+#include <sys/callout.h>
+#include <sys/uio.h>
+#include <machine/bus.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <dev/mfi/mfireg.h>
+#include <dev/mfi/mfi_ioctl.h>
+#include <dev/mfi/mfivar.h>
+
+struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc);
+union mfi_mpi2_request_descriptor *
+mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
+void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
+int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
+ struct mfi_cmd_tbolt *cmd);
+static inline void mfi_tbolt_return_cmd(struct mfi_softc *sc,
+ struct mfi_cmd_tbolt *cmd);
+union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
+ *sc, struct mfi_command *cmd);
+uint8_t
+mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
+union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
+ *sc, struct mfi_command *mfi_cmd);
+int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd);
+void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
+ struct mfi_cmd_tbolt *cmd);
+static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
+ *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
+static int mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command
+ *mfi_cmd, uint8_t *cdb);
+void
+map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
+ uint8_t ext_status);
+static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
+static void mfi_kill_hba (struct mfi_softc *sc);
+static void mfi_process_fw_state_chg_isr(void *arg);
+uint8_t mfi_tbolt_get_map_info(struct mfi_softc *sc);
+
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008)
+
+void
+mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
+{
+ //MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
+ MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
+ MFI_READ4(sc, MFI_OMSK);
+}
+
+void
+mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
+{
+ MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
+ MFI_READ4(sc, MFI_OMSK);
+}
+
+int32_t
+mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
+{
+ return MFI_READ4(sc, MFI_OSP0);
+}
+
+int32_t
+mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
+{
+ int32_t status, mfi_status = 0;
+
+ status = MFI_READ4(sc, MFI_OSTS);
+
+	if (status & 1) {
+		MFI_WRITE4(sc, MFI_OSTS, status);
+		MFI_READ4(sc, MFI_OSTS);
+		if (status & MFI_STATE_CHANGE_INTERRUPT)
+			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
+		return mfi_status;
+	}
+	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+		return 1;
+
+ MFI_READ4(sc, MFI_OSTS);
+ return 0;
+}
+
+
+void
+mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, uintptr_t bus_add,
+ uint32_t frame_cnt)
+{
+ bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
+ << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ MFI_WRITE4(sc, MFI_IQPL, bus_add);
+ MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
+}
+
+/**
+ * mfi_tbolt_adp_reset - Reset the controller
+ * @sc: adapter softc (register set)
+ */
+int mfi_tbolt_adp_reset(struct mfi_softc *sc)
+{
+ int retry = 0, i = 0;
+ int HostDiag;
+
+ MFI_WRITE4(sc, MFI_WSR, 0xF);
+ MFI_WRITE4(sc, MFI_WSR, 4);
+ MFI_WRITE4(sc, MFI_WSR, 0xB);
+ MFI_WRITE4(sc, MFI_WSR, 2);
+ MFI_WRITE4(sc, MFI_WSR, 7);
+ MFI_WRITE4(sc, MFI_WSR, 0xD);
+
+	for (i = 0; i < 10000; i++)
+		;
+
+ HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
+
+	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
+		for (i = 0; i < 1000; i++)
+			;
+		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
+		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
+		    "hostdiag=%x\n", retry, HostDiag);
+
+ if (retry++ >= 100)
+ return 1;
+
+ }
+
+	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%x\n", HostDiag);
+
+ MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
+
+	/* Spin briefly while the adapter starts the reset. */
+	for (i = 0; i < 100000; i++)
+		;
+
+ HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
+	while (HostDiag & DIAG_RESET_ADAPTER) {
+		for (i = 0; i < 1000; i++)
+			;
+		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
+		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
+ "hostdiag=%x\n", retry, HostDiag);
+
+ if (retry++ >= 1000)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ ****************************************************************************
+ * Description:
+ *	This routine initializes Thunderbolt-specific device information.
+ ****************************************************************************
+ */
+void mfi_tbolt_init_globals(struct mfi_softc *sc)
+{
+ /* Initialize single reply size and Message size */
+ sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
+ sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
+
+ /*
+	 * Calculate how many SGEs are allowed in an allocated main message:
+	 * (size of the message - RAID SCSI IO message size (except SGE))
+	 *     / size of SGE
+ * (0x100 - (0x90 - 0x10)) / 0x10 = 8
+ */
+ sc->max_SGEs_in_main_message =
+ (uint8_t)((sc->raid_io_msg_size
+ - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
+ - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
+ /*
+	 * (Command frame size allocated in SRB ext - RAID SCSI IO message
+	 * size) / size of SGL;
+ * (1280 - 256) / 16 = 64
+ */
+ sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
+ - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
+ /*
+	 * (0x08 - 1) + 0x40 = 0x47 - 0x01 = 0x46; one SGE is left for
+	 * command coalescing.
+ */
+ sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
+ + sc->max_SGEs_in_chain_message - 1;
+ /*
+ * This is the offset in number of 4 * 32bit words to the next chain
+ * (0x100 - 0x10)/0x10 = 0xF(15)
+ */
+ sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
+ - sizeof(MPI2_SGE_IO_UNION))/16;
+ sc->chain_offset_value_for_mpt_ptmsg
+ = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
+ sc->mfi_cmd_pool_tbolt = NULL;
+ sc->request_desc_pool = NULL;
+
+
+}
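+/*
+ * Worked example (illustrative only, not used by the driver): plugging the
+ * Thunderbolt sizes assumed in the comments above -- a 0x100-byte message, a
+ * 0x90-byte RAID SCSI IO frame whose trailing 0x10 bytes are the first SGE,
+ * a 1280-byte command region and 16-byte IEEE SGEs -- into the same formulas
+ * reproduces the figures quoted in mfi_tbolt_init_globals().
+ */
+#if 0	/* sketch only, never compiled */
+static void
+mfi_tbolt_sge_math_example(void)
+{
+	const uint32_t msg_size = 0x100;	/* raid_io_msg_size */
+	const uint32_t io_frame = 0x90;		/* RAID SCSI IO message */
+	const uint32_t sge_size = 0x10;		/* sizeof(MPI2_SGE_IO_UNION) */
+	uint32_t in_main, in_chain, max_sge, chain_off;
+
+	in_main = (msg_size - (io_frame - sge_size)) / sge_size;	/* 8 */
+	in_chain = (1280 - msg_size) / sge_size;			/* 64 */
+	max_sge = (in_main - 1) + in_chain - 1;				/* 70 */
+	chain_off = (msg_size - sge_size) / 16;				/* 15 */
+	printf("main %u chain %u max %u chain_offset %u\n",
+	    in_main, in_chain, max_sge, chain_off);
+}
+#endif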
+
+/*
+ ****************************************************************************
+ * Description:
+ * This function calculates the memory requirement for Thunderbolt
+ * controller
+ * Return Value:
+ * Total required memory in bytes
+ ****************************************************************************
+ */
+
+uint32_t mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
+{
+ uint32_t size;
+	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for alignment */
+	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
+	size += sc->reply_size * sc->mfi_max_fw_cmds;
+	/* this is for the SGL frames */
+ size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
+ return size;
+}
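+/*
+ * Example (illustrative, hypothetical numbers): with reply_size = 128,
+ * raid_io_msg_size = 256, MEGASAS_MAX_SZ_CHAIN_FRAME = 1024, a 256-byte
+ * alignment pad and mfi_max_fw_cmds = 1008, the sum above is
+ * 256 + 256 * 1009 + 128 * 1008 + 1024 * 1008 = 1419776 bytes, i.e. the
+ * controller needs roughly 1.4 MB of contiguous DMA memory.
+ */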
+
+/*
+ ****************************************************************************
+ * Description:
+ *	This function prepares the message pools for the Thunderbolt controller.
+ * Arguments:
+ *	sc - adapter softc
+ *	mem_location - start of the memory allocated for Thunderbolt
+ *	tbolt_contg_length - length of that contiguous allocation
+ * Return Value:
+ *	0 if successful
+ ****************************************************************************
+ */
+int mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
+ uint32_t tbolt_contg_length)
+{
+ uint32_t offset = 0;
+ uint8_t *addr = mem_location;
+
+ /* Request Descriptor Base physical Address */
+
+	/* For Request Descriptors Virtual Memory */
+ /* Initialise the aligned IO Frames Virtual Memory Pointer */
+ if (((uintptr_t)addr) & (0xFF)) {
+ addr = &addr[sc->raid_io_msg_size];
+ addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
+ sc->request_message_pool_align = addr;
+ } else
+ sc->request_message_pool_align = addr;
+
+ offset = sc->request_message_pool_align - sc->request_message_pool;
+ sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;
+
+ /* DJA XXX should this be bus dma ??? */
+ /* Skip request message pool */
+ addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
+ /* Reply Frame Pool is initialized */
+ sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
+ if (((uintptr_t)addr) & (0xFF)) {
+ addr = &addr[sc->reply_size];
+ addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
+ }
+ sc->reply_frame_pool_align
+ = (struct mfi_mpi2_reply_header *)addr;
+
+ offset = (uintptr_t)sc->reply_frame_pool_align
+ - (uintptr_t)sc->request_message_pool;
+ sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;
+
+ /* Skip Reply Frame Pool */
+ addr += sc->reply_size * sc->mfi_max_fw_cmds;
+ sc->reply_pool_limit = addr;
+
+ /* initializing reply address to 0xFFFFFFFF */
+ memset((uint8_t *)sc->reply_frame_pool, 0xFF,
+ (sc->reply_size * sc->mfi_max_fw_cmds));
+
+ offset = sc->reply_size * sc->mfi_max_fw_cmds;
+ sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
+ /* initialize the last_reply_idx to 0 */
+ sc->last_reply_idx = 0;
+ offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
+ sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
+	if (offset > tbolt_contg_length)
+		device_printf(sc->mfi_dev, "Error: Initialized more than "
+ "allocated\n");
+ return 0;
+}
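+/*
+ * The 256-byte alignment idiom used above -- step forward by one message and
+ * mask the low bits off -- can be sketched in isolation as follows
+ * (illustrative helper, not part of the driver; "step" must be >= 0x100):
+ */
+#if 0	/* sketch only, never compiled */
+static uint8_t *
+mfi_tbolt_align_256_example(uint8_t *addr, uint32_t step)
+{
+	if (((uintptr_t)addr & 0xFF) == 0)
+		return (addr);			/* already aligned */
+	addr += step;				/* advance past the boundary */
+	return ((uint8_t *)((uintptr_t)addr & ~(uintptr_t)0xFF));
+}
+#endif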
+
+/*
+ ****************************************************************************
+ * Description:
+ *	This routine prepares and issues the INIT2 frame to the firmware.
+ ****************************************************************************
+ */
+
+int
+mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
+{
+ struct MPI2_IOC_INIT_REQUEST *mpi2IocInit;
+ struct mfi_init_frame *mfi_init;
+ uintptr_t offset = 0;
+ uintptr_t phyAddress;
+ MFI_ADDRESS *mfiAddressTemp;
+ struct mfi_command *cm;
+ int error;
+
+ mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
+ /* Check if initialization is already completed */
+	if (sc->MFA_enabled)
+		return 1;
+
+ mtx_lock(&sc->mfi_io_lock);
+ if ((cm = mfi_dequeue_free(sc)) == NULL) {
+ mtx_unlock(&sc->mfi_io_lock);
+ return (EBUSY);
+ }
+ cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
+ cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
+ cm->cm_dmamap = sc->mfi_tb_init_dmamap;
+ cm->cm_frame->header.context = 0;
+ cm->cm_sc = sc;
+ cm->cm_index = 0;
+
+ /*
+ * Abuse the SG list area of the frame to hold the init_qinfo
+ * object;
+ */
+ mfi_init = &cm->cm_frame->init;
+
+ bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
+ mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT;
+ mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+
+ /* set MsgVersion and HeaderVersion host driver was built with */
+ mpi2IocInit->MsgVersion = MPI2_VERSION;
+ mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
+ mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
+ mpi2IocInit->ReplyDescriptorPostQueueDepth
+ = (uint16_t)sc->mfi_max_fw_cmds;
+ mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
+
+ /* Get physical address of reply frame pool */
+ offset = (uintptr_t) sc->reply_frame_pool_align
+ - (uintptr_t)sc->request_message_pool;
+ phyAddress = sc->mfi_tb_busaddr + offset;
+ mfiAddressTemp =
+ (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
+#if defined(__amd64__)
+ mfiAddressTemp->u.addressLow = (phyAddress & 0xFFFFFFFF);
+ mfiAddressTemp->u.addressHigh = (phyAddress & 0xFFFFFFFF00000000) >> 32;
+#else
+ mfiAddressTemp->u.addressLow = phyAddress & 0xFFFFFFFF;
+ mfiAddressTemp->u.addressHigh = 0;
+#endif
+
+ /* Get physical address of request message pool */
+ offset = sc->request_message_pool_align - sc->request_message_pool;
+ phyAddress = sc->mfi_tb_busaddr + offset;
+ mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
+#if defined(__amd64__)
+ mfiAddressTemp->u.addressLow = (phyAddress & 0xFFFFFFFF);
+ mfiAddressTemp->u.addressHigh = (phyAddress & 0xFFFFFFFF00000000) >> 32;
+#else
+ mfiAddressTemp->u.addressLow = phyAddress & 0xFFFFFFFF;
+ mfiAddressTemp->u.addressHigh = 0; /* High Part */
+#endif
+	mpi2IocInit->ReplyFreeQueueAddress = 0;	/* Not supported by MR. */
+ mpi2IocInit->TimeStamp = time_uptime;
+
+ if (sc->verbuf) {
+ snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
+ MEGASAS_VERSION);
+#if defined(__amd64__)
+ mfi_init->driver_ver_lo = (sc->verbuf_h_busaddr & 0xFFFFFFFF);
+ mfi_init->driver_ver_hi = (sc->verbuf_h_busaddr & 0xFFFFFFFF00000000) >> 32;
+#else
+ mfi_init->driver_ver_lo = sc->verbuf_h_busaddr;
+ mfi_init->driver_ver_hi = 0;
+#endif
+ }
+ /* Get the physical address of the mpi2 ioc init command */
+ phyAddress = sc->mfi_tb_ioc_init_busaddr;
+#if defined(__amd64__)
+ mfi_init->qinfo_new_addr_lo = (phyAddress & 0xFFFFFFFF);
+ mfi_init->qinfo_new_addr_hi = (phyAddress & 0xFFFFFFFF00000000) >> 32;
+#else
+ mfi_init->qinfo_new_addr_lo = phyAddress & 0xFFFFFFFF;
+ mfi_init->qinfo_new_addr_hi = 0;
+#endif
+ mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ mfi_init->header.cmd = MFI_CMD_INIT;
+ mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
+ mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;
+
+ cm->cm_data = NULL;
+ cm->cm_flags |= MFI_CMD_POLLED;
+ cm->cm_timestamp = time_uptime;
+ if ((error = mfi_mapcmd(sc, cm)) != 0) {
+ device_printf(sc->mfi_dev, "failed to send IOC init2 "
+ "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
+ mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
+ return (error);
+ }
+ mfi_release_command(cm);
+ mtx_unlock(&sc->mfi_io_lock);
+
+	if (mfi_init->header.cmd_status == 0) {
+		sc->MFA_enabled = 1;
+	} else {
+		device_printf(sc->mfi_dev, "Init command Failed %x\n",
+		    mfi_init->header.cmd_status);
+		return 1;
+	}
+
+ return 0;
+
+}
+
+int mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
+{
+ struct mfi_cmd_tbolt *cmd;
+ uint32_t io_req_base_phys, offset = 0;
+ uint8_t *io_req_base;
+ uint16_t i = 0, j = 0;
+
+ /*
+ * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ sc->request_desc_pool = malloc(sizeof(
+ union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
+ M_MFIBUF, M_NOWAIT|M_ZERO);
+ sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
+ * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
+
+	if (!sc->request_desc_pool || !sc->mfi_cmd_pool_tbolt) {
+		device_printf(sc->mfi_dev, "out of memory. Could not alloc "
+		    "memory for the tbolt command pools\n");
+		return 1;
+	}
+
+ for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
+ sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
+ struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
+
+ if (!sc->mfi_cmd_pool_tbolt[i]) {
+ device_printf(sc->mfi_dev, "Could not alloc cmd list "
+ "fusion\n");
+
+ for (j = 0; j < i; j++)
+ free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
+
+			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
+			sc->mfi_cmd_pool_tbolt = NULL;
+			return 1;
+		}
+ }
+
+ /*
+	 * The first 256 bytes (SMID 0) are not used; don't add them to the
+	 * command list.
+ */
+ io_req_base = sc->request_message_pool_align
+ + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
+ io_req_base_phys = sc->request_msg_busaddr
+ + MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
+
+ /*
+ * Add all the commands to command pool (instance->cmd_pool)
+ */
+ /* SMID 0 is reserved. Set SMID/index from 1 */
+
+ for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
+ cmd = sc->mfi_cmd_pool_tbolt[i];
+ offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
+ cmd->index = i + 1;
+ cmd->request_desc = (union mfi_mpi2_request_descriptor *)
+ (sc->request_desc_pool + i);
+ cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
+ (io_req_base + offset);
+ cmd->io_request_phys_addr = io_req_base_phys + offset;
+ cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
+ + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
+ cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
+ * MEGASAS_MAX_SZ_CHAIN_FRAME;
+
+ TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
+ }
+ return 0;
+}
+
+int mfi_tbolt_reset(struct mfi_softc *sc)
+{
+ uint32_t fw_state;
+
+ mtx_lock(&sc->mfi_io_lock);
+	if (atomic_read(&sc->fw_reset_no_pci_access)) {
+		device_printf(sc->mfi_dev, "NO PCI ACCESS\n");
+		mtx_unlock(&sc->mfi_io_lock);
+		return 1;
+	}
+
+	if (sc->hw_crit_error) {
+		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
+		mtx_unlock(&sc->mfi_io_lock);
+		return 1;
+	}
+
+	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
+		fw_state = sc->mfi_read_fw_status(sc);
+		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT) {
+			if ((sc->disableOnlineCtrlReset == 0) &&
+			    (sc->adpreset == 0)) {
+				device_printf(sc->mfi_dev, "Adapter RESET "
+				    "condition is detected\n");
+ sc->adpreset = 1;
+ sc->issuepend_done = 0;
+ sc->MFA_enabled = 0;
+ sc->last_reply_idx = 0;
+ mfi_process_fw_state_chg_isr((void *) sc);
+ }
+ mtx_unlock(&sc->mfi_io_lock);
+ return 0;
+ }
+ }
+ mtx_unlock(&sc->mfi_io_lock);
+ return 1;
+}
+
+/*
+ * mfi_intr_tbolt - isr entry point
+ */
+void mfi_intr_tbolt(void *arg)
+{
+ struct mfi_softc *sc = (struct mfi_softc *)arg;
+
+	if (sc->mfi_check_clear_intr(sc) == 1)
+		return;
+	if (sc->shutdown_issued)
+		return;
+	mtx_lock(&sc->mfi_io_lock);
+	mfi_tbolt_complete_cmd(sc);
+	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
+		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
+	mfi_startio(sc);
+	mtx_unlock(&sc->mfi_io_lock);
+}
+
+/**
+ * map_tbolt_cmd_status - Maps FW cmd status to OS cmd status
+ * @cmd : Pointer to cmd
+ * @status : status of cmd returned by FW
+ * @ext_status : ext status of cmd returned by FW
+ */
+
+void
+map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
+ uint8_t ext_status)
+{
+
+ switch (status) {
+
+ case MFI_STAT_OK:
+ mfi_cmd->cm_frame->header.cmd_status = 0;
+ mfi_cmd->cm_frame->dcmd.header.cmd_status = 0;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ mfi_cmd->cm_frame->header.cmd_status = status;
+ mfi_cmd->cm_frame->header.scsi_status = ext_status;
+ mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
+ mfi_cmd->cm_frame->dcmd.header.scsi_status
+ = ext_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+ mfi_cmd->cm_frame->header.cmd_status = ext_status;
+ mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ mfi_cmd->cm_frame->header.cmd_status = status;
+ mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
+ break;
+
+ default:
+ mfi_cmd->cm_frame->header.cmd_status = status;
+ mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
+ break;
+ }
+}
+
+
+
+void mfi_tbolt_complete_cmd(struct mfi_softc *sc)
+{
+ struct mfi_mpi2_reply_header *desc, *reply_desc;
+ struct mfi_command *cmd_mfi; /* For MFA Cmds */
+ struct mfi_cmd_tbolt *cmd_tbolt;
+ uint16_t smid;
+ uint8_t reply_descript_type;
+ struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
+ uint32_t status, extStatus;
+ uint16_t num_completed;
+ union desc_value val;
+
+ desc = (struct mfi_mpi2_reply_header *)
+ ((uintptr_t)sc->reply_frame_pool_align
+ + sc->last_reply_idx * sc->reply_size);
+ reply_desc = desc;
+
+ if (!reply_desc)
+ device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
+
+ reply_descript_type = reply_desc->ReplyFlags
+ & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ return;
+
+ num_completed = 0;
+ val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
+
+ /* Read Reply descriptor */
+ while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
+
+ smid = reply_desc->SMID;
+		if (!smid || smid > sc->mfi_max_fw_cmds) {
+			device_printf(sc->mfi_dev, "smid is %x. Cannot "
+			    "proceed. Returning\n", smid);
+ return;
+ }
+
+ cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
+ cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
+ scsi_io_req = cmd_tbolt->io_request;
+
+ /* Check if internal commands */
+ status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
+ extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
+
+ /*
+ switch (scsi_io_req->Function)
+ {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST :
+ printf("HELLO MPI2_FUNCTION_SCSI_IO_REQUEST\n");
+ break;
+ case MPI2_FUNCTION_LD_IO_REQUEST :
+ printf("HELLO MPI2_FUNCTION_LD_IO_REQUEST\n");
+ break;
+ case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:
+ printf("HELLO MPI2_FUNCTION_PASSTHRU_IO_REQUEST\n");
+ break;
+ default:
+ printf("HELLO default\n");
+ break;
+ }
+ */
+
+ switch (scsi_io_req->Function)
+ {
+ case MPI2_FUNCTION_LD_IO_REQUEST:
+ /* Regular Path IO. */
+ /* Map the Fw Error Status. */
+ map_tbolt_cmd_status(cmd_mfi, status,
+ extStatus);
+ if ((cmd_mfi->cm_frame->dcmd.opcode
+ == MFI_DCMD_LD_MAP_GET_INFO)
+ && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1))
+ {
+ if (cmd_mfi->cm_frame->header.cmd_status
+ != 0)
+ device_printf(sc->mfi_dev,"map sync failed\n");
+ else {
+ sc->map_id++;
+ device_printf(sc->mfi_dev,"map sync completed\n");
+ mfi_release_command(cmd_mfi);
+ }
+ }
+ if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
+ == MFI_ON_MFIQ_BUSY
+ && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
+ /* BHARAT poll workaround */
+ mfi_remove_busy(cmd_mfi);
+ cmd_mfi->cm_error = 0;
+ mfi_complete(sc, cmd_mfi);
+ }
+ mfi_tbolt_return_cmd(sc, cmd_tbolt);
+ break;
+ case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:
+ map_tbolt_cmd_status(cmd_mfi, status, extStatus);
+ if ((cmd_mfi->cm_frame->dcmd.opcode
+ == MFI_DCMD_LD_MAP_GET_INFO)
+ && (cmd_mfi->cm_frame->dcmd.mbox[1] == 1)) {
+ if(cmd_mfi->cm_frame->header.cmd_status != 0)
+ device_printf(sc->mfi_dev,"map sync failed\n");
+ else {
+ sc->map_id++;
+ device_printf(sc->mfi_dev,"map sync completed\n");
+ mfi_release_command(cmd_mfi);
+ }
+ }
+ if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY)
+ == MFI_ON_MFIQ_BUSY
+ && (cmd_mfi->cm_flags & MFI_CMD_POLLED) == 0) {
+ /* BHARAT poll workaround */
+ mfi_remove_busy(cmd_mfi);
+ cmd_mfi->cm_error = 0;
+ mfi_complete(sc, cmd_mfi);
+ }
+ mfi_tbolt_return_cmd(sc, cmd_tbolt);
+ break;
+ }
+
+ sc->last_reply_idx++;
+ if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
+ MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
+ sc->last_reply_idx = 0;
+ }
+		/* Set it back to all 0xFFs. */
+ ((union mfi_mpi2_reply_descriptor*)desc)->words =
+ ~((uint64_t)0x00);
+
+ num_completed++;
+
+ /* Get the next reply descriptor */
+ desc = (struct mfi_mpi2_reply_header *)
+ ((uintptr_t)sc->reply_frame_pool_align
+ + sc->last_reply_idx * sc->reply_size);
+ reply_desc = desc;
+ val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
+ reply_descript_type = reply_desc->ReplyFlags
+ & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+ }
+
+ if (!num_completed)
+ return;
+
+ /* update replyIndex to FW */
+ if(sc->last_reply_idx)
+ MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
+
+ return;
+}
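+/*
+ * Reply-ring bookkeeping in the loop above, with hypothetical values
+ * mfi_max_fw_cmds = 1008 and reply_size = 128: descriptor i lives at
+ * reply_frame_pool_align + i * 128; each consumed slot is refilled with
+ * all-ones, and after slot 1007 the index wraps to 0 and MFI_RPI is written
+ * so the firmware can reuse the freed entries.  Any partially consumed batch
+ * is posted to MFI_RPI once more after the loop.
+ */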
+
+/**
+ * mfi_tbolt_get_cmd - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+
+struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc
+ *sc)
+{
+ struct mfi_cmd_tbolt *cmd = NULL;
+
+ mtx_assert(&sc->mfi_io_lock, MA_OWNED);
+
+	cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh);
+	if (cmd == NULL)
+		return (NULL);
+	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
+ memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
+ memset((uint8_t *)cmd->io_request, 0,
+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
+ return cmd;
+}
+
+/**
+ * mfi_tbolt_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+static inline void
+mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *cmd)
+{
+ mtx_assert(&sc->mfi_io_lock, MA_OWNED);
+
+ TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, cmd, next);
+}
+
+
+union mfi_mpi2_request_descriptor *
+mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
+{
+ uint8_t *p;
+
+ if (index >= sc->mfi_max_fw_cmds) {
+ device_printf(sc->mfi_dev, "Invalid SMID (0x%x)request "
+ "for descriptor\n", index);
+ return NULL;
+ }
+ p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
+ * index;
+ memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
+ return (union mfi_mpi2_request_descriptor *)p;
+}
+
+
+/* Used to build IOCTL cmd */
+uint8_t
+mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
+{
+ MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+ struct mfi_mpi2_request_raid_scsi_io *io_req;
+ struct mfi_cmd_tbolt *cmd;
+
+ cmd = mfi_tbolt_get_cmd(sc);
+ if (!cmd)
+ return EBUSY;
+	mfi_cmd->cm_extra_frames = cmd->index;	/* Frame count used as SMID */
+ cmd->sync_cmd_idx = mfi_cmd->cm_index;
+ io_req = cmd->io_request;
+ mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+ io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+ io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
+ SGL) / 4;
+ io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;
+
+ mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;
+
+	/*
+	 * In MFI pass thru, nextChainOffset will always be zero to
+	 * indicate the end of the chain.
+	 */
+	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
+ | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+ /* setting the length to the maximum length */
+ mpi25_ieee_chain->Length = 1024;
+
+ return 0;
+}
+
+void
+mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
+ struct mfi_cmd_tbolt *cmd)
+{
+ uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
+ struct mfi_mpi2_request_raid_scsi_io *io_request;
+ struct IO_REQUEST_INFO io_info;
+
+ device_id = mfi_cmd->cm_frame->io.header.target_id;
+ io_request = cmd->io_request;
+ io_request->RaidContext.TargetID = device_id;
+ io_request->RaidContext.Status = 0;
+	io_request->RaidContext.exStatus = 0;
+
+ start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
+ start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
+
+ memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
+ io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
+ io_info.ldTgtId = device_id;
+ if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
+ MFI_FRAME_DIR_READ)
+ io_info.isRead = 1;
+
+ io_request->RaidContext.timeoutValue
+ = MFI_FUSION_FP_DEFAULT_TIMEOUT;
+ io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ cmd->request_desc->header.RequestFlags
+ = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
+ << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
+ io_request->RaidContext.RegLockLength = 0x100;
+ io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
+ * MFI_SECTOR_LEN;
+}
+
+int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd)
+{
+ if(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
+ || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
+ return 1;
+ else
+ return 0;
+}
+
+
+int
+mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd, struct mfi_cmd_tbolt *cmd)
+{
+ uint32_t device_id;
+ uint32_t sge_count;
+ uint8_t cdb[32], cdb_len;
+
+	struct mfi_mpi2_request_raid_scsi_io *io_request = cmd->io_request;
+
+	memset(cdb, 0, 32);
+ device_id = mfi_cmd->cm_frame->header.target_id;
+
+	/* Have to build the CDB here for TB as BSD doesn't have a SCSI layer */
+	if ((cdb_len = mfi_tbolt_build_cdb(sc, mfi_cmd, cdb)) == 1)
+		return 1;
+
+	/* Just the CDB length; the rest of the IoFlags are zero */
+ io_request->IoFlags = cdb_len;
+ memcpy(io_request->CDB.CDB32, cdb, 32);
+
+ if (mfi_tbolt_is_ldio(mfi_cmd))
+ mfi_tbolt_build_ldio(sc, mfi_cmd , cmd);
+ else
+ return 1;
+
+ /*
+ * Construct SGL
+ */
+ sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
+ (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
+ if (sge_count > sc->mfi_max_sge) {
+ device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
+ "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
+ return 1;
+ }
+ io_request->RaidContext.numSGE = sge_count;
+ io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+
+ if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
+ io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
+ else
+ io_request->Control = MPI2_SCSIIO_CONTROL_READ;
+
+ io_request->SGLOffset0 = offsetof(
+ struct mfi_mpi2_request_raid_scsi_io, SGL)/4;
+
+ io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
+ io_request->SenseBufferLength = MFI_SENSE_LEN;
+ return 0;
+}
+
+static int
+mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
+ uint8_t *cdb)
+{
+ uint32_t lba_lo, lba_hi, num_lba;
+ uint8_t cdb_len;
+
+	if (mfi_cmd == NULL || cdb == NULL)
+ return 1;
+ num_lba = mfi_cmd->cm_frame->io.header.data_len;
+ lba_lo = mfi_cmd->cm_frame->io.lba_lo;
+ lba_hi = mfi_cmd->cm_frame->io.lba_hi;
+
+ if((num_lba <= 0xFF) && (lba_lo <= 0x1FFFFF))
+ {
+ if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
+ /* Read 6 or Write 6 */
+ cdb[0] = (uint8_t) (0x0A);
+ else
+ cdb[0] = (uint8_t) (0x08);
+
+ cdb[4] = (uint8_t) num_lba;
+ cdb[3] = (uint8_t) (lba_lo & 0xFF);
+ cdb[2] = (uint8_t) (lba_lo >> 8);
+ cdb[1] = (uint8_t) ((lba_lo >> 16) & 0x1F);
+ cdb_len = 6;
+ }
+ else if((num_lba <= 0xFFFF) && (lba_lo <= 0xFFFFFFFF))
+ {
+ if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
+ /* Read 10 or Write 10 */
+ cdb[0] = (uint8_t) (0x2A);
+ else
+ cdb[0] = (uint8_t) (0x28);
+ cdb[8] = (uint8_t) (num_lba & 0xFF);
+ cdb[7] = (uint8_t) (num_lba >> 8);
+ cdb[5] = (uint8_t) (lba_lo & 0xFF);
+ cdb[4] = (uint8_t) (lba_lo >> 8);
+ cdb[3] = (uint8_t) (lba_lo >> 16);
+ cdb[2] = (uint8_t) (lba_lo >> 24);
+ cdb_len = 10;
+ }
+ else if((num_lba > 0xFFFF) && (lba_hi == 0))
+ {
+ if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
+ /* Read 12 or Write 12 */
+ cdb[0] = (uint8_t) (0xAA);
+ else
+ cdb[0] = (uint8_t) (0xA8);
+ cdb[9] = (uint8_t) (num_lba & 0xFF);
+ cdb[8] = (uint8_t) (num_lba >> 8);
+ cdb[7] = (uint8_t) (num_lba >> 16);
+ cdb[6] = (uint8_t) (num_lba >> 24);
+ cdb[5] = (uint8_t) (lba_lo & 0xFF);
+ cdb[4] = (uint8_t) (lba_lo >> 8);
+ cdb[3] = (uint8_t) (lba_lo >> 16);
+ cdb[2] = (uint8_t) (lba_lo >> 24);
+ cdb_len = 12;
+ }
+ else
+ {
+ if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
+ cdb[0] = (uint8_t) (0x8A);
+ else
+ cdb[0] = (uint8_t) (0x88);
+ cdb[13] = (uint8_t) (num_lba & 0xFF);
+ cdb[12] = (uint8_t) (num_lba >> 8);
+ cdb[11] = (uint8_t) (num_lba >> 16);
+ cdb[10] = (uint8_t) (num_lba >> 24);
+ cdb[9] = (uint8_t) (lba_lo & 0xFF);
+ cdb[8] = (uint8_t) (lba_lo >> 8);
+ cdb[7] = (uint8_t) (lba_lo >> 16);
+ cdb[6] = (uint8_t) (lba_lo >> 24);
+ cdb[5] = (uint8_t) (lba_hi & 0xFF);
+ cdb[4] = (uint8_t) (lba_hi >> 8);
+ cdb[3] = (uint8_t) (lba_hi >> 16);
+ cdb[2] = (uint8_t) (lba_hi >> 24);
+ cdb_len = 16;
+ }
+ return cdb_len;
+}
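+/*
+ * Examples of the CDB selection above (illustrative values only):
+ *   a 64-block read at LBA 0x1000            -> READ(6),  opcode 0x08
+ *   a 64-block read at LBA 0x00400000        -> READ(10), opcode 0x28
+ *   a 0x20000-block read at LBA 0x1000       -> READ(12), opcode 0xA8
+ *   a > 0xFFFF-block access with lba_hi != 0 -> READ(16)/WRITE(16),
+ *                                               opcode 0x88/0x8A
+ */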
+
+static int
+mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
+ pMpi25IeeeSgeChain64_t sgl_ptr,struct mfi_cmd_tbolt *cmd)
+{
+	uint8_t i, sg_processed, sg_to_process;
+ uint8_t sge_count, sge_idx;
+ union mfi_sgl *os_sgl;
+ uint64_t tmp = ~0x00;
+
+ /*
+ * Return 0 if there is no data transfer
+ */
+ if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
+ device_printf(sc->mfi_dev, "Buffer empty \n");
+ return 0;
+ }
+ os_sgl = mfi_cmd->cm_sg;
+ sge_count = mfi_cmd->cm_frame->header.sg_count;
+
+ if (sge_count > sc->mfi_max_sge) {
+ device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
+ os_sgl,sge_count);
+ return sge_count;
+ }
+
+ if (sge_count > sc->max_SGEs_in_main_message)
+ /* One element to store the chain info */
+ sge_idx = sc->max_SGEs_in_main_message - 1;
+ else
+ sge_idx = sge_count;
+
+ for (i = 0; i < sge_idx; i++) {
+		/*
+		 * For 32-bit BSD we get 32-bit SGLs from the OS, but the
+		 * firmware only takes 64-bit SGLs, so copy from the 32-bit
+		 * SGLs into 64-bit ones.
+		 */
+ /*if((sc->mfi_flags & MFI_FLAGS_SG64) == 0)
+ {
+ sgl_ptr->Length = (uint32_t) os_sgl->sg64[0].len;
+ sgl_ptr->Address = (os_sgl->sg64[0].addr);
+ }
+ else*/
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
+ sgl_ptr->Length = os_sgl->sg_skinny[i].len;
+ sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
+ } else {
+ sgl_ptr->Length = os_sgl->sg32[i].len;
+ sgl_ptr->Address = os_sgl->sg32[i].addr & tmp;
+ }
+ sgl_ptr->Flags = 0;
+ sgl_ptr++;
+ cmd->io_request->ChainOffset = 0;
+ }
+
+ sg_processed = i;
+
+ if (sg_processed < sge_count) {
+ pMpi25IeeeSgeChain64_t sg_chain;
+ sg_to_process = sge_count - sg_processed;
+ cmd->io_request->ChainOffset =
+ sc->chain_offset_value_for_main_message;
+ sg_chain = sgl_ptr;
+ /* Prepare chain element */
+ sg_chain->NextChainOffset = 0;
+ sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
+ (sge_count - sg_processed));
+ sg_chain->Address = ((uintptr_t)cmd->sg_frame_phys_addr) & tmp;
+ sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
+ for (; i < sge_count; i++) {
+ if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
+ sgl_ptr->Length = os_sgl->sg_skinny[i].len;
+ sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
+			} else {
+ sgl_ptr->Length = os_sgl->sg32[i].len;
+ sgl_ptr->Address = (os_sgl->sg32[i].addr) &
+ tmp;
+ }
+ sgl_ptr->Flags = 0;
+ sgl_ptr++;
+ }
+ }
+ return sge_count;
+}
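+/*
+ * Example (illustrative only): with max_SGEs_in_main_message = 8, a 20-entry
+ * OS SGL is laid out by the routine above as 7 data SGEs in the main
+ * message, the 8th slot becoming an IEEE chain element that points at
+ * cmd->sg_frame, which then carries the remaining 13 SGEs.
+ */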
+
+union mfi_mpi2_request_descriptor *
+mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
+{
+ struct mfi_cmd_tbolt *cmd;
+ union mfi_mpi2_request_descriptor *req_desc = NULL;
+ uint16_t index;
+ cmd = mfi_tbolt_get_cmd(sc);
+ if (!cmd)
+ return NULL;
+ mfi_cmd->cm_extra_frames = cmd->index;
+ cmd->sync_cmd_idx = mfi_cmd->cm_index;
+
+ index = cmd->index;
+	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
+	if (req_desc == NULL)
+		return NULL;
+	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd))
+ return NULL;
+ req_desc->header.SMID = index;
+ return req_desc;
+}
+
+union mfi_mpi2_request_descriptor *
+mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
+{
+ union mfi_mpi2_request_descriptor *req_desc = NULL;
+ uint16_t index;
+ if (mfi_build_mpt_pass_thru(sc, cmd)) {
+ device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
+ "cmd\n");
+ return NULL;
+ }
+ /* For fusion the frame_count variable is used for SMID */
+ index = cmd->cm_extra_frames;
+
+ req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
+	if (!req_desc)
+		return NULL;
+
+	bzero(req_desc, sizeof(*req_desc));
+ req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ req_desc->header.SMID = index;
+ return req_desc;
+}
+
+int
+mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
+{
+ struct mfi_frame_header *hdr;
+ uint8_t *cdb;
+ union mfi_mpi2_request_descriptor *req_desc = NULL;
+ int tm = MFI_POLL_TIMEOUT_SECS * 1000;
+
+ hdr = &cm->cm_frame->header;
+ cdb = cm->cm_frame->pass.cdb;
+	if (sc->adpreset)
+		return 1;
+	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
+		cm->cm_timestamp = time_uptime;
+		mfi_enqueue_busy(cm);
+	} else {
+ hdr->cmd_status = 0xff;
+ hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ }
+
+ if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
+ /* check for inquiry commands coming from CLI */
+ if (cdb[0] != 0x28 && cdb[0] != 0x2A) {
+ if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
+ NULL) {
+ device_printf(sc->mfi_dev, "Mapping from MFI "
+ "to MPT Failed \n");
+ return 1;
+ }
+ }
+ else
+ device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
+ }
+ else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
+ hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
+ if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
+ device_printf(sc->mfi_dev, "LDIO Failed \n");
+ return 1;
+ }
+ } else
+ if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
+ device_printf(sc->mfi_dev, "Mapping from MFI to MPT "
+ "Failed\n");
+ return 1;
+ }
+ MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
+ MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 0x20));
+
+ if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
+ return 0;
+
+ /* This is a polled command, so busy-wait for it to complete. */
+ while (hdr->cmd_status == 0xff) {
+ DELAY(1000);
+ tm -= 1;
+ if (tm <= 0)
+ break;
+ }
+
+ if (hdr->cmd_status == 0xff) {
+ device_printf(sc->mfi_dev, "Frame %p timed out "
+ "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
+ return (ETIMEDOUT);
+ }
+ return 0;
+}
+
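+/*
+ * After a firmware reset, walk the busy queue and re-queue every command
+ * that was outstanding.  A command that has already been retried across
+ * three resets is assumed to be the one triggering the fault, so the HBA
+ * is shut down instead of being recovered again.
+ */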
+static void
+mfi_issue_pending_cmds_again(struct mfi_softc *sc)
+{
+ struct mfi_command *cm, *tmp;
+
+ mtx_assert(&sc->mfi_io_lock, MA_OWNED);
+ TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
+
+ cm->retry_for_fw_reset++;
+
+ /*
+ * If a command has repeatedly been retried and keeps causing
+ * a FW reset condition, no further recovery should be
+ * attempted on the controller.
+ */
+ if (cm->retry_for_fw_reset == 3) {
+ device_printf(sc->mfi_dev, "megaraid_sas: command %d "
+ "was tried multiple times during adapter reset; "
+ "shutting down the HBA\n", cm->cm_index);
+ mfi_kill_hba(sc);
+ sc->hw_crit_error = 1;
+ return;
+ }
+
+ if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) {
+ struct mfi_cmd_tbolt *cmd;
+ mfi_remove_busy(cm);
+ cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
+ mfi_tbolt_return_cmd(sc, cmd);
+ if ((cm->cm_flags & MFI_ON_MFIQ_MASK) == 0) {
+ if (cm->cm_frame->dcmd.opcode !=
+ MFI_DCMD_CTRL_EVENT_WAIT) {
+ device_printf(sc->mfi_dev,
+ "APJ ****requeue command %d \n",
+ cm->cm_index);
+ mfi_requeue_ready(cm);
+ }
+ } else
+ mfi_release_command(cm);
+ }
+ }
+ mfi_startio(sc);
+}
+
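+/*
+ * Take the controller offline by writing MFI_STOP_ADP to the inbound
+ * doorbell (offset 0x00 on ThunderBolt controllers, MFI_IDB otherwise).
+ */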
+static void
+mfi_kill_hba(struct mfi_softc *sc)
+{
+ if (sc->mfi_flags & MFI_FLAGS_TBOLT)
+ MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
+ else
+ MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
+}
+
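+/*
+ * Two-stage online controller reset (OCR).  Stage one resets the adapter
+ * and re-enables interrupts; stage two waits for the reset status to
+ * clear, brings the firmware back to READY, re-initializes the MFI/MPT
+ * queues, replays the commands that were pending and re-registers for
+ * AENs.  If the firmware never reaches READY, the HBA is marked dead.
+ */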
+static void
+mfi_process_fw_state_chg_isr(void *arg)
+{
+ struct mfi_softc *sc = (struct mfi_softc *)arg;
+ struct mfi_cmd_tbolt *cmd;
+ int error, status;
+
+ if (sc->adpreset == 1) {
+ device_printf(sc->mfi_dev,"First stage of FW reset "
+ "initiated...\n");
+
+ sc->mfi_adp_reset(sc);
+ sc->mfi_enable_intr(sc);
+
+ device_printf(sc->mfi_dev,"First stage of reset complete, "
+ "second stage initiated...\n");
+
+ sc->adpreset = 2;
+
+ /* Wait about 20 seconds before starting the second-stage init. */
+ for (int wait = 0; wait < 20000; wait++)
+ DELAY(1000);
+ device_printf(sc->mfi_dev,"Second stage of FW reset "
+ "initiated...\n");
+ while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04)
+ ;
+
+ sc->mfi_disable_intr(sc);
+
+ /* We expect the FW state to be READY */
+ if (mfi_transition_firmware(sc)) {
+ device_printf(sc->mfi_dev,"controller is not in ready "
+ "state\n");
+ mfi_kill_hba(sc);
+ sc->hw_crit_error = 1;
+ return;
+ }
+ if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
+ return;
+
+ mtx_lock(&sc->mfi_io_lock);
+
+ sc->mfi_enable_intr(sc);
+ sc->adpreset = 0;
+ if (sc->mfi_aen_cm != NULL) {
+ /* Tear down the outstanding AEN command before re-arming it. */
+ free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
+ mfi_remove_busy(sc->mfi_aen_cm);
+ cmd = sc->mfi_cmd_pool_tbolt[sc->mfi_aen_cm->cm_extra_frames
+ - 1];
+ mfi_tbolt_return_cmd(sc, cmd);
+ mfi_release_command(sc->mfi_aen_cm);
+ sc->mfi_aen_cm = NULL;
+ }
+ if (sc->map_update_cmd) {
+ mfi_release_command(sc->map_update_cmd);
+ sc->map_update_cmd = NULL;
+ }
+ mfi_issue_pending_cmds_again(sc);
+
+ /*
+ * Re-issuing pending commands can result in the adapter being
+ * marked dead because of too many retries. Check for that
+ * condition before clearing the reset state on the firmware.
+ */
+ if (!sc->hw_crit_error) {
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ mfi_aen_setup(sc, sc->last_seq_num);
+ sc->issuepend_done = 1;
+ device_printf(sc->mfi_dev,"second stage of reset "
+ "complete, FW is ready now.\n");
+ } else {
+ device_printf(sc->mfi_dev,"second stage of reset "
+ "never completed, hba was marked offline.\n");
+ }
+ } else {
+ device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
+ "called with unhandled value:%d\n", sc->adpreset);
+ }
+ mtx_unlock(&sc->mfi_io_lock);
+}
+
diff --git a/sys/dev/mfi/mfireg.h b/sys/dev/mfi/mfireg.h
index f005c37..7927aab 100644
--- a/sys/dev/mfi/mfireg.h
+++ b/sys/dev/mfi/mfireg.h
@@ -64,7 +64,7 @@ __FBSDID("$FreeBSD$");
* reason why this interface should be limited to just SAS. In any case, LSI
* seems to also call this interface 'MFI', so that will be used here.
*/
-
+#define MEGAMFI_FRAME_SIZE 64
/*
* Start with the register set. All registers are 32 bits wide.
* The usual Intel IOP style setup.
@@ -83,25 +83,56 @@ __FBSDID("$FreeBSD$");
#define MFI_OQP 0x44 /* Outbound queue port */
/*
+ * ThunderBolt specific registers
+ */
+
+#define MFI_RPI 0x6c /* reply_post_host_index */
+#define MFI_ILQP 0xc0 /* inbound_low_queue_port */
+#define MFI_IHQP 0xc4 /* inbound_high_queue_port */
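+
+/*
+ * A 64-bit MPT request descriptor is posted by writing its low word to
+ * MFI_ILQP and its high word to MFI_IHQP.
+ */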
+
+/*
* 1078 specific related register
*/
#define MFI_ODR0 0x9c /* outbound doorbell register0 */
#define MFI_ODCR0 0xa0 /* outbound doorbell clear register0 */
#define MFI_OSP0 0xb0 /* outbound scratch pad0 */
#define MFI_1078_EIM 0x80000004 /* 1078 enable intrrupt mask */
-#define MFI_RMI 0x2 /* reply message interrupt */
+#define MFI_RMI 0x2 /* reply message interrupt */
#define MFI_1078_RM 0x80000000 /* reply 1078 message interrupt */
#define MFI_ODC 0x4 /* outbound doorbell change interrupt */
+/* OCR registers */
+#define MFI_WSR 0x004 /* write sequence register */
+#define MFI_HDR 0x008 /* host diagnostic register */
+#define MFI_RSR 0x3c3 /* reset status register */
+
/*
* GEN2 specific changes
*/
#define MFI_GEN2_EIM 0x00000005 /* GEN2 enable interrupt mask */
#define MFI_GEN2_RM 0x00000001 /* reply GEN2 message interrupt */
+
+/*
+ * skinny specific changes
+ */
+#define MFI_SKINNY_IDB 0x00 /* Inbound doorbell is at 0x00 for skinny */
+#define MFI_IQPL 0x000000c0
+#define MFI_IQPH 0x000000c4
+#define MFI_SKINNY_RM 0x00000001 /* reply skinny message interrupt */
+
/* Bits for MFI_OSTS */
#define MFI_OSTS_INTR_VALID 0x00000002
+/* OCR specific flags */
+#define MFI_FIRMWARE_STATE_CHANGE 0x00000002
+#define MFI_STATE_CHANGE_INTERRUPT 0x00000004 /* MFI state change interrupt */
+
/*
* Firmware state values. Found in OMSG0 during initialization.
*/
@@ -119,7 +150,16 @@ __FBSDID("$FreeBSD$");
#define MFI_FWSTATE_FAULT 0xf0000000
#define MFI_FWSTATE_MAXSGL_MASK 0x00ff0000
#define MFI_FWSTATE_MAXCMD_MASK 0x0000ffff
-
+#define MFI_FWSTATE_HOSTMEMREQD_MASK 0x08000000
+#define MFI_FWSTATE_BOOT_MESSAGE_PENDING 0x90000000
+#define MFI_RESET_REQUIRED 0x00000001
+/* ThunderBolt Support */
+
+#define MFI_FWSTATE_TB_MASK 0xf0000000
+#define MFI_FWSTATE_TB_RESET 0x00000000
+#define MFI_FWSTATE_TB_READY 0x10000000
+#define MFI_FWSTATE_TB_OPERATIONAL 0x20000000
+#define MFI_FWSTATE_TB_FAULT 0x40000000
/*
* Control bits to drive the card to ready state. These go into the IDB
* register.
@@ -130,6 +170,12 @@ __FBSDID("$FreeBSD$");
#define MFI_FWINIT_CLEAR_HANDSHAKE 0x00000008 /* Respond to WAIT_HANDSHAKE */
#define MFI_FWINIT_HOTPLUG 0x00000010
+/* ADP reset flags */
+#define MFI_STOP_ADP 0x00000020
+#define MFI_ADP_RESET 0x00000040
+#define DIAG_WRITE_ENABLE 0x00000080
+#define DIAG_RESET_ADAPTER 0x00000004
+
/* MFI Commands */
typedef enum {
MFI_CMD_INIT = 0x00,
@@ -146,6 +192,7 @@ typedef enum {
/* Direct commands */
typedef enum {
MFI_DCMD_CTRL_GETINFO = 0x01010000,
+ MFI_DCMD_CTRL_MFI_HOST_MEM_ALLOC =0x0100e100,
MFI_DCMD_CTRL_MFC_DEFAULTS_GET =0x010e0201,
MFI_DCMD_CTRL_MFC_DEFAULTS_SET =0x010e0202,
MFI_DCMD_CTRL_FLUSHCACHE = 0x01101000,
@@ -164,6 +211,7 @@ typedef enum {
MFI_DCMD_FLASH_FW_FLASH = 0x010f0300,
MFI_DCMD_FLASH_FW_CLOSE = 0x010f0400,
MFI_DCMD_PD_GET_LIST = 0x02010000,
+ MFI_DCMD_PD_LIST_QUERY = 0x02010100,
MFI_DCMD_PD_GET_INFO = 0x02020000,
MFI_DCMD_PD_STATE_SET = 0x02030100,
MFI_DCMD_PD_REBUILD_START = 0x02040100,
@@ -173,6 +221,8 @@ typedef enum {
MFI_DCMD_PD_GET_PROGRESS = 0x02060000,
MFI_DCMD_PD_LOCATE_START = 0x02070100,
MFI_DCMD_PD_LOCATE_STOP = 0x02070200,
+ MFI_DCMD_LD_MAP_GET_INFO = 0x0300e101,
+ MFI_DCMD_LD_SYNC = 0x0300e102,
MFI_DCMD_LD_GET_LIST = 0x03010000,
MFI_DCMD_LD_GET_INFO = 0x03020000,
MFI_DCMD_LD_GET_PROP = 0x03030000,
@@ -213,6 +263,32 @@ typedef enum {
#define MFI_FRAME_DIR_WRITE 0x0008
#define MFI_FRAME_DIR_READ 0x0010
#define MFI_FRAME_DIR_BOTH 0x0018
+#define MFI_FRAME_IEEE_SGL 0x0020
+
+/* ThunderBolt Specific */
+
+/* Pre-ThunderBolt and ThunderBolt command size; checked at load time for now. */
+#define MR_COMMAND_SIZE (MFI_FRAME_SIZE*20) /* 1280 bytes */
+
+#define MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT 256
+/*
+ * Keep the message size small to reduce memory-copy overhead and the
+ * size of the per-command (SRB) extension.
+ */
+#define MEGASAS_THUNDERBOLT_NEW_MSG_SIZE 256
+#define MEGASAS_THUNDERBOLT_MAX_COMMANDS 1024
+#define MEGASAS_THUNDERBOLT_MAX_REPLY_COUNT 1024
+#define MEGASAS_THUNDERBOLT_REPLY_SIZE 8
+#define MEGASAS_THUNDERBOLT_MAX_CHAIN_COUNT 1
+#define MEGASAS_MAX_SZ_CHAIN_FRAME 1024
+
+#define MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
+#define MPI2_FUNCTION_LD_IO_REQUEST 0xF1
+/* TODO: remove this and use the proper AEN code. */
+#define MR_EVT_LD_FAST_PATH_IO_STATUS_CHANGED (0xFFFF)
+
+#define MR_INTERNAL_MFI_FRAMES_SMID 1
+#define MR_CTRL_EVENT_WAIT_SMID 2
+#define MR_INTERNAL_DRIVER_RESET_SMID 3
+
/* MFI Status codes */
typedef enum {
@@ -352,6 +428,15 @@ typedef enum {
MR_PD_CACHE_DISABLE = 2
} mfi_pd_cache;
+typedef enum {
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, /*query for system drives */
+}mfi_pd_query_type;
+
/*
* Other propertities and definitions
*/
@@ -374,6 +459,8 @@ typedef enum {
#define MFI_SECTOR_LEN 512
/* Scatter Gather elements */
+
struct mfi_sg32 {
uint32_t addr;
uint32_t len;
@@ -384,9 +471,16 @@ struct mfi_sg64 {
uint32_t len;
} __packed;
+struct mfi_sg_skinny {
+ uint64_t addr;
+ uint32_t len;
+ uint32_t flag;
+} __packed;
+
union mfi_sgl {
- struct mfi_sg32 sg32[1];
- struct mfi_sg64 sg64[1];
+ struct mfi_sg32 sg32[1];
+ struct mfi_sg64 sg64[1];
+ struct mfi_sg_skinny sg_skinny[1];
} __packed;
/* Message frames. All messages have a common header */
@@ -400,6 +494,10 @@ struct mfi_frame_header {
uint8_t cdb_len;
uint8_t sg_count;
uint32_t context;
+ /*
+ * pad0 is MSI-specific and not used by the driver; zero it before
+ * sending the command to the firmware.
+ */
uint32_t pad0;
uint16_t flags;
#define MFI_FRAME_DATAOUT 0x08
@@ -414,9 +512,30 @@ struct mfi_init_frame {
uint32_t qinfo_new_addr_hi;
uint32_t qinfo_old_addr_lo;
uint32_t qinfo_old_addr_hi;
- uint32_t reserved[6];
+ // Start LSIP200113393
+ uint32_t driver_ver_lo; /*28h */
+ uint32_t driver_ver_hi; /*2Ch */
+
+ uint32_t reserved[4];
+ // End LSIP200113393
} __packed;
+/*
+ * define MFI Address Context union
+ */
+
+#ifdef MFI_ADDRESS_IS_uint64_t
+ typedef uint64_t MFI_ADDRESS;
+#else
+ typedef union _MFI_ADDRESS {
+ struct {
+ uint32_t addressLow;
+ uint32_t addressHigh;
+ } u;
+ uint64_t address;
+ } MFI_ADDRESS, *PMFI_ADDRESS;
+#endif
+
#define MFI_IO_FRAME_SIZE 40
struct mfi_io_frame {
struct mfi_frame_header header;
@@ -447,10 +566,11 @@ struct mfi_dcmd_frame {
struct mfi_abort_frame {
struct mfi_frame_header header;
uint32_t abort_context;
- uint32_t pad;
+ /* pad is changed to reserved.*/
+ uint32_t reserved0;
uint32_t abort_mfi_addr_lo;
uint32_t abort_mfi_addr_hi;
- uint32_t reserved[6];
+ uint32_t reserved1[6];
} __packed;
struct mfi_smp_frame {
@@ -475,6 +595,7 @@ struct mfi_stp_frame {
union mfi_frame {
struct mfi_frame_header header;
struct mfi_init_frame init;
+ /* ThunderBolt Initialization */
struct mfi_io_frame io;
struct mfi_pass_frame pass;
struct mfi_dcmd_frame dcmd;
@@ -524,7 +645,43 @@ struct mfi_ctrl_props {
uint16_t ecc_bucket_leak_rate;
uint8_t restore_hotspare_on_insertion;
uint8_t expose_encl_devices;
- uint8_t reserved[38];
+ uint8_t maintainPdFailHistory;
+ uint8_t disallowHostRequestReordering;
+ uint8_t abortCCOnError; // set TRUE to abort CC on detecting an inconsistency
+ uint8_t loadBalanceMode;// load balance mode (MR_LOAD_BALANCE_MODE)
+ uint8_t disableAutoDetectBackplane; // 0 - use auto detect logic of backplanes like SGPIO, i2c SEP using h/w mechanism like GPIO pins
+ // 1 - disable auto detect SGPIO,
+ // 2 - disable i2c SEP auto detect
+ // 3 - disable both auto detect
+ uint8_t snapVDSpace; // % of source LD to be reserved for a VDs snapshot in snapshot repository, for metadata and user data
+ // 1=5%, 2=10%, 3=15% and so on
+
+ /*
+ * Add properties that can be controlled by a bit in the following structure.
+ */
+ struct {
+ uint32_t copyBackDisabled :1; // set TRUE to disable copyBack (0=copyback enabled)
+ uint32_t SMARTerEnabled :1;
+ uint32_t prCorrectUnconfiguredAreas :1;
+ uint32_t useFdeOnly :1;
+ uint32_t disableNCQ :1;
+ uint32_t SSDSMARTerEnabled :1;
+ uint32_t SSDPatrolReadEnabled :1;
+ uint32_t enableSpinDownUnconfigured :1;
+ uint32_t autoEnhancedImport :1;
+ uint32_t enableSecretKeyControl :1;
+ uint32_t disableOnlineCtrlReset :1;
+ uint32_t allowBootWithPinnedCache :1;
+ uint32_t disableSpinDownHS :1;
+ uint32_t enableJBOD :1;
+ uint32_t reserved :18;
+ } OnOffProperties;
+ uint8_t autoSnapVDSpace; // % of source LD to be reserved for auto snapshot in snapshot repository, for metadata and user data
+ // 1=5%, 2=10%, 3=15% and so on
+ uint8_t viewSpace; // snapshot writeable VIEWs capacity as a % of source LD capacity. 0=READ only
+ // 1=5%, 2=10%, 3=15% and so on
+ uint16_t spinDownTime; // # of idle minutes before device is spun down (0=use FW defaults)
+ uint8_t reserved[24];
} __packed;
/* PCI information about the card. */
@@ -964,10 +1121,11 @@ struct mfi_pd_address {
uint64_t sas_addr[2];
} __packed;
+#define MAX_SYS_PDS 240
struct mfi_pd_list {
uint32_t size;
uint32_t count;
- struct mfi_pd_address addr[0];
+ struct mfi_pd_address addr[MAX_SYS_PDS];
} __packed;
enum mfi_pd_state {
@@ -1040,7 +1198,9 @@ struct mfi_ld_params {
#define MFI_LD_PARAMS_INIT_QUICK 1
#define MFI_LD_PARAMS_INIT_FULL 2
uint8_t is_consistent;
- uint8_t reserved[23];
+ uint8_t reserved1[6];
+ uint8_t isSSCD;
+ uint8_t reserved2[16];
} __packed;
struct mfi_ld_progress {
@@ -1081,7 +1241,7 @@ struct mfi_ld_info {
uint8_t reserved2[16];
} __packed;
-#define MAX_ARRAYS 16
+#define MAX_ARRAYS 128
struct mfi_spare {
union mfi_pd_ref ref;
uint8_t spare_type;
@@ -1118,9 +1278,9 @@ struct mfi_config_data {
uint16_t spares_count;
uint16_t spares_size;
uint8_t reserved[16];
- struct mfi_array array[0];
- struct mfi_ld_config ld[0];
- struct mfi_spare spare[0];
+ struct mfi_array array[1];
+ struct mfi_ld_config ld[1];
+ struct mfi_spare spare[1];
} __packed;
struct mfi_bbu_capacity_info {
@@ -1230,6 +1390,470 @@ struct mfi_pr_properties {
uint32_t clear_freq;
};
+/* ThunderBolt support */
+
+/*
+ * RAID context structure describing MegaRAID-specific I/O parameters.
+ * It resides at offset 0x60, where the SGL normally starts in MPT I/O frames.
+ */
+
+typedef struct _MPI2_SCSI_IO_VENDOR_UNIQUE {
+ uint16_t resvd0; // 0x00 -0x01
+ uint16_t timeoutValue; // 0x02 -0x03
+ uint8_t regLockFlags;
+ uint8_t armId;
+ uint16_t TargetID; // 0x06 -0x07
+
+ uint64_t RegLockLBA; // 0x08 - 0x0F
+
+ uint32_t RegLockLength; // 0x10 - 0x13
+
+ uint16_t SMID; //nextLMId; // 0x14 - 0x15
+ uint8_t exStatus; // 0x16
+ uint8_t Status; // 0x17 status
+
+ uint8_t RAIDFlags; // 0x18
+ uint8_t numSGE; // 0x19 numSge
+ uint16_t configSeqNum; // 0x1A - 0x1B
+ uint8_t spanArm; // 0x1C
+ uint8_t resvd2[3]; // 0x1D- 0x1F
+} MPI2_SCSI_IO_VENDOR_UNIQUE, MPI25_SCSI_IO_VENDOR_UNIQUE;
+
+
+/*****************************************************************************
+*
+* Message Functions
+*
+*****************************************************************************/
+
+#define NA_MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) /* SCSI Task Management */
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_FUNCTION_IOC_FACTS (0x03) /* IOC Facts */
+#define MPI2_FUNCTION_CONFIG (0x04) /* Configuration */
+#define MPI2_FUNCTION_PORT_FACTS (0x05) /* Port Facts */
+#define MPI2_FUNCTION_PORT_ENABLE (0x06) /* Port Enable */
+#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) /* Event Notification */
+#define MPI2_FUNCTION_EVENT_ACK (0x08) /* Event Acknowledge */
+#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) /* FW Download */
+#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) /* Target Assist */
+#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) /* Target Status Send */
+#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) /* Target Mode Abort */
+#define MPI2_FUNCTION_FW_UPLOAD (0x12) /* FW Upload */
+#define MPI2_FUNCTION_RAID_ACTION (0x15) /* RAID Action */
+#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) /* SCSI IO RAID Passthrough */
+#define MPI2_FUNCTION_TOOLBOX (0x17) /* Toolbox */
+#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) /* SCSI Enclosure Processor */
+#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) /* SMP Passthrough */
+#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) /* SAS IO Unit Control */
+#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) /* SATA Passthrough */
+#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) /* Diagnostic Buffer Post */
+#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */
+#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
+#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
+#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator */
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) /* Host Based Discovery Action */
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) /* Power Management Control */
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) /* beginning of product-specific range */
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) /* end of product-specific range */
+
+
+
+/* Doorbell functions */
+#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI2_FUNCTION_HANDSHAKE (0x42)
+
+/*****************************************************************************
+*
+* MPI Version Definitions
+*
+*****************************************************************************/
+
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+
+#define MPI2_VERSION_02_00 (0x0200)
+
+/* versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
+
+
+/* IOCInit Request message */
+struct MPI2_IOC_INIT_REQUEST
+{
+ uint8_t WhoInit; /* 0x00 */
+ uint8_t Reserved1; /* 0x01 */
+ uint8_t ChainOffset; /* 0x02 */
+ uint8_t Function; /* 0x03 */
+ uint16_t Reserved2; /* 0x04 */
+ uint8_t Reserved3; /* 0x06 */
+ uint8_t MsgFlags; /* 0x07 */
+ uint8_t VP_ID; /* 0x08 */
+ uint8_t VF_ID; /* 0x09 */
+ uint16_t Reserved4; /* 0x0A */
+ uint16_t MsgVersion; /* 0x0C */
+ uint16_t HeaderVersion; /* 0x0E */
+ uint32_t Reserved5; /* 0x10 */
+ uint16_t Reserved6; /* 0x14 */
+ uint8_t Reserved7; /* 0x16 */
+ uint8_t HostMSIxVectors; /* 0x17 */
+ uint16_t Reserved8; /* 0x18 */
+ uint16_t SystemRequestFrameSize; /* 0x1A */
+ uint16_t ReplyDescriptorPostQueueDepth; /* 0x1C */
+ uint16_t ReplyFreeQueueDepth; /* 0x1E */
+ uint32_t SenseBufferAddressHigh; /* 0x20 */
+ uint32_t SystemReplyAddressHigh; /* 0x24 */
+ uint64_t SystemRequestFrameBaseAddress; /* 0x28 */
+ uint64_t ReplyDescriptorPostQueueAddress;/* 0x30 */
+ uint64_t ReplyFreeQueueAddress; /* 0x38 */
+ uint64_t TimeStamp; /* 0x40 */
+};
+
+/* WhoInit values */
+#define MPI2_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI2_WHOINIT_SYSTEM_BIOS (0x01)
+#define MPI2_WHOINIT_ROM_BIOS (0x02)
+#define MPI2_WHOINIT_PCI_PEER (0x03)
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_WHOINIT_MANUFACTURER (0x05)
+
+struct MPI2_SGE_CHAIN_UNION
+{
+ uint16_t Length;
+ uint8_t NextChainOffset;
+ uint8_t Flags;
+ union
+ {
+ uint32_t Address32;
+ uint64_t Address64;
+ } u;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE32
+{
+ uint32_t Address;
+ uint32_t FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE64
+{
+ uint64_t Address;
+ uint32_t Length;
+ uint16_t Reserved1;
+ uint8_t Reserved2;
+ uint8_t Flags;
+};
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
+{
+ struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION
+{
+ uint32_t FlagsLength;
+ union
+ {
+ uint32_t Address32;
+ uint64_t Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION;
+
+/****************************************************************************
+* IEEE SGE field definitions and masks
+****************************************************************************/
+
+/* Flags field bit definitions */
+
+#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
+
+#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
+
+/* Element Type */
+
+#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
+#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+
+/* Data Location Address Space */
+
+#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+
+/* Address Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/*******************/
+/* SCSI IO Control bits */
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+
+#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
+
+#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
+#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
+#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
+#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
+
+/*******************/
+
+typedef struct
+{
+ uint8_t CDB[20]; /* 0x00 */
+ uint32_t PrimaryReferenceTag; /* 0x14 */
+ uint16_t PrimaryApplicationTag; /* 0x18 */
+ uint16_t PrimaryApplicationTagMask; /* 0x1A */
+ uint32_t TransferLength; /* 0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32;
+
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION
+{
+ struct MPI2_IEEE_SGE_SIMPLE32 Chain32;
+ struct MPI2_IEEE_SGE_SIMPLE64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION;
+
+typedef union _MPI2_SIMPLE_SGE_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+} MPI2_SIMPLE_SGE_UNION;
+
+typedef union _MPI2_SGE_IO_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ struct MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION;
+
+typedef union
+{
+ uint8_t CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION;
+
+
+/* MPI 2.5 SGLs */
+
+#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+typedef struct _MPI25_IEEE_SGE_CHAIN64
+{
+ uint64_t Address;
+ uint32_t Length;
+ uint16_t Reserved1;
+ uint8_t NextChainOffset;
+ uint8_t Flags;
+} MPI25_IEEE_SGE_CHAIN64, *pMpi25IeeeSgeChain64_t;
+
+/* use MPI2_IEEE_SGE_FLAGS_ defines for the Flags field */
+
+
+/********/
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+struct mfi_mpi2_request_raid_scsi_io
+{
+ uint16_t DevHandle; /* 0x00 */
+ uint8_t ChainOffset; /* 0x02 */
+ uint8_t Function; /* 0x03 */
+ uint16_t Reserved1; /* 0x04 */
+ uint8_t Reserved2; /* 0x06 */
+ uint8_t MsgFlags; /* 0x07 */
+ uint8_t VP_ID; /* 0x08 */
+ uint8_t VF_ID; /* 0x09 */
+ uint16_t Reserved3; /* 0x0A */
+ uint32_t SenseBufferLowAddress; /* 0x0C */
+ uint16_t SGLFlags; /* 0x10 */
+ uint8_t SenseBufferLength; /* 0x12 */
+ uint8_t Reserved4; /* 0x13 */
+ uint8_t SGLOffset0; /* 0x14 */
+ uint8_t SGLOffset1; /* 0x15 */
+ uint8_t SGLOffset2; /* 0x16 */
+ uint8_t SGLOffset3; /* 0x17 */
+ uint32_t SkipCount; /* 0x18 */
+ uint32_t DataLength; /* 0x1C */
+ uint32_t BidirectionalDataLength; /* 0x20 */
+ uint16_t IoFlags; /* 0x24 */
+ uint16_t EEDPFlags; /* 0x26 */
+ uint32_t EEDPBlockSize; /* 0x28 */
+ uint32_t SecondaryReferenceTag; /* 0x2C */
+ uint16_t SecondaryApplicationTag; /* 0x30 */
+ uint16_t ApplicationTagTranslationMask; /* 0x32 */
+ uint8_t LUN[8]; /* 0x34 */
+ uint32_t Control; /* 0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+ MPI2_SCSI_IO_VENDOR_UNIQUE RaidContext; /* 0x60 */
+ MPI2_SGE_IO_UNION SGL; /* 0x80 */
+} __packed;
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+typedef struct _MFI_RAID_MFA_IO_DESCRIPTOR {
+ uint32_t RequestFlags : 8;
+ uint32_t MessageAddress1 : 24; /* bits 31:8*/
+ uint32_t MessageAddress2; /* bits 61:32 */
+} MFI_RAID_MFA_IO_REQUEST_DESCRIPTOR,*PMFI_RAID_MFA_IO_REQUEST_DESCRIPTOR;
+
+struct mfi_mpi2_request_header {
+ uint8_t RequestFlags; /* 0x00 */
+ uint8_t MSIxIndex; /* 0x01 */
+ uint16_t SMID; /* 0x02 */
+ uint16_t LMID; /* 0x04 */
+};
+
+/* defines for the RequestFlags field */
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
+#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
+
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+
+struct mfi_mpi2_request_high_priority {
+ struct mfi_mpi2_request_header header;
+ uint16_t reserved;
+};
+
+struct mfi_mpi2_request_scsi_io {
+ struct mfi_mpi2_request_header header;
+ uint16_t scsi_io_dev_handle;
+};
+
+struct mfi_mpi2_request_scsi_target {
+ struct mfi_mpi2_request_header header;
+ uint16_t scsi_target_io_index;
+};
+
+/* Request Descriptors */
+union mfi_mpi2_request_descriptor {
+ struct mfi_mpi2_request_header header;
+ struct mfi_mpi2_request_high_priority high_priority;
+ struct mfi_mpi2_request_scsi_io scsi_io;
+ struct mfi_mpi2_request_scsi_target scsi_target;
+ uint64_t words;
+};
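+
+/*
+ * The 64-bit 'words' view is what the driver writes to the inbound
+ * low/high queue ports (MFI_ILQP/MFI_IHQP) to post a request.
+ */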
+
+
+struct mfi_mpi2_reply_header {
+ uint8_t ReplyFlags; /* 0x00 */
+ uint8_t MSIxIndex; /* 0x01 */
+ uint16_t SMID; /* 0x02 */
+};
+
+/* defines for the ReplyFlags field */
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
+#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+
+/* values for marking a reply descriptor as unused */
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
+
+struct mfi_mpi2_reply_default {
+ struct mfi_mpi2_reply_header header;
+ uint32_t DescriptorTypeDependent2;
+};
+
+struct mfi_mpi2_reply_address {
+ struct mfi_mpi2_reply_header header;
+ uint32_t ReplyFrameAddress;
+};
+
+struct mfi_mpi2_reply_scsi_io {
+ struct mfi_mpi2_reply_header header;
+ uint16_t TaskTag; /* 0x04 */
+ uint16_t Reserved1; /* 0x06 */
+};
+
+struct mfi_mpi2_reply_target_assist {
+ struct mfi_mpi2_reply_header header;
+ uint8_t SequenceNumber; /* 0x04 */
+ uint8_t Reserved1; /* 0x04 */
+ uint16_t IoIndex; /* 0x06 */
+};
+
+struct mfi_mpi2_reply_target_cmd_buffer {
+ struct mfi_mpi2_reply_header header;
+ uint8_t SequenceNumber; /* 0x04 */
+ uint8_t Flags; /* 0x04 */
+ uint16_t InitiatorDevHandle; /* 0x06 */
+ uint16_t IoIndex; /* 0x06 */
+};
+
+struct mfi_mpi2_reply_raid_accel {
+ struct mfi_mpi2_reply_header header;
+ uint8_t SequenceNumber; /* 0x04 */
+ uint32_t Reserved; /* 0x04 */
+};
+
+/* union of Reply Descriptors */
+union mfi_mpi2_reply_descriptor
+{
+ struct mfi_mpi2_reply_header header;
+ struct mfi_mpi2_reply_scsi_io scsi_io;
+ struct mfi_mpi2_reply_target_assist target_assist;
+ struct mfi_mpi2_reply_target_cmd_buffer target_cmd;
+ struct mfi_mpi2_reply_raid_accel raid_accel;
+ struct mfi_mpi2_reply_default reply_default;
+ uint64_t words;
+};
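+
+/*
+ * Reply descriptors are consumed from the reply ring; the SMID ties a
+ * reply back to its request, and a slot still carrying the all-ones
+ * UNUSED marks above is treated as empty.
+ */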
+
+struct IO_REQUEST_INFO {
+ uint64_t ldStartBlock;
+ uint32_t numBlocks;
+ uint16_t ldTgtId;
+ uint8_t isRead;
+ uint16_t devHandle;
+ uint64_t pdBlock;
+ uint8_t fpOkForIo;
+};
+
#define MFI_SCSI_MAX_TARGETS 128
#define MFI_SCSI_MAX_LUNS 8
#define MFI_SCSI_INITIATOR_ID 255
diff --git a/sys/dev/mfi/mfivar.h b/sys/dev/mfi/mfivar.h
index e6ad106..acd1ec5 100644
--- a/sys/dev/mfi/mfivar.h
+++ b/sys/dev/mfi/mfivar.h
@@ -59,6 +59,9 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/sx.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+
/*
* SCSI structures and definitions are used from here, but no linking
* requirements are made to CAM.
@@ -70,6 +73,8 @@ struct mfi_hwcomms {
uint32_t hw_ci;
uint32_t hw_reply_q[1];
};
+#define MEGASAS_MAX_NAME 32
+#define MEGASAS_VERSION "4.23"
struct mfi_softc;
struct disk;
@@ -80,9 +85,9 @@ struct mfi_command {
time_t cm_timestamp;
struct mfi_softc *cm_sc;
union mfi_frame *cm_frame;
- uint32_t cm_frame_busaddr;
+ bus_addr_t cm_frame_busaddr;
struct mfi_sense *cm_sense;
- uint32_t cm_sense_busaddr;
+ bus_addr_t cm_sense_busaddr;
bus_dmamap_t cm_dmamap;
union mfi_sgl *cm_sg;
void *cm_data;
@@ -101,6 +106,7 @@ struct mfi_command {
#define MFI_ON_MFIQ_BUSY (1<<7)
#define MFI_ON_MFIQ_MASK ((1<<5)|(1<<6)|(1<<7))
int cm_aen_abort;
+ uint8_t retry_for_fw_reset;
void (* cm_complete)(struct mfi_command *cm);
void *cm_private;
int cm_index;
@@ -120,11 +126,36 @@ struct mfi_disk {
#define MFI_DISK_FLAGS_DISABLED 0x02
};
+struct mfi_system_pd {
+ TAILQ_ENTRY(mfi_system_pd) pd_link;
+ device_t pd_dev;
+ int pd_id;
+ int pd_unit;
+ struct mfi_softc *pd_controller;
+ struct mfi_pd_info *pd_info;
+ struct disk *pd_disk;
+ int pd_flags;
+};
struct mfi_aen {
TAILQ_ENTRY(mfi_aen) aen_link;
struct proc *p;
};
+struct mfi_skinny_dma_info {
+ bus_dma_tag_t dmat[514];
+ bus_dmamap_t dmamap[514];
+ uint32_t mem[514];
+ int noofmaps;
+};
+
+struct mfi_cmd_tbolt;
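+/*
+ * Minimal Linux-style atomic_t shim used for the fw_reset_no_pci_access
+ * flag; note it is a plain volatile counter, not a true atomic type.
+ */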
+typedef struct {
+ volatile unsigned int val;
+} atomic_t;
+
+#define atomic_read(v) ((v)->val)
+#define atomic_set(v,i) ((v)->val = (i))
+
struct mfi_softc {
device_t mfi_dev;
int mfi_flags;
@@ -135,11 +166,26 @@ struct mfi_softc {
#define MFI_FLAGS_1064R (1<<4)
#define MFI_FLAGS_1078 (1<<5)
#define MFI_FLAGS_GEN2 (1<<6)
+#define MFI_FLAGS_SKINNY (1<<7)
+#define MFI_FLAGS_TBOLT (1<<8)
+ // Start: LSIP200113393
+ bus_dma_tag_t verbuf_h_dmat;
+ bus_dmamap_t verbuf_h_dmamap;
+ uint32_t verbuf_h_busaddr;
+ uint32_t *verbuf;
+ void * kbuff_arr[MAX_IOCTL_SGE];
+ bus_dma_tag_t mfi_kbuff_arr_dmat[2];
+ bus_dmamap_t mfi_kbuff_arr_dmamap[2];
+ #if defined (__amd64__)
+ uint64_t mfi_kbuff_arr_busaddr[2];
+ #else
+ uint32_t mfi_kbuff_arr_busaddr[2];
+ #endif
struct mfi_hwcomms *mfi_comms;
TAILQ_HEAD(,mfi_command) mfi_free;
TAILQ_HEAD(,mfi_command) mfi_ready;
- TAILQ_HEAD(,mfi_command) mfi_busy;
+ TAILQ_HEAD(BUSYQ,mfi_command) mfi_busy;
struct bio_queue_head mfi_bioq;
struct mfi_qstat mfi_qstat[MFIQ_COUNT];
@@ -153,15 +199,35 @@ struct mfi_softc {
bus_dma_tag_t mfi_comms_dmat;
bus_dmamap_t mfi_comms_dmamap;
+ #if defined(__amd64__)
+ uint64_t mfi_comms_busaddr;
+#else
uint32_t mfi_comms_busaddr;
+#endif
bus_dma_tag_t mfi_frames_dmat;
bus_dmamap_t mfi_frames_dmamap;
+#if defined(__amd64__)
+ uint64_t mfi_frames_busaddr;
+#else
uint32_t mfi_frames_busaddr;
+#endif
union mfi_frame *mfi_frames;
+ bus_dma_tag_t mfi_tb_init_dmat;
+ bus_dmamap_t mfi_tb_init_dmamap;
+ #if defined(__amd64__)
+ uint64_t mfi_tb_init_busaddr;
+ uint64_t mfi_tb_ioc_init_busaddr;
+ #else
+ uint32_t mfi_tb_init_busaddr;
+ uint32_t mfi_tb_ioc_init_busaddr;
+ #endif
+ union mfi_frame *mfi_tb_init;
+
TAILQ_HEAD(,mfi_aen) mfi_aen_pids;
struct mfi_command *mfi_aen_cm;
+ struct mfi_command *mfi_skinny_cm;
uint32_t mfi_aen_triggered;
uint32_t mfi_poll_waiting;
struct selinfo mfi_select;
@@ -180,6 +246,14 @@ struct mfi_softc {
struct intr_config_hook mfi_ich;
eventhandler_tag eh;
+ /*OCR flags*/
+ atomic_t fw_reset_no_pci_access;
+ uint8_t adpreset;
+ uint8_t issuepend_done;
+ uint8_t disableOnlineCtrlReset;
+ uint32_t mfiStatus;
+ uint32_t last_seq_num;
+ uint32_t volatile hw_crit_error;
/*
* Allocation for the command array. Used as an indexable array to
@@ -215,6 +289,7 @@ struct mfi_softc {
uint32_t mfi_max_io;
TAILQ_HEAD(,mfi_disk) mfi_ld_tqh;
+ TAILQ_HEAD(,mfi_system_pd) mfi_syspd_tqh;
eventhandler_tag mfi_eh;
struct cdev *mfi_cdev;
@@ -226,9 +301,107 @@ struct mfi_softc {
/* Controller type specific interfaces */
void (*mfi_enable_intr)(struct mfi_softc *sc);
+ void (*mfi_disable_intr)(struct mfi_softc *sc);
int32_t (*mfi_read_fw_status)(struct mfi_softc *sc);
int (*mfi_check_clear_intr)(struct mfi_softc *sc);
- void (*mfi_issue_cmd)(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
+ #if defined(__amd64__)
+ void (*mfi_issue_cmd)(struct mfi_softc *sc,uint64_t bus_add,uint32_t frame_cnt);
+ #else
+ void (*mfi_issue_cmd)(struct mfi_softc *sc,uint32_t bus_add,uint32_t frame_cnt);
+ #endif
+ int (*mfi_adp_reset)(struct mfi_softc *sc);
+ int (*mfi_adp_check_reset)(struct mfi_softc *sc);
+
+ /* ThunderBolt */
+ uint32_t mfi_tbolt;
+ uint32_t MFA_enabled;
+ uint64_t map_id;
+ struct mfi_command *map_update_cmd;
+ //Thunderbolt Related structure members
+ uint16_t reply_size; // Single Reply structure size
+ uint16_t raid_io_msg_size; // Single message size
+ TAILQ_HEAD(TB, mfi_cmd_tbolt) mfi_cmd_tbolt_tqh;
+ bus_dma_tag_t mfi_tb_dmat; // ThunderBolt base contiguous memory mapping
+ bus_dmamap_t mfi_tb_dmamap;
+ #if defined(__amd64__)
+ uint64_t mfi_tb_busaddr;
+ #else
+ uint32_t mfi_tb_busaddr;
+ #endif
+ uint8_t * request_message_pool; // ThunderBolt Contiguous DMA memory Mapping
+ uint8_t * request_message_pool_align;
+ uint8_t * request_desc_pool;
+ //uint32_t request_desc_busaddr;
+ #if defined(__amd64__)
+ uint64_t request_msg_busaddr;
+ uint64_t reply_frame_busaddr;
+ uint64_t sg_frame_busaddr;
+ #else
+ uint32_t request_msg_busaddr;
+ uint32_t reply_frame_busaddr;
+ uint32_t sg_frame_busaddr;
+ #endif
+ bus_dma_tag_t mfi_tb_ioc_init_dmat; // ThunderBolt IOC Init Descriptor
+ bus_dmamap_t mfi_tb_ioc_init_dmamap;
+ uint8_t * mfi_tb_ioc_init_desc;
+ struct mfi_cmd_tbolt **mfi_cmd_pool_tbolt;
+ struct mfi_mpi2_reply_header* reply_frame_pool; // Virtual address of reply Frame Pool
+ struct mfi_mpi2_reply_header* reply_frame_pool_align;
+
+ uint8_t * reply_pool_limit; // Last reply frame address
+ uint16_t last_reply_idx;
+ uint8_t max_SGEs_in_chain_message;
+ uint8_t max_SGEs_in_main_message;
+ uint8_t chain_offset_value_for_main_message;
+ uint8_t chain_offset_value_for_mpt_ptmsg;
+ uint64_t fast_path_io_AEN_data;
+ uint8_t shutdown_issued;
+};
+
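+/*
+ * View of a 64-bit request/reply descriptor as two 32-bit words.
+ */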
+union desc_value {
+ uint64_t word;
+ struct {
+ uint32_t low;
+ uint32_t high;
+ }u;
+};
+
+// TODO find the right definition
+#define XXX_MFI_CMD_OP_INIT2 0x9
+/*
+ * Request descriptor types
+ */
+#define MFI_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MFI_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 0x1
+#define MFI_FUSION_FP_DEFAULT_TIMEOUT 0x14
+#define MFI_LOAD_BALANCE_FLAG 0x1
+#define MFI_DCMD_MBOX_PEND_FLAG 0x1
+
+//#define MR_PROT_INFO_TYPE_CONTROLLER 0x08
+#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
+#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
+#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
+#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
+#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
+#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
+#define MEGASAS_EEDPBLOCKSIZE 512
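+
+/*
+ * Per-command ThunderBolt state: the MPT request frame, its chain SGL
+ * frame and sense buffer (with their bus addresses), the index used as
+ * the SMID in request descriptors, and sync_cmd_idx pointing back at
+ * the owning mfi_command.
+ */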
+struct mfi_cmd_tbolt {
+ union mfi_mpi2_request_descriptor *request_desc;
+ struct mfi_mpi2_request_raid_scsi_io *io_request;
+ uintptr_t io_request_phys_addr;
+ uintptr_t sg_frame_phys_addr;
+ uintptr_t sense_phys_addr;
+ MPI2_SGE_IO_UNION *sg_frame;
+ uint8_t *sense;
+ TAILQ_ENTRY(mfi_cmd_tbolt) next;
+ /*
+ * Context for a MFI frame.
+ * Used to get the mfi cmd from list when a MFI cmd is completed
+ */
+ uint32_t sync_cmd_idx;
+ uint16_t index;
+ uint8_t status;
};
extern int mfi_attach(struct mfi_softc *);
@@ -239,6 +412,30 @@ extern void mfi_disk_complete(struct bio *);
extern int mfi_disk_disable(struct mfi_disk *);
extern void mfi_disk_enable(struct mfi_disk *);
extern int mfi_dump_blocks(struct mfi_softc *, int id, uint64_t, void *, int);
+extern int mfi_syspd_disable(struct mfi_system_pd *);
+extern void mfi_syspd_enable(struct mfi_system_pd *);
+extern int mfi_dump_syspd_blocks(struct mfi_softc *, int id, uint64_t, void *,
+ int);
+extern int mfi_transition_firmware(struct mfi_softc *sc);
+extern int mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start);
+extern void mfi_complete(struct mfi_softc *sc, struct mfi_command *cm);
+extern int mfi_mapcmd(struct mfi_softc *sc,struct mfi_command *cm);
+extern int mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm);
+extern void mfi_tbolt_enable_intr_ppc(struct mfi_softc *);
+extern void mfi_tbolt_disable_intr_ppc(struct mfi_softc *);
+extern int32_t mfi_tbolt_read_fw_status_ppc(struct mfi_softc *);
+extern int32_t mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *);
+extern void mfi_tbolt_issue_cmd_ppc(struct mfi_softc *,bus_addr_t, uint32_t);
+extern void mfi_tbolt_init_globals(struct mfi_softc*);
+extern uint32_t mfi_tbolt_get_memory_requirement(struct mfi_softc *);
+extern int mfi_tbolt_init_desc_pool(struct mfi_softc *, uint8_t *, uint32_t);
+extern int mfi_tbolt_init_MFI_queue(struct mfi_softc *);
+extern void mfi_intr_tbolt(void *arg);
+extern int mfi_tbolt_alloc_cmd(struct mfi_softc *sc);
+extern int mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm);
+extern int mfi_tbolt_adp_reset(struct mfi_softc *sc);
+extern int mfi_tbolt_reset(struct mfi_softc *sc);
+extern int mfi_tbolt_sync_map_info(struct mfi_softc *sc);
#define MFIQ_ADD(sc, qname) \
do { \
@@ -396,7 +593,11 @@ mfi_print_sense(struct mfi_softc *sc, void *sense)
MALLOC_DECLARE(M_MFIBUF);
+#define MFI_RESET_WAIT_TIME 180
#define MFI_CMD_TIMEOUT 30
+#define MFI_SYS_PD_IO 0
+#define MFI_LD_IO 1
+#define SKINNY_MEMORY 0x02000000
#define MFI_MAXPHYS (128 * 1024)
#ifdef MFI_DEBUG
diff --git a/sys/modules/mfi/Makefile b/sys/modules/mfi/Makefile
index 00caa57..3a941fa 100644
--- a/sys/modules/mfi/Makefile
+++ b/sys/modules/mfi/Makefile
@@ -9,7 +9,7 @@ SUBDIR+= mfi_linux
.endif
KMOD= mfi
-SRCS= mfi.c mfi_pci.c mfi_disk.c mfi_debug.c
+SRCS= mfi.c mfi_pci.c mfi_disk.c mfi_debug.c mfi_syspd.c mfi_tbolt.c
SRCS+= opt_mfi.h opt_cam.h
SRCS+= device_if.h bus_if.h pci_if.h