author	kadesai <kadesai@FreeBSD.org>	2014-10-08 08:48:18 +0000
committer	kadesai <kadesai@FreeBSD.org>	2014-10-08 08:48:18 +0000
commit	19898d914311955aef80693f5f59eacb74116af0 (patch)
tree	61ef244a2ed585147e3de9999d92da7e9d4469f4
parent	7bd2d46a11dd655647cc8a0922f6414b1ffad635 (diff)
Current MegaRAID firmware, and hence the driver, supports only 64 VDs; for example, a user cannot create more than 64 VDs on a controller with the current firmware/driver. Supporting up to 256 VDs is a new feature and requirement that needs firmware, driver, and application changes. In addition, the new driver must remain backward compatible with older firmware, and vice versa.

The RAID map is the interface between the driver and the firmware for fetching all required fields (attributes) for each virtual drive. In the earlier design the driver used the firmware's copy of the RAID map, whereas in the new design the driver keeps its own RAID map copy and operates on it for any RAID map access in the fast path. The local driver RAID map copy provides ease of access throughout the code and a generic interface for future firmware RAID map changes.

For backward compatibility, the driver notifies the firmware that it supports 256 VDs through the driver capability field. Based on the controller property returned by the firmware, the driver knows whether 256 VDs are supported and copies the RAID map accordingly. At any given time the driver always holds either the old or the new RAID map.

Reviewed by:	ambrisko
MFC after:	2 weeks
Sponsored by:	AVAGO Technologies
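In outline, the patch keeps three map layouts side by side: the legacy firmware layout (MR_FW_RAID_MAP, 64 VDs), the extended firmware layout (MR_FW_RAID_MAP_EXT, 256 VDs), and a driver-local layout (MR_DRV_RAID_MAP) that is always sized for 256 VDs. The DMA buffer shared with firmware is allocated at the larger of the two firmware sizes, while the DCMD transfer length follows whichever layout the firmware actually reports. The program below is a minimal, self-contained sketch of that size negotiation; the structure and field names (FW_RAID_MAP_STUB, raidmap_ctx, fw_reports_max_ext_lds) are simplified stand-ins rather than the driver's real types, and only the LD-count constants mirror the values the patch adds to mrsas.h.

/*
 * Illustrative sketch only: a userland C program that mirrors the RAID map
 * size negotiation this patch adds to mrsas(4).  The types below are
 * hypothetical stand-ins, not the real mrsas_softc layout.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_LOGICAL_DRIVES	64	/* legacy firmware limit          */
#define MAX_LOGICAL_DRIVES_EXT	256	/* firmware with extended-LD maps */

/* Minimal stand-in for the per-LD span map entry carried in the RAID map. */
typedef struct { uint8_t bytes[64]; } LD_SPAN_MAP_STUB;

typedef struct {			/* legacy MR_FW_RAID_MAP shape    */
	uint32_t         totalSize;
	LD_SPAN_MAP_STUB ldSpanMap[1];	/* really ldCount entries         */
} FW_RAID_MAP_STUB;

typedef struct {			/* MR_FW_RAID_MAP_EXT shape       */
	uint32_t         reserved;
	LD_SPAN_MAP_STUB ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
} FW_RAID_MAP_EXT_STUB;

struct raidmap_ctx {
	int    fw_reports_max_ext_lds;	/* adapterOperations3.supportMaxExtLDs */
	size_t old_map_sz, new_map_sz, drv_map_sz;
	size_t max_map_sz, current_map_sz;
};

/*
 * Decide the DMA buffer size and the transfer size, as mrsas_setup_raidmap()
 * now does: the DMA buffer is sized for the larger of the two firmware map
 * layouts, while the DCMD transfer length tracks the layout actually in use.
 */
static void
setup_map_sizes(struct raidmap_ctx *c)
{
	c->old_map_sz = sizeof(FW_RAID_MAP_STUB) +
	    sizeof(LD_SPAN_MAP_STUB) * (MAX_LOGICAL_DRIVES - 1);
	c->new_map_sz = sizeof(FW_RAID_MAP_EXT_STUB);
	/* The driver-local copy is always laid out for 256 VDs. */
	c->drv_map_sz = sizeof(FW_RAID_MAP_STUB) +
	    sizeof(LD_SPAN_MAP_STUB) * (MAX_LOGICAL_DRIVES_EXT - 1);

	c->max_map_sz = (c->old_map_sz > c->new_map_sz) ?
	    c->old_map_sz : c->new_map_sz;
	c->current_map_sz = c->fw_reports_max_ext_lds ?
	    c->new_map_sz : c->old_map_sz;
}

int
main(void)
{
	struct raidmap_ctx old_fw = { .fw_reports_max_ext_lds = 0 };
	struct raidmap_ctx new_fw = { .fw_reports_max_ext_lds = 1 };

	setup_map_sizes(&old_fw);
	setup_map_sizes(&new_fw);
	printf("64-VD firmware : DMA buf %zu bytes, DCMD len %zu bytes\n",
	    old_fw.max_map_sz, old_fw.current_map_sz);
	printf("256-VD firmware: DMA buf %zu bytes, DCMD len %zu bytes\n",
	    new_fw.max_map_sz, new_fw.current_map_sz);
	return (0);
}

Sizing the DMA buffer for the larger layout up front lets the same buffer hold either map, so the driver only has to select the correct transfer length (current_map_sz) when issuing MR_DCMD_LD_MAP_GET_INFO; MR_PopulateDrvRaidMap() in the diff below then either copies the extended map wholesale or converts the legacy map field by field into the driver-local copy.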
-rw-r--r--	sys/dev/mrsas/mrsas.c	246
-rw-r--r--	sys/dev/mrsas/mrsas.h	224
-rw-r--r--	sys/dev/mrsas/mrsas_cam.c	31
-rw-r--r--	sys/dev/mrsas/mrsas_fp.c	199
4 files changed, 512 insertions, 188 deletions
diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
index b740128..279ab7b 100644
--- a/sys/dev/mrsas/mrsas.c
+++ b/sys/dev/mrsas/mrsas.c
@@ -139,8 +139,8 @@ extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
-extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
-extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
+extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
extern void mrsas_xpt_release(struct mrsas_softc *sc);
extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
@@ -928,6 +928,9 @@ void mrsas_free_mem(struct mrsas_softc *sc)
bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
if (sc->raidmap_tag[i] != NULL)
bus_dma_tag_destroy(sc->raidmap_tag[i]);
+
+ if (sc->ld_drv_map[i] != NULL)
+ free(sc->ld_drv_map[i], M_MRSAS);
}
/*
@@ -1634,9 +1637,58 @@ mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
* Allocate DMA memory for the RAID maps and perform setup.
*/
static int mrsas_setup_raidmap(struct mrsas_softc *sc)
-{
- sc->map_sz = sizeof(MR_FW_RAID_MAP) +
- (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
+{
+ int i;
+
+ sc->drv_supported_vd_count =
+ MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
+ sc->drv_supported_pd_count =
+ MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
+
+ if(sc->max256vdSupport) {
+ sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+ sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ } else {
+ sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ }
+
+#if VD_EXT_DEBUG
+ device_printf(sc->mrsas_dev, "FW supports: max256vdSupport = %s\n",
+ sc->max256vdSupport ? "YES":"NO");
+ device_printf(sc->mrsas_dev, "FW supports %dVDs %dPDs\n"
+ "DRIVER supports %dVDs %dPDs \n",
+ sc->fw_supported_vd_count, sc->fw_supported_pd_count,
+ sc->drv_supported_vd_count, sc->drv_supported_pd_count);
+#endif
+
+ sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
+ sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
+ sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count-1));
+
+ for (i = 0; i < 2; i++) {
+ sc->ld_drv_map[i] =
+ (void*) malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
+ /* Do Error handling */
+ if (!sc->ld_drv_map[i]) {
+ device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
+
+ if (i == 1)
+ free (sc->ld_drv_map[0], M_MRSAS);
+ //ABORT driver initialization
+ goto ABORT;
+ }
+ }
+
+ sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
+
+ if(sc->max256vdSupport)
+ sc->current_map_sz = sc->new_map_sz;
+ else
+ sc->current_map_sz = sc->old_map_sz;
+
for (int i=0; i < 2; i++)
{
@@ -1645,28 +1697,36 @@ static int mrsas_setup_raidmap(struct mrsas_softc *sc)
BUS_SPACE_MAXADDR_32BIT,// lowaddr
BUS_SPACE_MAXADDR, // highaddr
NULL, NULL, // filter, filterarg
- sc->map_sz, // maxsize
+ sc->max_map_sz, // maxsize
1, // nsegments
- sc->map_sz, // maxsegsize
+ sc->max_map_sz, // maxsegsize
BUS_DMA_ALLOCNOW, // flags
NULL, NULL, // lockfunc, lockarg
&sc->raidmap_tag[i])) {
- device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
- return (ENOMEM);
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate raid map tag.\n");
+ return (ENOMEM);
}
- if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
- BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
- device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
- return (ENOMEM);
+ if (bus_dmamem_alloc(sc->raidmap_tag[i],
+ (void **)&sc->raidmap_mem[i],
+ BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate raidmap memory.\n");
+ return (ENOMEM);
}
+
+ bzero (sc->raidmap_mem[i], sc->max_map_sz);
+
if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
- sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
- BUS_DMA_NOWAIT)){
+ sc->raidmap_mem[i], sc->max_map_sz,
+ mrsas_addr_cb, &sc->raidmap_phys_addr[i],
+ BUS_DMA_NOWAIT)){
device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
return (ENOMEM);
}
if (!sc->raidmap_mem[i]) {
- device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate memory for raid map.\n");
return (ENOMEM);
}
}
@@ -1675,6 +1735,9 @@ static int mrsas_setup_raidmap(struct mrsas_softc *sc)
mrsas_sync_map_info(sc);
return (0);
+
+ABORT:
+ return (1);
}
/**
@@ -1708,13 +1771,32 @@ static int mrsas_init_fw(struct mrsas_softc *sc)
if (mrsas_init_adapter(sc) != SUCCESS){
device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
return(1);
- }
+ }
/* Allocate internal commands for pass-thru */
if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
return(1);
- }
+ }
+
+ /*
+ * Get the controller info from FW, so that
+ * the MAX VD support availability can be decided.
+ */
+ ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
+ if (!ctrl_info)
+ device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
+
+ if (mrsas_get_ctrl_info(sc, ctrl_info)) {
+ device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
+ }
+
+ sc->max256vdSupport =
+ (u_int8_t) ctrl_info->adapterOperations3.supportMaxExtLDs;
+
+ if (ctrl_info->max_lds > 64){
+ sc->max256vdSupport = 1;
+ }
if (mrsas_setup_raidmap(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
@@ -1722,16 +1804,13 @@ static int mrsas_init_fw(struct mrsas_softc *sc)
}
/* For pass-thru, get PD/LD list and controller info */
- memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ memset(sc->pd_list, 0,
+ MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
mrsas_get_pd_list(sc);
- memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
+ memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
mrsas_get_ld_list(sc);
- //memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));
-
- ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
-
/*
* Compute the max allowed sectors per IO: The controller info has two
* limits on max sectors. Driver should use the minimum of these two.
@@ -1742,33 +1821,32 @@ static int mrsas_init_fw(struct mrsas_softc *sc)
* to calculate max_sectors_1. So the number ended up as zero always.
*/
tmp_sectors = 0;
- if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
- max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
- ctrl_info->max_strips_per_io;
- max_sectors_2 = ctrl_info->max_request_size;
- tmp_sectors = min(max_sectors_1 , max_sectors_2);
- sc->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
- sc->UnevenSpanSupport =
- ctrl_info->adapterOperations2.supportUnevenSpans;
- if(sc->UnevenSpanSupport) {
- device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
- sc->UnevenSpanSupport);
- if (MR_ValidateMapInfo(sc))
- sc->fast_path_io = 1;
- else
- sc->fast_path_io = 0;
-
- }
- }
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ ctrl_info->max_strips_per_io;
+ max_sectors_2 = ctrl_info->max_request_size;
+ tmp_sectors = min(max_sectors_1 , max_sectors_2);
sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
sc->max_sectors_per_req = tmp_sectors;
+ sc->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ sc->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if(sc->UnevenSpanSupport) {
+ printf("FW supports: UnevenSpanSupport=%x\n\n",
+ sc->UnevenSpanSupport);
+
+ if (MR_ValidateMapInfo(sc))
+ sc->fast_path_io = 1;
+ else
+ sc->fast_path_io = 0;
+ }
+
if (ctrl_info)
free(ctrl_info, M_MRSAS);
-
+
return(0);
}
@@ -1934,6 +2012,7 @@ int mrsas_ioc_init(struct mrsas_softc *sc)
init_frame->driver_ver_hi = 0;
}
+ init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
init_frame->queue_info_new_phys_addr_lo = phys_addr;
init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
@@ -2468,7 +2547,7 @@ int mrsas_reset_ctrl(struct mrsas_softc *sc)
/* Reset load balance info */
memset(sc->load_balance_info, 0,
- sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
+ sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
if (!mrsas_get_map_info(sc))
mrsas_sync_map_info(sc);
@@ -3135,25 +3214,27 @@ static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
int retcode = 0;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
- MR_FW_RAID_MAP_ALL *map;
+ void *map;
bus_addr_t map_phys_addr = 0;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for ld map info cmd.\n");
return 1;
}
dcmd = &cmd->frame->dcmd;
- map = sc->raidmap_mem[(sc->map_id & 1)];
+ map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
if (!map) {
- device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
+ device_printf(sc->mrsas_dev,
+ "Failed to alloc mem for ld map info.\n");
mrsas_release_mfi_cmd(cmd);
return (ENOMEM);
}
- memset(map, 0, sizeof(*map));
+ memset(map, 0, sizeof(sc->max_map_sz));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
@@ -3162,18 +3243,21 @@ static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sc->map_sz;
+ dcmd->data_xfer_len = sc->current_map_sz;
dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
- dcmd->sgl.sge32[0].length = sc->map_sz;
+ dcmd->sgl.sge32[0].length = sc->current_map_sz;
+
if (!mrsas_issue_polled(sc, cmd))
retcode = 0;
- else
+ else
{
- device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
+ device_printf(sc->mrsas_dev,
+ "Fail to send get LD map info cmd.\n");
retcode = 1;
}
mrsas_release_mfi_cmd(cmd);
+
return(retcode);
}
@@ -3191,26 +3275,28 @@ static int mrsas_sync_map_info(struct mrsas_softc *sc)
struct mrsas_dcmd_frame *dcmd;
uint32_t size_sync_info, num_lds;
MR_LD_TARGET_SYNC *target_map = NULL;
- MR_FW_RAID_MAP_ALL *map;
+ MR_DRV_RAID_MAP_ALL *map;
MR_LD_RAID *raid;
MR_LD_TARGET_SYNC *ld_sync;
bus_addr_t map_phys_addr = 0;
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for sync map info cmd\n");
return 1;
}
- map = sc->raidmap_mem[sc->map_id & 1];
+ map = sc->ld_drv_map[sc->map_id & 1];
num_lds = map->raidMap.ldCount;
-
+
dcmd = &cmd->frame->dcmd;
size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
- target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
- memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
+ target_map =
+ (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
+ memset(target_map, 0, sc->max_map_sz);
map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
@@ -3228,16 +3314,17 @@ static int mrsas_sync_map_info(struct mrsas_softc *sc)
dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sc->map_sz;
+ dcmd->data_xfer_len = sc->current_map_sz;
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
- dcmd->sgl.sge32[0].length = sc->map_sz;
+ dcmd->sgl.sge32[0].length = sc->current_map_sz;
sc->map_update_cmd = cmd;
if (mrsas_issue_dcmd(sc, cmd)) {
- device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
+ device_printf(sc->mrsas_dev,
+ "Fail to send sync map info command.\n");
return(1);
}
return(retcode);
@@ -3263,7 +3350,8 @@ static int mrsas_get_pd_list(struct mrsas_softc *sc)
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for get PD list cmd\n");
return 1;
}
@@ -3272,7 +3360,8 @@ static int mrsas_get_pd_list(struct mrsas_softc *sc)
tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc dmamap for get PD list cmd\n");
mrsas_release_mfi_cmd(cmd);
return(ENOMEM);
}
@@ -3304,11 +3393,14 @@ static int mrsas_get_pd_list(struct mrsas_softc *sc)
pd_count = MRSAS_MAX_PD;
pd_addr = pd_list_mem->addr;
if (retcode == 0 && pd_list_mem->count < pd_count) {
- memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ memset(sc->local_pd_list, 0,
+ MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
- sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
- sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
+ sc->local_pd_list[pd_addr->deviceId].driveType =
+ pd_addr->scsiDevType;
+ sc->local_pd_list[pd_addr->deviceId].driveState =
+ MR_PD_STATE_SYSTEM;
pd_addr++;
}
}
@@ -3340,7 +3432,8 @@ static int mrsas_get_ld_list(struct mrsas_softc *sc)
cmd = mrsas_get_mfi_cmd(sc);
if (!cmd) {
- device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc for get LD list cmd\n");
return 1;
}
@@ -3349,7 +3442,8 @@ static int mrsas_get_ld_list(struct mrsas_softc *sc)
tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
ld_list_size = sizeof(struct MR_LD_LIST);
if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
+ device_printf(sc->mrsas_dev,
+ "Cannot alloc dmamap for get LD list cmd\n");
mrsas_release_mfi_cmd(cmd);
return(ENOMEM);
}
@@ -3359,6 +3453,9 @@ static int mrsas_get_ld_list(struct mrsas_softc *sc)
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ if (sc->max256vdSupport)
+ dcmd->mbox.b[0]=1;
+
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -3375,10 +3472,15 @@ static int mrsas_get_ld_list(struct mrsas_softc *sc)
else
retcode = 1;
+#if VD_EXT_DEBUG
+ printf ("Number of LDs %d\n", ld_list_mem->ldCount);
+#endif
+
/* Get the instance LD list */
- if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
+ if ((retcode == 0) &&
+ (ld_list_mem->ldCount <= sc->fw_supported_vd_count)){
sc->CurLdCount = ld_list_mem->ldCount;
- memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
+ memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
if (ld_list_mem->ldList[ld_index].state != 0) {
ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h
index af43daa..a925047 100644
--- a/sys/dev/mrsas/mrsas.h
+++ b/sys/dev/mrsas/mrsas.h
@@ -573,32 +573,55 @@ typedef struct _MPI2_IOC_INIT_REQUEST
/*
* MR private defines
*/
-#define MR_PD_INVALID 0xFFFF
-#define MAX_SPAN_DEPTH 8
-#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
-#define MAX_ROW_SIZE 32
-#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
-#define MAX_LOGICAL_DRIVES 64
-#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
-#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
-#define MAX_ARRAYS 128
-#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
-#define MAX_PHYSICAL_DEVICES 256
-#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
-#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 // get the mapping information of this LD
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
+
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+
+#define MAX_ARRAYS_EXT 256
+#define MAX_API_ARRAYS_EXT MAX_ARRAYS_EXT
+
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 // get the mapping information of this LD
+
+
+#define MRSAS_MAX_PD_CHANNELS 1
+#define MRSAS_MAX_LD_CHANNELS 1
+#define MRSAS_MAX_DEV_PER_CHANNEL 256
+#define MRSAS_DEFAULT_INIT_ID -1
+#define MRSAS_MAX_LUN 8
+#define MRSAS_DEFAULT_CMD_PER_LUN 256
+#define MRSAS_MAX_PD (MRSAS_MAX_PD_CHANNELS * \
+ MRSAS_MAX_DEV_PER_CHANNEL)
+#define MRSAS_MAX_LD_IDS (MRSAS_MAX_LD_CHANNELS * \
+ MRSAS_MAX_DEV_PER_CHANNEL)
+
+
+#define VD_EXT_DEBUG 0
/*******************************************************************
* RAID map related structures
********************************************************************/
-
+#pragma pack(1)
typedef struct _MR_DEV_HANDLE_INFO {
- u_int16_t curDevHdl; // the device handle currently used by fw to issue the command.
+ u_int16_t curDevHdl; // the device handle currently used by fw to issue the command.
u_int8_t validHandles; // bitmap of valid device handles.
u_int8_t reserved;
u_int16_t devHandle[2]; // 0x04 dev handles for all the paths.
} MR_DEV_HANDLE_INFO;
+#pragma pack()
typedef struct _MR_ARRAY_INFO {
u_int16_t pd[MAX_RAIDMAP_ROW_SIZE];
@@ -720,6 +743,86 @@ typedef struct _MR_FW_RAID_MAP {
MR_LD_SPAN_MAP ldSpanMap[1]; // 0x28a8-[0-MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS+1];
} MR_FW_RAID_MAP; // 0x3288, Total Size
+
+typedef struct _MR_FW_RAID_MAP_EXT {
+ /* Not used in new map */
+ u_int32_t reserved;
+
+ union {
+ struct {
+ u_int32_t maxLd;
+ u_int32_t maxSpanDepth;
+ u_int32_t maxRowSize;
+ u_int32_t maxPdCount;
+ u_int32_t maxArrays;
+ } validationInfo;
+ u_int32_t version[5];
+ u_int32_t reserved1[5];
+ }fw_raid_desc;
+
+ u_int8_t fpPdIoTimeoutSec;
+ u_int8_t reserved2[7];
+
+ u_int16_t ldCount;
+ u_int16_t arCount;
+ u_int16_t spanCount;
+ u_int16_t reserve3;
+
+ MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+} MR_FW_RAID_MAP_EXT;
+
+
+typedef struct _MR_DRV_RAID_MAP {
+ /* total size of this structure, including this field.
+ * This feild will be manupulated by driver for ext raid map,
+ * else pick the value from firmware raid map.
+ */
+ u_int32_t totalSize;
+
+ union {
+ struct {
+ u_int32_t maxLd;
+ u_int32_t maxSpanDepth;
+ u_int32_t maxRowSize;
+ u_int32_t maxPdCount;
+ u_int32_t maxArrays;
+ } validationInfo;
+ u_int32_t version[5];
+ u_int32_t reserved1[5];
+ }drv_raid_desc;
+
+ /* timeout value used by driver in FP IOs*/
+ u_int8_t fpPdIoTimeoutSec;
+ u_int8_t reserved2[7];
+
+ u_int16_t ldCount;
+ u_int16_t arCount;
+ u_int16_t spanCount;
+ u_int16_t reserve3;
+
+ MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ MR_LD_SPAN_MAP ldSpanMap[1];
+
+}MR_DRV_RAID_MAP;
+
+/* Driver raid map size is same as raid map ext
+ * MR_DRV_RAID_MAP_ALL is created to sync with old raid.
+ * And it is mainly for code re-use purpose.
+ */
+
+#pragma pack(1)
+typedef struct _MR_DRV_RAID_MAP_ALL {
+
+ MR_DRV_RAID_MAP raidMap;
+ MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+}MR_DRV_RAID_MAP_ALL;
+#pragma pack()
+
typedef struct _LD_LOAD_BALANCE_INFO
{
u_int8_t loadBalanceFlag;
@@ -1200,22 +1303,6 @@ typedef enum _REGION_TYPE {
REGION_TYPE_EXCLUSIVE = 3, // exclusive lock (for writes)
} REGION_TYPE;
-/*
- * MR private defines
- */
-#define MR_PD_INVALID 0xFFFF
-#define MAX_SPAN_DEPTH 8
-#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
-#define MAX_ROW_SIZE 32
-#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
-#define MAX_LOGICAL_DRIVES 64
-#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
-#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
-#define MAX_ARRAYS 128
-#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
-#define MAX_PHYSICAL_DEVICES 256
-#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
-#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
/*
* SCSI-CAM Related Defines
@@ -1423,7 +1510,7 @@ struct MR_LD_LIST {
u_int8_t state; // current LD state (MR_LD_STATE)
u_int8_t reserved[3]; // pad to 8-byte boundary
u_int64_t size; // LD size
- } ldList[MAX_LOGICAL_DRIVES];
+ } ldList[MAX_LOGICAL_DRIVES_EXT];
};
#pragma pack()
@@ -1485,7 +1572,23 @@ struct mrsas_ctrl_prop {
u_int32_t allowBootWithPinnedCache : 1;
u_int32_t disableSpinDownHS : 1;
u_int32_t enableJBOD : 1;
- u_int32_t reserved :18;
+ u_int32_t disableCacheBypass : 1; // 1 = disable cache-bypass-performance-improvement feature
+ u_int32_t useDiskActivityForLocate : 1; // 1 = drive activity LED is toggled for LOCATE
+ u_int32_t enablePI : 1; // 0 = Disable SCSI PI for controller. Remove any active protection information
+ u_int32_t preventPIImport : 1; // 1 = Prevent import of SCSI DIF protected logical disks
+ u_int32_t useGlobalSparesForEmergency : 1; // 1 = Use global spares for Emergency (if spare is incompatible without Emergency)
+ u_int32_t useUnconfGoodForEmergency : 1; // 1 = Use uncofgured good drives for Emergency
+ u_int32_t useEmergencySparesforSMARTer: 1; // 1 = Use Emergency spares for SMARTer
+ u_int32_t forceSGPIOForQuadOnly : 1; // 1 = Force SGPIO status per port only for four drives, affects HPC controllers
+ u_int32_t enableConfigAutoBalance : 1; // 0 = Configuration auto balance disabled, 1 = Configuration auto balance enabled
+ u_int32_t enableVirtualCache : 1; // 1 = Virtual caching is enabled on DFF and SFM.
+ u_int32_t enableAutoLockRecovery : 1; // 1 = Auto Lock Recovery on DFF and SFM
+ u_int32_t disableImmediateIO : 1; // 1 = Disable Legacy Immediate IO, 0 = Enable
+ u_int32_t disableT10RebuildAssist : 1; // 1 = Disable T10 Rebuild Assist, use legacy rebuild method
+ u_int32_t ignore64ldRestriction : 1; // 0 - limit LD to 64 even if more LD support exists, 1 - support more than 64 ld with new DDF format
+ u_int32_t enableSwZone : 1; // 1 = enable Software Zone
+ u_int32_t limitMaxRateSATA3G : 1; // 1 = negotiated link rates to direct attached SATA devices shall be limited to 3Gbps
+ u_int32_t reserved :2;
} OnOffProperties;
u_int8_t autoSnapVDSpace; // % of source LD to be reserved for auto
// snapshot in snapshot repository, for
@@ -1828,16 +1931,18 @@ struct mrsas_ctrl_info {
u_int32_t reserved :26;
} cluster;
- char clusterId[16]; //0x7D4
+ char clusterId[16]; //0x7D4
- u_int8_t pad[0x800-0x7E4]; //0x7E4
-} __packed;
+ char reserved6[4]; //0x7E4 RESERVED FOR IOV
-/*
- * Ld and PD Max Support Defines
- */
-#define MRSAS_MAX_PD 256
-#define MRSAS_MAX_LD 64
+ struct{ //0x7E8
+ u_int32_t resrved :5;
+ u_int32_t supportMaxExtLDs :1;
+ u_int32_t reserved1 :26;
+ }adapterOperations3;
+
+ u_int8_t pad[0x800-0x7EC]; //0x7EC
+} __packed;
/*
* When SCSI mid-layer calls driver's reset routine, driver waits for
@@ -1930,6 +2035,18 @@ struct mrsas_header {
};
#pragma pack()
+
+typedef union _MFI_CAPABILITIES {
+ struct {
+ u_int32_t support_fp_remote_lun:1;
+ u_int32_t support_additional_msix:1;
+ u_int32_t support_fastpath_wb:1;
+ u_int32_t support_max_255lds:1;
+ u_int32_t reserved:28;
+ } mfi_capabilities;
+ u_int32_t reg;
+} MFI_CAPABILITIES;
+
#pragma pack(1)
struct mrsas_init_frame {
u_int8_t cmd; /*00h */
@@ -1937,8 +2054,7 @@ struct mrsas_init_frame {
u_int8_t cmd_status; /*02h */
u_int8_t reserved_1; /*03h */
- u_int32_t reserved_2; /*04h */
-
+ MFI_CAPABILITIES driver_operations; /*04h*/
u_int32_t context; /*08h */
u_int32_t pad_0; /*0Ch */
@@ -2423,13 +2539,29 @@ struct mrsas_softc {
bus_addr_t el_info_phys_addr; //get event log info cmd physical addr
struct mrsas_pd_list pd_list[MRSAS_MAX_PD];
struct mrsas_pd_list local_pd_list[MRSAS_MAX_PD];
- u_int8_t ld_ids[MRSAS_MAX_LD];
+ u_int8_t ld_ids[MRSAS_MAX_LD_IDS];
struct taskqueue *ev_tq; //taskqueue for events
struct task ev_task;
u_int32_t CurLdCount;
u_int64_t reset_flags;
- LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
- LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+ LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
+
+ u_int8_t max256vdSupport;
+ u_int16_t fw_supported_vd_count;
+ u_int16_t fw_supported_pd_count;
+
+ u_int16_t drv_supported_vd_count;
+ u_int16_t drv_supported_pd_count;
+
+ u_int32_t max_map_sz;
+ u_int32_t current_map_sz;
+ u_int32_t old_map_sz;
+ u_int32_t new_map_sz;
+ u_int32_t drv_map_sz;
+
+ /*Non dma-able memory. Driver local copy.*/
+ MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
};
/* Compatibility shims for different OS versions */
diff --git a/sys/dev/mrsas/mrsas_cam.c b/sys/dev/mrsas/mrsas_cam.c
index bde974a..2a04029 100644
--- a/sys/dev/mrsas/mrsas_cam.c
+++ b/sys/dev/mrsas/mrsas_cam.c
@@ -77,7 +77,7 @@ void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
u_int32_t req_desc_hi);
void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
struct IO_REQUEST_INFO *io_info, union ccb *ccb,
- MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ MR_DRV_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
u_int32_t ld_block_size);
static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
static void mrsas_poll(struct cam_sim *sim);
@@ -91,16 +91,16 @@ struct mrsas_mpt_cmd * mrsas_get_mpt_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
u_int16_t index);
-extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
-extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map,
+extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map);
+extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map,
struct mrsas_softc *sc);
extern void mrsas_isr(void *arg);
extern void mrsas_aen_handler(struct mrsas_softc *sc);
extern u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
struct IO_REQUEST_INFO *io_info,RAID_CONTEXT *pRAID_Context,
- MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
extern u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
- MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
extern u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
struct IO_REQUEST_INFO *io_info);
extern u_int8_t megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
@@ -321,9 +321,9 @@ static void mrsas_action(struct cam_sim *sim, union ccb *ccb)
ccb->cpi.protocol = PROTO_SCSI;
ccb->cpi.protocol_version = SCSI_REV_2;
if (ccb->cpi.bus_id == 0)
- ccb->cpi.max_target = MRSAS_MAX_LD-1;
- else
ccb->cpi.max_target = MRSAS_MAX_PD-1;
+ else
+ ccb->cpi.max_target = MRSAS_MAX_LD_IDS-1;
#if (__FreeBSD_version > 704000)
ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
#endif
@@ -701,7 +701,7 @@ int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
struct ccb_scsiio *csio = &(ccb->csio);
struct IO_REQUEST_INFO io_info;
- MR_FW_RAID_MAP_ALL *map_ptr;
+ MR_DRV_RAID_MAP_ALL *map_ptr;
u_int8_t fp_possible;
u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
u_int32_t datalength = 0;
@@ -780,10 +780,10 @@ int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
break;
}
- map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
+ map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
- if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES) ||
+ if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES_EXT) ||
(!sc->fast_path_io)) {
io_request->RaidContext.regLockFlags = 0;
fp_possible = 0;
@@ -851,12 +851,12 @@ int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
u_int32_t device_id;
- MR_FW_RAID_MAP_ALL *map_ptr;
+ MR_DRV_RAID_MAP_ALL *map_ptr;
MRSAS_RAID_SCSI_IO_REQUEST *io_request;
io_request = cmd->io_request;
device_id = ccb_h->target_id;
- map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
+ map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
/* Check if this is for system PD */
if (cam_sim_bus(sim) == 1 &&
@@ -867,7 +867,12 @@ int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
io_request->RaidContext.regLockFlags = 0;
io_request->RaidContext.regLockRowLBA = 0;
io_request->RaidContext.regLockLength = 0;
- io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
+
+ // LSI TEST
+ //printf("LSI Debug bus %d device_id %d map_ptr->raidMap.devHndlInfo[device_id].curDevHdl %d \n",
+ // cam_sim_bus(sim), device_id, map_ptr->raidMap.devHndlInfo[device_id].curDevHdl);
+
+ io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
diff --git a/sys/dev/mrsas/mrsas_fp.c b/sys/dev/mrsas/mrsas_fp.c
index 91005a5..71c4ecb 100644
--- a/sys/dev/mrsas/mrsas_fp.c
+++ b/sys/dev/mrsas/mrsas_fp.c
@@ -63,54 +63,56 @@ u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
u_int64_t block, u_int32_t count);
u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
+ RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map);
u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
RAID_CONTEXT *pRAID_Context,
- MR_FW_RAID_MAP_ALL *map);
-u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
-u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
-u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
+u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map);
+u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map);
+u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
struct IO_REQUEST_INFO *io_info);
u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
- MR_FW_RAID_MAP_ALL *map, int *div_error);
+ MR_DRV_RAID_MAP_ALL *map, int *div_error);
u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
-void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
+void mrsas_update_load_balance_params(MR_DRV_RAID_MAP_ALL *map,
PLD_LOAD_BALANCE_INFO lbInfo);
void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request,
u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
- MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ MR_DRV_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
u_int32_t ld_block_size);
static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
- MR_FW_RAID_MAP_ALL *map);
-static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
+static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map);
static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm,
- MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
- MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
- MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld,
- MR_FW_RAID_MAP_ALL *map);
-MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+ MR_DRV_RAID_MAP_ALL *map);
+MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
+void MR_PopulateDrvRaidMap (struct mrsas_softc *sc);
+
/*
* Spanset related function prototypes
* Added for PRL11 configuration (Uneven span support)
*/
-void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo);
+void mr_update_span_set(MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo);
static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
+ RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map);
static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
- u_int64_t strip, MR_FW_RAID_MAP_ALL *map);
+ u_int64_t strip, MR_DRV_RAID_MAP_ALL *map);
static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc,
u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
- MR_FW_RAID_MAP_ALL *map, int *div_error);
+ MR_DRV_RAID_MAP_ALL *map, int *div_error);
static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
- u_int64_t stripe, MR_FW_RAID_MAP_ALL *map);
+ u_int64_t stripe, MR_DRV_RAID_MAP_ALL *map);
/*
@@ -173,52 +175,52 @@ quotient;})
* parts of the RAID map and returns the appropriate parameters.
*/
-MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map)
{
return (&map->raidMap.ldSpanMap[ld].ldRaid);
}
-u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map)
{
return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}
-static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
}
-static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_FW_RAID_MAP_ALL *map)
+static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}
-static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map)
+static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.devHndlInfo[pd].curDevHdl;
}
-static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_FW_RAID_MAP_ALL *map)
+static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.arMapInfo[ar].pd[arm];
}
-static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL *map)
{
return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
-static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map)
{
return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}
-u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldTgtIdToLd[ldTgtId];
}
-u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid;
u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;
@@ -239,6 +241,80 @@ u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
return ldBlockSize;
}
+/*
+ * This function will Populate Driver Map using firmware raid map
+ */
+void MR_PopulateDrvRaidMap(struct mrsas_softc *sc)
+{
+ MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+ MR_FW_RAID_MAP *pFwRaidMap = NULL;
+ unsigned int i;
+
+ MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
+ MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+ if(sc->max256vdSupport) {
+ memcpy(sc->ld_drv_map[sc->map_id & 1],
+ sc->raidmap_mem[sc->map_id & 1],
+ sc->current_map_sz);
+ /* New Raid map will not set totalSize, so keep expected value
+ * for legacy code in ValidateMapInfo
+ */
+ pDrvRaidMap->totalSize = sizeof (MR_FW_RAID_MAP_EXT);
+ } else {
+ fw_map_old =(MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)];
+ pFwRaidMap = &fw_map_old->raidMap;
+
+#if VD_EXT_DEBUG
+ for (i = 0; i < pFwRaidMap->ldCount; i++) {
+ device_printf(sc->mrsas_dev,
+ "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n", i,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.size );
+ }
+#endif
+
+ memset(drv_map, 0, sc->drv_map_sz);
+ pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+ pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
+ pDrvRaidMap->fpPdIoTimeoutSec =
+ pFwRaidMap->fpPdIoTimeoutSec;
+
+ for(i=0; i < MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS; i++) {
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u_int8_t)pFwRaidMap->ldTgtIdToLd[i];
+ }
+
+ for(i=0; i < pDrvRaidMap->ldCount; i++) {
+ pDrvRaidMap->ldSpanMap[i] =
+ pFwRaidMap->ldSpanMap[i];
+
+#if VD_EXT_DEBUG
+ device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
+ i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x"
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",i, i,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
+ drv_map, pDrvRaidMap,
+ &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid);
+#endif
+ }
+
+ memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+ sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+ memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+ sizeof(MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
+ }
+}
+
/**
* MR_ValidateMapInfo: Validate RAID map
* input: Adapter instance soft state
@@ -251,26 +327,37 @@ u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc)
if (!sc) {
return 1;
}
- uint32_t total_map_sz;
- MR_FW_RAID_MAP_ALL *map = sc->raidmap_mem[(sc->map_id & 1)];
- MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+
+ MR_PopulateDrvRaidMap (sc);
+
+ MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)];
+ MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+ u_int32_t expected_map_size;
+ drv_map = sc->ld_drv_map[(sc->map_id & 1)];
+ pDrvRaidMap = &drv_map->raidMap;
PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span;
- total_map_sz = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP) +
- (sizeof(MR_LD_SPAN_MAP) * pFwRaidMap->ldCount));
+ if(sc->max256vdSupport)
+ expected_map_size = sizeof (MR_FW_RAID_MAP_EXT);
+ else
+ expected_map_size =
+ (sizeof (MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) +
+ (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount);
- if (pFwRaidMap->totalSize != total_map_sz) {
- device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", total_map_sz);
+ if (pDrvRaidMap->totalSize != expected_map_size) {
+ device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size);
device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
- device_printf(sc->mrsas_dev, "pFwRaidMap->totalSize=%x\n", pFwRaidMap->totalSize);
+ device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize);
return 1;
}
if (sc->UnevenSpanSupport) {
- mr_update_span_set(map, ldSpanInfo);
+ printf ("Updating span set\n\n");
+ mr_update_span_set(drv_map, ldSpanInfo);
}
- mrsas_update_load_balance_params(map, sc->load_balance_info);
+ mrsas_update_load_balance_params(drv_map, sc->load_balance_info);
return 0;
}
@@ -287,7 +374,7 @@ u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc)
*
* */
#if SPAN_DEBUG
-static int getSpanInfo(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+static int getSpanInfo(MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
{
u_int8_t span;
@@ -369,7 +456,7 @@ static int getSpanInfo(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
*/
u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
- u_int64_t *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
+ u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL *map, int *div_error)
{
MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
LD_SPAN_SET *span_set;
@@ -430,7 +517,7 @@ u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int6
*/
static u_int64_t get_row_from_strip(struct mrsas_softc *sc,
- u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+ u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
LD_SPAN_SET *span_set;
@@ -489,7 +576,7 @@ static u_int64_t get_row_from_strip(struct mrsas_softc *sc,
*/
static u_int64_t get_strip_from_row(struct mrsas_softc *sc,
- u_int32_t ld, u_int64_t row, MR_FW_RAID_MAP_ALL *map)
+ u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
LD_SPAN_SET *span_set;
@@ -546,7 +633,7 @@ static u_int64_t get_strip_from_row(struct mrsas_softc *sc,
*/
static u_int32_t get_arm_from_strip(struct mrsas_softc *sc,
- u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+ u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
LD_SPAN_SET *span_set;
@@ -588,7 +675,7 @@ static u_int32_t get_arm_from_strip(struct mrsas_softc *sc,
/* This Function will return Phys arm */
u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
- MR_FW_RAID_MAP_ALL *map)
+ MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
/* Need to check correct default value */
@@ -630,7 +717,7 @@ u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t
*/
static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+ RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
u_int32_t pd, arRef;
@@ -695,7 +782,7 @@ static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
*/
u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+ RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid;
u_int32_t ld, stripSize, stripe_mask;
@@ -915,7 +1002,7 @@ MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
* ldSpanInfo - ldSpanInfo per HBA instance
*
*/
-void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+void mr_update_span_set(MR_DRV_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
{
u_int8_t span,count;
u_int32_t element,span_row_width;
@@ -925,13 +1012,11 @@ void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
MR_QUAD_ELEMENT *quad;
int ldCount;
u_int16_t ld;
-
- if (!ldSpanInfo)
- return;
-
+
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
{
ld = MR_TargetIdToLdGet(ldCount, map);
+ printf ("ld = %d\n\n", ld);
if (ld >= MAX_LOGICAL_DRIVES)
continue;
raid = MR_LdRaidGet(ld, map);
@@ -1031,7 +1116,7 @@ void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
* This function updates the load balance parameters for the LD config
* of a two drive optimal RAID-1.
*/
-void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
+void mrsas_update_load_balance_params(MR_DRV_RAID_MAP_ALL *map,
PLD_LOAD_BALANCE_INFO lbInfo)
{
int ldCount;
@@ -1085,7 +1170,7 @@ void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
*/
void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
struct IO_REQUEST_INFO *io_info, union ccb *ccb,
- MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ MR_DRV_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
u_int32_t ld_block_size)
{
MR_LD_RAID *raid;
@@ -1338,7 +1423,7 @@ u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
u_int64_t stripRow,
u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
- RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+ RAID_CONTEXT *pRAID_Context, MR_DRV_RAID_MAP_ALL *map)
{
MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
u_int32_t pd, arRef;
@@ -1419,7 +1504,7 @@ u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
* This routine calculates the span from the span block info.
*/
u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
- MR_FW_RAID_MAP_ALL *map, int *div_error)
+ MR_DRV_RAID_MAP_ALL *map, int *div_error)
{
MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
MR_QUAD_ELEMENT *quad;