author     ambrisko <ambrisko@FreeBSD.org>  2014-05-12 17:55:24 +0000
committer  ambrisko <ambrisko@FreeBSD.org>  2014-05-12 17:55:24 +0000
commit     5b370014732fb8f53cee1a9a0c3f2f48b570718c (patch)
tree       d6eb1c52edd8d9fd63eeb0dfacf754cd92aca73d
parent     7a535d184518005fc2e123c2da36f15dc88c2e96 (diff)
MFC 265555
Add mrsas(4) driver from LSI to officially support newer MegaRAID SAS cards.
LSI has been maintaining this driver outside of the FreeBSD tree. It overlaps
support of the ThunderBolt and Invader cards that mfi(4) supports.

By default mfi(4) will attach to these cards. If the tunable
hw.mfi.mrsas_enable=1 is set, then mfi(4) will not probe and attach to these
newer cards and will allow mrsas(4) to attach instead. So by default this
driver will not affect a FreeBSD system unless mfi(4) is removed from the
kernel or the tunable is enabled.

mrsas(4) attaches disks to the CAM layer, so it depends on CAM, and devices
show up as /dev/daX. mfiutil(8) does not work with mrsas. The FreeBSD versions
of MegaCli and StorCli from LSI do work with mrsas. It appears that StorCli
only works with mrsas; MegaCli appears to work with both mfi(4) and mrsas(4).

It would be good to add mfiutil(8) support to mrsas, emulation modes, kernel
logging, and device aliases to ease the transition between mfi(4) and
mrsas(4). Style issues should be resolved by LSI when their committers are
approved.

The plan is to get this driver into FreeBSD 9.3 to improve HW support. Thanks
to LSI for developing, testing, and working with FreeBSD to make this driver
co-exist in FreeBSD. This improves the overall support of MegaRAID SAS.

Submitted by: Kashyap Desai <Kashyap.Desai@lsi.com>
Sponsored by: LSI
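For example, to hand control of the overlapping cards to mrsas(4) instead of
mfi(4), the tunable mentioned above would go in /boot/loader.conf (a minimal
sketch; the mrsas_load line is only needed when the driver is built as a
module rather than compiled into the kernel):

    hw.mfi.mrsas_enable="1"
    mrsas_load="YES"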
-rw-r--r--  share/man/man4/Makefile         1
-rw-r--r--  share/man/man4/mrsas.4        374
-rw-r--r--  sys/conf/files                  4
-rw-r--r--  sys/dev/mrsas/mrsas.c        3672
-rw-r--r--  sys/dev/mrsas/mrsas.h        2464
-rw-r--r--  sys/dev/mrsas/mrsas_cam.c    1179
-rw-r--r--  sys/dev/mrsas/mrsas_fp.c     1451
-rw-r--r--  sys/dev/mrsas/mrsas_ioctl.c   546
-rw-r--r--  sys/dev/mrsas/mrsas_ioctl.h    97
-rw-r--r--  sys/modules/Makefile            1
-rw-r--r--  sys/modules/mrsas/Makefile     20
11 files changed, 9809 insertions, 0 deletions
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index ad64b26..c56f761 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -262,6 +262,7 @@ MAN= aac.4 \
mpr.4 \
mps.4 \
mpt.4 \
+ mrsas.4 \
msk.4 \
mtio.4 \
multicast.4 \
diff --git a/share/man/man4/mrsas.4 b/share/man/man4/mrsas.4
new file mode 100644
index 0000000..18c87e0
--- /dev/null
+++ b/share/man/man4/mrsas.4
@@ -0,0 +1,374 @@
+.\" Copyright (c) 2014 LSI Corp
+.\" All rights reserved.
+.\" Author: Kashyap Desai
+.\" Support: freebsdraid@lsi.com
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the <ORGANIZATION> nor the names of its
+.\" contributors may be used to endorse or promote products derived
+.\" from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+.\" FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+.\" COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+.\" BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+.\" LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+.\" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+.\" ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+
+.\" The views and conclusions contained in the software and documentation
+.\" are those of the authors and should not be interpreted as representing
+.\" official policies,either expressed or implied, of the FreeBSD Project
+.\"
+.\" $FreeBSD$
+.\"
+
+
+.Dd April 12, 2013
+.Dt MRSAS 4
+.Os
+.Sh NAME
+.Nm mrsas
+.Nd "LSI MegaRAID 6Gb/s and 12Gb/s SAS+SATA Raid controller driver"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device pci"
+.Cd "device mrsas"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+mrsas_load="YES"
+.Ed
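+.Pp
+The module can also be loaded at runtime with
+.Xr kldload 8
+(a usage sketch, assuming the module has been built and installed):
+.Bd -literal -offset indent
+kldload mrsas
+.Ed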
+.Sh DESCRIPTION
+The
+.Nm
+driver will detect LSI's next generation (6Gb/s and 12Gb/s) PCI Express
+SAS/SATA RAID controllers.
+See the
+.Sx HARDWARE
+section for the list of supported devices.
+A disk (virtual disk/physical disk) attached to the
+.Nm
+driver will be visible to the user through
+.Xr camcontrol 8
+as
+.Pa /dev/da?
+device nodes.
+A simple management interface is also provided on a per-controller basis via the
+.Pa /dev/mrsas?
+device node.
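+.Pp
+For example, disks attached via
+.Nm
+can be listed with
+.Xr camcontrol 8
+(an illustrative invocation; the output depends on the attached devices):
+.Bd -literal -offset indent
+camcontrol devlist
+.Ed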
+.Pp
+The
+.Nm
+name is derived from the phrase "MegaRAID SAS HBA", which is
+substantially different from the old "MegaRAID" driver,
+.Xr mfi 4 .
+Because
+.Xr mfi 4
+does not connect targets to the
+.Xr cam 4
+layer, a new driver was required to attach targets to the
+.Xr cam 4
+layer.
+Older MegaRAID controllers are supported by
+.Xr mfi 4
+and will not work with
+.Nm ,
+but both the
+.Xr mfi 4
+and
+.Nm
+drivers can detect and manage the LSI MegaRAID SAS 2208/2308/3008/3108
+series of controllers.
+.Pp
+A
+.Xr device.hints 5
+option is provided to tune the
+.Nm
+driver's behavior for LSI MegaRAID SAS 2208/2308/3008/3108 controllers.
+By default, the
+.Xr mfi 4
+driver will detect these controllers.
+See the
+.Sx PRIORITY
+section for more on driver priority for MR-Fusion devices.
+.Pp
+.Nm
+returns a probe priority of -30 (between BUS_PROBE_DEFAULT and
+BUS_PROBE_LOW_PRIORITY) for device IDs 0x005B, 0x005D, and
+0x005F, so that
+.Nm
+does not take control of these devices without user intervention.
+.Sh HARDWARE
+The
+.Nm
+driver supports the following hardware:
+.Pp
+[ Thunderbolt 6Gb/s MR controller ]
+.Bl -bullet -compact
+.It
+LSI MegaRAID SAS 9265
+.It
+LSI MegaRAID SAS 9266
+.It
+LSI MegaRAID SAS 9267
+.It
+LSI MegaRAID SAS 9270
+.It
+LSI MegaRAID SAS 9271
+.It
+LSI MegaRAID SAS 9272
+.It
+LSI MegaRAID SAS 9285
+.It
+LSI MegaRAID SAS 9286
+.It
+DELL PERC H810
+.It
+DELL PERC H710/P
+.El
+.Pp
+[ Invader/Fury 12Gb/s MR controller ]
+.Bl -bullet -compact
+.It
+LSI MegaRAID SAS 9380
+.It
+LSI MegaRAID SAS 9361
+.It
+LSI MegaRAID SAS 9341
+.It
+DELL PERC H830
+.It
+DELL PERC H730/P
+.It
+DELL PERC H330
+.El
+.Sh CONFIGURATION
+To disable Online Controller Reset (OCR) for a specific
+.Nm
+driver instance, set the
+following tunable value in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+dev.mrsas.X.disable_ocr=1
+.Ed
+.Pp
+where X is the adapter number.
+.Pp
+To change the IO timeout value for a specific
+.Nm
+driver instance, set the following tunable value in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+dev.mrsas.X.mrsas_io_timeout=NNNNNN
+.Ed
+.Pp
+where NNNNNN is the timeout value in milliseconds.
+.Pp
+To change the firmware fault check timer value for a specific
+.Nm
+driver instance, set the following tunable value in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+dev.mrsas.X.mrsas_fw_fault_check_delay=NN
+.Ed
+.Pp
+where NN is the fault check delay value in seconds.
+.Pp
+The current number of active I/O commands is shown in the
+dev.mrsas.X.fw_outstanding
+.Xr sysctl 8
+variable.
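+.Pp
+For example, the counter for the first adapter can be read with
+.Xr sysctl 8
+(an illustrative invocation, assuming adapter 0):
+.Bd -literal -offset indent
+sysctl dev.mrsas.0.fw_outstanding
+.Ed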
+.Sh DEBUGGING
+To enable debugging prints from the
+.Nm
+driver, set the
+.Bd -literal -offset indent
+dev.mrsas.X.debug_level
+.Ed
+.Pp
+variable, where X is the adapter number, either in
+.Xr loader.conf 5
+or via
+.Xr sysctl 8 .
+The following bits have the described effects and may be combined, as shown
+in the example after the list:
+.Bl -tag -offset indent
+.It 0x01
+Enable informational prints.
+.It 0x02
+Enable tracing prints.
+.It 0x04
+Enable prints for driver faults.
+.It 0x08
+Enable prints for OCR and IO timeout.
+.It 0x10
+Enable prints for AEN events.
+.El
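+.Pp
+For example, to enable informational and tracing prints on adapter 0 at
+boot time, the bits can be OR'ed together in
+.Xr loader.conf 5
+(an illustrative entry, assuming adapter 0):
+.Bd -literal -offset indent
+dev.mrsas.0.debug_level="0x03"
+.Ed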
+.Sh PRIORITY
+The
+.Nm
+driver always sets a probe priority of -30 (between BUS_PROBE_DEFAULT and
+BUS_PROBE_LOW_PRIORITY) in the PCI subsystem for selection of MR-Fusion
+cards.
+MR-Fusion controllers include all cards with the device IDs
+0x005B,
+0x005D, and
+0x005F.
+.Pp
+The
+.Xr mfi 4
+driver will set a priority of either BUS_PROBE_DEFAULT or
+BUS_PROBE_LOW_PRIORITY (depending on the
+.Xr device.hints 5
+setting) in the PCI subsystem for selection of MR-Fusion cards.
+With the above design in place, the
+.Xr mfi 4
+driver will attach to a MR-Fusion card given that it has a higher priority than
+.Nm .
+.Pp
+Using /boot/device.hints (as mentioned below), the user can provide a preference
+for the
+.Nm
+driver to detect a MR-Fusion card instead of the
+.Xr mfi 4
+driver.
+.Bd -ragged -offset indent
+.Cd hw.mfi.mrsas_enable="1"
+.Ed
+.Pp
+At boot time, the
+.Xr mfi 4
+driver will get priority to detect MR-Fusion controllers by default. Before
+changing this default driver selection policy, LSI advises users to understand
+how the driver selection policy works. LSI's policy is to provide priority to
+the
+.Xr mfi 4
+driver to detect MR-Fusion cards, but allow for the ability to choose the
+.Nm
+driver to detect MR-Fusion cards.
+.Pp
+LSI recommends setting hw.mfi.mrsas_enable="0" for customers who are using the
+older
+.Xr mfi 4
+driver and do not want to switch to
+.Nm .
+For those customers who are using a MR-Fusion controller for the first time, LSI
+recommends using the
+.Nm
+driver and setting hw.mfi.mrsas_enable="1".
+.Pp
+Changing the default behavior is well tested under most conditions, but
+unexpected behavior may appear if complex or unrealistic sequences of
+switches between the
+.Xr mfi 4
+and
+.Nm
+drivers are performed on MR-Fusion cards.
+Switching drivers is designed to happen only one time.
+Although switching multiple times is possible, it is not recommended.
+The user should decide from the start which driver they want to use for
+the MR-Fusion card.
+.Pp
+The user may see different device names when switching from
+.Xr mfi 4
+to
+.Nm .
+This behavior is by design, and the user needs to change the
+.Xr fstab 5
+entries manually when experimenting with
+.Xr mfi 4
+and
+.Nm
+interoperability.
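+.Pp
+A hypothetical
+.Xr fstab 5
+change after switching from
+.Xr mfi 4
+to
+.Nm
+(the device and partition names are illustrative only):
+.Bd -literal -offset indent
+# before, with mfi(4):   /dev/mfid0s1a  /  ufs  rw  1  1
+# after, with mrsas(4):  /dev/da0s1a    /  ufs  rw  1  1
+.Ed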
+.Sh FILES
+.Bl -tag -width ".Pa /dev/mrsas?" -compact
+.It Pa /dev/da?
+array/logical disk interface
+.It Pa /dev/mrsas?
+management interface
+.El
+.Sh SEE ALSO
+.Xr cam 4 ,
+.Xr mfi 4 ,
+.Xr pci 4 ,
+.Xr device.hints 5
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 10.0 .
+.Bd -ragged
+.Cd "mfi Driver:"
+.Xr mfi 4
+is the old FreeBSD driver which started with support for Gen-1 Controllers and
+was extended to support up to MR-Fusion (Device ID = 0x005B, 0x005D, 0x005F).
+.Ed
+.Bd -ragged
+.Cd "mrsas Driver:"
+.Nm
+is the new driver reworked by LSI which supports Thunderbolt and newer
+products.
+The SAS+SATA RAID controller with device id 0x005B is referred to as
+the Thunderbolt controller throughout this manual page.
+.Ed
+.Bd -ragged
+.Nm cam aware HBA drivers:
+FreeBSD has a
+.Xr cam 4
+layer which attaches storage devices and provides a common access mechanism to
+storage controllers and attached devices. The
+.Nm
+driver is
+.Xr cam 4
+aware and devices associated with
+.Nm
+can be seen using
+.Xr camcontrol 8 .
+The
+.Xr mfi 4
+driver does not understand the
+.Xr cam 4
+layer and it directly associates storage disks to the block layer.
+.Pp
+.Nm Thunderbolt Controller:
+This is the 6Gb/s MegaRAID HBA card which has device id 0x005B.
+.Pp
+.Nm Invader Controller:
+This is the 12Gb/s MegaRAID HBA card which has device id 0x005D.
+.Pp
+.Nm Fury Controller:
+This is the 12Gb/s MegaRAID HBA card which has device id 0x005F.
+.Ed
+.Sh AUTHORS
+The
+.Nm
+driver and this manual page were written by
+.An Kashyap Desai Aq Kashyap.Desai@lsi.com .
+.Sh TODO
+The driver does not support big-endian architectures at this time.
+.Pp
+The driver does not support aliases for device names (an alias would be
+needed when the user switches between the two drivers and does not want to
+edit /etc/fstab manually).
+.Pp
+The
+.Nm
+driver exposes devices as /dev/da?, whereas
+.Xr mfi 4
+exposes devices as /dev/mfid?.
+.Pp
+.Nm
+does not support the Linux Emulator interface.
+.Pp
+.Nm
+will not work with
+.Xr mfiutil 8 .
diff --git a/sys/conf/files b/sys/conf/files
index cb0a190..6961b1b 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1868,6 +1868,10 @@ dev/mpt/mpt_debug.c optional mpt
dev/mpt/mpt_pci.c optional mpt pci
dev/mpt/mpt_raid.c optional mpt
dev/mpt/mpt_user.c optional mpt
+dev/mrsas/mrsas.c optional mrsas
+dev/mrsas/mrsas_cam.c optional mrsas
+dev/mrsas/mrsas_ioctl.c optional mrsas
+dev/mrsas/mrsas_fp.c optional mrsas
dev/msk/if_msk.c optional msk
dev/mvs/mvs.c optional mvs
dev/mvs/mvs_if.m optional mvs
diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
new file mode 100644
index 0000000..b740128
--- /dev/null
+++ b/sys/dev/mrsas/mrsas.c
@@ -0,0 +1,3672 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies, either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/mrsas/mrsas.h>
+#include <dev/mrsas/mrsas_ioctl.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+
+
+/*
+ * Function prototypes
+ */
+static d_open_t mrsas_open;
+static d_close_t mrsas_close;
+static d_read_t mrsas_read;
+static d_write_t mrsas_write;
+static d_ioctl_t mrsas_ioctl;
+
+static struct mrsas_ident *mrsas_find_ident(device_t);
+static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
+static void mrsas_flush_cache(struct mrsas_softc *sc);
+static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
+static void mrsas_ocr_thread(void *arg);
+static int mrsas_get_map_info(struct mrsas_softc *sc);
+static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
+static int mrsas_sync_map_info(struct mrsas_softc *sc);
+static int mrsas_get_pd_list(struct mrsas_softc *sc);
+static int mrsas_get_ld_list(struct mrsas_softc *sc);
+static int mrsas_setup_irq(struct mrsas_softc *sc);
+static int mrsas_alloc_mem(struct mrsas_softc *sc);
+static int mrsas_init_fw(struct mrsas_softc *sc);
+static int mrsas_setup_raidmap(struct mrsas_softc *sc);
+static int mrsas_complete_cmd(struct mrsas_softc *sc);
+static int mrsas_clear_intr(struct mrsas_softc *sc);
+static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
+ struct mrsas_ctrl_info *ctrl_info);
+static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd_to_abort);
+u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
+u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *mfi_cmd);
+int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
+int mrsas_init_adapter(struct mrsas_softc *sc);
+int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
+int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
+int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
+int mrsas_ioc_init(struct mrsas_softc *sc);
+int mrsas_bus_scan(struct mrsas_softc *sc);
+int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+int mrsas_reset_ctrl(struct mrsas_softc *sc);
+int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
+int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd);
+int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
+ int size);
+void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
+void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_disable_intr(struct mrsas_softc *sc);
+void mrsas_enable_intr(struct mrsas_softc *sc);
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
+void mrsas_free_mem(struct mrsas_softc *sc);
+void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
+void mrsas_isr(void *arg);
+void mrsas_teardown_intr(struct mrsas_softc *sc);
+void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
+void mrsas_kill_hba (struct mrsas_softc *sc);
+void mrsas_aen_handler(struct mrsas_softc *sc);
+void mrsas_write_reg(struct mrsas_softc *sc, int offset,
+ u_int32_t value);
+void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi);
+void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
+void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd, u_int8_t status);
+void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
+ u_int8_t extStatus);
+struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
+MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd);
+
+extern int mrsas_cam_attach(struct mrsas_softc *sc);
+extern void mrsas_cam_detach(struct mrsas_softc *sc);
+extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
+extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
+extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
+extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
+extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
+extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
+extern void mrsas_xpt_release(struct mrsas_softc *sc);
+extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
+ u_int16_t index);
+extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
+static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
+static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
+SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
+
+
+/**
+ * PCI device struct and table
+ *
+ */
+typedef struct mrsas_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ const char *desc;
+} MRSAS_CTLR_ID;
+
+MRSAS_CTLR_ID device_table[] = {
+ {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
+ {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
+ {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
+ {0, 0, 0, 0, NULL}
+};
+
+/**
+ * Character device entry points
+ *
+ */
+static struct cdevsw mrsas_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = mrsas_open,
+ .d_close = mrsas_close,
+ .d_read = mrsas_read,
+ .d_write = mrsas_write,
+ .d_ioctl = mrsas_ioctl,
+ .d_name = "mrsas",
+};
+
+MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
+
+/**
+ * In the cdevsw routines, we find our softc by using the si_drv1 member
+ * of struct cdev. We set this variable to point to our softc in our
+ * attach routine when we create the /dev entry.
+ */
+int
+mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1;
+ return (0);
+}
+
+int
+mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1;
+ return (0);
+}
+
+int
+mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1;
+ return (0);
+}
+int
+mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1;
+ return (0);
+}
+
+/**
+ * Register Read/Write Functions
+ *
+ */
+void
+mrsas_write_reg(struct mrsas_softc *sc, int offset,
+ u_int32_t value)
+{
+ bus_space_tag_t bus_tag = sc->bus_tag;
+ bus_space_handle_t bus_handle = sc->bus_handle;
+
+ bus_space_write_4(bus_tag, bus_handle, offset, value);
+}
+
+u_int32_t
+mrsas_read_reg(struct mrsas_softc *sc, int offset)
+{
+ bus_space_tag_t bus_tag = sc->bus_tag;
+ bus_space_handle_t bus_handle = sc->bus_handle;
+
+ return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
+}
+
+
+/**
+ * Interrupt Disable/Enable/Clear Functions
+ *
+ */
+void mrsas_disable_intr(struct mrsas_softc *sc)
+{
+ u_int32_t mask = 0xFFFFFFFF;
+ u_int32_t status;
+
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
+ /* Dummy read to force pci flush */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
+}
+
+void mrsas_enable_intr(struct mrsas_softc *sc)
+{
+ u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
+ u_int32_t status;
+
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
+}
+
+static int mrsas_clear_intr(struct mrsas_softc *sc)
+{
+ u_int32_t status, fw_status, fw_state;
+
+ /* Read received interrupt */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+
+ /* If FW state change interrupt is received, write to it again to clear */
+ if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
+ fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ fw_state = fw_status & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
+ if(sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+ }
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
+ mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+ return(1);
+ }
+
+ /* Not our interrupt, so just return */
+ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+ return(0);
+
+ /* We got a reply interrupt */
+ return(1);
+}
+
+/**
+ * PCI Support Functions
+ *
+ */
+static struct mrsas_ident * mrsas_find_ident(device_t dev)
+{
+ struct mrsas_ident *pci_device;
+
+ for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
+ {
+ if ((pci_device->vendor == pci_get_vendor(dev)) &&
+ (pci_device->device == pci_get_device(dev)) &&
+ ((pci_device->subvendor == pci_get_subvendor(dev)) ||
+ (pci_device->subvendor == 0xffff)) &&
+ ((pci_device->subdevice == pci_get_subdevice(dev)) ||
+ (pci_device->subdevice == 0xffff)))
+ return (pci_device);
+ }
+ return (NULL);
+}
+
+static int mrsas_probe(device_t dev)
+{
+ static u_int8_t first_ctrl = 1;
+ struct mrsas_ident *id;
+
+ if ((id = mrsas_find_ident(dev)) != NULL) {
+ if (first_ctrl) {
+ printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
+ first_ctrl = 0;
+ }
+ device_set_desc(dev, id->desc);
+ /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
+ return (-30);
+ }
+ return (ENXIO);
+}
+
+/**
+ * mrsas_setup_sysctl: setup sysctl values for mrsas
+ * input: Adapter instance soft state
+ *
+ * Setup sysctl entries for mrsas driver.
+ */
+static void
+mrsas_setup_sysctl(struct mrsas_softc *sc)
+{
+ struct sysctl_ctx_list *sysctl_ctx = NULL;
+ struct sysctl_oid *sysctl_tree = NULL;
+ char tmpstr[80], tmpstr2[80];
+
+ /*
+ * Setup the sysctl variable so the user can change the debug level
+ * on the fly.
+ */
+ snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
+ device_get_unit(sc->mrsas_dev));
+ snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
+
+ sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
+ if (sysctl_ctx != NULL)
+ sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
+
+ if (sysctl_tree == NULL) {
+ sysctl_ctx_init(&sc->sysctl_ctx);
+ sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
+ CTLFLAG_RD, 0, tmpstr);
+ if (sc->sysctl_tree == NULL)
+ return;
+ sysctl_ctx = &sc->sysctl_ctx;
+ sysctl_tree = sc->sysctl_tree;
+ }
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
+ "Disable the use of OCR");
+
+ SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
+ strlen(MRSAS_VERSION), "driver version");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "reset_count", CTLFLAG_RD,
+ &sc->reset_count, 0, "number of OCRs from start of day");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "fw_outstanding", CTLFLAG_RD,
+ &sc->fw_outstanding, 0, "FW outstanding commands");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
+ &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
+ "Driver debug level");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
+ 0, "Driver IO timeout value in mili-second.");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
+ &sc->mrsas_fw_fault_check_delay,
+ 0, "FW fault check thread delay in seconds. <default is 1 sec>");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "reset_in_progress", CTLFLAG_RD,
+ &sc->reset_in_progress, 0, "ocr in progress status");
+
+}
+
+/**
+ * mrsas_get_tunables: get tunable parameters.
+ * input: Adapter instance soft state
+ *
+ * Get tunable parameters. This will help to debug the driver at boot time.
+ */
+static void
+mrsas_get_tunables(struct mrsas_softc *sc)
+{
+ char tmpstr[80];
+
+ /* XXX default to some debugging for now */
+ sc->mrsas_debug = MRSAS_FAULT;
+ sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
+ sc->mrsas_fw_fault_check_delay = 1;
+ sc->reset_count = 0;
+ sc->reset_in_progress = 0;
+
+ /*
+ * Grab the global variables.
+ */
+ TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
+
+ /* Grab the unit-instance variables */
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
+ device_get_unit(sc->mrsas_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
+}
+
+/**
+ * mrsas_alloc_evt_log_info_cmd: Allocates memory to get event log information.
+ * Used to get sequence number at driver load time.
+ * input: Adapter soft state
+ *
+ * Allocates DMAable memory for the event log info internal command.
+ */
+int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
+{
+ int el_info_size;
+
+ /* Allocate get event log info command */
+ el_info_size = sizeof(struct mrsas_evt_log_info);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ el_info_size, // maxsize
+ 1, // nsegments
+ el_info_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->el_info_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
+ BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
+ sc->el_info_mem, el_info_size, mrsas_addr_cb,
+ &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
+ return (ENOMEM);
+ }
+
+ memset(sc->el_info_mem, 0, el_info_size);
+ return (0);
+}
+
+/**
+ * mrsas_free_evt_log_info_cmd: Free memory for event log info command
+ * input: Adapter soft state
+ *
+ * Deallocates memory for the event log info internal command.
+ */
+void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
+{
+ if (sc->el_info_phys_addr)
+ bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
+ if (sc->el_info_mem != NULL)
+ bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
+ if (sc->el_info_tag != NULL)
+ bus_dma_tag_destroy(sc->el_info_tag);
+}
+
+/**
+ * mrsas_get_seq_num: Get latest event sequence number
+ * @sc: Adapter soft state
+ * @eli: Firmware event log sequence number information.
+ * Firmware maintains a log of all events in a non-volatile area.
+ * The driver gets the sequence number using the DCMD
+ * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
+ */
+
+static int
+mrsas_get_seq_num(struct mrsas_softc *sc,
+ struct mrsas_evt_log_info *eli)
+{
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return -ENOMEM;
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
+ dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
+
+ mrsas_issue_blocked_cmd(sc, cmd);
+
+ /*
+ * Copy the data back into the caller's buffer
+ */
+ memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
+ mrsas_free_evt_log_info_cmd(sc);
+ mrsas_release_mfi_cmd(cmd);
+
+ return 0;
+}
+
+
+/**
+ * mrsas_register_aen: Register for asynchronous event notification
+ * @sc: Adapter soft state
+ * @seq_num: Starting sequence number
+ * @class_locale: Class of the event
+ * This function subscribes for events beyond the @seq_num
+ * and type @class_locale.
+ *
+ * */
+static int
+mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
+ u_int32_t class_locale_word)
+{
+ int ret_val;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ union mrsas_evt_class_locale curr_aen;
+ union mrsas_evt_class_locale prev_aen;
+
+/*
+ * If there is an AEN pending already (aen_cmd), check if the
+ * class_locale of that pending AEN is inclusive of the new
+ * AEN request we currently have. If it is, then we don't have
+ * to do anything. In other words, whichever events the current
+ * AEN request is subscribing to have already been subscribed
+ * to.
+ * If the old command is _not_ inclusive, then we have to abort
+ * that command, form a class_locale that is a superset of both
+ * the old and the current, and re-issue it to the FW.
+ */
+
+ curr_aen.word = class_locale_word;
+
+ if (sc->aen_cmd) {
+
+ prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
+
+/*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+ * registered, then new registration requests for higher
+ * classes need not be sent to FW. They are automatically
+ * included.
+ * Locale numbers don't have such a hierarchy; they are bitmap values.
+ */
+ if ((prev_aen.members.class <= curr_aen.members.class) &&
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
+ /*
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
+ return 0;
+ } else {
+ curr_aen.members.locale |= prev_aen.members.locale;
+
+ if (prev_aen.members.class < curr_aen.members.class)
+ curr_aen.members.class = prev_aen.members.class;
+
+ sc->aen_cmd->abort_aen = 1;
+ ret_val = mrsas_issue_blocked_abort_cmd(sc,
+ sc->aen_cmd);
+
+ if (ret_val) {
+ printf("mrsas: Failed to abort "
+ "previous AEN command\n");
+ return ret_val;
+ }
+ }
+ }
+
+ cmd = mrsas_get_mfi_cmd(sc);
+
+ if (!cmd)
+ return -ENOMEM;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
+
+/*
+ * Prepare DCMD for aen registration
+ */
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
+ dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
+ dcmd->mbox.w[0] = seq_num;
+ sc->last_seq_num = seq_num;
+ dcmd->mbox.w[1] = curr_aen.word;
+ dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
+
+ if (sc->aen_cmd != NULL) {
+ mrsas_release_mfi_cmd(cmd);
+ return 0;
+ }
+
+ /*
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
+ sc->aen_cmd = cmd;
+
+ /*
+ * Issue the AEN registration frame
+ */
+ if (mrsas_issue_dcmd(sc, cmd)){
+ device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
+ return(1);
+ }
+
+ return 0;
+}
+/**
+ * mrsas_start_aen - Subscribes to AEN during driver load time
+ * @instance: Adapter soft state
+ */
+static int mrsas_start_aen(struct mrsas_softc *sc)
+{
+ struct mrsas_evt_log_info eli;
+ union mrsas_evt_class_locale class_locale;
+
+
+ /* Get the latest sequence number from the FW */
+
+ memset(&eli, 0, sizeof(eli));
+
+ if (mrsas_get_seq_num(sc, &eli))
+ return -1;
+
+ /* Register AEN with FW for the latest sequence number plus 1 */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ return mrsas_register_aen(sc, eli.newest_seq_num + 1,
+ class_locale.word);
+}
+
+/**
+ * mrsas_attach: PCI entry point
+ * input: device struct pointer
+ *
+ * Performs setup of PCI and registers, initializes mutexes and
+ * linked lists, registers interrupts and CAM, and initializes
+ * the adapter/controller to its proper state.
+ */
+static int mrsas_attach(device_t dev)
+{
+ struct mrsas_softc *sc = device_get_softc(dev);
+ uint32_t cmd, bar, error;
+
+ /* Look up our softc and initialize its fields. */
+ sc->mrsas_dev = dev;
+ sc->device_id = pci_get_device(dev);
+
+ mrsas_get_tunables(sc);
+
+ /*
+ * Set up PCI and registers
+ */
+ cmd = pci_read_config(dev, PCIR_COMMAND, 2);
+ if ( (cmd & PCIM_CMD_PORTEN) == 0) {
+ return (ENXIO);
+ }
+ /* Force the busmaster enable bit on. */
+ cmd |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, cmd, 2);
+
+ //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
+ bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
+
+ sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
+ if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
+ == NULL) {
+ device_printf(dev, "Cannot allocate PCI registers\n");
+ goto attach_fail;
+ }
+ sc->bus_tag = rman_get_bustag(sc->reg_res);
+ sc->bus_handle = rman_get_bushandle(sc->reg_res);
+
+ /* Initialize mutexes */
+ mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
+ mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
+ mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
+ mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
+ mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
+ mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
+ mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
+ mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
+
+ /* Initialize linked lists */
+ TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
+ TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
+
+ atomic_set(&sc->fw_outstanding,0);
+
+ sc->io_cmds_highwater = 0;
+
+ /* Create a /dev entry for this device. */
+ sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
+ GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
+ device_get_unit(dev));
+ if (sc->mrsas_cdev)
+ sc->mrsas_cdev->si_drv1 = sc;
+
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ sc->UnevenSpanSupport = 0;
+
+ /* Initialize Firmware */
+ if (mrsas_init_fw(sc) != SUCCESS) {
+ goto attach_fail_fw;
+ }
+
+ /* Register SCSI mid-layer */
+ if ((mrsas_cam_attach(sc) != SUCCESS)) {
+ goto attach_fail_cam;
+ }
+
+ /* Register IRQs */
+ if (mrsas_setup_irq(sc) != SUCCESS) {
+ goto attach_fail_irq;
+ }
+
+ /* Enable Interrupts */
+ mrsas_enable_intr(sc);
+
+ error = mrsas_kproc_create(mrsas_ocr_thread, sc,
+ &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
+ device_get_unit(sc->mrsas_dev));
+ if (error) {
+ printf("Error %d starting rescan thread\n", error);
+ goto attach_fail_irq;
+ }
+
+ mrsas_setup_sysctl(sc);
+
+ /* Initiate AEN (Asynchronous Event Notification)*/
+
+ if (mrsas_start_aen(sc)) {
+ printf("Error: start aen failed\n");
+ goto fail_start_aen;
+ }
+
+ return (0);
+
+fail_start_aen:
+attach_fail_irq:
+ mrsas_teardown_intr(sc);
+attach_fail_cam:
+ mrsas_cam_detach(sc);
+attach_fail_fw:
+//attach_fail_raidmap:
+ mrsas_free_mem(sc);
+ mtx_destroy(&sc->sim_lock);
+ mtx_destroy(&sc->aen_lock);
+ mtx_destroy(&sc->pci_lock);
+ mtx_destroy(&sc->io_lock);
+ mtx_destroy(&sc->ioctl_lock);
+ mtx_destroy(&sc->mpt_cmd_pool_lock);
+ mtx_destroy(&sc->mfi_cmd_pool_lock);
+ mtx_destroy(&sc->raidmap_lock);
+attach_fail:
+ destroy_dev(sc->mrsas_cdev);
+ if (sc->reg_res){
+ bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
+ sc->reg_res_id, sc->reg_res);
+ }
+ return (ENXIO);
+}
+
+/**
+ * mrsas_detach: De-allocates and teardown resources
+ * input: device struct pointer
+ *
+ * This function is the entry point for device disconnect and detach. It
+ * performs memory de-allocations, shutdown of the controller and various
+ * teardown and destroy resource functions.
+ */
+static int mrsas_detach(device_t dev)
+{
+ struct mrsas_softc *sc;
+ int i = 0;
+
+ sc = device_get_softc(dev);
+ sc->remove_in_progress = 1;
+ if(sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+ while(sc->reset_in_progress){
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for ocr to be finished\n",i);
+ }
+ pause("mr_shutdown", hz);
+ }
+ i = 0;
+ while(sc->ocr_thread_active){
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for "
+ "mrsas_ocr thread to quit ocr %d\n",i,
+ sc->ocr_thread_active);
+ }
+ pause("mr_shutdown", hz);
+ }
+ mrsas_flush_cache(sc);
+ mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
+ mrsas_disable_intr(sc);
+ mrsas_cam_detach(sc);
+ mrsas_teardown_intr(sc);
+ mrsas_free_mem(sc);
+ mtx_destroy(&sc->sim_lock);
+ mtx_destroy(&sc->aen_lock);
+ mtx_destroy(&sc->pci_lock);
+ mtx_destroy(&sc->io_lock);
+ mtx_destroy(&sc->ioctl_lock);
+ mtx_destroy(&sc->mpt_cmd_pool_lock);
+ mtx_destroy(&sc->mfi_cmd_pool_lock);
+ mtx_destroy(&sc->raidmap_lock);
+ if (sc->reg_res){
+ bus_release_resource(sc->mrsas_dev,
+ SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
+ }
+ destroy_dev(sc->mrsas_cdev);
+ if (sc->sysctl_tree != NULL)
+ sysctl_ctx_free(&sc->sysctl_ctx);
+ return (0);
+}
+
+/**
+ * mrsas_free_mem: Frees allocated memory
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_detach() to free previously allocated
+ * memory.
+ */
+void mrsas_free_mem(struct mrsas_softc *sc)
+{
+ int i;
+ u_int32_t max_cmd;
+ struct mrsas_mfi_cmd *mfi_cmd;
+ struct mrsas_mpt_cmd *mpt_cmd;
+
+ /*
+ * Free RAID map memory
+ */
+ for (i=0; i < 2; i++)
+ {
+ if (sc->raidmap_phys_addr[i])
+ bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
+ if (sc->raidmap_mem[i] != NULL)
+ bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
+ if (sc->raidmap_tag[i] != NULL)
+ bus_dma_tag_destroy(sc->raidmap_tag[i]);
+ }
+
+ /*
+ * Free version buffer memory
+ */
+ if (sc->verbuf_phys_addr)
+ bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
+ if (sc->verbuf_mem != NULL)
+ bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
+ if (sc->verbuf_tag != NULL)
+ bus_dma_tag_destroy(sc->verbuf_tag);
+
+
+ /*
+ * Free sense buffer memory
+ */
+ if (sc->sense_phys_addr)
+ bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
+ if (sc->sense_mem != NULL)
+ bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
+ if (sc->sense_tag != NULL)
+ bus_dma_tag_destroy(sc->sense_tag);
+
+ /*
+ * Free chain frame memory
+ */
+ if (sc->chain_frame_phys_addr)
+ bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
+ if (sc->chain_frame_mem != NULL)
+ bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
+ if (sc->chain_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->chain_frame_tag);
+
+ /*
+ * Free IO Request memory
+ */
+ if (sc->io_request_phys_addr)
+ bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
+ if (sc->io_request_mem != NULL)
+ bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
+ if (sc->io_request_tag != NULL)
+ bus_dma_tag_destroy(sc->io_request_tag);
+
+ /*
+ * Free Reply Descriptor memory
+ */
+ if (sc->reply_desc_phys_addr)
+ bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
+ if (sc->reply_desc_mem != NULL)
+ bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
+ if (sc->reply_desc_tag != NULL)
+ bus_dma_tag_destroy(sc->reply_desc_tag);
+
+ /*
+ * Free event detail memory
+ */
+ if (sc->evt_detail_phys_addr)
+ bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
+ if (sc->evt_detail_mem != NULL)
+ bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
+ if (sc->evt_detail_tag != NULL)
+ bus_dma_tag_destroy(sc->evt_detail_tag);
+
+ /*
+ * Free MFI frames
+ */
+ if (sc->mfi_cmd_list) {
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ mfi_cmd = sc->mfi_cmd_list[i];
+ mrsas_free_frame(sc, mfi_cmd);
+ }
+ }
+ if (sc->mficmd_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->mficmd_frame_tag);
+
+ /*
+ * Free MPT internal command list
+ */
+ max_cmd = sc->max_fw_cmds;
+ if (sc->mpt_cmd_list) {
+ for (i = 0; i < max_cmd; i++) {
+ mpt_cmd = sc->mpt_cmd_list[i];
+ bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
+ free(sc->mpt_cmd_list[i], M_MRSAS);
+ }
+ free(sc->mpt_cmd_list, M_MRSAS);
+ sc->mpt_cmd_list = NULL;
+ }
+
+ /*
+ * Free MFI internal command list
+ */
+
+ if (sc->mfi_cmd_list) {
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ free(sc->mfi_cmd_list[i], M_MRSAS);
+ }
+ free(sc->mfi_cmd_list, M_MRSAS);
+ sc->mfi_cmd_list = NULL;
+ }
+
+ /*
+ * Free request descriptor memory
+ */
+ free(sc->req_desc, M_MRSAS);
+ sc->req_desc = NULL;
+
+ /*
+ * Destroy parent tag
+ */
+ if (sc->mrsas_parent_tag != NULL)
+ bus_dma_tag_destroy(sc->mrsas_parent_tag);
+}
+
+/**
+ * mrsas_teardown_intr: Teardown interrupt
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_detach() to tear down and release
+ * the bus interrupt resource.
+ */
+void mrsas_teardown_intr(struct mrsas_softc *sc)
+{
+ if (sc->intr_handle)
+ bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
+ if (sc->mrsas_irq != NULL)
+ bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
+ sc->intr_handle = NULL;
+}
+
+/**
+ * mrsas_suspend: Suspend entry point
+ * input: Device struct pointer
+ *
+ * This function is the entry point for system suspend from the OS.
+ */
+static int mrsas_suspend(device_t dev)
+{
+ struct mrsas_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (0);
+}
+
+/**
+ * mrsas_resume: Resume entry point
+ * input: Device struct pointer
+ *
+ * This function is the entry point for system resume from the OS.
+ */
+static int mrsas_resume(device_t dev)
+{
+ struct mrsas_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (0);
+}
+
+/**
+ * mrsas_ioctl: ioctl command entry point.
+ *
+ * This function is the entry point for ioctls from the OS. It calls the
+ * appropriate function for processing depending on the command received.
+ */
+static int
+mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
+{
+ struct mrsas_softc *sc;
+ int ret = 0, i = 0;
+
+ sc = (struct mrsas_softc *)(dev->si_drv1);
+
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "Driver remove or shutdown called.\n");
+ return ENOENT;
+ }
+
+ mtx_lock_spin(&sc->ioctl_lock);
+ if (!sc->reset_in_progress) {
+ mtx_unlock_spin(&sc->ioctl_lock);
+ goto do_ioctl;
+ }
+
+ /* Release ioctl_lock, and wait for OCR
+ * to be finished */
+ mtx_unlock_spin(&sc->ioctl_lock);
+ while(sc->reset_in_progress){
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for "
+ "OCR to be finished %d\n",i,
+ sc->ocr_thread_active);
+ }
+ pause("mr_ioctl", hz);
+ }
+
+do_ioctl:
+ switch (cmd) {
+ case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
+ ret = mrsas_passthru(sc, (void *)arg);
+ break;
+ case MRSAS_IOC_SCAN_BUS:
+ ret = mrsas_bus_scan(sc);
+ break;
+ }
+
+ return (ret);
+}
+
+/**
+ * mrsas_setup_irq: Set up interrupt.
+ * input: Adapter instance soft state
+ *
+ * This function sets up the interrupt as a bus resource, with flags
+ * indicating that the resource permits contemporaneous sharing and is
+ * activated atomically.
+ */
+static int mrsas_setup_irq(struct mrsas_softc *sc)
+{
+ sc->irq_id = 0;
+ sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
+ &sc->irq_id, RF_SHAREABLE | RF_ACTIVE);
+ if (sc->mrsas_irq == NULL){
+ device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
+ return (FAIL);
+ }
+ if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE|INTR_TYPE_CAM,
+ NULL, mrsas_isr, sc, &sc->intr_handle)) {
+ device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
+ return (FAIL);
+ }
+
+ return (0);
+}
+
+/*
+ * mrsas_isr: ISR entry point
+ * input: argument pointer
+ *
+ * This function is the interrupt service routine entry point. There
+ * are two types of interrupts, state change interrupt and response
+ * interrupt. If an interrupt is not ours, we just return.
+ */
+void mrsas_isr(void *arg)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)arg;
+ int status;
+
+ /* Clear FW state change interrupt */
+ status = mrsas_clear_intr(sc);
+
+ /* Not our interrupt */
+ if (!status)
+ return;
+
+ /* If we are resetting, bail */
+ if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
+ printf(" Entered into ISR when OCR is going active. \n");
+ mrsas_clear_intr(sc);
+ return;
+ }
+ /* Process for reply request and clear response interrupt */
+ if (mrsas_complete_cmd(sc) != SUCCESS)
+ mrsas_clear_intr(sc);
+
+ return;
+}
+
+/*
+ * mrsas_complete_cmd: Process reply request
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_isr() to process reply requests and
+ * clear the response interrupt. Processing of a reply request entails
+ * walking through the reply descriptor array for the command requests
+ * pending from the firmware. We look at the Function field to determine
+ * the command type and perform the appropriate action. Before we
+ * return, we clear the response interrupt.
+ */
+static int mrsas_complete_cmd(struct mrsas_softc *sc)
+{
+ Mpi2ReplyDescriptorsUnion_t *desc;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+ MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
+ struct mrsas_mpt_cmd *cmd_mpt;
+ struct mrsas_mfi_cmd *cmd_mfi;
+ u_int8_t arm, reply_descript_type;
+ u_int16_t smid, num_completed;
+ u_int8_t status, extStatus;
+ union desc_value desc_val;
+ PLD_LOAD_BALANCE_INFO lbinfo;
+ u_int32_t device_id;
+ int threshold_reply_count = 0;
+
+
+ /* If we have a hardware error, no need to continue */
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+ return (DONE);
+
+ desc = sc->reply_desc_mem;
+ desc += sc->last_reply_idx;
+
+ reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ desc_val.word = desc->Words;
+ num_completed = 0;
+
+ reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ /* Find our reply descriptor for the command and process */
+ while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
+ {
+ smid = reply_desc->SMID;
+ cmd_mpt = sc->mpt_cmd_list[smid -1];
+ scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
+
+ status = scsi_io_req->RaidContext.status;
+ extStatus = scsi_io_req->RaidContext.exStatus;
+
+ switch (scsi_io_req->Function)
+ {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
+ device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
+ lbinfo = &sc->load_balance_info[device_id];
+ if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
+ arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
+ atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+ cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
+ }
+ /* Fall through and complete IO */
+ case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
+ mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
+ mrsas_cmd_done(sc, cmd_mpt);
+ scsi_io_req->RaidContext.status = 0;
+ scsi_io_req->RaidContext.exStatus = 0;
+ atomic_dec(&sc->fw_outstanding);
+ break;
+ case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
+ cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
+ mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
+ cmd_mpt->flags = 0;
+ mrsas_release_mpt_cmd(cmd_mpt);
+ break;
+ }
+
+ sc->last_reply_idx++;
+ if (sc->last_reply_idx >= sc->reply_q_depth)
+ sc->last_reply_idx = 0;
+
+ desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
+ num_completed++;
+ threshold_reply_count++;
+
+ /* Get the next reply descriptor */
+ if (!sc->last_reply_idx)
+ desc = sc->reply_desc_mem;
+ else
+ desc++;
+
+ reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+ desc_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+
+ /*
+ * Write to the reply post index after completing the threshold
+ * number of replies if more replies are still pending in the
+ * reply queue.
+ */
+ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
+ sc->last_reply_idx);
+ threshold_reply_count = 0;
+ }
+ }
+
+ /* No match, just return */
+ if (num_completed == 0)
+ return (DONE);
+
+ /* Clear response interrupt */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
+
+ return(0);
+}
+
+/*
+ * mrsas_map_mpt_cmd_status: Map firmware command status to CAM status.
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
+ * It checks the command status and maps the appropriate CAM status for the CCB.
+ */
+void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
+{
+ struct mrsas_softc *sc = cmd->sc;
+ u_int8_t *sense_data;
+
+ switch (status) {
+ case MFI_STAT_OK:
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+ cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
+ if (sense_data) {
+ /* For now just copy 18 bytes back */
+ memcpy(sense_data, cmd->sense, 18);
+ cmd->ccb_ptr->csio.sense_len = 18;
+ cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
+ }
+ break;
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ if (cmd->ccb_ptr->ccb_h.target_lun)
+ cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
+ else
+ cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
+ break;
+ case MFI_STAT_CONFIG_SEQ_MISMATCH:
+ /* Send status to the CAM layer to retry sending the command
+ * without decrementing the retry counter */
+ cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
+ break;
+ default:
+ device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
+ cmd->ccb_ptr->csio.scsi_status = status;
+ }
+ return;
+}
+
+/*
+ * mrsas_alloc_mem: Allocate DMAable memory.
+ * input: Adapter instance soft state
+ *
+ * This function creates the parent DMA tag and allocates DMAable memory.
+ * The DMA tag describes the constraints of DMA mapping. Memory allocated is
+ * mapped into the kernel virtual address space, and the callback argument
+ * receives the physical memory address.
+ */
+static int mrsas_alloc_mem(struct mrsas_softc *sc)
+{
+ u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
+ chain_frame_size, evt_detail_size;
+
+ /*
+ * Allocate parent DMA tag
+ */
+ if (bus_dma_tag_create(NULL, /* parent */
+ 1, /* alignment */
+ 0, /* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MRSAS_MAX_IO_SIZE,/* maxsize */
+ MRSAS_MAX_SGL, /* nsegments */
+ MRSAS_MAX_IO_SIZE,/* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mrsas_parent_tag /* tag */
+ )) {
+ device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
+ return(ENOMEM);
+ }
+
+ /*
+ * Allocate for version buffer
+ */
+ verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ verbuf_size, // maxsize
+ 1, // nsegments
+ verbuf_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->verbuf_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
+ BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->verbuf_mem, verbuf_size);
+ if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
+ verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
+ device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
+ return(ENOMEM);
+ }
+
+ /*
+ * Allocate IO Request Frames
+ */
+ io_req_size = sc->io_frames_alloc_sz;
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 16, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ io_req_size, // maxsize
+ 1, // nsegments
+ io_req_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->io_request_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
+ BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->io_request_mem, io_req_size);
+ if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
+ sc->io_request_mem, io_req_size, mrsas_addr_cb,
+ &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate Chain Frames
+ */
+ chain_frame_size = sc->chain_frames_alloc_sz;
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 4, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ chain_frame_size, // maxsize
+ 1, // msegments
+ chain_frame_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->chain_frame_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
+ BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->chain_frame_mem, chain_frame_size);
+ if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
+ sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
+ &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate Reply Descriptor Array
+ */
+ reply_desc_size = sc->reply_alloc_sz;
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 16, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ reply_desc_size, // maxsize
+ 1, // msegments
+ reply_desc_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->reply_desc_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
+ BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
+ sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
+ &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate Sense Buffer Array. Keep in lower 4GB
+ */
+ sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 64, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ sense_size, // maxsize
+ 1, // nsegments
+ sense_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->sense_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
+ BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
+ sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
+ BUS_DMA_NOWAIT)){
+ device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate for Event detail structure
+ */
+ evt_detail_size = sizeof(struct mrsas_evt_detail);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ evt_detail_size, // maxsize
+ 1, // msegments
+ evt_detail_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->evt_detail_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
+ BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->evt_detail_mem, evt_detail_size);
+ if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
+ sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
+ &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
+ return (ENOMEM);
+ }
+
+
+ /*
+ * Create a dma tag for data buffers; size will be the maximum
+ * possible I/O size (280kB).
+ */
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 1, // alignment
+ 0, // boundary
+ BUS_SPACE_MAXADDR, // lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ MRSAS_MAX_IO_SIZE, // maxsize
+ MRSAS_MAX_SGL, // nsegments
+ MRSAS_MAX_IO_SIZE, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ busdma_lock_mutex, // lockfunc
+ &sc->io_lock, // lockfuncarg
+ &sc->data_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
+ return(ENOMEM);
+ }
+
+ return(0);
+}
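+
+/*
+ * Illustrative sketch, not compiled into the driver: the per-buffer
+ * allocations above all follow the same three-step busdma pattern --
+ * create a tag describing the constraints, allocate DMAable memory
+ * against it, then load the map to obtain the physical address via
+ * mrsas_addr_cb(). A hypothetical helper wrapping that pattern could
+ * look like this:
+ */
+#if 0
+static int
+mrsas_alloc_dma_buf(struct mrsas_softc *sc, bus_size_t size,
+    bus_size_t align, bus_dma_tag_t *tag, void **mem, bus_dmamap_t *map,
+    bus_addr_t *paddr)
+{
+	if (bus_dma_tag_create(sc->mrsas_parent_tag, align, 0,
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, tag))
+		return (ENOMEM);
+	if (bus_dmamem_alloc(*tag, mem, BUS_DMA_NOWAIT, map))
+		return (ENOMEM);
+	bzero(*mem, size);
+	if (bus_dmamap_load(*tag, *map, *mem, size, mrsas_addr_cb,
+	    paddr, BUS_DMA_NOWAIT))
+		return (ENOMEM);
+	return (0);
+}
+#endif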
+
+/*
+ * mrsas_addr_cb: Callback function of bus_dmamap_load()
+ * input: callback argument,
+ * machine dependent type that describes DMA segments,
+ * number of segments,
+ * error code.
+ *
+ * This function receives the mapping information resulting from
+ * bus_dmamap_load() and saves the physical address of the first
+ * segment into the callback argument.
+ */
+void
+mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ bus_addr_t *addr;
+
+ addr = arg;
+ *addr = segs[0].ds_addr;
+}
+
+/*
+ * mrsas_setup_raidmap: Set up RAID map.
+ * input: Adapter instance soft state
+ *
+ * Allocate DMA memory for the RAID maps and perform setup.
+ */
+static int mrsas_setup_raidmap(struct mrsas_softc *sc)
+{
+ sc->map_sz = sizeof(MR_FW_RAID_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
+
+ for (int i=0; i < 2; i++)
+ {
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 4, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ sc->map_sz, // maxsize
+ 1, // nsegments
+ sc->map_sz, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->raidmap_tag[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
+ BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
+ sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
+ BUS_DMA_NOWAIT)){
+ device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
+ return (ENOMEM);
+ }
+ if (!sc->raidmap_mem[i]) {
+ device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
+ return (ENOMEM);
+ }
+ }
+
+ if (!mrsas_get_map_info(sc))
+ mrsas_sync_map_info(sc);
+
+ return (0);
+}
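+
+/*
+ * Illustrative note, not compiled in: the two RAID map copies form a
+ * ping-pong pair indexed by the low bit of sc->map_id. The current map
+ * lives in raidmap_mem[map_id & 1] while updates are synced into the
+ * other copy, e.g.:
+ */
+#if 0
+	MR_FW_RAID_MAP_ALL *cur = sc->raidmap_mem[sc->map_id & 1];
+	MR_FW_RAID_MAP_ALL *next = sc->raidmap_mem[(sc->map_id - 1) & 1];
+#endif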
+
+/**
+ * mrsas_init_fw: Initialize Firmware
+ * input: Adapter soft state
+ *
+ * Calls transition_to_ready() to make sure Firmware is in an operational
+ * state and calls mrsas_init_adapter() to send the IOC_INIT command to
+ * Firmware. It issues internal commands to get the controller info
+ * after the IOC_INIT command response is received from Firmware.
+ * Note: code relating to get_pdlist, get_ld_list and max_sectors is
+ * currently not being used; it is left here as a placeholder.
+ */
+static int mrsas_init_fw(struct mrsas_softc *sc)
+{
+ u_int32_t max_sectors_1;
+ u_int32_t max_sectors_2;
+ u_int32_t tmp_sectors;
+ struct mrsas_ctrl_info *ctrl_info;
+
+ int ret, ocr = 0;
+
+
+ /* Make sure Firmware is ready */
+ ret = mrsas_transition_to_ready(sc, ocr);
+ if (ret != SUCCESS) {
+ return(ret);
+ }
+
+ /* Get operational params, sge flags, send init cmd to ctlr */
+ if (mrsas_init_adapter(sc) != SUCCESS){
+ device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
+ return(1);
+ }
+
+ /* Allocate internal commands for pass-thru */
+ if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
+ device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
+ return(1);
+ }
+
+ if (mrsas_setup_raidmap(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
+ return(1);
+ }
+
+ /* For pass-thru, get PD/LD list and controller info */
+ memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ mrsas_get_pd_list(sc);
+
+ memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
+ mrsas_get_ld_list(sc);
+
+ //memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));
+
+ ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
+
+ /*
+ * Compute the max allowed sectors per IO: The controller info has two
+ * limits on max sectors. Driver should use the minimum of these two.
+ *
+ * 1 << stripe_sz_ops.min = max sectors per strip
+ *
+ * Note that older firmware (< FW ver 30) did not report the information
+ * needed to calculate max_sectors_1, so the number always ended up as zero.
+ */
+ tmp_sectors = 0;
+ if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ ctrl_info->max_strips_per_io;
+ max_sectors_2 = ctrl_info->max_request_size;
+		tmp_sectors = min(max_sectors_1, max_sectors_2);
+ sc->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ sc->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if(sc->UnevenSpanSupport) {
+ device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
+ sc->UnevenSpanSupport);
+ if (MR_ValidateMapInfo(sc))
+ sc->fast_path_io = 1;
+ else
+ sc->fast_path_io = 0;
+
+ }
+ }
+ sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
+
+ if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
+ sc->max_sectors_per_req = tmp_sectors;
+
+ if (ctrl_info)
+ free(ctrl_info, M_MRSAS);
+
+ return(0);
+}
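+
+/*
+ * Worked example with assumed values, for illustration only: with
+ * stripe_sz_ops.min = 7 a strip is 1 << 7 = 128 sectors (64 KB); with
+ * max_strips_per_io = 42 that gives max_sectors_1 = 128 * 42 = 5376,
+ * which is then clamped against max_request_size and the SGE-derived
+ * limit computed above.
+ */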
+
+/**
+ * mrsas_init_adapter: Initializes the adapter/controller
+ * input: Adapter soft state
+ *
+ * Prepares for issuing the IOC Init cmd to FW to initialize the
+ * ROC/controller. The FW register is read to determine the number of
+ * commands supported. All memory allocations for IO are based on
+ * max_cmd. Appropriate calculations are performed in this function.
+ */
+int mrsas_init_adapter(struct mrsas_softc *sc)
+{
+ uint32_t status;
+ u_int32_t max_cmd;
+ int ret;
+
+ /* Read FW status register */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+
+ /* Get operational params from status register */
+ sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
+
+ /* Decrement the max supported by 1, to correlate with FW */
+ sc->max_fw_cmds = sc->max_fw_cmds-1;
+ max_cmd = sc->max_fw_cmds;
+
+ /* Determine allocation size of command frames */
+	sc->reply_q_depth = ((max_cmd * 2 + 1 + 15) / 16) * 16;
+ sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
+ sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
+ sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
+ sc->chain_frames_alloc_sz = 1024 * max_cmd;
+ sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
+
+ sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
+ sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
+
+ /* Used for pass thru MFI frame (DCMD) */
+ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
+
+ sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ sizeof(MPI2_SGE_IO_UNION))/16;
+
+ sc->last_reply_idx = 0;
+
+ ret = mrsas_alloc_mem(sc);
+ if (ret != SUCCESS)
+ return(ret);
+
+ ret = mrsas_alloc_mpt_cmds(sc);
+ if (ret != SUCCESS)
+ return(ret);
+
+ ret = mrsas_ioc_init(sc);
+ if (ret != SUCCESS)
+ return(ret);
+
+
+ return(0);
+}
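+
+/*
+ * Worked example with an assumed value, for illustration only: with
+ * max_fw_cmds = 1008, (1008 * 2 + 1) = 2017 reply descriptors are
+ * rounded up to the next multiple of 16:
+ */
+#if 0
+	/* (2017 + 15) / 16 * 16 == 2032 */
+	sc->reply_q_depth = ((1008 * 2 + 1 + 15) / 16) * 16;
+#endif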
+
+/**
+ * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
+ * input: Adapter soft state
+ *
+ * Allocates DMAable memory for the IOC Init cmd sent to FW to initialize the ROC/controller.
+ */
+int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
+{
+ int ioc_init_size;
+
+ /* Allocate IOC INIT command */
+ ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ ioc_init_size, // maxsize
+ 1, // msegments
+ ioc_init_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->ioc_init_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
+ BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
+ return (ENOMEM);
+ }
+ bzero(sc->ioc_init_mem, ioc_init_size);
+ if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
+ sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
+ &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/**
+ * mrsas_free_ioc_cmd: Frees memory for IOC Init command
+ * input: Adapter soft state
+ *
+ * Deallocates memory of the IOC Init cmd.
+ */
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
+{
+ if (sc->ioc_init_phys_mem)
+ bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
+ if (sc->ioc_init_mem != NULL)
+ bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
+ if (sc->ioc_init_tag != NULL)
+ bus_dma_tag_destroy(sc->ioc_init_tag);
+}
+
+/**
+ * mrsas_ioc_init: Sends IOC Init command to FW
+ * input: Adapter soft state
+ *
+ * Issues the IOC Init cmd to FW to initialize the ROC/controller.
+ */
+int mrsas_ioc_init(struct mrsas_softc *sc)
+{
+ struct mrsas_init_frame *init_frame;
+ pMpi2IOCInitRequest_t IOCInitMsg;
+ MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
+ u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
+ bus_addr_t phys_addr;
+ int i, retcode = 0;
+
+ /* Allocate memory for the IOC INIT command */
+ if (mrsas_alloc_ioc_cmd(sc)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
+ return(1);
+ }
+
+ IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
+ IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
+ IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ IOCInitMsg->MsgVersion = MPI2_VERSION;
+ IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
+ IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
+ IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
+ IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
+ IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
+
+ init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ if (sc->verbuf_mem) {
+ snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
+ MRSAS_VERSION);
+ init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
+ init_frame->driver_ver_hi = 0;
+ }
+
+ phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
+ init_frame->queue_info_new_phys_addr_lo = phys_addr;
+ init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
+
+ req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
+ req_desc.MFAIo.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ mrsas_disable_intr(sc);
+ mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
+ mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
+
+ /*
+	 * Poll for the Firmware response. Although polling with the DELAY
+	 * call can block the CPU, each polling interval is only
+	 * 1 millisecond.
+ */
+ if (init_frame->cmd_status == 0xFF) {
+ for (i=0; i < (max_wait * 1000); i++){
+ if (init_frame->cmd_status == 0xFF)
+ DELAY(1000);
+ else
+ break;
+ }
+ }
+
+ if (init_frame->cmd_status == 0)
+ mrsas_dprint(sc, MRSAS_OCR,
+ "IOC INIT response received from FW.\n");
+ else
+ {
+ if (init_frame->cmd_status == 0xFF)
+ device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
+ else
+ device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
+ retcode = 1;
+ }
+
+ mrsas_free_ioc_cmd(sc);
+ return (retcode);
+}
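+
+/*
+ * Sketch, not compiled in: the cmd_status polling above (repeated in
+ * mrsas_issue_polled()) is the generic "poll until FW flips a status
+ * byte" idiom, which could be factored into a hypothetical helper:
+ */
+#if 0
+static int
+mrsas_poll_status(volatile u_int8_t *status, u_int8_t pending, int max_wait)
+{
+	int i;
+
+	/* Wait up to max_wait seconds, checking every millisecond. */
+	for (i = 0; i < max_wait * 1000 && *status == pending; i++)
+		DELAY(1000);
+	return (*status == pending);	/* non-zero on timeout */
+}
+#endif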
+
+/**
+ * mrsas_alloc_mpt_cmds: Allocates the command packets
+ * input: Adapter instance soft state
+ *
+ * This function allocates the internal commands for IOs. Each command that is
+ * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
+ * An array is allocated with mrsas_mpt_cmd context. The free commands are
+ * maintained in a linked list (cmd pool). SMID value range is from 1 to
+ * max_fw_cmds.
+ */
+int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
+{
+ int i, j;
+ u_int32_t max_cmd;
+ struct mrsas_mpt_cmd *cmd;
+ pMpi2ReplyDescriptorsUnion_t reply_desc;
+ u_int32_t offset, chain_offset, sense_offset;
+ bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
+ u_int8_t *io_req_base, *chain_frame_base, *sense_base;
+
+ max_cmd = sc->max_fw_cmds;
+
+ sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
+ if (!sc->req_desc) {
+ device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
+ return(ENOMEM);
+ }
+ memset(sc->req_desc, 0, sc->request_alloc_sz);
+
+ /*
+ * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
+ * dynamic array first and then allocate individual commands.
+ */
+ sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
+ if (!sc->mpt_cmd_list) {
+ device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
+ return(ENOMEM);
+ }
+ memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
+ for (i = 0; i < max_cmd; i++) {
+ sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
+ M_MRSAS, M_NOWAIT);
+ if (!sc->mpt_cmd_list[i]) {
+ for (j = 0; j < i; j++)
+ free(sc->mpt_cmd_list[j],M_MRSAS);
+ free(sc->mpt_cmd_list, M_MRSAS);
+ sc->mpt_cmd_list = NULL;
+ return(ENOMEM);
+ }
+ }
+
+ io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
+ chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
+ sense_base = (u_int8_t*)sc->sense_mem;
+ sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
+ for (i = 0; i < max_cmd; i++) {
+ cmd = sc->mpt_cmd_list[i];
+ offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+ chain_offset = 1024 * i;
+ sense_offset = MRSAS_SENSE_LEN * i;
+ memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
+ cmd->index = i + 1;
+ cmd->ccb_ptr = NULL;
+ callout_init(&cmd->cm_callout, 0);
+ cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+ cmd->sc = sc;
+ cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
+ memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
+ cmd->io_request_phys_addr = io_req_base_phys + offset;
+ cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
+ cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
+ cmd->sense = sense_base + sense_offset;
+ cmd->sense_phys_addr = sense_base_phys + sense_offset;
+ if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
+ return(FAIL);
+ }
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
+ }
+
+ /* Initialize reply descriptor array to 0xFFFFFFFF */
+ reply_desc = sc->reply_desc_mem;
+ for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
+ reply_desc->Words = MRSAS_ULONG_MAX;
+ }
+ return(0);
+}
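+
+/*
+ * Illustration, not compiled in: each command's resources are carved
+ * out of the large DMA regions at fixed strides -- command i (SMID
+ * i + 1) gets the i-th IO frame, the i-th 1 KB chain frame and the
+ * i-th MRSAS_SENSE_LEN sense slice, e.g. for i = 3:
+ */
+#if 0
+	cmd->io_request_phys_addr = io_req_base_phys +
+	    3 * MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+	cmd->chain_frame_phys_addr = chain_frame_base_phys + 3 * 1024;
+	cmd->sense_phys_addr = sense_base_phys + 3 * MRSAS_SENSE_LEN;
+#endif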
+
+/**
+ * mrsas_fire_cmd: Sends command to FW
+ * input: Adapter soft state
+ * request descriptor address low
+ * request descriptor address high
+ *
+ * This function fires the command to Firmware by writing to the
+ * inbound_low_queue_port and inbound_high_queue_port.
+ */
+void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi)
+{
+ mtx_lock(&sc->pci_lock);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
+ req_desc_lo);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
+ req_desc_hi);
+ mtx_unlock(&sc->pci_lock);
+}
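+
+/*
+ * Note: the two 32-bit register writes above form one logical 64-bit
+ * post, so they are serialized under pci_lock to keep concurrent posts
+ * from interleaving their low/high halves.
+ */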
+
+/**
+ * mrsas_transition_to_ready: Move FW to Ready state
+ * input: Adapter instance soft state
+ *
+ * During initialization, the FW can potentially be in any one of several
+ * possible states. If the FW is in the operational or waiting-for-handshake
+ * states, the driver must take steps to bring it to the ready state;
+ * otherwise, it has to wait for the ready state.
+ */
+int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
+{
+ int i;
+ u_int8_t max_wait;
+ u_int32_t val, fw_state;
+ u_int32_t cur_state;
+ u_int32_t abs_state, curr_abs_state;
+
+ val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+ fw_state = val & MFI_STATE_MASK;
+ max_wait = MRSAS_RESET_WAIT_TIME;
+
+ if (fw_state != MFI_STATE_READY)
+ device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
+
+ while (fw_state != MFI_STATE_READY) {
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+ switch (fw_state) {
+ case MFI_STATE_FAULT:
+ device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
+ if (ocr) {
+ cur_state = MFI_STATE_FAULT;
+ break;
+ }
+ else
+ return -ENODEV;
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /* Set the CLR bit in inbound doorbell */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+ case MFI_STATE_BOOT_MESSAGE_PENDING:
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_INIT_HOTPLUG);
+ cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ break;
+ case MFI_STATE_OPERATIONAL:
+ /* Bring it to READY state; assuming max wait 10 secs */
+ mrsas_disable_intr(sc);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
+ for (i=0; i < max_wait * 1000; i++) {
+ if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
+ DELAY(1000);
+ else
+ break;
+ }
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+ case MFI_STATE_UNDEFINED:
+ /* This state should not last for more than 2 seconds */
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+ case MFI_STATE_BB_INIT:
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+ case MFI_STATE_FW_INIT:
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+ case MFI_STATE_FW_INIT_2:
+ cur_state = MFI_STATE_FW_INIT_2;
+ break;
+ case MFI_STATE_DEVICE_SCAN:
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+ case MFI_STATE_FLUSH_CACHE:
+ cur_state = MFI_STATE_FLUSH_CACHE;
+ break;
+ default:
+ device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
+ return -ENODEV;
+ }
+
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+ for (i = 0; i < (max_wait * 1000); i++) {
+ fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad))& MFI_STATE_MASK);
+ curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ if (abs_state == curr_abs_state)
+ DELAY(1000);
+ else
+ break;
+ }
+
+ /*
+ * Return error if fw_state hasn't changed after max_wait
+ */
+ if (curr_abs_state == abs_state) {
+ device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
+ "in %d secs\n", fw_state, max_wait);
+ return -ENODEV;
+ }
+ }
+ mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
+ return 0;
+}
+
+/**
+ * mrsas_get_mfi_cmd: Get a cmd from free command pool
+ * input: Adapter soft state
+ *
+ * This function removes an MFI command from the command list.
+ */
+struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
+{
+ struct mrsas_mfi_cmd *cmd = NULL;
+
+ mtx_lock(&sc->mfi_cmd_pool_lock);
+ if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
+ cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
+ TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
+ }
+ mtx_unlock(&sc->mfi_cmd_pool_lock);
+
+ return cmd;
+}
+
+/**
+ * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
+ * input: Adapter Context.
+ *
+ * This function checks the FW status register and the
+ * do_timedout_reset flag. It performs OCR/Kill adapter if the
+ * FW is in a fault state or an IO timeout has triggered a reset.
+ */
+static void
+mrsas_ocr_thread(void *arg)
+{
+ struct mrsas_softc *sc;
+ u_int32_t fw_status, fw_state;
+
+ sc = (struct mrsas_softc *)arg;
+
+ mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
+
+ sc->ocr_thread_active = 1;
+ mtx_lock(&sc->sim_lock);
+ for (;;) {
+ /* Sleep for 1 second and check the queue status*/
+ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
+ "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Exit due to shutdown from %s\n", __func__);
+ break;
+ }
+ fw_status = mrsas_read_reg(sc,
+ offsetof(mrsas_reg_set, outbound_scratch_pad));
+ fw_state = fw_status & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
+ device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
+ sc->do_timedout_reset?"IO Timeout":
+ "FW fault detected");
+ mtx_lock_spin(&sc->ioctl_lock);
+ sc->reset_in_progress = 1;
+ sc->reset_count++;
+ mtx_unlock_spin(&sc->ioctl_lock);
+ mrsas_xpt_freeze(sc);
+ mrsas_reset_ctrl(sc);
+ mrsas_xpt_release(sc);
+ sc->reset_in_progress = 0;
+ sc->do_timedout_reset = 0;
+ }
+ }
+ mtx_unlock(&sc->sim_lock);
+ sc->ocr_thread_active = 0;
+ mrsas_kproc_exit(0);
+}
+
+/**
+ * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
+ * input: Adapter Context.
+ *
+ * This function clears the reply descriptors so that, post OCR,
+ * the driver and FW discard old history.
+ */
+void mrsas_reset_reply_desc(struct mrsas_softc *sc)
+{
+ int i;
+ pMpi2ReplyDescriptorsUnion_t reply_desc;
+
+ sc->last_reply_idx = 0;
+ reply_desc = sc->reply_desc_mem;
+ for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
+ reply_desc->Words = MRSAS_ULONG_MAX;
+ }
+}
+
+/**
+ * mrsas_reset_ctrl Core function to OCR/Kill adapter.
+ * input: Adapter Context.
+ *
+ * This function runs from thread context so that it can sleep.
+ * 1. Do not handle OCR if FW is in a HW critical error state.
+ * 2. Wait up to 180 seconds for outstanding commands to complete.
+ * 3. If #2 finds no outstanding commands, the Controller is in a working
+ * state, so skip OCR. Otherwise, do OCR/kill the Adapter based on the
+ * disableOnlineCtrlReset flag.
+ * 4. At the start of OCR, return all SCSI commands that have a ccb_ptr
+ * back to the CAM layer.
+ * 5. Post OCR, re-fire Management commands and move the Controller to the
+ * Operational state.
+ */
+int mrsas_reset_ctrl(struct mrsas_softc *sc)
+{
+ int retval = SUCCESS, i, j, retry = 0;
+ u_int32_t host_diag, abs_state, status_reg, reset_adapter;
+ union ccb *ccb;
+ struct mrsas_mfi_cmd *mfi_cmd;
+ struct mrsas_mpt_cmd *mpt_cmd;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
+ device_printf(sc->mrsas_dev,
+ "mrsas: Hardware critical error, returning FAIL.\n");
+ return FAIL;
+ }
+
+ set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
+ mrsas_disable_intr(sc);
+ DELAY(1000 * 1000);
+
+ /* First try waiting for commands to complete */
+ if (mrsas_wait_for_outstanding(sc)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "resetting adapter from %s.\n",
+ __func__);
+ /* Now return commands back to the CAM layer */
+ for (i = 0 ; i < sc->max_fw_cmds; i++) {
+ mpt_cmd = sc->mpt_cmd_list[i];
+ if (mpt_cmd->ccb_ptr) {
+ ccb = (union ccb *)(mpt_cmd->ccb_ptr);
+ ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+ mrsas_cmd_done(sc, mpt_cmd);
+ atomic_dec(&sc->fw_outstanding);
+ }
+ }
+
+ status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (sc->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
+ /* Reset not supported, kill adapter */
+ mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
+ mrsas_kill_hba(sc);
+ sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
+ retval = FAIL;
+ goto out;
+ }
+
+ /* Now try to reset the chip */
+ for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_FLUSH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_1ST_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_2ND_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_3RD_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_4TH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_5TH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_6TH_KEY_VALUE);
+
+ /* Check that the diag write enable (DRWE) bit is on */
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ retry = 0;
+ while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+ DELAY(100 * 1000);
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ if (retry++ == 100) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Host diag unlock failed!\n");
+ break;
+ }
+ }
+ if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+ continue;
+
+ /* Send chip reset command */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
+ host_diag | HOST_DIAG_RESET_ADAPTER);
+ DELAY(3000 * 1000);
+
+ /* Make sure reset adapter bit is cleared */
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ retry = 0;
+ while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+ DELAY(100 * 1000);
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ if (retry++ == 1000) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Diag reset adapter never cleared!\n");
+ break;
+ }
+ }
+ if (host_diag & HOST_DIAG_RESET_ADAPTER)
+ continue;
+
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ retry = 0;
+
+ while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
+ DELAY(100 * 1000);
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ }
+ if (abs_state <= MFI_STATE_FW_INIT) {
+ mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
+ " state = 0x%x\n", abs_state);
+ continue;
+ }
+
+ /* Wait for FW to become ready */
+ if (mrsas_transition_to_ready(sc, 1)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "mrsas: Failed to transition controller to ready.\n");
+ continue;
+ }
+
+ mrsas_reset_reply_desc(sc);
+ if (mrsas_ioc_init(sc)) {
+ mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
+ continue;
+ }
+
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_enable_intr(sc);
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+
+ /* Re-fire management commands */
+ for (j = 0 ; j < sc->max_fw_cmds; j++) {
+ mpt_cmd = sc->mpt_cmd_list[j];
+ if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
+ mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
+ if (mfi_cmd->frame->dcmd.opcode ==
+ MR_DCMD_LD_MAP_GET_INFO) {
+ mrsas_release_mfi_cmd(mfi_cmd);
+ mrsas_release_mpt_cmd(mpt_cmd);
+ } else {
+ req_desc = mrsas_get_request_desc(sc,
+ mfi_cmd->cmd_id.context.smid - 1);
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Re-fire command DCMD opcode 0x%x index %d\n ",
+ mfi_cmd->frame->dcmd.opcode, j);
+ if (!req_desc)
+ device_printf(sc->mrsas_dev,
+ "Cannot build MPT cmd.\n");
+ else
+ mrsas_fire_cmd(sc, req_desc->addr.u.low,
+ req_desc->addr.u.high);
+ }
+ }
+ }
+
+ /* Reset load balance info */
+ memset(sc->load_balance_info, 0,
+ sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
+
+ if (!mrsas_get_map_info(sc))
+ mrsas_sync_map_info(sc);
+
+ /* Adapter reset completed successfully */
+ device_printf(sc->mrsas_dev, "Reset successful\n");
+ retval = SUCCESS;
+ goto out;
+ }
+ /* Reset failed, kill the adapter */
+ device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
+ mrsas_kill_hba(sc);
+ retval = FAIL;
+ } else {
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_enable_intr(sc);
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ }
+out:
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Reset Exit with %d.\n", retval);
+ return retval;
+}
+
+/**
+ * mrsas_kill_hba Kill HBA when OCR is not supported.
+ * input: Adapter Context.
+ *
+ * This function kills the HBA when OCR is not supported.
+ */
+void mrsas_kill_hba(struct mrsas_softc *sc)
+{
+ mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_STOP_ADP);
+ /* Flush */
+ mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
+}
+
+/**
+ * mrsas_wait_for_outstanding Wait for outstanding commands
+ * input: Adapter Context.
+ *
+ * This function waits up to 180 seconds for outstanding
+ * commands to complete.
+ */
+int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
+{
+ int i, outstanding, retval = 0;
+ u_int32_t fw_state;
+
+ for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Driver remove or shutdown called.\n");
+ retval = 1;
+ goto out;
+ }
+ /* Check if firmware is in fault state */
+ fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Found FW in FAULT state, will reset adapter.\n");
+ retval = 1;
+ goto out;
+ }
+ outstanding = atomic_read(&sc->fw_outstanding);
+ if (!outstanding)
+ goto out;
+
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
+			    "commands to complete\n", i, outstanding);
+ mrsas_complete_cmd(sc);
+ }
+ DELAY(1000 * 1000);
+ }
+
+ if (atomic_read(&sc->fw_outstanding)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ " pending commands remain after waiting,"
+ " will reset adapter.\n");
+ retval = 1;
+ }
+out:
+ return retval;
+}
+
+/**
+ * mrsas_release_mfi_cmd: Return a cmd to free command pool
+ * input: Command packet for return to free cmd pool
+ *
+ * This function returns the MFI command to the command list.
+ */
+void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
+{
+ struct mrsas_softc *sc = cmd->sc;
+
+ mtx_lock(&sc->mfi_cmd_pool_lock);
+ cmd->ccb_ptr = NULL;
+ cmd->cmd_id.frame_count = 0;
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
+ mtx_unlock(&sc->mfi_cmd_pool_lock);
+
+ return;
+}
+
+/**
+ * mrsas_get_ctrl_info - Returns FW's controller structure
+ * input: Adapter soft state
+ * Controller information structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ */
+static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
+ struct mrsas_ctrl_info *ctrl_info)
+{
+ int retcode = 0;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+ dcmd = &cmd->frame->dcmd;
+
+ if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return -ENOMEM;
+ }
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
+ dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
+
+ if (!mrsas_issue_polled(sc, cmd))
+ memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
+ else
+ retcode = 1;
+
+ mrsas_free_ctlr_info_cmd(sc);
+ mrsas_release_mfi_cmd(cmd);
+ return(retcode);
+}
+
+/**
+ * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
+ * input: Adapter soft state
+ *
+ * Allocates DMAable memory for the controller info internal command.
+ */
+int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
+{
+ int ctlr_info_size;
+
+ /* Allocate get controller info command */
+ ctlr_info_size = sizeof(struct mrsas_ctrl_info);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ ctlr_info_size, // maxsize
+ 1, // msegments
+ ctlr_info_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->ctlr_info_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
+ BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
+ sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
+ &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
+ return (ENOMEM);
+ }
+
+ memset(sc->ctlr_info_mem, 0, ctlr_info_size);
+ return (0);
+}
+
+/**
+ * mrsas_free_ctlr_info_cmd: Free memory for controller info command
+ * input: Adapter soft state
+ *
+ * Deallocates memory of the get controller info cmd.
+ */
+void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
+{
+ if (sc->ctlr_info_phys_addr)
+ bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
+ if (sc->ctlr_info_mem != NULL)
+ bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
+ if (sc->ctlr_info_tag != NULL)
+ bus_dma_tag_destroy(sc->ctlr_info_tag);
+}
+
+/**
+ * mrsas_issue_polled: Issues a polling command
+ * inputs: Adapter soft state
+ * Command packet to be issued
+ *
+ * This function posts internal commands to the Firmware. MFI
+ * requires the cmd_status to be set to 0xFF before posting. The maximum
+ * wait time of the poll response timer is 180 seconds.
+ */
+int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ struct mrsas_header *frame_hdr = &cmd->frame->hdr;
+ u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+ int i, retcode = 0;
+
+ frame_hdr->cmd_status = 0xFF;
+ frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ /* Issue the frame using inbound queue port */
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
+ return(1);
+ }
+
+ /*
+	 * Poll for the Firmware response. Although polling with the DELAY
+	 * call can block the CPU, each polling interval is only
+	 * 1 millisecond.
+ */
+ if (frame_hdr->cmd_status == 0xFF) {
+ for (i=0; i < (max_wait * 1000); i++){
+ if (frame_hdr->cmd_status == 0xFF)
+ DELAY(1000);
+ else
+ break;
+ }
+ }
+ if (frame_hdr->cmd_status != 0)
+ {
+ if (frame_hdr->cmd_status == 0xFF)
+ device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
+ else
+ device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
+ retcode = 1;
+ }
+ return(retcode);
+}
+
+/**
+ * mrsas_issue_dcmd - Issues an MFI passthru cmd
+ * input: Adapter soft state
+ * mfi cmd pointer
+ *
+ * This function is called by mrsas_issue_blocked_cmd() and
+ * mrsas_issue_polled() to build the MPT command and then fire it
+ * off to the Firmware.
+ */
+int
+mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ req_desc = mrsas_build_mpt_cmd(sc, cmd);
+ if (!req_desc) {
+ device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
+ return(1);
+ }
+
+ mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+
+ return(0);
+}
+
+/**
+ * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
+ * input: Adapter soft state
+ * mfi cmd to build
+ *
+ * This function is called by mrsas_issue_dcmd() to build the MPT-MFI
+ * passthru command and prepares the MPT command to send to Firmware.
+ */
+MRSAS_REQUEST_DESCRIPTOR_UNION *
+mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u_int16_t index;
+
+ if (mrsas_build_mptmfi_passthru(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
+ return NULL;
+ }
+
+ index = cmd->cmd_id.context.smid;
+
+ req_desc = mrsas_get_request_desc(sc, index-1);
+ if(!req_desc)
+ return NULL;
+
+ req_desc->addr.Words = 0;
+ req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ req_desc->SCSIIO.SMID = index;
+
+ return(req_desc);
+}
+
+/**
+ * mrsas_build_mptmfi_passthru - Builds an MPT MFI Passthru command
+ * input: Adapter soft state
+ * mfi cmd pointer
+ *
+ * The MPT command and the io_request are setup as a passthru command.
+ * The SGE chain address is set to frame_phys_addr of the MFI command.
+ */
+u_int8_t
+mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
+{
+ MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+ PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
+ struct mrsas_mpt_cmd *mpt_cmd;
+ struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
+
+ mpt_cmd = mrsas_get_mpt_cmd(sc);
+ if (!mpt_cmd)
+ return(1);
+
+ /* Save the smid. To be used for returning the cmd */
+ mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
+
+ mpt_cmd->sync_cmd_idx = mfi_cmd->index;
+
+ /*
+ * For cmds where the flag is set, store the flag and check
+ * on completion. For cmds with this flag, don't call
+ * mrsas_complete_cmd.
+ */
+
+ if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
+ mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ io_req = mpt_cmd->io_request;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
+ sgl_ptr_end += sc->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+ io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+ io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
+ io_req->ChainOffset = sc->chain_offset_mfi_pthru;
+
+ mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+
+ mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+ mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
+
+ return(0);
+}
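+
+/*
+ * Layout note, for illustration: the passthru MPT request built above
+ * carries no data SGEs of its own; its single IEEE chain element points
+ * at the legacy MFI frame, so the FW fetches the MFI command through
+ * the chain:
+ *
+ *   MPT io_request --chain SGE--> MFI frame (frame_phys_addr)
+ */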
+
+/**
+ * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
+ * input: Adapter soft state
+ * Command to be issued
+ *
+ * This function waits on an event for the command to be returned
+ * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
+ * Used for issuing internal and ioctl commands.
+ */
+int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+ unsigned long total_time = 0;
+ int retcode = 0;
+
+ /* Initialize cmd_status */
+ cmd->cmd_status = ECONNREFUSED;
+
+ /* Build MPT-MFI command for issue to FW */
+ if (mrsas_issue_dcmd(sc, cmd)){
+ device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
+ return(1);
+ }
+
+ sc->chan = (void*)&cmd;
+
+ while (1) {
+ if (cmd->cmd_status == ECONNREFUSED){
+ tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
+ }
+ else
+ break;
+ total_time++;
+ if (total_time >= max_wait) {
+ device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
+ retcode = 1;
+ break;
+ }
+ }
+ return(retcode);
+}
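+
+/*
+ * Sketch, not compiled in: the blocked-command handshake pairs the
+ * tsleep() on &sc->chan above with the wakeup_one() in mrsas_wakeup();
+ * the completion path moves cmd_status away from ECONNREFUSED before
+ * waking the waiter:
+ */
+#if 0
+	/* completion side, see mrsas_wakeup() */
+	cmd->cmd_status = cmd->frame->io.cmd_status;
+	wakeup_one((void *)&sc->chan);
+#endif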
+
+/**
+ * mrsas_complete_mptmfi_passthru - Completes a command
+ * input: sc: Adapter soft state
+ * cmd: Command to be completed
+ * status: cmd completion status
+ *
+ * This function is called from mrsas_complete_cmd() after an interrupt
+ * is received from Firmware, and io_request->Function is
+ * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
+ */
+void
+mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
+ u_int8_t status)
+{
+ struct mrsas_header *hdr = &cmd->frame->hdr;
+ u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
+
+ /* Reset the retry counter for future re-tries */
+ cmd->retry_for_fw_reset = 0;
+
+ if (cmd->ccb_ptr)
+ cmd->ccb_ptr = NULL;
+
+ switch (hdr->cmd) {
+ case MFI_CMD_INVALID:
+ device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
+ break;
+ case MFI_CMD_PD_SCSI_IO:
+ case MFI_CMD_LD_SCSI_IO:
+ /*
+ * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+ * issued either through an IO path or an IOCTL path. If it
+ * was via IOCTL, we will send it to internal completion.
+ */
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ mrsas_wakeup(sc, cmd);
+ break;
+ }
+ case MFI_CMD_SMP:
+ case MFI_CMD_STP:
+ case MFI_CMD_DCMD:
+ /* Check for LD map update */
+ if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
+ (cmd->frame->dcmd.mbox.b[1] == 1)) {
+ sc->fast_path_io = 0;
+ mtx_lock(&sc->raidmap_lock);
+ if (cmd_status != 0) {
+ if (cmd_status != MFI_STAT_NOT_FOUND)
+ device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
+ else {
+ mrsas_release_mfi_cmd(cmd);
+ mtx_unlock(&sc->raidmap_lock);
+ break;
+ }
+ }
+ else
+ sc->map_id++;
+ mrsas_release_mfi_cmd(cmd);
+ if (MR_ValidateMapInfo(sc))
+ sc->fast_path_io = 0;
+ else
+ sc->fast_path_io = 1;
+ mrsas_sync_map_info(sc);
+ mtx_unlock(&sc->raidmap_lock);
+ break;
+ }
+#if 0 //currently not supporting event handling, so commenting out
+ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+ cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+ mrsas_poll_wait_aen = 0;
+ }
+#endif
+		/* See if we got an event notification */
+ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ mrsas_complete_aen(sc, cmd);
+ else
+ mrsas_wakeup(sc, cmd);
+ break;
+ case MFI_CMD_ABORT:
+ /* Command issued to abort another cmd return */
+ mrsas_complete_abort(sc, cmd);
+ break;
+ default:
+ device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
+ break;
+ }
+}
+
+/**
+ * mrsas_wakeup - Completes an internal command
+ * input: Adapter soft state
+ * Command to be completed
+ *
+ * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
+ * a wait timer is started. This function is called from
+ * mrsas_complete_mptmfi_passthru() as it completes the command,
+ * to wake up from the command wait.
+ */
+void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ cmd->cmd_status = cmd->frame->io.cmd_status;
+
+ if (cmd->cmd_status == ECONNREFUSED)
+ cmd->cmd_status = 0;
+
+ sc->chan = (void*)&cmd;
+ wakeup_one((void *)&sc->chan);
+ return;
+}
+
+/**
+ * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
+ * input: Adapter soft state
+ * Shutdown/Hibernate
+ *
+ * This function issues a DCMD internal command to Firmware to initiate
+ * shutdown of the controller.
+ */
+static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
+{
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
+ return;
+ }
+
+ if (sc->aen_cmd)
+ mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
+
+ if (sc->map_update_cmd)
+ mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
+
+ dcmd = &cmd->frame->dcmd;
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = opcode;
+
+ device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
+
+ mrsas_issue_blocked_cmd(sc, cmd);
+ mrsas_release_mfi_cmd(cmd);
+
+ return;
+}
+
+/**
+ * mrsas_flush_cache: Requests FW to flush all its caches
+ * input: Adapter soft state
+ *
+ * This function issues a DCMD internal command to Firmware to initiate
+ * flushing of all caches.
+ */
+static void mrsas_flush_cache(struct mrsas_softc *sc)
+{
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
+ return;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+ dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+ mrsas_issue_blocked_cmd(sc, cmd);
+ mrsas_release_mfi_cmd(cmd);
+
+ return;
+}
+
+/**
+ * mrsas_get_map_info: Load and validate RAID map
+ * input: Adapter instance soft state
+ *
+ * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
+ * to load and validate the RAID map. It returns 0 on success and 1
+ * otherwise.
+ */
+static int mrsas_get_map_info(struct mrsas_softc *sc)
+{
+ uint8_t retcode = 0;
+
+ sc->fast_path_io = 0;
+ if (!mrsas_get_ld_map_info(sc)) {
+ retcode = MR_ValidateMapInfo(sc);
+ if (retcode == 0) {
+ sc->fast_path_io = 1;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * mrsas_get_ld_map_info: Get FW's ld_map structure
+ * input: Adapter instance soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's LD map
+ * structure.
+ */
+static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
+{
+ int retcode = 0;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ MR_FW_RAID_MAP_ALL *map;
+ bus_addr_t map_phys_addr = 0;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
+ return 1;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ map = sc->raidmap_mem[(sc->map_id & 1)];
+ map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
+ if (!map) {
+ device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
+ mrsas_release_mfi_cmd(cmd);
+ return (ENOMEM);
+ }
+ memset(map, 0, sizeof(*map));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sc->map_sz;
+ dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
+ dcmd->sgl.sge32[0].length = sc->map_sz;
+ if (!mrsas_issue_polled(sc, cmd))
+ retcode = 0;
+ else
+ {
+ device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
+ retcode = 1;
+ }
+ mrsas_release_mfi_cmd(cmd);
+ return(retcode);
+}
+
+/**
+ * mrsas_sync_map_info: Sync the LD map info with FW
+ * input: Adapter instance soft state
+ *
+ * Issues an internal command (DCMD) to sync the driver's copy of the LD
+ * target map with the FW.
+ */
+static int mrsas_sync_map_info(struct mrsas_softc *sc)
+{
+ int retcode = 0, i;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ uint32_t size_sync_info, num_lds;
+ MR_LD_TARGET_SYNC *target_map = NULL;
+ MR_FW_RAID_MAP_ALL *map;
+ MR_LD_RAID *raid;
+ MR_LD_TARGET_SYNC *ld_sync;
+ bus_addr_t map_phys_addr = 0;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
+ return 1;
+ }
+
+ map = sc->raidmap_mem[sc->map_id & 1];
+ num_lds = map->raidMap.ldCount;
+
+ dcmd = &cmd->frame->dcmd;
+ size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
+ memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
+
+ map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
+
+ ld_sync = (MR_LD_TARGET_SYNC *)target_map;
+
+ for (i = 0; i < num_lds; i++, ld_sync++) {
+ raid = MR_LdRaidGet(i, map);
+ ld_sync->targetId = MR_GetLDTgtId(i, map);
+ ld_sync->seqNum = raid->seqNum;
+ }
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sc->map_sz;
+ dcmd->mbox.b[0] = num_lds;
+ dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
+ dcmd->sgl.sge32[0].length = sc->map_sz;
+
+ sc->map_update_cmd = cmd;
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
+ return(1);
+ }
+ return(retcode);
+}
+
+/**
+ * mrsas_get_pd_list: Returns FW's PD list structure
+ * input: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out the
+ * system PDs exposed by the Firmware.
+ */
+static int mrsas_get_pd_list(struct mrsas_softc *sc)
+{
+ int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ struct MR_PD_LIST *pd_list_mem;
+ struct MR_PD_ADDRESS *pd_addr;
+ bus_addr_t pd_list_phys_addr = 0;
+ struct mrsas_tmp_dcmd *tcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
+ return 1;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
+ pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+ if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return(ENOMEM);
+ }
+ else {
+ pd_list_mem = tcmd->tmp_dcmd_mem;
+ pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
+ }
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+ dcmd->mbox.b[1] = 0;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+ dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
+ dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
+ dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+
+ if (!mrsas_issue_polled(sc, cmd))
+ retcode = 0;
+ else
+ retcode = 1;
+
+ /* Get the instance PD list */
+ pd_count = MRSAS_MAX_PD;
+ pd_addr = pd_list_mem->addr;
+ if (retcode == 0 && pd_list_mem->count < pd_count) {
+ memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
+ sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
+ sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
+ sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
+ pd_addr++;
+ }
+ }
+
+	/* Use a mutex/spinlock if the pd_list component size grows beyond 32 bits. */
+ memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
+ mrsas_free_tmp_dcmd(tcmd);
+ mrsas_release_mfi_cmd(cmd);
+ free(tcmd, M_MRSAS);
+ return(retcode);
+}
+
+/**
+ * mrsas_get_ld_list: Returns FW's LD list structure
+ * input: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller LD
+ * list structure. This information is mainly used to find out the
+ * logical drives supported by the FW.
+ */
+static int mrsas_get_ld_list(struct mrsas_softc *sc)
+{
+ int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ struct MR_LD_LIST *ld_list_mem;
+ bus_addr_t ld_list_phys_addr = 0;
+ struct mrsas_tmp_dcmd *tcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
+ return 1;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
+	if (!tcmd) {
+		device_printf(sc->mrsas_dev, "Cannot alloc tmp dcmd for get LD list cmd\n");
+		mrsas_release_mfi_cmd(cmd);
+		return(ENOMEM);
+	}
+	ld_list_size = sizeof(struct MR_LD_LIST);
+	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
+		device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
+		mrsas_release_mfi_cmd(cmd);
+		free(tcmd, M_MRSAS);
+		return(ENOMEM);
+	}
+ else {
+ ld_list_mem = tcmd->tmp_dcmd_mem;
+ ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
+ }
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
+ dcmd->opcode = MR_DCMD_LD_GET_LIST;
+ dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+ dcmd->pad_0 = 0;
+
+ if (!mrsas_issue_polled(sc, cmd))
+ retcode = 0;
+ else
+ retcode = 1;
+
+ /* Get the instance LD list */
+ if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
+ sc->CurLdCount = ld_list_mem->ldCount;
+ memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
+ for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
+ if (ld_list_mem->ldList[ld_index].state != 0) {
+ ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
+ sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
+ }
+ }
+ }
+
+ mrsas_free_tmp_dcmd(tcmd);
+ mrsas_release_mfi_cmd(cmd);
+ free(tcmd, M_MRSAS);
+ return(retcode);
+}
+
+/**
+ * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
+ * input: Adapter soft state
+ * Temp command
+ * Size of allocation
+ *
+ * Allocates DMAable memory for a temporary internal command. The allocated
+ * memory is initialized to all zeros upon successful loading of the dma
+ * mapped memory.
+ */
+int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
+ int size)
+{
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ size, // maxsize
+                        1,                      // nsegments
+ size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &tcmd->tmp_dcmd_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
+ BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
+ tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
+ &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
+ return (ENOMEM);
+ }
+
+ memset(tcmd->tmp_dcmd_mem, 0, size);
+ return (0);
+}
+
+/**
+ * mrsas_free_tmp_dcmd: Free memory for temporary command
+ * input: temporary dcmd pointer
+ *
+ * Deallocates the memory of the temporary command that was used in the
+ * construction of the internal DCMD.
+ */
+void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
+{
+ if (tmp->tmp_dcmd_phys_addr)
+ bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
+ if (tmp->tmp_dcmd_mem != NULL)
+ bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
+ if (tmp->tmp_dcmd_tag != NULL)
+ bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
+}
+
+/**
+ * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
+ * input: Adapter soft state
+ * Previously issued cmd to be aborted
+ *
+ * This function is used to abort previously issued commands, such as AEN and
+ * RAID map sync commands. The abort command is sent as an internal
+ * DCMD, and the driver then waits for a return status. The
+ * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
+ */
+static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd_to_abort)
+{
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_abort_frame *abort_fr;
+ u_int8_t retcode = 0;
+ unsigned long total_time = 0;
+ u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
+ return(1);
+ }
+
+ abort_fr = &cmd->frame->abort;
+
+ /* Prepare and issue the abort frame */
+ abort_fr->cmd = MFI_CMD_ABORT;
+ abort_fr->cmd_status = 0xFF;
+ abort_fr->flags = 0;
+ abort_fr->abort_context = cmd_to_abort->index;
+ abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
+ abort_fr->abort_mfi_phys_addr_hi = 0;
+
+ cmd->sync_cmd = 1;
+ cmd->cmd_status = 0xFF;
+
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
+ return(1);
+ }
+
+ /* Wait for this cmd to complete */
+ sc->chan = (void*)&cmd;
+ while (1) {
+ if (cmd->cmd_status == 0xFF){
+ tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
+ }
+ else
+ break;
+ total_time++;
+ if (total_time >= max_wait) {
+ device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
+ retcode = 1;
+ break;
+ }
+ }
+
+ cmd->sync_cmd = 0;
+ mrsas_release_mfi_cmd(cmd);
+ return(retcode);
+}
+
+/**
+ * mrsas_complete_abort: Completes aborting a command
+ * input: Adapter soft state
+ * Cmd that was issued to abort another cmd
+ *
+ * The mrsas_issue_blocked_abort_cmd() function waits for the command status
+ * to change after sending the command. This function is called from
+ * mrsas_complete_mptmfi_passthru() to wake up the associated sleeping thread.
+ */
+void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ cmd->cmd_status = 0;
+ sc->chan = (void*)&cmd;
+ wakeup_one((void *)&sc->chan);
+ }
+ return;
+}
+
+/**
+ * mrsas_aen_handler: Callback function for AEN processing from thread context.
+ * input: Adapter soft state
+ *
+ */
+void mrsas_aen_handler(struct mrsas_softc *sc)
+{
+ union mrsas_evt_class_locale class_locale;
+ int doscan = 0;
+ u_int32_t seq_num;
+ int error;
+
+	if (sc == NULL) {
+		printf("mrsas: invalid adapter instance!\n");
+		return;
+	}
+
+ if (sc->evt_detail_mem) {
+ switch (sc->evt_detail_mem->code) {
+ case MR_EVT_PD_INSERTED:
+ mrsas_get_pd_list(sc);
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ doscan = 0;
+ break;
+ case MR_EVT_PD_REMOVED:
+ mrsas_get_pd_list(sc);
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ doscan = 0;
+ break;
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
+ case MR_EVT_LD_DELETED:
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ doscan = 0;
+ break;
+ case MR_EVT_LD_CREATED:
+ mrsas_get_ld_list(sc);
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ doscan = 0;
+ break;
+ case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
+ case MR_EVT_LD_STATE_CHANGE:
+ doscan = 1;
+ break;
+ default:
+ doscan = 0;
+ break;
+ }
+ } else {
+ device_printf(sc->mrsas_dev, "invalid evt_detail\n");
+ return;
+ }
+ if (doscan) {
+ mrsas_get_pd_list(sc);
+ mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ mrsas_get_ld_list(sc);
+ mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ }
+
+ seq_num = sc->evt_detail_mem->seq_num + 1;
+
+ // Register AEN with FW for latest sequence number plus 1
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+	if (sc->aen_cmd != NULL)
+		return;
+
+ mtx_lock(&sc->aen_lock);
+ error = mrsas_register_aen(sc, seq_num,
+ class_locale.word);
+ mtx_unlock(&sc->aen_lock);
+
+ if (error)
+ device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
+
+}
+
+
+/**
+ * mrsas_complete_aen: Completes AEN command
+ * input: Adapter soft state
+ * Completed AEN cmd
+ *
+ * This function is called from the ISR and continues event
+ * processing in thread context by enqueuing a task on ev_tq
+ * (callback function "mrsas_aen_handler").
+ */
+void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ /*
+ * Don't signal app if it is just an aborted previously registered aen
+ */
+ if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
+ /* TO DO (?) */
+ }
+ else
+ cmd->abort_aen = 0;
+
+ sc->aen_cmd = NULL;
+ mrsas_release_mfi_cmd(cmd);
+
+ if (!sc->remove_in_progress)
+ taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
+
+ return;
+}
+
+static device_method_t mrsas_methods[] = {
+ DEVMETHOD(device_probe, mrsas_probe),
+ DEVMETHOD(device_attach, mrsas_attach),
+ DEVMETHOD(device_detach, mrsas_detach),
+ DEVMETHOD(device_suspend, mrsas_suspend),
+ DEVMETHOD(device_resume, mrsas_resume),
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+ { 0, 0 }
+};
+
+static driver_t mrsas_driver = {
+ "mrsas",
+ mrsas_methods,
+ sizeof(struct mrsas_softc)
+};
+
+static devclass_t mrsas_devclass;
+DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
+MODULE_DEPEND(mrsas, cam, 1,1,1);
+
diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h
new file mode 100644
index 0000000..6ec7891
--- /dev/null
+++ b/sys/dev/mrsas/mrsas.h
@@ -0,0 +1,2464 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Authors: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies, either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef MRSAS_H
+#define MRSAS_H
+
+#include <sys/param.h> /* defines used in kernel.h */
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/errno.h>
+#include <sys/kernel.h> /* types used in module initialization */
+#include <sys/conf.h> /* cdevsw struct */
+#include <sys/uio.h> /* uio struct */
+#include <sys/malloc.h>
+#include <sys/bus.h> /* structs, prototypes for pci bus stuff */
+
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+
+#include <dev/pci/pcivar.h> /* For pci_get macros! */
+#include <dev/pci/pcireg.h>
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/stat.h>
+#include <sys/taskqueue.h>
+#include <sys/poll.h>
+#include <sys/selinfo.h>
+
+/*
+ * Device IDs and PCI
+ */
+#define MRSAS_TBOLT 0x005b
+#define MRSAS_INVADER 0x005d
+#define MRSAS_FURY 0x005f
+#define MRSAS_PCI_BAR0 0x10
+#define MRSAS_PCI_BAR1 0x14
+#define MRSAS_PCI_BAR2 0x1C
+
+/*
+ * Firmware State Defines
+ */
+#define MRSAS_FWSTATE_MAXCMD_MASK 0x0000FFFF
+#define MRSAS_FWSTATE_SGE_MASK 0x00FF0000
+#define MRSAS_FW_STATE_CHNG_INTERRUPT 1
+
+/*
+ * Message Frame Defines
+ */
+#define MRSAS_SENSE_LEN 96
+#define MRSAS_FUSION_MAX_RESET_TRIES 3
+
+/*
+ * Miscellaneous Defines
+ */
+#define BYTE_ALIGNMENT 1
+#define MRSAS_MAX_NAME_LENGTH 32
+#define MRSAS_VERSION "06.704.01.00-fbsd"
+#define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF
+#define MRSAS_DEFAULT_TIMEOUT 0x14 //temp
+#define DONE 0
+#define MRSAS_PAGE_SIZE 4096
+#define MRSAS_RESET_NOTICE_INTERVAL 5
+#define MRSAS_IO_TIMEOUT 180000 /* 180 second timeout */
+#define MRSAS_LDIO_QUEUE_DEPTH 70 /* 70 percent as default */
+#define THRESHOLD_REPLY_COUNT 50
+
+/*
+ Boolean types
+*/
+#if (__FreeBSD_version < 901000)
+ typedef enum _boolean { false, true } boolean;
+#endif
+enum err { SUCCESS, FAIL };
+
+MALLOC_DECLARE(M_MRSAS);
+SYSCTL_DECL(_hw_mrsas);
+
+#define MRSAS_INFO (1 << 0)
+#define MRSAS_TRACE (1 << 1)
+#define MRSAS_FAULT (1 << 2)
+#define MRSAS_OCR (1 << 3)
+#define MRSAS_TOUT MRSAS_OCR
+#define MRSAS_AEN (1 << 4)
+#define MRSAS_PRL11 (1 << 5)
+
+#define mrsas_dprint(sc, level, msg, args...) \
+do { \
+ if (sc->mrsas_debug & level) \
+ device_printf(sc->mrsas_dev, msg, ##args); \
+} while (0)
+
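+/*
+ * Illustrative usage sketch (not part of the driver): with a debug class
+ * enabled in sc->mrsas_debug, callers can emit class-tagged messages.
+ * The message text here is hypothetical.
+ *
+ *	sc->mrsas_debug |= MRSAS_AEN;
+ *	mrsas_dprint(sc, MRSAS_AEN, "registering AEN, seq_num %u\n", seq_num);
+ */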
+
+/****************************************************************************
+ * RAID Context structure which describes MegaRAID-specific IO parameters.
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ ****************************************************************************/
+
+typedef struct _RAID_CONTEXT {
+ u_int8_t Type:4; // 0x00
+ u_int8_t nseg:4; // 0x00
+ u_int8_t resvd0; // 0x01
+ u_int16_t timeoutValue; // 0x02 -0x03
+ u_int8_t regLockFlags; // 0x04
+ u_int8_t resvd1; // 0x05
+ u_int16_t VirtualDiskTgtId; // 0x06 -0x07
+ u_int64_t regLockRowLBA; // 0x08 - 0x0F
+ u_int32_t regLockLength; // 0x10 - 0x13
+ u_int16_t nextLMId; // 0x14 - 0x15
+ u_int8_t exStatus; // 0x16
+ u_int8_t status; // 0x17 status
+ u_int8_t RAIDFlags; // 0x18 resvd[7:6],ioSubType[5:4],resvd[3:1],preferredCpu[0]
+ u_int8_t numSGE; // 0x19 numSge; not including chain entries
+ u_int16_t configSeqNum; // 0x1A -0x1B
+ u_int8_t spanArm; // 0x1C span[7:5], arm[4:0]
+ u_int8_t resvd2[3]; // 0x1D-0x1f
+} RAID_CONTEXT;
+
+
+/*************************************************************************
+ * MPI2 Defines
+ ************************************************************************/
+
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
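+/*
+ * Illustrative sketch (not part of the driver): the six WRSEQ keys are
+ * written in order to the write-sequence register to unlock the host
+ * diagnostic register before an adapter reset.  mrsas_write_reg() is a
+ * hypothetical register accessor used only for illustration.
+ *
+ *	mrsas_write_reg(sc, MPI2_WRITE_SEQUENCE_OFFSET, MPI2_WRSEQ_FLUSH_KEY_VALUE);
+ *	mrsas_write_reg(sc, MPI2_WRITE_SEQUENCE_OFFSET, MPI2_WRSEQ_1ST_KEY_VALUE);
+ *	mrsas_write_reg(sc, MPI2_WRITE_SEQUENCE_OFFSET, MPI2_WRSEQ_2ND_KEY_VALUE);
+ *	mrsas_write_reg(sc, MPI2_WRITE_SEQUENCE_OFFSET, MPI2_WRSEQ_3RD_KEY_VALUE);
+ *	mrsas_write_reg(sc, MPI2_WRITE_SEQUENCE_OFFSET, MPI2_WRSEQ_4TH_KEY_VALUE);
+ *	mrsas_write_reg(sc, MPI2_WRITE_SEQUENCE_OFFSET, MPI2_WRSEQ_5TH_KEY_VALUE);
+ *	mrsas_write_reg(sc, MPI2_WRITE_SEQUENCE_OFFSET, MPI2_WRSEQ_6TH_KEY_VALUE);
+ */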
+
+#ifndef MPI2_POINTER
+#define MPI2_POINTER *
+#endif
+
+
+/***************************************
+ * MPI2 Structures
+ ***************************************/
+
+typedef struct _MPI25_IEEE_SGE_CHAIN64
+{
+ u_int64_t Address;
+ u_int32_t Length;
+ u_int16_t Reserved1;
+ u_int8_t NextChainOffset;
+ u_int8_t Flags;
+} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64,
+ Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION
+{
+ u_int32_t FlagsLength;
+ union
+ {
+ u_int32_t Address32;
+ u_int64_t Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
+
+typedef struct
+{
+ u_int8_t CDB[20]; /* 0x00 */
+ u_int32_t PrimaryReferenceTag; /* 0x14 */
+ u_int16_t PrimaryApplicationTag; /* 0x18 */
+ u_int16_t PrimaryApplicationTagMask; /* 0x1A */
+ u_int32_t TransferLength; /* 0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION
+{
+ u_int16_t Length;
+ u_int8_t NextChainOffset;
+ u_int8_t Flags;
+ union
+ {
+ u_int32_t Address32;
+ u_int64_t Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE32
+{
+ u_int32_t Address;
+ u_int32_t FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
+typedef struct _MPI2_IEEE_SGE_SIMPLE64
+{
+ u_int64_t Address;
+ u_int32_t Length;
+ u_int16_t Reserved1;
+ u_int8_t Reserved2;
+ u_int8_t Flags;
+} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
+{
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
+
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION
+{
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
+
+typedef union _MPI2_SGE_IO_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
+
+typedef union
+{
+ u_int8_t CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+typedef struct _MPI2_RAID_SCSI_IO_REQUEST
+{
+ u_int16_t DevHandle; /* 0x00 */
+ u_int8_t ChainOffset; /* 0x02 */
+ u_int8_t Function; /* 0x03 */
+ u_int16_t Reserved1; /* 0x04 */
+ u_int8_t Reserved2; /* 0x06 */
+ u_int8_t MsgFlags; /* 0x07 */
+ u_int8_t VP_ID; /* 0x08 */
+ u_int8_t VF_ID; /* 0x09 */
+ u_int16_t Reserved3; /* 0x0A */
+ u_int32_t SenseBufferLowAddress; /* 0x0C */
+ u_int16_t SGLFlags; /* 0x10 */
+ u_int8_t SenseBufferLength; /* 0x12 */
+ u_int8_t Reserved4; /* 0x13 */
+ u_int8_t SGLOffset0; /* 0x14 */
+ u_int8_t SGLOffset1; /* 0x15 */
+ u_int8_t SGLOffset2; /* 0x16 */
+ u_int8_t SGLOffset3; /* 0x17 */
+ u_int32_t SkipCount; /* 0x18 */
+ u_int32_t DataLength; /* 0x1C */
+ u_int32_t BidirectionalDataLength; /* 0x20 */
+ u_int16_t IoFlags; /* 0x24 */
+ u_int16_t EEDPFlags; /* 0x26 */
+ u_int32_t EEDPBlockSize; /* 0x28 */
+ u_int32_t SecondaryReferenceTag; /* 0x2C */
+ u_int16_t SecondaryApplicationTag; /* 0x30 */
+ u_int16_t ApplicationTagTranslationMask; /* 0x32 */
+ u_int8_t LUN[8]; /* 0x34 */
+ u_int32_t Control; /* 0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+ RAID_CONTEXT RaidContext; /* 0x60 */
+ MPI2_SGE_IO_UNION SGL; /* 0x80 */
+} MRSAS_RAID_SCSI_IO_REQUEST, MPI2_POINTER PTR_MRSAS_RAID_SCSI_IO_REQUEST,
+ MRSASRaidSCSIIORequest_t, MPI2_POINTER pMRSASRaidSCSIIORequest_t;
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+typedef struct _MRSAS_RAID_MFA_IO_DESCRIPTOR {
+ u_int32_t RequestFlags : 8;
+ u_int32_t MessageAddress1 : 24; /* bits 31:8*/
+ u_int32_t MessageAddress2; /* bits 61:32 */
+} MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR,*PMRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR;
+
+/* Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t DescriptorTypeDependent; /* 0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
+
+/* High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t Reserved1; /* 0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
+
+/* SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t DevHandle; /* 0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
+
+/* SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
+
+/* RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t Reserved; /* 0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+/* union of Request Descriptors */
+typedef union _MRSAS_REQUEST_DESCRIPTOR_UNION
+{
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
+ union {
+ struct {
+ u_int32_t low;
+ u_int32_t high;
+ } u;
+ u_int64_t Words;
+ } addr;
+} MRSAS_REQUEST_DESCRIPTOR_UNION;
+
+/* Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t DescriptorTypeDependent1; /* 0x02 */
+ u_int32_t DescriptorTypeDependent2; /* 0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
+
+/* Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int32_t ReplyFrameAddress; /* 0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
+
+/* SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t TaskTag; /* 0x04 */
+ u_int16_t Reserved1; /* 0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+/* TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int8_t SequenceNumber; /* 0x04 */
+ u_int8_t Reserved1; /* 0x05 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+/* Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int8_t VP_ID; /* 0x02 */
+ u_int8_t Flags; /* 0x03 */
+ u_int16_t InitiatorDevHandle; /* 0x04 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/* RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int32_t Reserved; /* 0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+/* union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION
+{
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ u_int64_t Words;
+} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+ Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+
+typedef struct {
+ volatile unsigned int val;
+} atomic_t;
+
+#define atomic_read(v) atomic_load_acq_int(&(v)->val)
+#define atomic_set(v,i) atomic_store_rel_int(&(v)->val, i)
+#define atomic_dec(v) atomic_fetchadd_int(&(v)->val, -1)
+#define atomic_inc(v) atomic_fetchadd_int(&(v)->val, 1)
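+/*
+ * Illustrative sketch (not part of the driver): these wrappers map a
+ * Linux-style atomic_t API onto FreeBSD atomics, e.g. for the per-arm
+ * pending-command counters used for load balancing.  "lbinfo" is a
+ * hypothetical LD_LOAD_BALANCE_INFO pointer (defined later in this file).
+ *
+ *	atomic_inc(&lbinfo->scsi_pending_cmds[arm]);	// IO dispatched
+ *	...
+ *	atomic_dec(&lbinfo->scsi_pending_cmds[arm]);	// IO completed
+ */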
+
+/* IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST
+{
+ u_int8_t WhoInit; /* 0x00 */
+ u_int8_t Reserved1; /* 0x01 */
+ u_int8_t ChainOffset; /* 0x02 */
+ u_int8_t Function; /* 0x03 */
+ u_int16_t Reserved2; /* 0x04 */
+ u_int8_t Reserved3; /* 0x06 */
+ u_int8_t MsgFlags; /* 0x07 */
+ u_int8_t VP_ID; /* 0x08 */
+ u_int8_t VF_ID; /* 0x09 */
+ u_int16_t Reserved4; /* 0x0A */
+ u_int16_t MsgVersion; /* 0x0C */
+ u_int16_t HeaderVersion; /* 0x0E */
+ u_int32_t Reserved5; /* 0x10 */
+ u_int16_t Reserved6; /* 0x14 */
+ u_int8_t Reserved7; /* 0x16 */
+ u_int8_t HostMSIxVectors; /* 0x17 */
+ u_int16_t Reserved8; /* 0x18 */
+ u_int16_t SystemRequestFrameSize; /* 0x1A */
+ u_int16_t ReplyDescriptorPostQueueDepth; /* 0x1C */
+ u_int16_t ReplyFreeQueueDepth; /* 0x1E */
+ u_int32_t SenseBufferAddressHigh; /* 0x20 */
+ u_int32_t SystemReplyAddressHigh; /* 0x24 */
+ u_int64_t SystemRequestFrameBaseAddress; /* 0x28 */
+ u_int64_t ReplyDescriptorPostQueueAddress;/* 0x30 */
+ u_int64_t ReplyFreeQueueAddress; /* 0x38 */
+ u_int64_t TimeStamp; /* 0x40 */
+} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
+ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
+
+/*
+ * MR private defines
+ */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 // get the mapping information of this LD
+
+
+/*******************************************************************
+ * RAID map related structures
+ ********************************************************************/
+
+typedef struct _MR_DEV_HANDLE_INFO {
+ u_int16_t curDevHdl; // the device handle currently used by fw to issue the command.
+ u_int8_t validHandles; // bitmap of valid device handles.
+ u_int8_t reserved;
+ u_int16_t devHandle[2]; // 0x04 dev handles for all the paths.
+} MR_DEV_HANDLE_INFO;
+
+typedef struct _MR_ARRAY_INFO {
+ u_int16_t pd[MAX_RAIDMAP_ROW_SIZE];
+} MR_ARRAY_INFO; // 0x40, Total Size
+
+typedef struct _MR_QUAD_ELEMENT {
+ u_int64_t logStart; // 0x00
+ u_int64_t logEnd; // 0x08
+ u_int64_t offsetInSpan; // 0x10
+ u_int32_t diff; // 0x18
+ u_int32_t reserved1; // 0x1C
+} MR_QUAD_ELEMENT; // 0x20, Total size
+
+typedef struct _MR_SPAN_INFO {
+ u_int32_t noElements; // 0x00
+ u_int32_t reserved1; // 0x04
+ MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; // 0x08
+} MR_SPAN_INFO; // 0x108, Total size
+
+typedef struct _MR_LD_SPAN_ { // SPAN structure
+ u_int64_t startBlk; // 0x00, starting block number in array
+ u_int64_t numBlks; // 0x08, number of blocks
+ u_int16_t arrayRef; // 0x10, array reference
+ u_int8_t spanRowSize; // 0x11, span row size
+ u_int8_t spanRowDataSize; // 0x12, span row data size
+ u_int8_t reserved[4]; // 0x13, reserved
+} MR_LD_SPAN; // 0x18, Total Size
+
+typedef struct _MR_SPAN_BLOCK_INFO {
+ u_int64_t num_rows; // number of rows/span
+ MR_LD_SPAN span; // 0x08
+ MR_SPAN_INFO block_span_info; // 0x20
+} MR_SPAN_BLOCK_INFO;
+
+typedef struct _MR_LD_RAID {
+ struct {
+ u_int32_t fpCapable :1;
+ u_int32_t reserved5 :3;
+ u_int32_t ldPiMode :4;
+ u_int32_t pdPiMode :4; // Every Pd has to be same.
+ u_int32_t encryptionType :8; // FDE or ctlr encryption (MR_LD_ENCRYPTION_TYPE)
+ u_int32_t fpWriteCapable :1;
+ u_int32_t fpReadCapable :1;
+ u_int32_t fpWriteAcrossStripe :1;
+ u_int32_t fpReadAcrossStripe :1;
+ u_int32_t fpNonRWCapable :1; // TRUE if supporting Non RW IO
+ u_int32_t reserved4 :7;
+ } capability; // 0x00
+ u_int32_t reserved6;
+ u_int64_t size; // 0x08, LD size in blocks
+
+ u_int8_t spanDepth; // 0x10, Total Number of Spans
+ u_int8_t level; // 0x11, RAID level
+ u_int8_t stripeShift; // 0x12, shift-count to get stripe size (0=512, 1=1K, 7=64K, etc.)
+ u_int8_t rowSize; // 0x13, number of disks in a row
+
+ u_int8_t rowDataSize; // 0x14, number of data disks in a row
+ u_int8_t writeMode; // 0x15, WRITE_THROUGH or WRITE_BACK
+ u_int8_t PRL; // 0x16, To differentiate between RAID1 and RAID1E
+ u_int8_t SRL; // 0x17
+
+ u_int16_t targetId; // 0x18, ld Target Id.
+ u_int8_t ldState; // 0x1a, state of ld, state corresponds to MR_LD_STATE
+ u_int8_t regTypeReqOnWrite;// 0x1b, Pre calculate region type requests based on MFC etc..
+ u_int8_t modFactor; // 0x1c, same as rowSize,
+ u_int8_t regTypeReqOnRead; // 0x1d, region lock type used for read, valid only if regTypeOnReadIsValid=1
+ u_int16_t seqNum; // 0x1e, LD sequence number
+
+ struct {
+ u_int32_t ldSyncRequired:1; // This LD requires sync command before completing
+ u_int32_t regTypeReqOnReadLsValid:1; // Qualifier for regTypeOnRead
+ u_int32_t reserved:30;
+ } flags; // 0x20
+
+ u_int8_t LUN[8]; // 0x24, 8 byte LUN field used for SCSI
+ u_int8_t fpIoTimeoutForLd; // 0x2C, timeout value for FP IOs
+ u_int8_t reserved2[3]; // 0x2D
+ u_int32_t logicalBlockLength; // 0x30 Logical block size for the LD
+ struct {
+ u_int32_t LdPiExp:4; // 0x34, P_I_EXPONENT for ReadCap 16
+ u_int32_t LdLogicalBlockExp:4; // 0x34, LOGICAL BLOCKS PER PHYS BLOCK
+ u_int32_t reserved1:24; // 0x34
+ } exponent;
+ u_int8_t reserved3[0x80-0x38]; // 0x38
+} MR_LD_RAID; // 0x80, Total Size
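+/*
+ * Worked example (illustration only): stripeShift encodes the stripe size
+ * as a power-of-two multiple of 512 bytes, so the stripe size in bytes is
+ * (512 << stripeShift); e.g. stripeShift == 7 gives 64K stripes, matching
+ * the field comment above.
+ *
+ *	u_int32_t stripe_size = 512 << raid->stripeShift;
+ */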
+
+typedef struct _MR_LD_SPAN_MAP {
+ MR_LD_RAID ldRaid; // 0x00
+ u_int8_t dataArmMap[MAX_RAIDMAP_ROW_SIZE]; // 0x80, needed for GET_ARM() - R0/1/5 only.
+ MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH]; // 0xA0
+} MR_LD_SPAN_MAP; // 0x9E0
+
+typedef struct _MR_FW_RAID_MAP {
+ u_int32_t totalSize; // total size of this structure, including this field.
+ union {
+ struct { // Simple method of version checking variables
+ u_int32_t maxLd;
+ u_int32_t maxSpanDepth;
+ u_int32_t maxRowSize;
+ u_int32_t maxPdCount;
+ u_int32_t maxArrays;
+ } validationInfo;
+ u_int32_t version[5];
+ u_int32_t reserved1[5];
+ } raid_desc;
+ u_int32_t ldCount; // count of lds.
+ u_int32_t Reserved1;
+ u_int8_t ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS]; // 0x20
+	// This doesn't correspond to
+	// FW Ld Tgt Id to LD, but will purge. For example: if tgt Id is 4
+	// and FW LD is 2, and there is only one LD, FW will populate the
+	// array like this: [0xFF, 0xFF, 0xFF, 0xFF, 0x0, ...]. This is to
+	// help reduce the entire structure size when there are few LDs or
+	// the driver is looking for info on only 1 LD.
+ u_int8_t fpPdIoTimeoutSec; // timeout value used by driver in FP IOs
+ u_int8_t reserved2[7];
+ MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS]; // 0x00a8
+ MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; // 0x20a8
+ MR_LD_SPAN_MAP ldSpanMap[1]; // 0x28a8-[0-MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS+1];
+} MR_FW_RAID_MAP; // 0x3288, Total Size
+
+typedef struct _LD_LOAD_BALANCE_INFO
+{
+ u_int8_t loadBalanceFlag;
+ u_int8_t reserved1;
+ u_int16_t raid1DevHandle[2];
+ atomic_t scsi_pending_cmds[2];
+ u_int64_t last_accessed_block[2];
+} LD_LOAD_BALANCE_INFO, *PLD_LOAD_BALANCE_INFO;
+
+/* SPAN_SET is info calculated from the span info in the RAID map, per LD */
+typedef struct _LD_SPAN_SET {
+ u_int64_t log_start_lba;
+ u_int64_t log_end_lba;
+ u_int64_t span_row_start;
+ u_int64_t span_row_end;
+ u_int64_t data_strip_start;
+ u_int64_t data_strip_end;
+ u_int64_t data_row_start;
+ u_int64_t data_row_end;
+ u_int8_t strip_offset[MAX_SPAN_DEPTH];
+ u_int32_t span_row_data_width;
+ u_int32_t diff;
+ u_int32_t reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+	LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
+
+#pragma pack(1)
+typedef struct _MR_FW_RAID_MAP_ALL {
+ MR_FW_RAID_MAP raidMap;
+ MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} MR_FW_RAID_MAP_ALL;
+#pragma pack()
+
+struct IO_REQUEST_INFO {
+ u_int64_t ldStartBlock;
+ u_int32_t numBlocks;
+ u_int16_t ldTgtId;
+ u_int8_t isRead;
+ u_int16_t devHandle;
+ u_int64_t pdBlock;
+ u_int8_t fpOkForIo;
+ u_int8_t IoforUnevenSpan;
+ u_int8_t start_span;
+ u_int8_t reserved;
+ u_int64_t start_row;
+};
+
+typedef struct _MR_LD_TARGET_SYNC {
+ u_int8_t targetId;
+ u_int8_t reserved;
+ u_int16_t seqNum;
+} MR_LD_TARGET_SYNC;
+
+#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+union desc_value {
+ u_int64_t word;
+ struct {
+ u_int32_t low;
+ u_int32_t high;
+ } u;
+};
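+/*
+ * Illustrative sketch (not part of the driver): on controllers that take
+ * 64-bit request descriptors through two 32-bit ports, this union lets a
+ * driver split one descriptor into low/high register writes.
+ * mrsas_write_reg() and the port arguments are hypothetical, used only
+ * for illustration.
+ *
+ *	union desc_value d;
+ *	d.word = req_desc->addr.Words;
+ *	mrsas_write_reg(sc, inbound_low_queue_port, d.u.low);
+ *	mrsas_write_reg(sc, inbound_high_queue_port, d.u.high);
+ */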
+
+/*******************************************************************
+ * Temporary command
+ ********************************************************************/
+struct mrsas_tmp_dcmd {
+	bus_dma_tag_t tmp_dcmd_tag;	// tag for tmp DCMD cmd
+	bus_dmamap_t tmp_dcmd_dmamap;	// dmamap for tmp DCMD cmd
+	void *tmp_dcmd_mem;		// virtual addr of tmp DCMD cmd
+	bus_addr_t tmp_dcmd_phys_addr;	// physical addr of tmp DCMD
+};
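+/*
+ * Typical lifecycle sketch (illustration only), mirroring the DCMD helpers
+ * in mrsas.c: allocate the wrapper, DMA-map a buffer, point the DCMD SGE
+ * at it, then tear everything down in reverse order.
+ *
+ *	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
+ *	mrsas_alloc_tmp_dcmd(sc, tcmd, buf_size);
+ *	dcmd->sgl.sge32[0].phys_addr = tcmd->tmp_dcmd_phys_addr;
+ *	... issue the DCMD, read results from tcmd->tmp_dcmd_mem ...
+ *	mrsas_free_tmp_dcmd(tcmd);
+ *	free(tcmd, M_MRSAS);
+ */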
+
+/*******************************************************************
+ * Register set, including legacy controllers 1068 and 1078;
+ * structure extended for 1078 registers
+ ********************************************************************/
+#pragma pack(1)
+typedef struct _mrsas_register_set {
+ u_int32_t doorbell; /*0000h*/
+ u_int32_t fusion_seq_offset; /*0004h*/
+ u_int32_t fusion_host_diag; /*0008h*/
+ u_int32_t reserved_01; /*000Ch*/
+
+ u_int32_t inbound_msg_0; /*0010h*/
+ u_int32_t inbound_msg_1; /*0014h*/
+ u_int32_t outbound_msg_0; /*0018h*/
+ u_int32_t outbound_msg_1; /*001Ch*/
+
+ u_int32_t inbound_doorbell; /*0020h*/
+ u_int32_t inbound_intr_status; /*0024h*/
+ u_int32_t inbound_intr_mask; /*0028h*/
+
+ u_int32_t outbound_doorbell; /*002Ch*/
+ u_int32_t outbound_intr_status; /*0030h*/
+ u_int32_t outbound_intr_mask; /*0034h*/
+
+ u_int32_t reserved_1[2]; /*0038h*/
+
+ u_int32_t inbound_queue_port; /*0040h*/
+ u_int32_t outbound_queue_port; /*0044h*/
+
+ u_int32_t reserved_2[9]; /*0048h*/
+ u_int32_t reply_post_host_index; /*006Ch*/
+ u_int32_t reserved_2_2[12]; /*0070h*/
+
+ u_int32_t outbound_doorbell_clear; /*00A0h*/
+
+ u_int32_t reserved_3[3]; /*00A4h*/
+
+	u_int32_t 	outbound_scratch_pad;		/*00B0h*/
+	u_int32_t 	outbound_scratch_pad_2;		/*00B4h*/
+
+	u_int32_t 	reserved_4[2];			/*00B8h*/
+
+	u_int32_t 	inbound_low_queue_port;		/*00C0h*/
+
+	u_int32_t 	inbound_high_queue_port;	/*00C4h*/
+
+ u_int32_t reserved_5; /*00C8h*/
+ u_int32_t res_6[11]; /*CCh*/
+ u_int32_t host_diag;
+ u_int32_t seq_offset;
+ u_int32_t index_registers[807]; /*00CCh*/
+
+} mrsas_reg_set;
+#pragma pack()
+
+/*******************************************************************
+ * Firmware Interface Defines
+ *******************************************************************
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker
+ * for the protocol between the software and firmware. Commands are
+ * issued using "message frames".
+ ******************************************************************/
+/*
+ * FW posts its state in upper 4 bits of outbound_msg_0 register
+ */
+#define MFI_STATE_MASK 0xF0000000
+#define MFI_STATE_UNDEFINED 0x00000000
+#define MFI_STATE_BB_INIT 0x10000000
+#define MFI_STATE_FW_INIT 0x40000000
+#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
+#define MFI_STATE_FW_INIT_2 0x70000000
+#define MFI_STATE_DEVICE_SCAN 0x80000000
+#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000
+#define MFI_STATE_FLUSH_CACHE 0xA0000000
+#define MFI_STATE_READY 0xB0000000
+#define MFI_STATE_OPERATIONAL 0xC0000000
+#define MFI_STATE_FAULT 0xF0000000
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
+#define MEGAMFI_FRAME_SIZE 64
+#define MRSAS_MFI_FRAME_SIZE 1024
+#define MRSAS_MFI_SENSE_SIZE 128
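+/*
+ * Illustrative sketch (not part of the driver): since the FW posts its
+ * state in the upper 4 bits of the outbound register noted above, a driver
+ * can poll for READY by masking with MFI_STATE_MASK.  mrsas_read_reg() is
+ * a hypothetical register accessor used only for illustration.
+ *
+ *	u_int32_t state = mrsas_read_reg(sc, outbound_msg_0) & MFI_STATE_MASK;
+ *	while (state != MFI_STATE_READY && state != MFI_STATE_FAULT) {
+ *		DELAY(1000);
+ *		state = mrsas_read_reg(sc, outbound_msg_0) & MFI_STATE_MASK;
+ *	}
+ */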
+
+/*
+ * During FW init, clear pending cmds & reset state using inbound_msg_0
+ *
+ * ABORT : Abort all pending cmds
+ * READY : Move from OPERATIONAL to READY state; discard queue info
+ * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
+ * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
+ * HOTPLUG : Resume from Hotplug
+ * MFI_STOP_ADP : Send signal to FW to stop processing
+ */
+
+#define WRITE_SEQUENCE_OFFSET (0x0000000FC) // I20
+#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) // I20
+#define DIAG_WRITE_ENABLE (0x00000080)
+#define DIAG_RESET_ADAPTER (0x00000004)
+
+#define MFI_ADP_RESET 0x00000040
+#define MFI_INIT_ABORT 0x00000001
+#define MFI_INIT_READY 0x00000002
+#define MFI_INIT_MFIMODE 0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
+#define MFI_INIT_HOTPLUG 0x00000010
+#define MFI_STOP_ADP 0x00000020
+#define MFI_RESET_FLAGS MFI_INIT_READY| \
+ MFI_INIT_MFIMODE| \
+ MFI_INIT_ABORT
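+/*
+ * Illustrative sketch (not part of the driver): per the comment above,
+ * pending commands are cleared during FW init by writing the reset flags
+ * to inbound_msg_0.  mrsas_write_reg() is a hypothetical register
+ * accessor used only for illustration.
+ *
+ *	mrsas_write_reg(sc, inbound_msg_0, MFI_RESET_FLAGS);
+ */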
+
+/*
+ * MFI frame flags
+ */
+#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
+#define MFI_FRAME_SGL32 0x0000
+#define MFI_FRAME_SGL64 0x0002
+#define MFI_FRAME_SENSE32 0x0000
+#define MFI_FRAME_SENSE64 0x0004
+#define MFI_FRAME_DIR_NONE 0x0000
+#define MFI_FRAME_DIR_WRITE 0x0008
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_FRAME_DIR_BOTH 0x0018
+#define MFI_FRAME_IEEE 0x0020
+
+/*
+ * Definition for cmd_status
+ */
+#define MFI_CMD_STATUS_POLL_MODE 0xFF
+
+/*
+ * MFI command opcodes
+ */
+#define MFI_CMD_INIT 0x00
+#define MFI_CMD_LD_READ 0x01
+#define MFI_CMD_LD_WRITE 0x02
+#define MFI_CMD_LD_SCSI_IO 0x03
+#define MFI_CMD_PD_SCSI_IO 0x04
+#define MFI_CMD_DCMD 0x05
+#define MFI_CMD_ABORT 0x06
+#define MFI_CMD_SMP 0x07
+#define MFI_CMD_STP 0x08
+#define MFI_CMD_INVALID 0xff
+
+#define MR_DCMD_CTRL_GET_INFO 0x01010000
+#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
+#define MR_FLUSH_CTRL_CACHE 0x01
+#define MR_FLUSH_DISK_CACHE 0x02
+
+#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
+#define MR_ENABLE_DRIVE_SPINDOWN 0x01
+
+#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
+#define MR_DCMD_CTRL_EVENT_GET 0x01040300
+#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
+#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
+
+#define MR_DCMD_CLUSTER 0x08000000
+#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
+#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
+#define MR_DCMD_PD_LIST_QUERY 0x02010100
+
+#define MR_DCMD_CTRL_MISC_CPX 0x0100e200
+#define MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET 0x0100e201
+#define MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA 0x0100e202
+#define MR_DCMD_CTRL_MISC_CPX_UNREGISTER 0x0100e203
+#define MAX_MR_ROW_SIZE 32
+#define MR_CPX_DIR_WRITE 1
+#define MR_CPX_DIR_READ 0
+#define MR_CPX_VERSION 1
+
+#define MR_DCMD_CTRL_IO_METRICS_GET 0x01170200 // get IO metrics
+
+#define MR_EVT_CFG_CLEARED 0x0004
+
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+#define MR_EVT_CTRL_PERF_COLLECTION 0x017e
+
+/*
+ * MFI command completion codes
+ */
+enum MFI_STAT {
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD = 0x01,
+ MFI_STAT_INVALID_DCMD = 0x02,
+ MFI_STAT_INVALID_PARAMETER = 0x03,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+ MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+ MFI_STAT_APP_IN_USE = 0x07,
+ MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+ MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+ MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+ MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+ MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+ MFI_STAT_FLASH_BUSY = 0x0f,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+ MFI_STAT_FLASH_NOT_OPEN = 0x13,
+ MFI_STAT_FLASH_NOT_STARTED = 0x14,
+ MFI_STAT_FLUSH_FAILED = 0x15,
+ MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+ MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+ MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+ MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+ MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+ MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+ MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+ MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+ MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR = 0x21,
+ MFI_STAT_NO_HW_PRESENT = 0x22,
+ MFI_STAT_NOT_FOUND = 0x23,
+ MFI_STAT_NOT_IN_ENCL = 0x24,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+ MFI_STAT_PD_TYPE_WRONG = 0x26,
+ MFI_STAT_PR_DISABLED = 0x27,
+ MFI_STAT_ROW_INDEX_INVALID = 0x28,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+ MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+ MFI_STAT_SCSI_IO_FAILED = 0x2e,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET = 0x31,
+ MFI_STAT_WRONG_STATE = 0x32,
+ MFI_STAT_LD_OFFLINE = 0x33,
+ MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
+ MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
+ MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
+ MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
+ MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+ MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
+
+ MFI_STAT_INVALID_STATUS = 0xFF
+};
+
+/*
+ * Number of mailbox bytes in DCMD message frame
+ */
+#define MFI_MBOX_SIZE 12
+
+enum MR_EVT_CLASS {
+
+ MR_EVT_CLASS_DEBUG = -2,
+ MR_EVT_CLASS_PROGRESS = -1,
+ MR_EVT_CLASS_INFO = 0,
+ MR_EVT_CLASS_WARNING = 1,
+ MR_EVT_CLASS_CRITICAL = 2,
+ MR_EVT_CLASS_FATAL = 3,
+ MR_EVT_CLASS_DEAD = 4,
+
+};
+
+enum MR_EVT_LOCALE {
+
+ MR_EVT_LOCALE_LD = 0x0001,
+ MR_EVT_LOCALE_PD = 0x0002,
+ MR_EVT_LOCALE_ENCL = 0x0004,
+ MR_EVT_LOCALE_BBU = 0x0008,
+ MR_EVT_LOCALE_SAS = 0x0010,
+ MR_EVT_LOCALE_CTRL = 0x0020,
+ MR_EVT_LOCALE_CONFIG = 0x0040,
+ MR_EVT_LOCALE_CLUSTER = 0x0080,
+ MR_EVT_LOCALE_ALL = 0xffff,
+
+};
+
+enum MR_EVT_ARGS {
+
+ MR_EVT_ARGS_NONE,
+ MR_EVT_ARGS_CDB_SENSE,
+ MR_EVT_ARGS_LD,
+ MR_EVT_ARGS_LD_COUNT,
+ MR_EVT_ARGS_LD_LBA,
+ MR_EVT_ARGS_LD_OWNER,
+ MR_EVT_ARGS_LD_LBA_PD_LBA,
+ MR_EVT_ARGS_LD_PROG,
+ MR_EVT_ARGS_LD_STATE,
+ MR_EVT_ARGS_LD_STRIP,
+ MR_EVT_ARGS_PD,
+ MR_EVT_ARGS_PD_ERR,
+ MR_EVT_ARGS_PD_LBA,
+ MR_EVT_ARGS_PD_LBA_LD,
+ MR_EVT_ARGS_PD_PROG,
+ MR_EVT_ARGS_PD_STATE,
+ MR_EVT_ARGS_PCI,
+ MR_EVT_ARGS_RATE,
+ MR_EVT_ARGS_STR,
+ MR_EVT_ARGS_TIME,
+ MR_EVT_ARGS_ECC,
+ MR_EVT_ARGS_LD_PROP,
+ MR_EVT_ARGS_PD_SPARE,
+ MR_EVT_ARGS_PD_INDEX,
+ MR_EVT_ARGS_DIAG_PASS,
+ MR_EVT_ARGS_DIAG_FAIL,
+ MR_EVT_ARGS_PD_LBA_LBA,
+ MR_EVT_ARGS_PORT_PHY,
+ MR_EVT_ARGS_PD_MISSING,
+ MR_EVT_ARGS_PD_ADDRESS,
+ MR_EVT_ARGS_BITMAP,
+ MR_EVT_ARGS_CONNECTOR,
+ MR_EVT_ARGS_PD_PD,
+ MR_EVT_ARGS_PD_FRU,
+ MR_EVT_ARGS_PD_PATHINFO,
+ MR_EVT_ARGS_PD_POWER_STATE,
+ MR_EVT_ARGS_GENERIC,
+};
+
+
+/*
+ * Thunderbolt (and later) Defines
+ */
+#define MRSAS_MAX_SZ_CHAIN_FRAME 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
+#define MRSAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
+#define MRSAS_LOAD_BALANCE_FLAG 0x1
+#define MRSAS_DCMD_MBOX_PEND_FLAG 0x1
+#define HOST_DIAG_WRITE_ENABLE 0x80
+#define HOST_DIAG_RESET_ADAPTER 0x4
+#define MRSAS_TBOLT_MAX_RESET_TRIES 3
+#define MRSAS_MAX_MFI_CMDS 32
+
+/*
+ * Invader Defines
+ */
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+
+/*
+ * T10 PI defines
+ */
+#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
+#define MRSAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
+#define MRSAS_SCSI_SERVICE_ACTION_READ32 0x9
+#define MRSAS_SCSI_SERVICE_ACTION_WRITE32 0xB
+#define MRSAS_SCSI_ADDL_CDB_LEN 0x18
+#define MRSAS_RD_WR_PROTECT_CHECK_ALL 0x20
+#define MRSAS_RD_WR_PROTECT_CHECK_NONE 0x60
+#define MRSAS_SCSIBLOCKSIZE 512
+
+/*
+ * Raid context flags
+ */
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
+typedef enum MR_RAID_FLAGS_IO_SUB_TYPE {
+ MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+} MR_RAID_FLAGS_IO_SUB_TYPE;
+
+/*
+ * Request descriptor types
+ */
+#define MRSAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MRSAS_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
+#define MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
+#define MRSAS_FP_CMD_LEN 16
+#define MRSAS_FUSION_IN_RESET 0
+
+#define RAID_CTX_SPANARM_ARM_SHIFT (0)
+#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
+#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
+#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
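+/*
+ * Worked example (illustration only): spanArm packs span[7:5] and arm[4:0]
+ * into a single byte (see RAID_CONTEXT.spanArm), so both fields can be
+ * recovered with the masks above.  "rctx" is a hypothetical RAID_CONTEXT
+ * pointer.
+ *
+ *	span = (rctx->spanArm & RAID_CTX_SPANARM_SPAN_MASK) >>
+ *	    RAID_CTX_SPANARM_SPAN_SHIFT;
+ *	arm = rctx->spanArm & RAID_CTX_SPANARM_ARM_MASK;
+ */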
+
+/*
+ * Define region lock types
+ */
+typedef enum _REGION_TYPE {
+ REGION_TYPE_UNUSED = 0, // lock is currently not active
+ REGION_TYPE_SHARED_READ = 1, // shared lock (for reads)
+ REGION_TYPE_SHARED_WRITE = 2,
+ REGION_TYPE_EXCLUSIVE = 3, // exclusive lock (for writes)
+} REGION_TYPE;
+
+/*
+ * MR private defines
+ */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+
+/*
+ * SCSI-CAM Related Defines
+ */
+#define MRSAS_SCSI_MAX_LUNS 0 //zero for now
+#define MRSAS_SCSI_INITIATOR_ID 255
+#define MRSAS_SCSI_MAX_CMDS 8
+#define MRSAS_SCSI_MAX_CDB_LEN 16
+#define MRSAS_SCSI_SENSE_BUFFERSIZE 96
+#define MRSAS_MAX_SGL 70
+#define MRSAS_MAX_IO_SIZE (256 * 1024)
+#define MRSAS_INTERNAL_CMDS 32
+
+/* Request types */
+#define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0
+#define MRSAS_REQ_TYPE_AEN_FETCH 0x1
+#define MRSAS_REQ_TYPE_PASSTHRU 0x2
+#define MRSAS_REQ_TYPE_GETSET_PARAM 0x3
+#define MRSAS_REQ_TYPE_SCSI_IO 0x4
+
+/* Request states */
+#define MRSAS_REQ_STATE_FREE 0
+#define MRSAS_REQ_STATE_BUSY 1
+#define MRSAS_REQ_STATE_TRAN 2
+#define MRSAS_REQ_STATE_COMPLETE 3
+
+enum mrsas_req_flags {
+ MRSAS_DIR_UNKNOWN = 0x1,
+ MRSAS_DIR_IN = 0x2,
+ MRSAS_DIR_OUT = 0x4,
+ MRSAS_DIR_NONE = 0x8,
+};
+
+/*
+ * Adapter Reset States
+ */
+enum {
+ MRSAS_HBA_OPERATIONAL = 0,
+ MRSAS_ADPRESET_SM_INFAULT = 1,
+ MRSAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
+ MRSAS_ADPRESET_SM_OPERATIONAL = 3,
+ MRSAS_HW_CRITICAL_ERROR = 4,
+ MRSAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
+};
+
+/*
+ * MPT Command Structure
+ */
+struct mrsas_mpt_cmd {
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ bus_addr_t io_request_phys_addr;
+ MPI2_SGE_IO_UNION *chain_frame;
+ bus_addr_t chain_frame_phys_addr;
+ u_int32_t sge_count;
+ u_int8_t *sense;
+ bus_addr_t sense_phys_addr;
+ u_int8_t retry_for_fw_reset;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *request_desc;
+ u_int32_t sync_cmd_idx; //For getting MFI cmd from list when complete
+ u_int32_t index;
+ u_int8_t flags;
+ u_int8_t load_balance;
+ bus_size_t length; // request length
+ u_int32_t error_code; // error during request dmamap load
+ bus_dmamap_t data_dmamap;
+ void *data;
+ union ccb *ccb_ptr; // pointer to ccb
+ struct callout cm_callout;
+ struct mrsas_softc *sc;
+ TAILQ_ENTRY(mrsas_mpt_cmd) next;
+};
+
+/*
+ * MFI Command Structure
+ */
+struct mrsas_mfi_cmd {
+ union mrsas_frame *frame;
+ bus_dmamap_t frame_dmamap; // mfi frame dmamap
+ void *frame_mem; // mfi frame virtual addr
+ bus_addr_t frame_phys_addr; // mfi frame physical addr
+ u_int8_t *sense;
+ bus_dmamap_t sense_dmamap; // mfi sense dmamap
+ void *sense_mem; // mfi sense virtual addr
+ bus_addr_t sense_phys_addr;
+ u_int32_t index;
+ u_int8_t sync_cmd;
+ u_int8_t cmd_status;
+ u_int8_t abort_aen;
+ u_int8_t retry_for_fw_reset;
+ struct mrsas_softc *sc;
+ union ccb *ccb_ptr;
+ union {
+ struct {
+ u_int16_t smid;
+ u_int16_t resvd;
+ } context;
+ u_int32_t frame_count;
+ } cmd_id;
+ TAILQ_ENTRY(mrsas_mfi_cmd) next;
+};
+
+
+/*
+ * define constants for device list query options
+ */
+enum MR_PD_QUERY_TYPE {
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
+};
+
+#define MR_EVT_CFG_CLEARED 0x0004
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+
+enum MR_PD_STATE {
+ MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
+ MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
+ MR_PD_STATE_HOT_SPARE = 0x02,
+ MR_PD_STATE_OFFLINE = 0x10,
+ MR_PD_STATE_FAILED = 0x11,
+ MR_PD_STATE_REBUILD = 0x14,
+ MR_PD_STATE_ONLINE = 0x18,
+ MR_PD_STATE_COPYBACK = 0x20,
+ MR_PD_STATE_SYSTEM = 0x40,
+ };
+
+ /*
+ * defines the physical drive address structure
+ */
+#pragma pack(1)
+struct MR_PD_ADDRESS {
+ u_int16_t deviceId;
+ u_int16_t enclDeviceId;
+
+ union {
+ struct {
+ u_int8_t enclIndex;
+ u_int8_t slotNumber;
+ } mrPdAddress;
+ struct {
+ u_int8_t enclPosition;
+ u_int8_t enclConnectorIndex;
+ } mrEnclAddress;
+ } u1;
+ u_int8_t scsiDevType;
+ union {
+ u_int8_t connectedPortBitmap;
+ u_int8_t connectedPortNumbers;
+ } u2;
+ u_int64_t sasAddr[2];
+};
+#pragma pack()
+
+/*
+ * defines the physical drive list structure
+ */
+#pragma pack(1)
+struct MR_PD_LIST {
+ u_int32_t size;
+ u_int32_t count;
+ struct MR_PD_ADDRESS addr[1];
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_pd_list {
+ u_int16_t tid;
+ u_int8_t driveType;
+ u_int8_t driveState;
+};
+#pragma pack()
+
+ /*
+ * defines the logical drive reference structure
+ */
+typedef union _MR_LD_REF { // LD reference structure
+ struct {
+ u_int8_t targetId; // LD target id (0 to MAX_TARGET_ID)
+ u_int8_t reserved; // reserved to make in line with MR_PD_REF
+ u_int16_t seqNum; // Sequence Number
+ } ld_context;
+ u_int32_t ref; // shorthand reference to full 32-bits
+} MR_LD_REF; // 4 bytes
+
+
+/*
+ * defines the logical drive list structure
+ */
+#pragma pack(1)
+struct MR_LD_LIST {
+ u_int32_t ldCount; // number of LDs
+ u_int32_t reserved; // pad to 8-byte boundary
+ struct {
+ MR_LD_REF ref; // LD reference
+ u_int8_t state; // current LD state (MR_LD_STATE)
+ u_int8_t reserved[3]; // pad to 8-byte boundary
+ u_int64_t size; // LD size
+ } ldList[MAX_LOGICAL_DRIVES];
+};
+#pragma pack()
+
+/*
+ * SAS controller properties
+ */
+#pragma pack(1)
+struct mrsas_ctrl_prop {
+ u_int16_t seq_num;
+ u_int16_t pred_fail_poll_interval;
+ u_int16_t intr_throttle_count;
+ u_int16_t intr_throttle_timeouts;
+ u_int8_t rebuild_rate;
+ u_int8_t patrol_read_rate;
+ u_int8_t bgi_rate;
+ u_int8_t cc_rate;
+ u_int8_t recon_rate;
+ u_int8_t cache_flush_interval;
+ u_int8_t spinup_drv_count;
+ u_int8_t spinup_delay;
+ u_int8_t cluster_enable;
+ u_int8_t coercion_mode;
+ u_int8_t alarm_enable;
+ u_int8_t disable_auto_rebuild;
+ u_int8_t disable_battery_warn;
+ u_int8_t ecc_bucket_size;
+ u_int16_t ecc_bucket_leak_rate;
+ u_int8_t restore_hotspare_on_insertion;
+ u_int8_t expose_encl_devices;
+ u_int8_t maintainPdFailHistory;
+ u_int8_t disallowHostRequestReordering;
+ u_int8_t abortCCOnError; // set TRUE to abort CC on detecting an inconsistency
+ u_int8_t loadBalanceMode; // load balance mode (MR_LOAD_BALANCE_MODE)
+ u_int8_t disableAutoDetectBackplane; // 0 - use auto detect logic of backplanes
+					// like SGPIO, i2c SEP using h/w mechanism
+ // like GPIO pins.
+ // 1 - disable auto detect SGPIO,
+ // 2 - disable i2c SEP auto detect
+ // 3 - disable both auto detect
+ u_int8_t snapVDSpace; // % of source LD to be reserved for a VDs snapshot in
+ // snapshot repository, for metadata and user data.
+ // 1=5%, 2=10%, 3=15% and so on.
+ /*
+ * Add properties that can be controlled by a bit in the following structure.
+ */
+ struct {
+		u_int32_t	copyBackDisabled : 1;	// set TRUE to disable copyBack
+							// (0=copyback enabled)
+ u_int32_t SMARTerEnabled : 1;
+ u_int32_t prCorrectUnconfiguredAreas : 1;
+ u_int32_t useFdeOnly : 1;
+ u_int32_t disableNCQ : 1;
+ u_int32_t SSDSMARTerEnabled : 1;
+ u_int32_t SSDPatrolReadEnabled : 1;
+ u_int32_t enableSpinDownUnconfigured : 1;
+ u_int32_t autoEnhancedImport : 1;
+ u_int32_t enableSecretKeyControl : 1;
+ u_int32_t disableOnlineCtrlReset : 1;
+ u_int32_t allowBootWithPinnedCache : 1;
+ u_int32_t disableSpinDownHS : 1;
+ u_int32_t enableJBOD : 1;
+ u_int32_t reserved :18;
+ } OnOffProperties;
+ u_int8_t autoSnapVDSpace; // % of source LD to be reserved for auto
+ // snapshot in snapshot repository, for
+ // metadata and user data.
+ // 1=5%, 2=10%, 3=15% and so on.
+ u_int8_t viewSpace; // snapshot writeable VIEWs capacity as a %
+ // of source LD capacity. 0=READ only.
+ // 1=5%, 2=10%, 3=15% and so on
+ u_int16_t spinDownTime; // # of idle minutes before device is spun
+ // down (0=use FW defaults).
+ u_int8_t reserved[24];
+
+};
+#pragma pack()
+
+
+/*
+ * SAS controller information
+ */
+//#pragma pack(1)
+struct mrsas_ctrl_info {
+ /*
+ * PCI device information
+ */
+ struct {
+ u_int16_t vendor_id;
+ u_int16_t device_id;
+ u_int16_t sub_vendor_id;
+ u_int16_t sub_device_id;
+ u_int8_t reserved[24];
+ } __packed pci;
+ /*
+ * Host interface information
+ */
+ struct {
+ u_int8_t PCIX:1;
+ u_int8_t PCIE:1;
+ u_int8_t iSCSI:1;
+ u_int8_t SAS_3G:1;
+ u_int8_t reserved_0:4;
+ u_int8_t reserved_1[6];
+ u_int8_t port_count;
+ u_int64_t port_addr[8];
+ } __packed host_interface;
+ /*
+ * Device (backend) interface information
+ */
+ struct {
+ u_int8_t SPI:1;
+ u_int8_t SAS_3G:1;
+ u_int8_t SATA_1_5G:1;
+ u_int8_t SATA_3G:1;
+ u_int8_t reserved_0:4;
+ u_int8_t reserved_1[6];
+ u_int8_t port_count;
+ u_int64_t port_addr[8];
+ } __packed device_interface;
+
+ /*
+	 * List of components residing in flash. All strings are null terminated.
+ */
+ u_int32_t image_check_word;
+ u_int32_t image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char built_time[16];
+ } __packed image_component[8];
+ /*
+ * List of flash components that have been flashed on the card, but
+ * are not in use, pending reset of the adapter. This list will be
+	 * empty if a flash operation has not occurred. All strings are null
+ * terminated
+ */
+ u_int32_t pending_image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+ } __packed pending_image_component[8];
+
+ u_int8_t max_arms;
+ u_int8_t max_spans;
+ u_int8_t max_arrays;
+ u_int8_t max_lds;
+ char product_name[80];
+ char serial_no[32];
+
+ /*
+ * Other physical/controller/operation information. Indicates the
+ * presence of the hardware
+ */
+ struct {
+ u_int32_t bbu:1;
+ u_int32_t alarm:1;
+ u_int32_t nvram:1;
+ u_int32_t uart:1;
+ u_int32_t reserved:28;
+ } __packed hw_present;
+
+ u_int32_t current_fw_time;
+
+ /*
+ * Maximum data transfer sizes
+ */
+ u_int16_t max_concurrent_cmds;
+ u_int16_t max_sge_count;
+ u_int32_t max_request_size;
+
+ /*
+ * Logical and physical device counts
+ */
+ u_int16_t ld_present_count;
+ u_int16_t ld_degraded_count;
+ u_int16_t ld_offline_count;
+
+ u_int16_t pd_present_count;
+ u_int16_t pd_disk_present_count;
+ u_int16_t pd_disk_pred_failure_count;
+ u_int16_t pd_disk_failed_count;
+
+ /*
+ * Memory size information
+ */
+ u_int16_t nvram_size;
+ u_int16_t memory_size;
+ u_int16_t flash_size;
+
+ /*
+ * Error counters
+ */
+ u_int16_t mem_correctable_error_count;
+ u_int16_t mem_uncorrectable_error_count;
+
+ /*
+ * Cluster information
+ */
+ u_int8_t cluster_permitted;
+ u_int8_t cluster_active;
+
+ /*
+ * Additional max data transfer sizes
+ */
+ u_int16_t max_strips_per_io;
+
+ /*
+ * Controller capabilities structures
+ */
+ struct {
+ u_int32_t raid_level_0:1;
+ u_int32_t raid_level_1:1;
+ u_int32_t raid_level_5:1;
+ u_int32_t raid_level_1E:1;
+ u_int32_t raid_level_6:1;
+ u_int32_t reserved:27;
+ } __packed raid_levels;
+
+ struct {
+ u_int32_t rbld_rate:1;
+ u_int32_t cc_rate:1;
+ u_int32_t bgi_rate:1;
+ u_int32_t recon_rate:1;
+ u_int32_t patrol_rate:1;
+ u_int32_t alarm_control:1;
+ u_int32_t cluster_supported:1;
+ u_int32_t bbu:1;
+ u_int32_t spanning_allowed:1;
+ u_int32_t dedicated_hotspares:1;
+ u_int32_t revertible_hotspares:1;
+ u_int32_t foreign_config_import:1;
+ u_int32_t self_diagnostic:1;
+ u_int32_t mixed_redundancy_arr:1;
+ u_int32_t global_hot_spares:1;
+ u_int32_t reserved:17;
+ } __packed adapter_operations;
+
+ struct {
+ u_int32_t read_policy:1;
+ u_int32_t write_policy:1;
+ u_int32_t io_policy:1;
+ u_int32_t access_policy:1;
+ u_int32_t disk_cache_policy:1;
+ u_int32_t reserved:27;
+ } __packed ld_operations;
+
+ struct {
+ u_int8_t min;
+ u_int8_t max;
+ u_int8_t reserved[2];
+ } __packed stripe_sz_ops;
+
+ struct {
+ u_int32_t force_online:1;
+ u_int32_t force_offline:1;
+ u_int32_t force_rebuild:1;
+ u_int32_t reserved:29;
+ } __packed pd_operations;
+
+ struct {
+ u_int32_t ctrl_supports_sas:1;
+ u_int32_t ctrl_supports_sata:1;
+ u_int32_t allow_mix_in_encl:1;
+ u_int32_t allow_mix_in_ld:1;
+ u_int32_t allow_sata_in_cluster:1;
+ u_int32_t reserved:27;
+ } __packed pd_mix_support;
+
+ /*
+ * Define ECC single-bit-error bucket information
+ */
+ u_int8_t ecc_bucket_count;
+ u_int8_t reserved_2[11];
+
+ /*
+ * Include the controller properties (changeable items)
+ */
+ struct mrsas_ctrl_prop properties;
+
+ /*
+	 * Define FW pkg version (set in environment variables on an OEM basis)
+ */
+ char package_version[0x60];
+
+ /*
+ * If adapterOperations.supportMoreThan8Phys is set, and deviceInterface.portCount is greater than 8,
+ * SAS Addrs for first 8 ports shall be populated in deviceInterface.portAddr, and the rest shall be
+ * populated in deviceInterfacePortAddr2.
+ */
+ u_int64_t deviceInterfacePortAddr2[8]; //0x6a0
+ u_int8_t reserved3[128]; //0x6e0
+
+ struct { //0x760
+ u_int16_t minPdRaidLevel_0 : 4;
+ u_int16_t maxPdRaidLevel_0 : 12;
+
+ u_int16_t minPdRaidLevel_1 : 4;
+ u_int16_t maxPdRaidLevel_1 : 12;
+
+ u_int16_t minPdRaidLevel_5 : 4;
+ u_int16_t maxPdRaidLevel_5 : 12;
+
+ u_int16_t minPdRaidLevel_1E : 4;
+ u_int16_t maxPdRaidLevel_1E : 12;
+
+ u_int16_t minPdRaidLevel_6 : 4;
+ u_int16_t maxPdRaidLevel_6 : 12;
+
+ u_int16_t minPdRaidLevel_10 : 4;
+ u_int16_t maxPdRaidLevel_10 : 12;
+
+ u_int16_t minPdRaidLevel_50 : 4;
+ u_int16_t maxPdRaidLevel_50 : 12;
+
+ u_int16_t minPdRaidLevel_60 : 4;
+ u_int16_t maxPdRaidLevel_60 : 12;
+
+ u_int16_t minPdRaidLevel_1E_RLQ0 : 4;
+ u_int16_t maxPdRaidLevel_1E_RLQ0 : 12;
+
+ u_int16_t minPdRaidLevel_1E0_RLQ0 : 4;
+ u_int16_t maxPdRaidLevel_1E0_RLQ0 : 12;
+
+ u_int16_t reserved[6];
+ } pdsForRaidLevels;
+
+ u_int16_t maxPds; //0x780
+ u_int16_t maxDedHSPs; //0x782
+ u_int16_t maxGlobalHSPs; //0x784
+ u_int16_t ddfSize; //0x786
+ u_int8_t maxLdsPerArray; //0x788
+ u_int8_t partitionsInDDF; //0x789
+ u_int8_t lockKeyBinding; //0x78a
+ u_int8_t maxPITsPerLd; //0x78b
+ u_int8_t maxViewsPerLd; //0x78c
+ u_int8_t maxTargetId; //0x78d
+ u_int16_t maxBvlVdSize; //0x78e
+
+ u_int16_t maxConfigurableSSCSize; //0x790
+ u_int16_t currentSSCsize; //0x792
+
+ char expanderFwVersion[12]; //0x794
+
+ u_int16_t PFKTrialTimeRemaining; //0x7A0
+
+ u_int16_t cacheMemorySize; //0x7A2
+
+ struct { //0x7A4
+ u_int32_t supportPIcontroller :1;
+ u_int32_t supportLdPIType1 :1;
+ u_int32_t supportLdPIType2 :1;
+ u_int32_t supportLdPIType3 :1;
+ u_int32_t supportLdBBMInfo :1;
+ u_int32_t supportShieldState :1;
+ u_int32_t blockSSDWriteCacheChange :1;
+ u_int32_t supportSuspendResumeBGops :1;
+ u_int32_t supportEmergencySpares :1;
+ u_int32_t supportSetLinkSpeed :1;
+ u_int32_t supportBootTimePFKChange :1;
+ u_int32_t supportJBOD :1;
+ u_int32_t disableOnlinePFKChange :1;
+ u_int32_t supportPerfTuning :1;
+ u_int32_t supportSSDPatrolRead :1;
+ u_int32_t realTimeScheduler :1;
+
+ u_int32_t supportResetNow :1;
+ u_int32_t supportEmulatedDrives :1;
+ u_int32_t headlessMode :1;
+ u_int32_t dedicatedHotSparesLimited :1;
+
+
+ u_int32_t supportUnevenSpans :1;
+ u_int32_t reserved :11;
+ } adapterOperations2;
+
+ u_int8_t driverVersion[32]; //0x7A8
+ u_int8_t maxDAPdCountSpinup60; //0x7C8
+ u_int8_t temperatureROC; //0x7C9
+ u_int8_t temperatureCtrl; //0x7CA
+ u_int8_t reserved4; //0x7CB
+ u_int16_t maxConfigurablePds; //0x7CC
+
+
+ u_int8_t reserved5[2]; //0x7CD reserved for future use
+
+ /*
+ * HA cluster information
+ */
+ struct {
+ u_int32_t peerIsPresent :1;
+ u_int32_t peerIsIncompatible :1;
+
+ u_int32_t hwIncompatible :1;
+ u_int32_t fwVersionMismatch :1;
+ u_int32_t ctrlPropIncompatible :1;
+ u_int32_t premiumFeatureMismatch :1;
+ u_int32_t reserved :26;
+ } cluster;
+
+ char clusterId[16]; //0x7D4
+
+ u_int8_t pad[0x800-0x7E4]; //0x7E4
+} __packed;
+
+/*
+ * Ld and PD Max Support Defines
+ */
+#define MRSAS_MAX_PD 256
+#define MRSAS_MAX_LD 64
+
+/*
+ * When the SCSI mid-layer calls the driver's reset routine, the driver waits
+ * for MRSAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
+ * that the driver cannot _actually_ abort or reset pending commands. While
+ * it is waiting for the commands to complete, it prints a diagnostic message
+ * every MRSAS_RESET_NOTICE_INTERVAL seconds.
+ */
+#define MRSAS_RESET_WAIT_TIME 180
+#define MRSAS_INTERNAL_CMD_WAIT_TIME 180
+#define MRSAS_IOC_INIT_WAIT_TIME 60
+#define MRSAS_RESET_NOTICE_INTERVAL 5
+#define MRSAS_IOCTL_CMD 0
+#define MRSAS_DEFAULT_CMD_TIMEOUT 90
+#define MRSAS_THROTTLE_QUEUE_DEPTH 16
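+
+/*
+ * For illustration: with the values above, a stuck reset prints a
+ * diagnostic every MRSAS_RESET_NOTICE_INTERVAL (5) seconds for up to
+ * MRSAS_RESET_WAIT_TIME (180) seconds, i.e. at most 36 notices before
+ * the wait gives up.
+ */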
+
+/*
+ * FW reports the maximum number of commands that it can accept (maximum
+ * commands that can be outstanding) at any time. The driver must report a
+ * lower number to the mid-layer because it can issue a few internal commands
+ * itself (e.g., AEN, abort cmd, IOCTLs, etc.). The number of commands it
+ * needs is shown below.
+ */
+#define MRSAS_INT_CMDS 32
+#define MRSAS_SKINNY_INT_CMDS 5
+#define MRSAS_MAX_MSIX_QUEUES 16
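+
+/*
+ * For illustration: the queue depth advertised to CAM is the FW-reported
+ * maximum minus an internal reservation; mrsas_cam_attach() computes,
+ * roughly,
+ *
+ *	mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
+ *
+ * so a controller reporting 1008 commands would leave 976 for the
+ * mid-layer if 32 slots are reserved internally.
+ */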
+
+/*
+ * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
+ * SGLs based on the size of bus_addr_t
+ */
+#define IS_DMA64 (sizeof(bus_addr_t) == 8)
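+
+/*
+ * For illustration (sgl, i, seg_addr and seg_len are hypothetical
+ * locals): IS_DMA64 lets SGL-building code pick the matching member of
+ * union mrsas_sgl (defined below), e.g.
+ *
+ *	if (IS_DMA64) {
+ *		sgl->sge64[i].phys_addr = seg_addr;
+ *		sgl->sge64[i].length = seg_len;
+ *	} else {
+ *		sgl->sge32[i].phys_addr = seg_addr;
+ *		sgl->sge32[i].length = seg_len;
+ *	}
+ */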
+
+#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001 // MFI state change interrupt
+#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
+#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
+#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004 //MFI state change interrupt
+
+#define MFI_OB_INTR_STATUS_MASK 0x00000002
+#define MFI_POLL_TIMEOUT_SECS 60
+
+#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK 0x00000001
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
+#define MFI_1068_PCSR_OFFSET 0x84
+#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
+#define MFI_1068_FW_READY 0xDDDD0000
+
+#pragma pack(1)
+struct mrsas_sge32 {
+ u_int32_t phys_addr;
+ u_int32_t length;
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_sge64 {
+ u_int64_t phys_addr;
+ u_int32_t length;
+};
+#pragma pack()
+
+#pragma pack(1)
+union mrsas_sgl {
+ struct mrsas_sge32 sge32[1];
+ struct mrsas_sge64 sge64[1];
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_header {
+	u_int8_t cmd;			/*00h */
+ u_int8_t sense_len; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t scsi_status; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t lun; /*05h */
+ u_int8_t cdb_len; /*06h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+ u_int32_t data_xferlen; /*14h */
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_init_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_0; /*01h */
+ u_int8_t cmd_status; /*02h */
+
+ u_int8_t reserved_1; /*03h */
+ u_int32_t reserved_2; /*04h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t reserved_3; /*12h */
+ u_int32_t data_xfer_len; /*14h */
+
+ u_int32_t queue_info_new_phys_addr_lo; /*18h */
+ u_int32_t queue_info_new_phys_addr_hi; /*1Ch */
+ u_int32_t queue_info_old_phys_addr_lo; /*20h */
+ u_int32_t queue_info_old_phys_addr_hi; /*24h */
+ u_int32_t driver_ver_lo; /*28h */
+ u_int32_t driver_ver_hi; /*2Ch */
+ u_int32_t reserved_4[4]; /*30h */
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_io_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t sense_len; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t scsi_status; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t access_byte; /*05h */
+ u_int8_t reserved_0; /*06h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+ u_int32_t lba_count; /*14h */
+
+ u_int32_t sense_buf_phys_addr_lo; /*18h */
+ u_int32_t sense_buf_phys_addr_hi; /*1Ch */
+
+ u_int32_t start_lba_lo; /*20h */
+ u_int32_t start_lba_hi; /*24h */
+
+ union mrsas_sgl sgl; /*28h */
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_pthru_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t sense_len; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t scsi_status; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t lun; /*05h */
+ u_int8_t cdb_len; /*06h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+ u_int32_t data_xfer_len; /*14h */
+
+ u_int32_t sense_buf_phys_addr_lo; /*18h */
+ u_int32_t sense_buf_phys_addr_hi; /*1Ch */
+
+ u_int8_t cdb[16]; /*20h */
+ union mrsas_sgl sgl; /*30h */
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_dcmd_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_0; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t reserved_1[4]; /*03h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+
+ u_int32_t data_xfer_len; /*14h */
+ u_int32_t opcode; /*18h */
+
+ union { /*1Ch */
+ u_int8_t b[12];
+ u_int16_t s[6];
+ u_int32_t w[3];
+ } mbox;
+
+ union mrsas_sgl sgl; /*28h */
+};
+#pragma pack()
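+
+/*
+ * For illustration (a sketch; the MFI_CMD_DCMD, MFI_FRAME_DIR_READ and
+ * MR_DCMD_LD_GET_LIST constants from the MFI command set, and the
+ * ld_list_phys DMA address, are assumed here): a DCMD frame fetching
+ * the LD list would be filled in roughly as
+ *
+ *	dcmd->cmd = MFI_CMD_DCMD;
+ *	dcmd->flags = MFI_FRAME_DIR_READ;
+ *	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
+ *	dcmd->opcode = MR_DCMD_LD_GET_LIST;
+ *	dcmd->sgl.sge32[0].phys_addr = ld_list_phys;
+ *	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+ */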
+
+#pragma pack(1)
+struct mrsas_abort_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_0; /*01h */
+ u_int8_t cmd_status; /*02h */
+
+ u_int8_t reserved_1; /*03h */
+ u_int32_t reserved_2; /*04h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t reserved_3; /*12h */
+ u_int32_t reserved_4; /*14h */
+
+ u_int32_t abort_context; /*18h */
+ u_int32_t pad_1; /*1Ch */
+
+ u_int32_t abort_mfi_phys_addr_lo; /*20h */
+ u_int32_t abort_mfi_phys_addr_hi; /*24h */
+
+ u_int32_t reserved_5[6]; /*28h */
+};
+#pragma pack()
+
+#pragma pack(1)
+struct mrsas_smp_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_1; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t connection_status; /*03h */
+
+ u_int8_t reserved_2[3]; /*04h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+
+ u_int32_t data_xfer_len; /*14h */
+ u_int64_t sas_addr; /*18h */
+
+ union {
+ struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: req */
+ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: req */
+ } sgl;
+};
+#pragma pack()
+
+
+#pragma pack(1)
+struct mrsas_stp_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_1; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t reserved_2; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t reserved_3[2]; /*05h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+
+ u_int32_t data_xfer_len; /*14h */
+
+ u_int16_t fis[10]; /*18h */
+ u_int32_t stp_flags;
+
+ union {
+ struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: data */
+ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: data */
+ } sgl;
+};
+#pragma pack()
+
+union mrsas_frame {
+ struct mrsas_header hdr;
+ struct mrsas_init_frame init;
+ struct mrsas_io_frame io;
+ struct mrsas_pthru_frame pthru;
+ struct mrsas_dcmd_frame dcmd;
+ struct mrsas_abort_frame abort;
+ struct mrsas_smp_frame smp;
+ struct mrsas_stp_frame stp;
+ u_int8_t raw_bytes[64];
+};
+
+#pragma pack(1)
+union mrsas_evt_class_locale {
+
+ struct {
+ u_int16_t locale;
+ u_int8_t reserved;
+ int8_t class;
+ } __packed members;
+
+ u_int32_t word;
+
+} __packed;
+
+#pragma pack()
+
+
+#pragma pack(1)
+struct mrsas_evt_log_info {
+ u_int32_t newest_seq_num;
+ u_int32_t oldest_seq_num;
+ u_int32_t clear_seq_num;
+ u_int32_t shutdown_seq_num;
+ u_int32_t boot_seq_num;
+
+} __packed;
+
+#pragma pack()
+
+struct mrsas_progress {
+
+ u_int16_t progress;
+ u_int16_t elapsed_seconds;
+
+} __packed;
+
+struct mrsas_evtarg_ld {
+
+ u_int16_t target_id;
+ u_int8_t ld_index;
+ u_int8_t reserved;
+
+} __packed;
+
+struct mrsas_evtarg_pd {
+ u_int16_t device_id;
+ u_int8_t encl_index;
+ u_int8_t slot_number;
+
+} __packed;
+
+struct mrsas_evt_detail {
+
+ u_int32_t seq_num;
+ u_int32_t time_stamp;
+ u_int32_t code;
+ union mrsas_evt_class_locale cl;
+ u_int8_t arg_type;
+ u_int8_t reserved1[15];
+
+ union {
+ struct {
+ struct mrsas_evtarg_pd pd;
+ u_int8_t cdb_length;
+ u_int8_t sense_length;
+ u_int8_t reserved[2];
+ u_int8_t cdb[16];
+ u_int8_t sense[64];
+ } __packed cdbSense;
+
+ struct mrsas_evtarg_ld ld;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ u_int64_t count;
+ } __packed ld_count;
+
+ struct {
+ u_int64_t lba;
+ struct mrsas_evtarg_ld ld;
+ } __packed ld_lba;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ u_int32_t prevOwner;
+ u_int32_t newOwner;
+ } __packed ld_owner;
+
+ struct {
+ u_int64_t ld_lba;
+ u_int64_t pd_lba;
+ struct mrsas_evtarg_ld ld;
+ struct mrsas_evtarg_pd pd;
+ } __packed ld_lba_pd_lba;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ struct mrsas_progress prog;
+ } __packed ld_prog;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ u_int32_t prev_state;
+ u_int32_t new_state;
+ } __packed ld_state;
+
+ struct {
+ u_int64_t strip;
+ struct mrsas_evtarg_ld ld;
+ } __packed ld_strip;
+
+ struct mrsas_evtarg_pd pd;
+
+ struct {
+ struct mrsas_evtarg_pd pd;
+ u_int32_t err;
+ } __packed pd_err;
+
+ struct {
+ u_int64_t lba;
+ struct mrsas_evtarg_pd pd;
+ } __packed pd_lba;
+
+ struct {
+ u_int64_t lba;
+ struct mrsas_evtarg_pd pd;
+ struct mrsas_evtarg_ld ld;
+ } __packed pd_lba_ld;
+
+ struct {
+ struct mrsas_evtarg_pd pd;
+ struct mrsas_progress prog;
+ } __packed pd_prog;
+
+ struct {
+ struct mrsas_evtarg_pd pd;
+ u_int32_t prevState;
+ u_int32_t newState;
+ } __packed pd_state;
+
+ struct {
+ u_int16_t vendorId;
+ u_int16_t deviceId;
+ u_int16_t subVendorId;
+ u_int16_t subDeviceId;
+ } __packed pci;
+
+ u_int32_t rate;
+ char str[96];
+
+ struct {
+ u_int32_t rtc;
+ u_int32_t elapsedSeconds;
+ } __packed time;
+
+ struct {
+ u_int32_t ecar;
+ u_int32_t elog;
+ char str[64];
+ } __packed ecc;
+
+ u_int8_t b[96];
+ u_int16_t s[48];
+ u_int32_t w[24];
+ u_int64_t d[12];
+ } args;
+
+ char description[128];
+
+} __packed;
+
+
+/*******************************************************************
+ * per-instance data
+ ********************************************************************/
+struct mrsas_softc {
+ device_t mrsas_dev; // bus device
+ struct cdev *mrsas_cdev; // controller device
+ uint16_t device_id; // pci device
+ struct resource *reg_res; // register interface window
+ int reg_res_id; // register resource id
+ bus_space_tag_t bus_tag; // bus space tag
+ bus_space_handle_t bus_handle; // bus space handle
+ bus_dma_tag_t mrsas_parent_tag; // bus dma parent tag
+ bus_dma_tag_t verbuf_tag; // verbuf tag
+ bus_dmamap_t verbuf_dmamap; // verbuf dmamap
+ void *verbuf_mem; // verbuf mem
+ bus_addr_t verbuf_phys_addr; // verbuf physical addr
+ bus_dma_tag_t sense_tag; // bus dma verbuf tag
+ bus_dmamap_t sense_dmamap; // bus dma verbuf dmamap
+ void *sense_mem; // pointer to sense buf
+ bus_addr_t sense_phys_addr; // bus dma verbuf mem
+ bus_dma_tag_t io_request_tag; // bus dma io request tag
+ bus_dmamap_t io_request_dmamap; // bus dma io request dmamap
+ void *io_request_mem; // bus dma io request mem
+ bus_addr_t io_request_phys_addr; // io request physical address
+ bus_dma_tag_t chain_frame_tag; // bus dma chain frame tag
+ bus_dmamap_t chain_frame_dmamap; // bus dma chain frame dmamap
+ void *chain_frame_mem; // bus dma chain frame mem
+ bus_addr_t chain_frame_phys_addr; // chain frame phys address
+ bus_dma_tag_t reply_desc_tag; // bus dma io request tag
+ bus_dmamap_t reply_desc_dmamap; // bus dma io request dmamap
+ void *reply_desc_mem; // bus dma io request mem
+ bus_addr_t reply_desc_phys_addr; // bus dma io request mem
+ bus_dma_tag_t ioc_init_tag; // bus dma io request tag
+ bus_dmamap_t ioc_init_dmamap; // bus dma io request dmamap
+ void *ioc_init_mem; // bus dma io request mem
+ bus_addr_t ioc_init_phys_mem; // io request physical address
+ bus_dma_tag_t data_tag; // bus dma data from OS tag
+ struct cam_sim *sim_0; // SIM pointer
+ struct cam_sim *sim_1; // SIM pointer
+ struct cam_path *path_0; // ldio path pointer to CAM
+ struct cam_path *path_1; // syspd path pointer to CAM
+ struct mtx sim_lock; // sim lock
+ struct mtx pci_lock; // serialize pci access
+ struct mtx io_lock; // IO lock
+ struct mtx ioctl_lock; // IOCTL lock
+ struct mtx mpt_cmd_pool_lock; // lock for cmd pool linked list
+ struct mtx mfi_cmd_pool_lock; // lock for cmd pool linked list
+ struct mtx raidmap_lock; // lock for raid map access/update
+ struct mtx aen_lock; // aen lock
+ uint32_t max_fw_cmds; // Max commands from FW
+ uint32_t max_num_sge; // Max number of SGEs
+ struct resource *mrsas_irq; // interrupt interface window
+ void *intr_handle; // handle
+ int irq_id; // intr resource id
+ struct mrsas_mpt_cmd **mpt_cmd_list;
+ struct mrsas_mfi_cmd **mfi_cmd_list;
+ TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head;
+ TAILQ_HEAD(, mrsas_mfi_cmd) mrsas_mfi_cmd_list_head;
+ bus_addr_t req_frames_desc_phys;
+ u_int8_t *req_frames_desc;
+ u_int8_t *req_desc;
+ bus_addr_t io_request_frames_phys;
+ u_int8_t *io_request_frames;
+ bus_addr_t reply_frames_desc_phys;
+ u_int16_t last_reply_idx;
+ u_int32_t reply_q_depth;
+ u_int32_t request_alloc_sz;
+ u_int32_t reply_alloc_sz;
+ u_int32_t io_frames_alloc_sz;
+ u_int32_t chain_frames_alloc_sz;
+ u_int16_t max_sge_in_main_msg;
+ u_int16_t max_sge_in_chain;
+ u_int8_t chain_offset_io_request;
+ u_int8_t chain_offset_mfi_pthru;
+ u_int32_t map_sz;
+ u_int64_t map_id;
+ struct mrsas_mfi_cmd *map_update_cmd;
+ struct mrsas_mfi_cmd *aen_cmd;
+ u_int8_t fast_path_io;
+ void* chan;
+ void* ocr_chan;
+ u_int8_t adprecovery;
+ u_int8_t remove_in_progress;
+ u_int8_t ocr_thread_active;
+ u_int8_t do_timedout_reset;
+ u_int32_t reset_in_progress;
+ u_int32_t reset_count;
+ bus_dma_tag_t raidmap_tag[2]; // bus dma tag for RAID map
+ bus_dmamap_t raidmap_dmamap[2]; // bus dma dmamap RAID map
+ void *raidmap_mem[2]; // bus dma mem RAID map
+ bus_addr_t raidmap_phys_addr[2]; // RAID map physical address
+ bus_dma_tag_t mficmd_frame_tag; // tag for mfi frame
+ bus_dma_tag_t mficmd_sense_tag; // tag for mfi sense
+ bus_dma_tag_t evt_detail_tag; // event detail tag
+ bus_dmamap_t evt_detail_dmamap; // event detail dmamap
+ struct mrsas_evt_detail *evt_detail_mem; // event detail mem
+ bus_addr_t evt_detail_phys_addr; // event detail physical addr
+ bus_dma_tag_t ctlr_info_tag; // tag for get ctlr info cmd
+ bus_dmamap_t ctlr_info_dmamap; // get ctlr info cmd dmamap
+ void *ctlr_info_mem; // get ctlr info cmd virtual addr
+ bus_addr_t ctlr_info_phys_addr; //get ctlr info cmd physical addr
+ u_int32_t max_sectors_per_req;
+ u_int8_t disableOnlineCtrlReset;
+ atomic_t fw_outstanding;
+ u_int32_t mrsas_debug;
+ u_int32_t mrsas_io_timeout;
+ u_int32_t mrsas_fw_fault_check_delay;
+ u_int32_t io_cmds_highwater;
+ u_int8_t UnevenSpanSupport;
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+ struct proc *ocr_thread;
+ u_int32_t last_seq_num;
+ bus_dma_tag_t el_info_tag; // tag for get event log info cmd
+ bus_dmamap_t el_info_dmamap; // get event log info cmd dmamap
+ void *el_info_mem; // get event log info cmd virtual addr
+ bus_addr_t el_info_phys_addr; //get event log info cmd physical addr
+ struct mrsas_pd_list pd_list[MRSAS_MAX_PD];
+ struct mrsas_pd_list local_pd_list[MRSAS_MAX_PD];
+ u_int8_t ld_ids[MRSAS_MAX_LD];
+ struct taskqueue *ev_tq; //taskqueue for events
+ struct task ev_task;
+ u_int32_t CurLdCount;
+ u_int64_t reset_flags;
+ LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+};
+
+/* Compatibility shims for different OS versions */
+#if __FreeBSD_version >= 800001
+#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mrsas_kproc_exit(arg) kproc_exit(arg)
+#else
+#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mrsas_kproc_exit(arg) kthread_exit(arg)
+#endif
+
+static __inline void
+clear_bit(int b, volatile void *p)
+{
+ atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline void
+set_bit(int b, volatile void *p)
+{
+ atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline int
+test_bit(int b, volatile void *p)
+{
+ return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
+}
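+
+/*
+ * For illustration (hypothetical usage): the helpers above provide
+ * Linux-style atomic bit operations on an int array, e.g. a busy map of
+ * LD targets:
+ *
+ *	static int tgt_map[MRSAS_MAX_LD / 32];
+ *
+ *	set_bit(tgt_id, tgt_map);
+ *	if (test_bit(tgt_id, tgt_map))
+ *		clear_bit(tgt_id, tgt_map);
+ */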
+
+#endif /* MRSAS_H */
diff --git a/sys/dev/mrsas/mrsas_cam.c b/sys/dev/mrsas/mrsas_cam.c
new file mode 100644
index 0000000..81e8fcb
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_cam.c
@@ -0,0 +1,1179 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+*/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dev/mrsas/mrsas.h"
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <sys/taskqueue.h>
+
+
+/*
+ * Function prototypes
+ */
+int mrsas_cam_attach(struct mrsas_softc *sc);
+int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb);
+int mrsas_bus_scan(struct mrsas_softc *sc);
+int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
+int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb);
+int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim);
+int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, u_int32_t device_id,
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request);
+void mrsas_xpt_freeze(struct mrsas_softc *sc);
+void mrsas_xpt_release(struct mrsas_softc *sc);
+void mrsas_cam_detach(struct mrsas_softc *sc);
+void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
+void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi);
+void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
+ struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size);
+static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
+static void mrsas_poll(struct cam_sim *sim);
+static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
+static void mrsas_scsiio_timeout(void *data);
+static void mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
+ int nseg, int error);
+static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
+ union ccb *ccb);
+struct mrsas_mpt_cmd * mrsas_get_mpt_cmd(struct mrsas_softc *sc);
+MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
+ u_int16_t index);
+
+extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
+extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map,
+ struct mrsas_softc *sc);
+extern void mrsas_isr(void *arg);
+extern void mrsas_aen_handler(struct mrsas_softc *sc);
+extern u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
+ struct IO_REQUEST_INFO *io_info,RAID_CONTEXT *pRAID_Context,
+ MR_FW_RAID_MAP_ALL *map);
+extern u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
+ MR_FW_RAID_MAP_ALL *map);
+extern u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info);
+extern u_int8_t megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count);
+
+
+/**
+ * mrsas_cam_attach: Main entry to CAM subsystem
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_attach() during initialization
+ * to perform SIM allocations and XPT bus registration. On kernel
+ * versions 7.4 and earlier, it also initiates a bus scan.
+ */
+int mrsas_cam_attach(struct mrsas_softc *sc)
+{
+ struct cam_devq *devq;
+ int mrsas_cam_depth;
+
+ mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
+
+ if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
+ device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
+ return(ENOMEM);
+ }
+
+
+ /*
+ * Create SIM for bus 0 and register, also create path
+ */
+ sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
+ device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
+ mrsas_cam_depth, devq);
+ if (sc->sim_0 == NULL){
+ cam_simq_free(devq);
+ device_printf(sc->mrsas_dev, "Cannot register SIM\n");
+ return(ENXIO);
+ }
+ /* Initialize taskqueue for Event Handling */
+ TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
+ sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
+ taskqueue_thread_enqueue, &sc->ev_tq);
+
+ /* Run the task queue with lowest priority */
+ taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
+ device_get_nameunit(sc->mrsas_dev));
+ mtx_lock(&sc->sim_lock);
+ if (xpt_bus_register(sc->sim_0, sc->mrsas_dev,0) != CAM_SUCCESS)
+ {
+ cam_sim_free(sc->sim_0, TRUE); // passing true frees the devq
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_0));
+ cam_sim_free(sc->sim_0, TRUE); // passing true will free the devq
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ mtx_unlock(&sc->sim_lock);
+
+ /*
+ * Create SIM for bus 1 and register, also create path
+ */
+ sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
+ device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
+ mrsas_cam_depth, devq);
+ if (sc->sim_1 == NULL){
+ cam_simq_free(devq);
+ device_printf(sc->mrsas_dev, "Cannot register SIM\n");
+ return(ENXIO);
+ }
+
+ mtx_lock(&sc->sim_lock);
+ if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS){
+ cam_sim_free(sc->sim_1, TRUE); // passing true frees the devq
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_1));
+ cam_sim_free(sc->sim_1, TRUE);
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ mtx_unlock(&sc->sim_lock);
+
+#if (__FreeBSD_version <= 704000)
+ if (mrsas_bus_scan(sc)){
+ device_printf(sc->mrsas_dev, "Error in bus scan.\n");
+ return(1);
+ }
+#endif
+ return(0);
+}
+
+/**
+ * mrsas_cam_detach: De-allocates and tears down CAM
+ * input: Adapter instance soft state
+ *
+ * De-registers and frees the paths and SIMs.
+ */
+void mrsas_cam_detach(struct mrsas_softc *sc)
+{
+ if (sc->ev_tq != NULL)
+ taskqueue_free(sc->ev_tq);
+ mtx_lock(&sc->sim_lock);
+ if (sc->path_0)
+ xpt_free_path(sc->path_0);
+ if (sc->sim_0) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_0));
+ cam_sim_free(sc->sim_0, FALSE);
+ }
+ if (sc->path_1)
+ xpt_free_path(sc->path_1);
+ if (sc->sim_1) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_1));
+ cam_sim_free(sc->sim_1, TRUE);
+ }
+ mtx_unlock(&sc->sim_lock);
+}
+
+/**
+ * mrsas_action: SIM callback entry point
+ * input: pointer to SIM
+ * pointer to CAM Control Block
+ *
+ * This function processes CAM subsystem requests. The type of request is
+ * stored in ccb->ccb_h.func_code. The preprocessor #if is necessary
+ * because ccb->cpi.maxio is not supported on FreeBSD 7.4 and earlier.
+ */
+static void mrsas_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ {
+ device_id = ccb_h->target_id;
+
+ /*
+ * bus 0 is LD, bus 1 is for system-PD
+ */
+ if (cam_sim_bus(sim) == 1 &&
+ sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
+ ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
+ xpt_done(ccb);
+ }
+ else {
+ if (mrsas_startio(sc, sim, ccb)){
+ ccb->ccb_h.status |= CAM_REQ_INVALID;
+ xpt_done(ccb);
+ }
+ }
+ break;
+ }
+ case XPT_ABORT:
+ {
+ ccb->ccb_h.status = CAM_UA_ABORT;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_RESET_BUS:
+ {
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_GET_TRAN_SETTINGS:
+ {
+ ccb->cts.protocol = PROTO_SCSI;
+ ccb->cts.protocol_version = SCSI_REV_2;
+ ccb->cts.transport = XPORT_SPI;
+ ccb->cts.transport_version = 2;
+ ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
+ ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
+ ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
+ ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_SET_TRAN_SETTINGS:
+ {
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ {
+ cam_calc_geometry(&ccb->ccg, 1);
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_PATH_INQ:
+ {
+ ccb->cpi.version_num = 1;
+ ccb->cpi.hba_inquiry = 0;
+ ccb->cpi.target_sprt = 0;
+ ccb->cpi.hba_misc = 0;
+ ccb->cpi.hba_eng_cnt = 0;
+ ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
+ ccb->cpi.unit_number = cam_sim_unit(sim);
+ ccb->cpi.bus_id = cam_sim_bus(sim);
+ ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
+ ccb->cpi.base_transfer_speed = 150000;
+ strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(ccb->cpi.hba_vid, "LSI", HBA_IDLEN);
+ strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
+ ccb->cpi.transport = XPORT_SPI;
+ ccb->cpi.transport_version = 2;
+ ccb->cpi.protocol = PROTO_SCSI;
+ ccb->cpi.protocol_version = SCSI_REV_2;
+ if (ccb->cpi.bus_id == 0)
+ ccb->cpi.max_target = MRSAS_MAX_LD-1;
+ else
+ ccb->cpi.max_target = MRSAS_MAX_PD-1;
+#if (__FreeBSD_version > 704000)
+ ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
+#endif
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ default:
+ {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ }
+ }
+}
+
+/**
+ * mrsas_scsiio_timeout: Callback function for timed-out IO
+ * input: mpt command context
+ *
+ * This function executes if the timer expires after the timeout value
+ * provided by the ccb header from the CAM layer. The driver runs the
+ * timer for all DCDB and LDIO commands coming from the CAM layer. It is
+ * the callback for IO timeout, runs in a no-sleep context, and sets
+ * do_timedout_reset in the adapter context so that OCR/Kill Adapter is
+ * executed from the ocr_thread context.
+ */
+static void
+mrsas_scsiio_timeout(void *data)
+{
+ struct mrsas_mpt_cmd *cmd;
+ struct mrsas_softc *sc;
+
+ cmd = (struct mrsas_mpt_cmd *)data;
+ sc = cmd->sc;
+
+ if (cmd->ccb_ptr == NULL) {
+ printf("command timeout with NULL ccb\n");
+ return;
+ }
+
+	/* The callout below is a dummy entry so that it can be
+	 * cancelled from mrsas_cmd_done(). The controller will then
+	 * go to OCR/Kill Adapter, based on the controller's OCR
+	 * enable/disable property, from the ocr_thread context.
+ */
+ callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
+ mrsas_scsiio_timeout, cmd);
+ sc->do_timedout_reset = 1;
+ if(sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+}
+
+/**
+ * mrsas_startio: SCSI IO entry point
+ * input: Adapter instance soft state
+ * pointer to CAM Control Block
+ *
+ * This function is the SCSI IO entry point and it initiates IO processing.
+ * It copies the IO and, depending on whether the IO is read/write or inquiry,
+ * calls mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns
+ * 0 if the command is sent to firmware successfully, otherwise it returns 1.
+ */
+static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
+ union ccb *ccb)
+{
+ struct mrsas_mpt_cmd *cmd;
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE){
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ return(0);
+ }
+
+ ccb_h->status |= CAM_SIM_QUEUED;
+ cmd = mrsas_get_mpt_cmd(sc);
+
+ if (!cmd) {
+ ccb_h->status |= CAM_REQUEUE_REQ;
+ xpt_done(ccb);
+ return(0);
+ }
+
+ if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
+ if(ccb_h->flags & CAM_DIR_IN)
+ cmd->flags |= MRSAS_DIR_IN;
+ if(ccb_h->flags & CAM_DIR_OUT)
+ cmd->flags |= MRSAS_DIR_OUT;
+ }
+ else
+ cmd->flags = MRSAS_DIR_NONE; /* no data */
+
+/* For FreeBSD 10.0 and higher */
+#if (__FreeBSD_version >= 1000000)
+/*
+ * XXX We don't yet support physical addresses here.
+ */
+ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
+ case CAM_DATA_PADDR:
+ case CAM_DATA_SG_PADDR:
+ printf("%s: physical addresses not supported\n",
+ __func__);
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ ccb_h->status &= ~CAM_SIM_QUEUED;
+ goto done;
+ case CAM_DATA_SG:
+ printf("%s: scatter gather is not supported\n",
+ __func__);
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ goto done;
+ case CAM_DATA_VADDR:
+ if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
+ cmd->length = csio->dxfer_len;
+ if (cmd->length)
+ cmd->data = csio->data_ptr;
+ break;
+ default:
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ goto done;
+ }
+#else
+ if (!(ccb_h->flags & CAM_DATA_PHYS)) { //Virtual data address
+ if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
+ if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
+ cmd->length = csio->dxfer_len;
+ if (cmd->length)
+ cmd->data = csio->data_ptr;
+ }
+ else {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ goto done;
+ }
+ }
+ else { //Data addresses are physical.
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ ccb_h->status &= ~CAM_SIM_QUEUED;
+ goto done;
+ }
+#endif
+ /* save ccb ptr */
+ cmd->ccb_ptr = ccb;
+
+ req_desc = mrsas_get_request_desc(sc, (cmd->index)-1);
+ if (!req_desc) {
+ device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
+ return (FAIL);
+ }
+ memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
+ cmd->request_desc = req_desc;
+
+ if (ccb_h->flags & CAM_CDB_POINTER)
+ bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
+ else
+ bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
+ mtx_lock(&sc->raidmap_lock);
+
+ if (mrsas_ldio_inq(sim, ccb)) {
+ if (mrsas_build_ldio(sc, cmd, ccb)){
+ device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return(1);
+ }
+ }
+ else {
+ if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
+ device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return(1);
+ }
+ }
+ mtx_unlock(&sc->raidmap_lock);
+
+ if (cmd->flags == MRSAS_DIR_IN) //from device
+ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+ else if (cmd->flags == MRSAS_DIR_OUT) //to device
+ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+
+ cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+ cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/4;
+ cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+ cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
+
+ req_desc = cmd->request_desc;
+ req_desc->SCSIIO.SMID = cmd->index;
+
+ /*
+	 * Start timer for IO timeout. Default timeout value is 90 seconds.
+ */
+ callout_reset(&cmd->cm_callout, (sc->mrsas_io_timeout * hz) / 1000,
+ mrsas_scsiio_timeout, cmd);
+ atomic_inc(&sc->fw_outstanding);
+
+ if(atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
+ sc->io_cmds_highwater++;
+
+ mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+ return(0);
+
+done:
+ xpt_done(ccb);
+ return(0);
+}
+
+/**
+ * mrsas_ldio_inq: Determines if IO is read/write or inquiry
+ * input: pointer to SIM
+ * pointer to CAM Control Block
+ *
+ * This function determines if the IO is read/write or inquiry. It returns
+ * 1 if the IO is read/write and 0 if it is an inquiry.
+ */
+int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb)
+{
+ struct ccb_scsiio *csio = &(ccb->csio);
+
+ if (cam_sim_bus(sim) == 1)
+ return(0);
+
+ switch (csio->cdb_io.cdb_bytes[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case READ_16:
+ case WRITE_16:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * mrsas_get_mpt_cmd: Get a cmd from free command pool
+ * input: Adapter instance soft state
+ *
+ * This function removes an MPT command from the command free list and
+ * initializes it.
+ */
+struct mrsas_mpt_cmd* mrsas_get_mpt_cmd(struct mrsas_softc *sc)
+{
+ struct mrsas_mpt_cmd *cmd = NULL;
+
+ mtx_lock(&sc->mpt_cmd_pool_lock);
+	if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)){
+		cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
+		TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
+	} else {
+		/* Command pool exhausted; callers must handle a NULL return. */
+		mtx_unlock(&sc->mpt_cmd_pool_lock);
+		return NULL;
+	}
+	memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+ cmd->data = NULL;
+ cmd->length = 0;
+ cmd->flags = 0;
+ cmd->error_code = 0;
+ cmd->load_balance = 0;
+ cmd->ccb_ptr = NULL;
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
+
+ return cmd;
+}
+
+/**
+ * mrsas_release_mpt_cmd: Return a cmd to free command pool
+ * input: Command packet for return to free command pool
+ *
+ * This function returns an MPT command to the free command list.
+ */
+void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
+{
+ struct mrsas_softc *sc = cmd->sc;
+
+ mtx_lock(&sc->mpt_cmd_pool_lock);
+ cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
+
+ return;
+}
+
+/**
+ * mrsas_get_request_desc: Get request descriptor from array
+ * input: Adapter instance soft state
+ * SMID index
+ *
+ * This function returns a pointer to the request descriptor.
+ */
+MRSAS_REQUEST_DESCRIPTOR_UNION *
+mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
+{
+ u_int8_t *p;
+
+ if (index >= sc->max_fw_cmds) {
+ device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
+ return NULL;
+ }
+ p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
+
+ return (MRSAS_REQUEST_DESCRIPTOR_UNION *)p;
+}
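+
+/*
+ * For illustration: descriptors live in one contiguous array indexed by
+ * SMID, so the lookup above is plain pointer arithmetic. The caller in
+ * mrsas_startio() uses it as
+ *
+ *	req_desc = mrsas_get_request_desc(sc, (cmd->index)-1);
+ *	req_desc->SCSIIO.SMID = cmd->index;
+ *
+ * i.e. SMIDs are 1-based while the descriptor array is 0-based.
+ */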
+
+/**
+ * mrsas_build_ldio: Builds an LDIO command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ *
+ * This function builds the LDIO command packet. It returns 0 if the
+ * command is built successfully, otherwise it returns 1.
+ */
+int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ u_int32_t device_id;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+
+ device_id = ccb_h->target_id;
+
+ io_request = cmd->io_request;
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.status = 0;
+ io_request->RaidContext.exStatus = 0;
+
+ /* just the cdb len, other flags zero, and ORed-in later for FP */
+ io_request->IoFlags = csio->cdb_len;
+
+ if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
+ device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
+
+ io_request->DataLength = cmd->length;
+
+ if (mrsas_map_request(sc, cmd) == SUCCESS) {
+ if (cmd->sge_count > MRSAS_MAX_SGL) {
+			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
+ "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (FAIL);
+ }
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ }
+ else {
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return(FAIL);
+ }
+ return(0);
+}
+
+/**
+ * mrsas_setup_io: Set up data including Fast Path I/O
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ *
+ * This function sets up the IO request data, including Fast Path I/O
+ * where possible. It returns 0 if the IO is set up successfully, otherwise 1.
+ */
+int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, u_int32_t device_id,
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ struct IO_REQUEST_INFO io_info;
+ MR_FW_RAID_MAP_ALL *map_ptr;
+ u_int8_t fp_possible;
+ u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
+ u_int32_t datalength = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+ fp_possible = 0;
+
+ /*
+ * READ_6 (0x08) or WRITE_6 (0x0A) cdb
+ */
+ if (csio->cdb_len == 6) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[1] << 16) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 8) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[3];
+ start_lba_lo &= 0x1FFFFF;
+ }
+ /*
+	 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
+ */
+ else if (csio->cdb_len == 10) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
+ }
+ /*
+ * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
+ */
+ else if (csio->cdb_len == 12) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
+ }
+ /*
+	 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
+ */
+ else if (csio->cdb_len == 16) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[6] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[7] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[8] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[9]);
+ start_lba_hi = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
+ }
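+	/*
+	 * For illustration: a READ_10 CDB of 28 00 00 10 00 00 00 00 08 00
+	 * decodes, per the 10-byte case above, to start_lba_lo = 0x00100000
+	 * and datalength = 0x0008 blocks.
+	 */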
+
+ memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
+ io_info.numBlocks = datalength;
+ io_info.ldTgtId = device_id;
+
+ switch (ccb_h->flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN:
+ io_info.isRead = 1;
+ break;
+ case CAM_DIR_OUT:
+ io_info.isRead = 0;
+ break;
+ case CAM_DIR_NONE:
+ default:
+ mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
+ break;
+ }
+
+ map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
+ ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
+
+ if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES) ||
+ (!sc->fast_path_io)) {
+ io_request->RaidContext.regLockFlags = 0;
+ fp_possible = 0;
+ }
+ else
+ {
+ if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
+ fp_possible = io_info.fpOkForIo;
+ }
+
+ if (fp_possible) {
+ mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
+ start_lba_lo, ld_block_size);
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+ << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ }
+ if ((sc->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) {
+ io_info.devHandle = mrsas_get_updated_dev_handle(&sc->load_balance_info[device_id],
+ &io_info);
+ cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
+ }
+ else
+ cmd->load_balance = 0;
+ cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+ io_request->DevHandle = io_info.devHandle;
+ }
+ else {
+ /* Not FP IO */
+ io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ io_request->RaidContext.nseg = 0x1;
+ }
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ }
+ return(0);
+}
+
+/**
+ * mrsas_build_dcdb: Builds a DCDB command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ * Pointer to SIM
+ *
+ * This function builds the DCDB inquiry command. It returns 0 if the
+ * command is built successfully, otherwise it returns 1.
+ */
+int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+ MR_FW_RAID_MAP_ALL *map_ptr;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+
+ io_request = cmd->io_request;
+ device_id = ccb_h->target_id;
+ map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
+
+ /* Check if this is for system PD */
+ if (cam_sim_bus(sim) == 1 &&
+ sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
+ io_request->Function = 0;
+ io_request->DevHandle = map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+ io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
+ MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle =
+ map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ }
+ else {
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->LUN[1] = ccb_h->target_lun & 0xF;
+ io_request->DataLength = cmd->length;
+
+ if (mrsas_map_request(sc, cmd) == SUCCESS) {
+ if (cmd->sge_count > sc->max_num_sge) {
+			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
+ "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (1);
+ }
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ }
+ else {
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return(1);
+ }
+ return(0);
+}
+
+/**
+ * mrsas_map_request: Map and load data
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ *
+ * For data from OS, map and load the data buffer into bus space. The
+ * SG list is built in the callback. If the bus dmamap load is not
+ * successful, cmd->error_code will contain the error code and a 1 is
+ * returned.
+ */
+int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+{
+ u_int32_t retcode = 0;
+ struct cam_sim *sim;
+ int flag = BUS_DMA_NOWAIT;
+
+ sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
+
+ if (cmd->data != NULL) {
+ mtx_lock(&sc->io_lock);
+ /* Map data buffer into bus space */
+ retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
+ cmd->length, mrsas_data_load_cb, cmd, flag);
+ mtx_unlock(&sc->io_lock);
+ if (retcode)
+ device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
+ if (retcode == EINPROGRESS) {
+ device_printf(sc->mrsas_dev, "request load in progress\n");
+ mrsas_freeze_simq(cmd, sim);
+ }
+ }
+ if (cmd->error_code)
+ return(1);
+ return(retcode);
+}
+
+/**
+ * mrsas_unmap_request: Unmap and unload data
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ *
+ * This function unmaps and unloads data from OS.
+ */
+void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+{
+ if (cmd->data != NULL) {
+ if (cmd->flags & MRSAS_DIR_IN)
+ bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
+ if (cmd->flags & MRSAS_DIR_OUT)
+ bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
+ mtx_lock(&sc->io_lock);
+ bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
+ mtx_unlock(&sc->io_lock);
+ }
+}
+
+/**
+ * mrsas_data_load_cb: Callback entry point
+ * input: Pointer to command packet as argument
+ * Pointer to segment
+ * Number of segments
+ * Error
+ *
+ * This is the callback function of the bus dma map load. It builds
+ * the SG list.
+ */
+static void
+mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
+ struct mrsas_softc *sc = cmd->sc;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ pMpi25IeeeSgeChain64_t sgl_ptr;
+ int i=0, sg_processed=0;
+
+ if (error)
+ {
+ cmd->error_code = error;
+ device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
+ if (error == EFBIG) {
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
+ return;
+ }
+ }
+
+ if (cmd->flags & MRSAS_DIR_IN)
+ bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+ BUS_DMASYNC_PREREAD);
+ if (cmd->flags & MRSAS_DIR_OUT)
+ bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+ BUS_DMASYNC_PREWRITE);
+ if (nseg > sc->max_num_sge) {
+		device_printf(sc->mrsas_dev, "SGE count is too large.\n");
+ return;
+ }
+
+ io_request = cmd->io_request;
+ sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
+ sgl_ptr_end += sc->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ if (nseg != 0) {
+ for (i=0; i < nseg; i++) {
+ sgl_ptr->Address = segs[i].ds_addr;
+ sgl_ptr->Length = segs[i].ds_len;
+ sgl_ptr->Flags = 0;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (i == nseg - 1)
+ sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+ }
+ sgl_ptr++;
+ sg_processed = i + 1;
+ /*
+ * Prepare chain element
+ */
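+ /*
+ * When the main message is about to run out of SGEs while segments
+ * remain, the last slot is turned into a chain element pointing at
+ * the pre-allocated chain frame, and SGL construction continues
+ * there.
+ */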
+ if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
+ (nseg > sc->max_sge_in_main_msg)) {
+ pMpi25IeeeSgeChain64_t sg_chain;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ cmd->io_request->ChainOffset = sc->chain_offset_io_request;
+ else
+ cmd->io_request->ChainOffset = 0;
+ } else
+ cmd->io_request->ChainOffset = sc->chain_offset_io_request;
+ sg_chain = sgl_ptr;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+ else
+ sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
+ sg_chain->Address = cmd->chain_frame_phys_addr;
+ sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
+ }
+ }
+ }
+ cmd->sge_count = nseg;
+}
+
+/**
+ * mrsas_freeze_simq: Freeze SIM queue
+ * input: Pointer to command packet
+ * Pointer to SIM
+ *
+ * This function freezes the sim queue.
+ */
+static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
+{
+ union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
+
+ xpt_freeze_simq(sim, 1);
+ ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+ ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+}
+
+void mrsas_xpt_freeze(struct mrsas_softc *sc) {
+ xpt_freeze_simq(sc->sim_0, 1);
+ xpt_freeze_simq(sc->sim_1, 1);
+}
+
+void mrsas_xpt_release(struct mrsas_softc *sc) {
+ xpt_release_simq(sc->sim_0, 1);
+ xpt_release_simq(sc->sim_1, 1);
+}
+
+/**
+ * mrsas_cmd_done: Perform remaining command completion
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ *
+ * This function unmaps the request data and releases the MPT command.
+ */
+void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+{
+ callout_stop(&cmd->cm_callout);
+ mrsas_unmap_request(sc, cmd);
+ mtx_lock(&sc->sim_lock);
+ xpt_done(cmd->ccb_ptr);
+ cmd->ccb_ptr = NULL;
+ mtx_unlock(&sc->sim_lock);
+ mrsas_release_mpt_cmd(cmd);
+}
+
+/**
+ * mrsas_poll: Polling entry point
+ * input: Pointer to SIM
+ *
+ * This entry point simply invokes the interrupt service routine.
+ */
+static void mrsas_poll(struct cam_sim *sim)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
+ mrsas_isr((void *) sc);
+}
+
+/*
+ * mrsas_bus_scan: Perform bus scan
+ * input: Adapter instance soft state
+ *
+ * This function is needed on FreeBSD 7.x, where the bus scan is not
+ * automatic. It should not be called on FreeBSD 8.x and later.
+ */
+int mrsas_bus_scan(struct mrsas_softc *sc)
+{
+ union ccb *ccb_0;
+ union ccb *ccb_1;
+
+ mtx_lock(&sc->sim_lock);
+ if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
+ mtx_unlock(&sc->sim_lock);
+ return(ENOMEM);
+ }
+
+ if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
+ xpt_free_ccb(ccb_0);
+ mtx_unlock(&sc->sim_lock);
+ return(ENOMEM);
+ }
+
+ if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
+ xpt_free_ccb(ccb_0);
+ xpt_free_ccb(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+ return(EIO);
+ }
+
+ if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
+ xpt_free_ccb(ccb_0);
+ xpt_free_ccb(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+ return(EIO);
+ }
+
+ xpt_rescan(ccb_0);
+ xpt_rescan(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+
+ return(0);
+}
+
+/*
+ * mrsas_bus_scan_sim: Perform bus scan per SIM
+ * input: Adapter instance soft state
+ * Pointer to SIM
+ *
+ * This function is called from the event handler on LD creation/deletion
+ * and JBOD on/off.
+ */
+int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
+{
+ union ccb *ccb;
+
+ mtx_lock(&sc->sim_lock);
+ if ((ccb = xpt_alloc_ccb()) == NULL) {
+ mtx_unlock(&sc->sim_lock);
+ return(ENOMEM);
+ }
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
+ xpt_free_ccb(ccb);
+ mtx_unlock(&sc->sim_lock);
+ return(EIO);
+ }
+ xpt_rescan(ccb);
+ mtx_unlock(&sc->sim_lock);
+
+ return(0);
+}
diff --git a/sys/dev/mrsas/mrsas_fp.c b/sys/dev/mrsas/mrsas_fp.c
new file mode 100644
index 0000000..69a996f
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_fp.c
@@ -0,0 +1,1451 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/mrsas/mrsas.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+
+
+/*
+ * Function prototypes
+ */
+u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
+u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count);
+u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
+ struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
+u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context,
+ MR_FW_RAID_MAP_ALL *map);
+u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
+u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
+u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info);
+u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
+u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_FW_RAID_MAP_ALL *map, int *div_error);
+u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
+void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
+ PLD_LOAD_BALANCE_INFO lbInfo);
+void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request,
+ u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size);
+static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
+ MR_FW_RAID_MAP_ALL *map);
+static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map);
+static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm,
+ MR_FW_RAID_MAP_ALL *map);
+static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
+ MR_FW_RAID_MAP_ALL *map);
+static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
+ MR_FW_RAID_MAP_ALL *map);
+static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld,
+ MR_FW_RAID_MAP_ALL *map);
+MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+
+/*
+ * Spanset related function prototypes
+ * Added for PRL11 configuration (Uneven span support)
+ */
+void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo);
+static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
+static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t strip, MR_FW_RAID_MAP_ALL *map);
+static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_FW_RAID_MAP_ALL *map, int *div_error);
+static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
+ u_int64_t stripe, MR_FW_RAID_MAP_ALL *map);
+
+
+/*
+ * Spanset related defines
+ * Added for PRL11 configuration(Uneven span support)
+ */
+#define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_) MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize
+#define SPAN_INVALID 0xff
+#define SPAN_DEBUG 0
+
+/*
+ * Related Defines
+ */
+
+typedef u_int64_t REGION_KEY;
+typedef u_int32_t REGION_LEN;
+
+#define MR_LD_STATE_OPTIMAL 3
+#define FALSE 0
+#define TRUE 1
+
+
+/*
+ * Related Macros
+ */
+
+#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )
+
+#define swap32(x) \
+ ((unsigned int)( \
+ (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
+ (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
+ (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
+ (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
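+/*
+ * Example: swap32(0x12345678) == 0x78563412. Used below by
+ * mrsas_set_pd_lba() to byte-swap the T10 reference tag when building
+ * EEDP CDBs.
+ */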
+
+
+/*
+ * In-line macros for mod and divide of a 64-bit dividend by a 32-bit
+ * divisor. The caller must ensure the divisor is non-zero; no check is
+ * made here.
+ *
+ * @param dividend : Dividend
+ * @param divisor : Divisor
+ * @return remainder (mega_mod64) or quotient (mega_div64_32)
+ */
+
+#define mega_mod64(dividend, divisor) ({ \
+ u_int32_t remainder; \
+ remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \
+ remainder;})
+
+#define mega_div64_32(dividend, divisor) ({ \
+ u_int64_t quotient; \
+ quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \
+ quotient;})
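+/*
+ * Example: mega_div64_32(0x300000000ULL, 3) is 0x100000000 and
+ * mega_mod64(0x300000001ULL, 3) is 1; the quotient can exceed 32 bits,
+ * which is why it is kept in a 64-bit variable above.
+ */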
+
+
+/*
+ * Various RAID map access functions. These functions access the various
+ * parts of the RAID map and return the appropriate parameters.
+ */
+
+MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+{
+ return (&map->raidMap.ldSpanMap[ld].ldRaid);
+}
+
+u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+{
+ return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+}
+
+static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+}
+
+static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+}
+
+static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.devHndlInfo[pd].curDevHdl;
+}
+
+static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.arMapInfo[ar].pd[arm];
+}
+
+static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+}
+
+static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+}
+
+u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
+}
+
+u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid;
+ u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+
+ /*
+ * Check if logical drive was removed.
+ */
+ if (ld >= MAX_LOGICAL_DRIVES)
+ return ldBlockSize;
+
+ raid = MR_LdRaidGet(ld, map);
+ ldBlockSize = raid->logicalBlockLength;
+ if (!ldBlockSize)
+ ldBlockSize = MRSAS_SCSIBLOCKSIZE;
+
+ return ldBlockSize;
+}
+
+/**
+ * MR_ValidateMapInfo: Validate RAID map
+ * input: Adapter instance soft state
+ *
+ * This function checks and validates the loaded RAID map. It returns 0 if
+ * successful, and 1 otherwise.
+ */
+u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc)
+{
+ u_int32_t total_map_sz;
+ MR_FW_RAID_MAP_ALL *map;
+ MR_FW_RAID_MAP *pFwRaidMap;
+ PLD_SPAN_INFO ldSpanInfo;
+
+ if (!sc)
+ return 1;
+
+ map = sc->raidmap_mem[(sc->map_id & 1)];
+ pFwRaidMap = &map->raidMap;
+ ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span;
+
+ total_map_sz = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * pFwRaidMap->ldCount));
+
+ if (pFwRaidMap->totalSize != total_map_sz) {
+ device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", total_map_sz);
+ device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
+ device_printf(sc->mrsas_dev, "pFwRaidMap->totalSize=%x\n", pFwRaidMap->totalSize);
+ return 1;
+ }
+
+ if (sc->UnevenSpanSupport) {
+ mr_update_span_set(map, ldSpanInfo);
+ }
+
+ mrsas_update_load_balance_params(map, sc->load_balance_info);
+
+ return 0;
+}
+
+/*
+ * Function to print info about the span set created in the driver
+ * from the FW RAID map.
+ *
+ * Inputs :
+ * map - LD map
+ * ldSpanInfo - ldSpanInfo per HBA instance
+ */
+#if SPAN_DEBUG
+static int getSpanInfo(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+{
+
+ u_int8_t span;
+ u_int32_t element;
+ MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u_int16_t ld;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
+ {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES) {
+ continue;
+ }
+ raid = MR_LdRaidGet(ld, map);
+ printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
+ for (span=0; span<raid->spanDepth; span++)
+ printf("Span=%x, number of quads=%x\n", span,
+ map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements);
+ for (element=0; element < MAX_QUAD_DEPTH; element++) {
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ if (span_set->span_row_data_width == 0) break;
+
+ printf(" Span Set %x: width=%x, diff=%x\n", element,
+ (unsigned int)span_set->span_row_data_width,
+ (unsigned int)span_set->diff);
+ printf(" logical LBA start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->log_start_lba,
+ (long unsigned int)span_set->log_end_lba);
+ printf(" span row start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->span_row_start,
+ (long unsigned int)span_set->span_row_end);
+ printf(" data row start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->data_row_start,
+ (long unsigned int)span_set->data_row_end);
+ printf(" data strip start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->data_strip_start,
+ (long unsigned int)span_set->data_strip_end);
+
+ for (span=0; span<raid->spanDepth; span++) {
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=element+1){
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+ printf(" Span=%x, Quad=%x, diff=%x\n", span,
+ element, quad->diff);
+ printf(" offset_in_span=0x%08lx\n",
+ (long unsigned int)quad->offsetInSpan);
+ printf(" logical start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)quad->logStart,
+ (long unsigned int)quad->logEnd);
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif
+/*
+******************************************************************************
+*
+* This routine calculates the span block for a given row using the span set.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+* div_error - Divide error code.
+*/
+
+static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
+ u_int64_t *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ u_int32_t span, info;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+
+ for (info=0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (row > span_set->data_row_end) continue;
+
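+ /*
+ * A quad covers logical rows logStart..logEnd at stride 'diff';
+ * a row belongs to this quad when it lands exactly on that
+ * stride. The span block is then the quad offset plus the stride
+ * index, converted to blocks via stripeShift.
+ */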
+ for (span=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].
+ block_span_info.quad[info];
+ if (quad->diff == 0) {
+ *div_error = 1;
+ return span;
+ }
+ if ( quad->logStart <= row &&
+ row <= quad->logEnd &&
+ (mega_mod64(row - quad->logStart,
+ quad->diff)) == 0 ) {
+ if (span_blk != NULL) {
+ u_int64_t blk;
+ blk = mega_div64_32
+ ((row - quad->logStart),
+ quad->diff);
+ blk = (blk + quad->offsetInSpan)
+ << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for a given strip using the span set.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* Strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* row - row associated with strip
+*/
+
+static u_int64_t get_row_from_strip(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t info, strip_offset, span, span_offset;
+ u_int64_t span_set_Strip, span_set_Row;
+
+ for (info=0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (strip > span_set->data_strip_end) continue;
+
+ span_set_Strip = strip - span_set->data_strip_start;
+ strip_offset = mega_mod64(span_set_Strip,
+ span_set->span_row_data_width);
+ span_set_Row = mega_div64_32(span_set_Strip,
+ span_set->span_row_data_width) * span_set->diff;
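+ /*
+ * Each full span row of span_row_data_width strips advances the
+ * data row number by 'diff'; the loop below then counts how many
+ * spans start at or below strip_offset within the span row.
+ */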
+ for (span=0,span_offset=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset++;
+ else
+ break;
+ }
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
+ "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
+ (unsigned long long)span_set_Strip,
+ (unsigned long long)span_set_Row,
+ (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
+ (unsigned long long) span_set->data_row_start +
+ (unsigned long long) span_set_Row + (span_offset - 1));
+ return (span_set->data_row_start + span_set_Row + (span_offset - 1));
+ }
+ return -1LLU;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the start strip for a given row using the span set.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* Strip - Start strip associated with row
+*/
+
+static u_int64_t get_strip_from_row(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t row, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t span, info;
+ u_int64_t strip;
+
+ for (info=0; info<MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (row > span_set->data_row_end) continue;
+
+ for (span=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.quad[info];
+ if ( quad->logStart <= row &&
+ row <= quad->logEnd &&
+ mega_mod64((row - quad->logStart),
+ quad->diff) == 0 ) {
+ strip = mega_div64_32
+ (((row - span_set->data_row_start)
+ - quad->logStart),
+ quad->diff);
+ strip *= span_set->span_row_data_width;
+ strip += span_set->data_strip_start;
+ strip += span_set->strip_offset[span];
+ return strip;
+ }
+ }
+ }
+ mrsas_dprint(sc, MRSAS_PRL11,"LSI Debug - get_strip_from_row: returns invalid "
+ "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
+ return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the physical arm for a given strip using the span set.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* Phys Arm - Phys Arm associated with strip
+*/
+
+static u_int32_t get_arm_from_strip(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t info, strip_offset, span, span_offset;
+
+ for (info=0; info<MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (strip > span_set->data_strip_end) continue;
+
+ strip_offset = (u_int32_t)mega_mod64
+ ((strip - span_set->data_strip_start),
+ span_set->span_row_data_width);
+
+ for (span=0,span_offset=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset =
+ span_set->strip_offset[span];
+ else
+ break;
+ }
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI PRL11: get_arm_from_strip: "
+ " for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
+ (long unsigned int)strip, (strip_offset - span_offset));
+ return (strip_offset - span_offset);
+ }
+
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: - get_arm_from_strip: returns invalid arm"
+ " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);
+
+ return -1;
+}
+
+
+/* This function returns the physical arm for the given stripe. */
+static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
+ MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ /* Need to check correct default value */
+ u_int32_t arm = 0;
+
+ switch (raid->level) {
+ case 0:
+ case 5:
+ case 6:
+ arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+ break;
+ case 1:
+ // start with logical arm
+ arm = get_arm_from_strip(sc, ld, stripe, map);
+ arm *= 2;
+ break;
+
+ }
+
+ return arm;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
+ u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t pd, arRef;
+ u_int8_t physArm, span;
+ u_int64_t row;
+ u_int8_t retval = TRUE;
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ u_int16_t *pDevHandle = &io_info->devHandle;
+ u_int32_t logArm, rowMod, armQ, arm;
+ u_int8_t do_invader = 0;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ do_invader = 1;
+
+ // Get row and span from io_info for Uneven Span IO.
+ row = io_info->start_row;
+ span = io_info->start_span;
+
+
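+ /*
+ * RAID-6: the Q (second parity) arm rotates per row and data arms
+ * logically follow Q, so offset the logical arm past Q and wrap
+ * within the span row size.
+ */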
+ if (raid->level == 6) {
+ logArm = get_arm_from_strip(sc, ld, stripRow, map);
+ rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+ armQ = SPAN_ROW_SIZE(map,ld,span) - 1 - rowMod;
+ arm = armQ + 1 + logArm;
+ if (arm >= SPAN_ROW_SIZE(map, ld, span))
+ arm -= SPAN_ROW_SIZE(map, ld, span);
+ physArm = (u_int8_t)arm;
+ } else
+ // Calculate the arm
+ physArm = get_arm(sc, ld, span, stripRow, map);
+
+
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map);
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID;
+ if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ return retval;
+}
+
+/**
+* MR_BuildRaidContext: Set up Fast path RAID context
+*
+* This function initiates command processing: the start/end row and
+* strip information is calculated and the region lock parameters are
+* filled in. It returns FALSE if the RAID context cannot be built
+* (for example, on a divide error in the span math), TRUE otherwise.
+*/
+u_int8_t
+MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid;
+ u_int32_t ld, stripSize, stripe_mask;
+ u_int64_t endLba, endStrip, endRow, start_row, start_strip;
+ REGION_KEY regStart;
+ REGION_LEN regSize;
+ u_int8_t num_strips, numRows;
+ u_int16_t ref_in_start_stripe, ref_in_end_stripe;
+ u_int64_t ldStartBlock;
+ u_int32_t numBlocks, ldTgtId;
+ u_int8_t isRead, stripIdx;
+ u_int8_t retval = 0;
+ u_int8_t startlba_span = SPAN_INVALID;
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ int error_code = 0;
+
+ ldStartBlock = io_info->ldStartBlock;
+ numBlocks = io_info->numBlocks;
+ ldTgtId = io_info->ldTgtId;
+ isRead = io_info->isRead;
+
+ io_info->IoforUnevenSpan = 0;
+ io_info->start_span = SPAN_INVALID;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+ raid = MR_LdRaidGet(ld, map);
+
+ /*
+ * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
+ * return FALSE
+ */
+ if (raid->rowDataSize == 0) {
+ if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+ return FALSE;
+ else if (sc->UnevenSpanSupport) {
+ io_info->IoforUnevenSpan = 1;
+ } else {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
+ " but there is _NO_ UnevenSpanSupport\n",
+ MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+ return FALSE;
+ }
+ }
+ stripSize = 1 << raid->stripeShift;
+ stripe_mask = stripSize-1;
+ /*
+ * calculate starting row and stripe, and number of strips and rows
+ */
+ start_strip = ldStartBlock >> raid->stripeShift;
+ ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
+ endLba = ldStartBlock + numBlocks - 1;
+ ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
+ endStrip = endLba >> raid->stripeShift;
+ num_strips = (u_int8_t)(endStrip - start_strip + 1); // number of strips
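+ /*
+ * Example with stripeShift = 7 (128 blocks per strip):
+ * ldStartBlock 100, numBlocks 60 gives start_strip 0,
+ * ref_in_start_stripe 100, endLba 159, endStrip 1 and
+ * num_strips 2, since the IO crosses one strip boundary.
+ */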
+ if (io_info->IoforUnevenSpan) {
+ start_row = get_row_from_strip(sc, ld, start_strip, map);
+ endRow = get_row_from_strip(sc, ld, endStrip, map);
+ if (raid->spanDepth == 1) {
+ startlba_span = 0;
+ *pdBlock = start_row << raid->stripeShift;
+ } else {
+ startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
+ pdBlock, map, &error_code);
+ if (error_code == 1) {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d. Send IO w/o region lock.\n",
+ __func__, __LINE__);
+ return FALSE;
+ }
+ }
+ if (startlba_span == SPAN_INVALID) {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d for row 0x%llx,"
+ "start strip %llx endSrip %llx\n", __func__,
+ __LINE__, (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip);
+ return FALSE;
+ }
+ io_info->start_span = startlba_span;
+ io_info->start_row = start_row;
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: Check Span number from %s %d for row 0x%llx, "
+ " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
+ __func__, __LINE__, (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip, startlba_span);
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
+ (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
+ } else {
+ start_row = mega_div64_32(start_strip, raid->rowDataSize); // Start Row
+ endRow = mega_div64_32(endStrip, raid->rowDataSize);
+ }
+
+ numRows = (u_int8_t)(endRow - start_row + 1); // get the row count
+
+ /*
+ * Calculate region info. (Assume region at start of first row, and
+ * assume this IO needs the full row - will adjust if not true.)
+ */
+ regStart = start_row << raid->stripeShift;
+ regSize = stripSize;
+
+ /* Check if we can send this I/O via FastPath */
+ if (raid->capability.fpCapable) {
+ if (isRead)
+ io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpReadAcrossStripe));
+ else
+ io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpWriteAcrossStripe));
+ } else
+ io_info->fpOkForIo = FALSE;
+
+ if (numRows == 1) {
+ if (num_strips == 1) {
+ /* single-strip IOs can always lock only the data needed;
+ multi-strip IOs always need the full stripe locked */
+ regStart += ref_in_start_stripe;
+ regSize = numBlocks;
+ }
+ } else if (io_info->IoforUnevenSpan == 0) {
+ // For Even span region lock optimization.
+ // If the start strip is the last in the start row
+ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+ regStart += ref_in_start_stripe;
+ // initialize count to sectors from startRef to end of strip
+ regSize = stripSize - ref_in_start_stripe;
+ }
+ // add complete rows in the middle of the transfer
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ // if IO ends within first strip of last row
+ if (endStrip == endRow*raid->rowDataSize)
+ regSize += ref_in_end_stripe+1;
+ else
+ regSize += stripSize;
+ } else {
+ //For Uneven span region lock optimization.
+ // If the start strip is the last in the start row
+ if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
+ SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+ regStart += ref_in_start_stripe;
+ // initialize count to sectors from startRef to end of strip
+ regSize = stripSize - ref_in_start_stripe;
+ }
+ // add complete rows in the middle of the transfer
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ // if IO ends within first strip of last row
+ if (endStrip == get_strip_from_row(sc, ld, endRow, map))
+ regSize += ref_in_end_stripe+1;
+ else
+ regSize += stripSize;
+ }
+ pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ pRAID_Context->regLockFlags = (isRead)? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+ else
+ pRAID_Context->regLockFlags = (isRead)? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+ pRAID_Context->VirtualDiskTgtId = raid->targetId;
+ pRAID_Context->regLockRowLBA = regStart;
+ pRAID_Context->regLockLength = regSize;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /*
+ * Get Phy Params only if FP capable, or else leave it to MR firmware
+ * to do the calculation.
+ */
+ if (io_info->fpOkForIo) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(sc, ld,
+ start_strip, ref_in_start_stripe, io_info,
+ pRAID_Context, map) :
+ MR_GetPhyParams(sc, ld, start_strip,
+ ref_in_start_stripe, io_info, pRAID_Context, map);
+ /* If IO on an invalid Pd, then FP is not possible */
+ if (io_info->devHandle == MR_PD_INVALID)
+ io_info->fpOkForIo = FALSE;
+ return retval;
+ }
+ else if (isRead) {
+ for (stripIdx=0; stripIdx<num_strips; stripIdx++) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(sc, ld,
+ start_strip + stripIdx,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map) :
+ MR_GetPhyParams(sc, ld,
+ start_strip + stripIdx, ref_in_start_stripe,
+ io_info, pRAID_Context, map);
+ if (!retval)
+ return TRUE;
+ }
+ }
+#if SPAN_DEBUG
+ // Just for testing what arm we get for strip.
+ get_arm_from_strip(sc, ld, start_strip, map);
+#endif
+ return TRUE;
+}
+
+/*
+******************************************************************************
+*
+* This routine prepares span set info from a valid RAID map and stores it
+* in the driver's local (per-instance) copy of ldSpanInfo.
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
+void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+{
+ u_int8_t span,count;
+ u_int32_t element,span_row_width;
+ u_int64_t span_row;
+ MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set, *span_set_prev;
+ MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u_int16_t ld;
+
+ if (!ldSpanInfo)
+ return;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
+ {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES)
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ for (element=0; element < MAX_QUAD_DEPTH; element++) {
+ for (span=0; span < raid->spanDepth; span++) {
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements < element+1)
+ continue;
+ // TO-DO
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+
+ span_set->diff = quad->diff;
+
+ for (count=0,span_row_width=0;
+ count<raid->spanDepth; count++) {
+ if (map->raidMap.ldSpanMap[ld].
+ spanBlock[count].
+ block_span_info.
+ noElements >=element+1) {
+ span_set->strip_offset[count] =
+ span_row_width;
+ span_row_width +=
+ MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize;
+#if SPAN_DEBUG
+ printf("LSI Debug span %x rowDataSize %x\n",
+ count, MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize);
+#endif
+ }
+ }
+
+ span_set->span_row_data_width = span_row_width;
+ span_row = mega_div64_32(((quad->logEnd -
+ quad->logStart) + quad->diff), quad->diff);
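+ /* Rows contributed by this quad: one for every 'diff'
+ * logical rows between logStart and logEnd, inclusive. */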
+
+ if (element == 0) {
+ span_set->log_start_lba = 0;
+ span_set->log_end_lba =
+ ((span_row << raid->stripeShift) * span_row_width) - 1;
+
+ span_set->span_row_start = 0;
+ span_set->span_row_end = span_row - 1;
+
+ span_set->data_strip_start = 0;
+ span_set->data_strip_end =
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start = 0;
+ span_set->data_row_end =
+ (span_row * quad->diff) - 1;
+ } else {
+ span_set_prev = &(ldSpanInfo[ld].
+ span_set[element - 1]);
+ span_set->log_start_lba =
+ span_set_prev->log_end_lba + 1;
+ span_set->log_end_lba =
+ span_set->log_start_lba +
+ ((span_row << raid->stripeShift) * span_row_width) - 1;
+
+ span_set->span_row_start =
+ span_set_prev->span_row_end + 1;
+ span_set->span_row_end =
+ span_set->span_row_start + span_row - 1;
+
+ span_set->data_strip_start =
+ span_set_prev->data_strip_end + 1;
+ span_set->data_strip_end =
+ span_set->data_strip_start +
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start =
+ span_set_prev->data_row_end + 1;
+ span_set->data_row_end =
+ span_set->data_row_start +
+ (span_row * quad->diff) - 1;
+ }
+ break;
+ }
+ if (span == raid->spanDepth) break; // no quads remain
+ }
+ }
+#if SPAN_DEBUG
+ getSpanInfo(map, ldSpanInfo); //to get span set info
+#endif
+}
+
+/**
+ * mrsas_update_load_balance_params: Update load balance params
+ * Inputs: map pointer
+ * Load balance info
+ *
+ * This function updates the load balance parameters for each LD
+ * configured as a two-drive optimal RAID-1.
+ */
+void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
+ PLD_LOAD_BALANCE_INFO lbInfo)
+{
+ int ldCount;
+ u_int16_t ld;
+ u_int32_t pd, arRef;
+ MR_LD_RAID *raid;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
+ {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+
+ raid = MR_LdRaidGet(ld, map);
+
+ /* Two drive Optimal RAID 1 */
+ if ((raid->level == 1) && (raid->rowSize == 2) &&
+ (raid->spanDepth == 1)
+ && raid->ldState == MR_LD_STATE_OPTIMAL) {
+ lbInfo[ldCount].loadBalanceFlag = 1;
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, 0, map);
+
+ /* Get the PD */
+ pd = MR_ArPdGet(arRef, 0, map);
+ /* Get dev handle from PD */
+ lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map);
+ pd = MR_ArPdGet(arRef, 1, map);
+ lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map);
+ } else
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ }
+}
+
+
+/**
+ * mrsas_set_pd_lba: Sets PD LBA
+ * input: io_request pointer
+ * CDB length
+ * io_info pointer
+ * Pointer to CCB
+ * Local RAID map pointer
+ * Reference tag (start block of IO)
+ * LD block size
+ *
+ * Used to set the PD logical block address in CDB for FP IOs.
+ */
+void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
+ struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size)
+{
+ MR_LD_RAID *raid;
+ u_int32_t ld;
+ u_int64_t start_blk = io_info->pdBlock;
+ u_int8_t *cdb = io_request->CDB.CDB32;
+ u_int32_t num_blocks = io_info->numBlocks;
+ u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+
+ /* Check if T10 PI (DIF) is enabled for this LD */
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+ if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
+ cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
+
+ if (ccb_h->flags == CAM_DIR_OUT)
+ cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
+ else
+ cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
+ cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;
+
+ /* LBA */
+ cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
+ cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
+ cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
+ cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
+ cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
+ cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[19] = (u_int8_t)(start_blk & 0xff);
+
+ /* Logical block reference tag */
+ io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
+ io_request->IoFlags = 32; /* Specify 32-byte cdb */
+
+ /* Transfer length */
+ cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
+ cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
+ cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
+ cdb[31] = (u_int8_t)(num_blocks & 0xff);
+
+ /* set SCSI IO EEDP Flags */
+ if (ccb_h->flags == CAM_DIR_OUT) {
+ io_request->EEDPFlags =
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ } else {
+ io_request->EEDPFlags =
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ }
+ io_request->Control |= (0x4 << 26);
+ io_request->EEDPBlockSize = ld_block_size;
+ } else {
+ /* Some drives don't support 16/12 byte CDB's, convert to 10 */
+ if (((cdb_len == 12) || (cdb_len == 16)) &&
+ (start_blk <= 0xffffffff)) {
+ if (cdb_len == 16) {
+ opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[14];
+ control = cdb[15];
+ } else {
+ opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[6] = groupnum;
+ cdb[9] = control;
+
+ /* Transfer length */
+ cdb[8] = (u_int8_t)(num_blocks & 0xff);
+ cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
+
+ io_request->IoFlags = 10; /* Specify 10-byte cdb */
+ cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u_int8_t)(num_blocks & 0xff);
+ cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ cdb_len = 16;
+ } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
+ /* convert to 10 byte CDB */
+ opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
+ control = cdb[5];
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = opcode;
+ cdb[9] = control;
+
+ /* Set transfer length */
+ cdb[8] = (u_int8_t)(num_blocks & 0xff);
+ cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
+
+ /* Specify 10-byte cdb */
+ cdb_len = 10;
+ }
+
+ /* Normal case: load the LBA into the (possibly converted) CDB */
+ switch (cdb_len)
+ {
+ case 6:
+ {
+ u_int8_t val = cdb[1] & 0xE0;
+ cdb[3] = (u_int8_t)(start_blk & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
+ break;
+ }
+ case 10:
+ case 12:
+ cdb[5] = (u_int8_t)(start_blk & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
+ break;
+ case 16:
+ cdb[9] = (u_int8_t)(start_blk & 0xff);
+ cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
+ cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
+ break;
+ }
+ }
+}
+
+/**
+ * mrsas_get_best_arm: Determine the best spindle arm
+ * Inputs: Load balance info
+ * Current arm, block address and block count of the IO
+ *
+ * This function determines and returns the best arm by looking at the
+ * parameters of the last PD access.
+ */
+u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count)
+{
+ u_int16_t pend0, pend1;
+ u_int64_t diff0, diff1;
+ u_int8_t bestArm;
+
+ /* get the pending cmds for the data and mirror arms */
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+
+ /* Determine the disk whose head is nearer to the req. block */
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
+ bestArm = (diff0 <= diff1 ? 0 : 1);
+
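+ /* Override the nearest-head choice when that arm is backed up by
+ * more than 16 outstanding commands relative to the other arm. */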
+ if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16))
+ bestArm ^= 1;
+
+ /* Update the last accessed block on the correct pd */
+ lbInfo->last_accessed_block[bestArm] = block + count - 1;
+
+ return bestArm;
+}
+
+/**
+ * mrsas_get_updated_dev_handle: Get the updated dev handle
+ * Inputs: Load balance info
+ * io_info pointer
+ *
+ * This function determines and returns the updated dev handle.
+ */
+u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info)
+{
+ u_int8_t arm, old_arm;
+ u_int16_t devHandle;
+
+ old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
+
+ /* get best new arm */
+ arm = mrsas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
+ devHandle = lbInfo->raid1DevHandle[arm];
+ atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
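+ /* Account for the command about to be issued on this arm
+ * (presumably decremented again when the IO completes). */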
+
+ return devHandle;
+}
+
+/**
+ * MR_GetPhyParams: Calculate arm, span, and block
+ * Inputs: Adapter instance soft state
+ * Logical drive number (LD)
+ * Stripe number (stripRow)
+ * Reference in stripe (stripRef)
+ * Outputs: Span number
+ * Absolute Block number in the physical disk
+ *
+ * This routine calculates the arm, span and block for the specified stripe
+ * and reference in stripe.
+ */
+u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow,
+ u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t pd, arRef;
+ u_int8_t physArm, span;
+ u_int64_t row;
+ u_int8_t retval = TRUE;
+ int error_code = 0;
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ u_int16_t *pDevHandle = &io_info->devHandle;
+ u_int32_t rowMod, armQ, arm, logArm;
+ u_int8_t do_invader = 0;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ do_invader = 1;
+
+ row = mega_div64_32(stripRow, raid->rowDataSize);
+
+ if (raid->level == 6) {
+ logArm = mega_mod64(stripRow, raid->rowDataSize); // logical arm within row
+ if (raid->rowSize == 0)
+ return FALSE;
+ rowMod = mega_mod64(row, raid->rowSize); // get logical row mod
+ armQ = raid->rowSize-1-rowMod; // index of Q drive
+ arm = armQ+1+logArm; // data always logically follows Q
+ if (arm >= raid->rowSize) // handle wrap condition
+ arm -= raid->rowSize;
+ physArm = (u_int8_t)arm;
+ } else {
+ if (raid->modFactor == 0)
+ return FALSE;
+ physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
+ }
+
+ if (raid->spanDepth == 1) {
+ span = 0;
+ *pdBlock = row << raid->stripeShift;
+ } else {
+ span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
+ if (error_code == 1)
+ return FALSE;
+ }
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+
+ pd = MR_ArPdGet(arRef, physArm, map); // Get the Pd.
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
+ else {
+ *pDevHandle = MR_PD_INVALID; // set dev handle as invalid.
+ if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map); // Get Alternate Pd.
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);//Get dev handle from Pd.
+ }
+ }
+
+ *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ return retval;
+}
+
+/**
+ * MR_GetSpanBlock: Calculate span block
+ * Inputs: LD
+ * row
+ * PD span block
+ * RAID map pointer
+ * Outputs: Span number
+ * Error code
+ *
+ * This routine calculates the span from the span block info.
+ */
+u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_FW_RAID_MAP_ALL *map, int *div_error)
+{
+ MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+ MR_QUAD_ELEMENT *quad;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t span, j;
+ u_int64_t blk, debugBlk;
+
+ for (span=0; span < raid->spanDepth; span++, pSpanBlock++) {
+ for (j=0; j < pSpanBlock->block_span_info.noElements; j++) {
+ quad = &pSpanBlock->block_span_info.quad[j];
+ if (quad->diff == 0) {
+ *div_error = 1;
+ return span;
+ }
+ if (quad->logStart <= row && row <= quad->logEnd &&
+ (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
+ if (span_blk != NULL) {
+ blk = mega_div64_32((row-quad->logStart), quad->diff);
+ debugBlk = blk;
+ blk = (blk + quad->offsetInSpan) << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return span;
+}
+
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
new file mode 100644
index 0000000..6343aa5
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/mrsas/mrsas.h>
+#include <dev/mrsas/mrsas_ioctl.h>
+
+/*
+ * Function prototypes
+ */
+int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
+int mrsas_passthru(struct mrsas_softc *sc, void *arg);
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
+void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_dump_dcmd(struct mrsas_softc *sc, struct mrsas_dcmd_frame* dcmd);
+void mrsas_dump_ioctl(struct mrsas_softc *sc, struct mrsas_iocpacket *user_ioc);
+void * mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+static int mrsas_create_frame_pool(struct mrsas_softc *sc);
+static void mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
+ int nsegs, int error);
+
+extern struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
+extern void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
+extern int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd);
+
+
+/**
+ * mrsas_dump_dcmd: Print debug output for DCMDs
+ * input: Adapter instance soft state
+ * DCMD frame structure
+ *
+ * This function is called from mrsas_dump_ioctl() to print out debug
+ * information on the DCMD frame being handled.
+ */
+void mrsas_dump_dcmd( struct mrsas_softc *sc, struct mrsas_dcmd_frame* dcmd )
+{
+ int i;
+
+ device_printf(sc->mrsas_dev, "dcmd->cmd: 0x%02hhx\n", dcmd->cmd);
+ device_printf(sc->mrsas_dev, "dcmd->cmd_status: 0x%02hhx\n", dcmd->cmd_status);
+ device_printf(sc->mrsas_dev, "dcmd->sge_count: 0x%02hhx\n", dcmd->sge_count);
+ device_printf(sc->mrsas_dev, "dcmd->context: 0x%08x\n", dcmd->context);
+ device_printf(sc->mrsas_dev, "dcmd->flags: 0x%04hx\n", dcmd->flags);
+ device_printf(sc->mrsas_dev, "dcmd->timeout: 0x%04hx\n", dcmd->timeout);
+ device_printf(sc->mrsas_dev, "dcmd->data_xfer_len: 0x%08x\n", dcmd->data_xfer_len);
+ device_printf(sc->mrsas_dev, "dcmd->opcode: 0x%08x\n", dcmd->opcode);
+ device_printf(sc->mrsas_dev, "dcmd->mbox.w[0]: 0x%08x\n", dcmd->mbox.w[0]);
+ device_printf(sc->mrsas_dev, "dcmd->mbox.w[1]: 0x%08x\n", dcmd->mbox.w[1]);
+ device_printf(sc->mrsas_dev, "dcmd->mbox.w[2]: 0x%08x\n", dcmd->mbox.w[2]);
+ for (i=0; i< MIN(MAX_IOCTL_SGE, dcmd->sge_count); i++) {
+ device_printf(sc->mrsas_dev, "sgl[%02d]\n", i);
+ device_printf(sc->mrsas_dev, " sge32[%02d].phys_addr: 0x%08x\n",
+ i, dcmd->sgl.sge32[i].phys_addr);
+ device_printf(sc->mrsas_dev, " sge32[%02d].length: 0x%08x\n",
+ i, dcmd->sgl.sge32[i].length);
+ device_printf(sc->mrsas_dev, " sge64[%02d].phys_addr: 0x%08llx\n",
+ i, (long long unsigned int) dcmd->sgl.sge64[i].phys_addr);
+ device_printf(sc->mrsas_dev, " sge64[%02d].length: 0x%08x\n",
+ i, dcmd->sgl.sge64[i].length);
+ }
+}
+
+/**
+ * mrsas_dump_ioctl: Print debug output for ioctl
+ * input: Adapter instance soft state
+ * iocpacket structure
+ *
+ * This function is called from mrsas_passthru() to print out debug information
+ * in the handling and routing of ioctl commands.
+ */
+void mrsas_dump_ioctl(struct mrsas_softc *sc, struct mrsas_iocpacket *user_ioc)
+{
+ union mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
+ struct mrsas_dcmd_frame* dcmd = (struct mrsas_dcmd_frame *) &(in_cmd->dcmd);
+ int i;
+
+ device_printf(sc->mrsas_dev,
+ "====== In %s() ======================================\n", __func__);
+ device_printf(sc->mrsas_dev, "host_no: 0x%04hx\n", user_ioc->host_no);
+ device_printf(sc->mrsas_dev, " __pad1: 0x%04hx\n", user_ioc->__pad1);
+ device_printf(sc->mrsas_dev, "sgl_off: 0x%08x\n", user_ioc->sgl_off);
+ device_printf(sc->mrsas_dev, "sge_count: 0x%08x\n", user_ioc->sge_count);
+ device_printf(sc->mrsas_dev, "sense_off: 0x%08x\n", user_ioc->sense_off);
+ device_printf(sc->mrsas_dev, "sense_len: 0x%08x\n", user_ioc->sense_len);
+
+ mrsas_dump_dcmd(sc, dcmd);
+
+ for (i=0; i< MIN(MAX_IOCTL_SGE, user_ioc->sge_count); i++) {
+ device_printf(sc->mrsas_dev, "sge[%02d]\n", i);
+ device_printf(sc->mrsas_dev,
+ " iov_base: %p\n", user_ioc->sgl[i].iov_base);
+ device_printf(sc->mrsas_dev, " iov_len: %p\n",
+ (void*)user_ioc->sgl[i].iov_len);
+ }
+ device_printf(sc->mrsas_dev,
+ "==================================================================\n");
+}
+
+/**
+ * mrsas_passthru: Handle pass-through commands
+ * input: Adapter instance soft state
+ * argument pointer
+ *
+ * This function is called from mrsas_ioctl() to handle pass-through and
+ * ioctl commands to Firmware.
+ */
+int mrsas_passthru( struct mrsas_softc *sc, void *arg )
+{
+ struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
+ union mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
+ struct mrsas_mfi_cmd *cmd = NULL;
+ bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
+ bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
+ void *ioctl_data_mem[MAX_IOCTL_SGE]; // ioctl data virtual addr
+ bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE]; // ioctl data phys addr
+ bus_dma_tag_t ioctl_sense_tag = 0;
+ bus_dmamap_t ioctl_sense_dmamap = 0;
+ void *ioctl_sense_mem = 0;
+ bus_addr_t ioctl_sense_phys_addr = 0;
+ int i, adapter, ioctl_data_size, ioctl_sense_size, ret=0;
+ struct mrsas_sge32 *kern_sge32;
+ unsigned long *sense_ptr;
+
+ /* For debug - uncomment the following line for debug output */
+ //mrsas_dump_ioctl(sc, user_ioc);
+
+ /*
+ * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0. In this
+ * case do nothing and return 0 to it as status.
+ */
+ if (in_cmd->dcmd.opcode == 0) {
+ device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
+ user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
+ return (0);
+ }
+
+ /* Validate host_no */
+ adapter = user_ioc->host_no;
+ if (adapter != device_get_unit(sc->mrsas_dev)) {
+ device_printf(sc->mrsas_dev, "In %s() IOCTL not for me!\n", __func__);
+ return(ENOENT);
+ }
+
+ /* Validate SGL length */
+ if (user_ioc->sge_count > MAX_IOCTL_SGE) {
+ device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n",
+ __func__, user_ioc->sge_count);
+ return(ENOENT);
+ }
+
+ /* Get a command */
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
+ return(ENOMEM);
+ }
+
+ /*
+ * User's IOCTL packet has 2 frames (maximum). Copy those two
+ * frames into our cmd's frames. cmd->frame's context will get
+ * overwritten when we copy from user's frames. So set that value
+ * alone separately
+ */
+ memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+ cmd->frame->hdr.context = cmd->index;
+ cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64);
+
+ /*
+ * The management interface between applications and the fw uses
+	 * MFI frames. E.g., RAID configuration changes, LD property changes,
+	 * etc. are accomplished through different kinds of MFI frames. The
+ * driver needs to care only about substituting user buffers with
+ * kernel buffers in SGLs. The location of SGL is embedded in the
+ * struct iocpacket itself.
+ */
+ kern_sge32 = (struct mrsas_sge32 *)
+ ((unsigned long)cmd->frame + user_ioc->sgl_off);
+
+ /*
+ * For each user buffer, create a mirror buffer and copy in
+ */
+	for (i = 0; i < user_ioc->sge_count; i++) {
+ if (!user_ioc->sgl[i].iov_len)
+ continue;
+ ioctl_data_size = user_ioc->sgl[i].iov_len;
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ ioctl_data_size, // maxsize
+ 1, // msegments
+ ioctl_data_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &ioctl_data_tag[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
+			ret = ENOMEM;
+			goto out;
+ }
+ if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
+ (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
+			ret = ENOMEM;
+			goto out;
+ }
+ if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
+ ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
+ &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
+			ret = ENOMEM;
+			goto out;
+ }
+
+ /* Save the physical address and length */
+ kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];
+ kern_sge32[i].length = user_ioc->sgl[i].iov_len;
+
+ /* Copy in data from user space */
+ ret = copyin(user_ioc->sgl[i].iov_base, ioctl_data_mem[i],
+ user_ioc->sgl[i].iov_len);
+ if (ret) {
+ device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
+ goto out;
+ }
+ }
+
+ ioctl_sense_size = user_ioc->sense_len;
+ if (user_ioc->sense_len) {
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ ioctl_sense_size, // maxsize
+ 1, // msegments
+ ioctl_sense_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &ioctl_sense_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
+			ret = ENOMEM;
+			goto out;
+ }
+ if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
+ (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
+			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense mem\n");
+			ret = ENOMEM;
+			goto out;
+ }
+ if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
+ ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
+ &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
+			ret = ENOMEM;
+			goto out;
+ }
+		/*
+		 * Point sense_ptr at the sense-address slot inside our copy
+		 * of the frame and store the DMA buffer's physical address
+		 * there, so the FW writes sense data into our buffer.
+		 */
+		sense_ptr = (unsigned long *)
+		    ((unsigned long)cmd->frame + user_ioc->sense_off);
+		*sense_ptr = ioctl_sense_phys_addr;
+ }
+
+ /*
+ * Set the sync_cmd flag so that the ISR knows not to complete this
+ * cmd to the SCSI mid-layer
+ */
+ cmd->sync_cmd = 1;
+ mrsas_issue_blocked_cmd(sc, cmd);
+ cmd->sync_cmd = 0;
+
+ /*
+ * copy out the kernel buffers to user buffers
+ */
+ for (i = 0; i < user_ioc->sge_count; i++) {
+ ret = copyout(ioctl_data_mem[i], user_ioc->sgl[i].iov_base,
+ user_ioc->sgl[i].iov_len);
+ if (ret) {
+ device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
+ goto out;
+ }
+ }
+
+ /*
+ * copy out the sense
+ */
+ if (user_ioc->sense_len) {
+ /*
+		 * sense_ptr points to the location in the user's frame that
+		 * holds the address of the user-space sense buffer
+ */
+ sense_ptr = (unsigned long *) ((unsigned long)user_ioc->frame.raw +
+ user_ioc->sense_off);
+ ret = copyout(ioctl_sense_mem, (unsigned long*)*sense_ptr,
+ user_ioc->sense_len);
+ if (ret) {
+ device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Return command status to user space
+ */
+	user_ioc->frame.hdr.cmd_status = cmd->frame->hdr.cmd_status;
+
+out:
+ /*
+ * Release sense buffer
+ */
+ if (ioctl_sense_phys_addr)
+ bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
+ if (ioctl_sense_mem)
+ bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap);
+ if (ioctl_sense_tag)
+ bus_dma_tag_destroy(ioctl_sense_tag);
+
+ /*
+ * Release data buffers
+ */
+ for (i = 0; i < user_ioc->sge_count; i++) {
+ if (!user_ioc->sgl[i].iov_len)
+ continue;
+ if (ioctl_data_phys_addr[i])
+ bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
+ if (ioctl_data_mem[i] != NULL)
+ bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
+ ioctl_data_dmamap[i]);
+ if (ioctl_data_tag[i] != NULL)
+ bus_dma_tag_destroy(ioctl_data_tag[i]);
+ }
+
+ /* Free command */
+ mrsas_release_mfi_cmd(cmd);
+
+ return(ret);
+}
+
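+/*
+ * Illustrative sketch, not part of the driver: a management application
+ * (e.g. MegaCli) reaches mrsas_passthru() through the control device's
+ * ioctl entry point, roughly as follows.  The device node name below is
+ * an assumption for illustration only.
+ *
+ *	struct mrsas_iocpacket ioc;
+ *	int fd;
+ *
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.host_no = 0;	// must match the adapter's unit number
+ *	// Fill ioc.frame.raw with an MFI frame; set sgl_off, sge_count,
+ *	// sense_off/sense_len and the ioc.sgl[] iovecs for data buffers.
+ *	fd = open("/dev/mrsas0", O_RDWR);	// assumed node name
+ *	ioctl(fd, MRSAS_IOC_FIRMWARE_PASS_THROUGH, &ioc);
+ *	close(fd);
+ */
+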
+/**
+ * mrsas_alloc_mfi_cmds: Allocates the command packets
+ * input: Adapter instance soft state
+ *
+ * Each IOCTL or passthru command that is issued to the FW is wrapped in a
+ * local data structure called mrsas_mfi_cmd. The frame embedded in this
+ * mrsas_mfi_cmd is issued to the FW. The array is used only to look up the
+ * mrsas_mfi_cmd given the context. The free commands are maintained in a
+ * linked list.
+ */
+int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
+{
+ int i, j;
+ u_int32_t max_cmd;
+ struct mrsas_mfi_cmd *cmd;
+
+ max_cmd = MRSAS_MAX_MFI_CMDS;
+
+ /*
+ * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers. Allocate the
+ * dynamic array first and then allocate individual commands.
+ */
+	sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd *) * max_cmd,
+	    M_MRSAS, M_NOWAIT | M_ZERO);
+	if (!sc->mfi_cmd_list) {
+		device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
+		return(ENOMEM);
+	}
+ for (i = 0; i < max_cmd; i++) {
+ sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
+ M_MRSAS, M_NOWAIT);
+ if (!sc->mfi_cmd_list[i]) {
+ for (j = 0; j < i; j++)
+ free(sc->mfi_cmd_list[j],M_MRSAS);
+ free(sc->mfi_cmd_list, M_MRSAS);
+ sc->mfi_cmd_list = NULL;
+ return(ENOMEM);
+ }
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ cmd = sc->mfi_cmd_list[i];
+ memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
+ cmd->index = i;
+ cmd->ccb_ptr = NULL;
+ cmd->sc = sc;
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
+ }
+
+ /* create a frame pool and assign one frame to each command */
+ if (mrsas_create_frame_pool(sc)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { // Free the frames
+ cmd = sc->mfi_cmd_list[i];
+ mrsas_free_frame(sc, cmd);
+ }
+ if (sc->mficmd_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->mficmd_frame_tag);
+ return(ENOMEM);
+ }
+
+ return(0);
+}
+
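+/*
+ * Illustrative sketch, not part of the driver: because each frame's
+ * context field is set to cmd->index (see mrsas_passthru() above), a
+ * completion handler can recover the command with a plain array lookup:
+ *
+ *	cmd = sc->mfi_cmd_list[context];
+ */
+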
+/**
+ * mrsas_create_frame_pool: Creates DMA pool for cmd frames
+ * input: Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling the MFI frame and the SG list that immediately follows the frame.
+ * This function creates those DMA memory buffers for each command packet
+ * using the bus DMA facility. pad_0 is initialized to 0 to avoid corrupting
+ * the value of context, which could cause a FW crash.
+ */
+static int mrsas_create_frame_pool(struct mrsas_softc *sc)
+{
+ int i;
+ struct mrsas_mfi_cmd *cmd;
+
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ MRSAS_MFI_FRAME_SIZE, // maxsize
+ 1, // msegments
+ MRSAS_MFI_FRAME_SIZE, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->mficmd_frame_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create MFI frame tag\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ cmd = sc->mfi_cmd_list[i];
+ cmd->frame = mrsas_alloc_frame(sc, cmd);
+ if (cmd->frame == NULL) {
+ device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
+ return (ENOMEM);
+ }
+ memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
+ cmd->frame->io.context = cmd->index;
+ cmd->frame->io.pad_0 = 0;
+ }
+
+ return(0);
+}
+
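+/*
+ * Rough layout of each per-command frame allocation, for illustration
+ * only (the actual sizes are defined in mrsas.h):
+ *
+ *	cmd->frame --->	+------------------------------+
+ *			| MFI frame (header + payload) |
+ *			+------------------------------+
+ *			| SG list, immediately         |
+ *			| following the frame          |
+ *			+------------------------------+ <- MRSAS_MFI_FRAME_SIZE
+ */
+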
+/**
+ * mrsas_alloc_frame: Allocates an MFI frame
+ * input: Adapter soft state
+ *        mfi cmd packet
+ *
+ * Allocates DMA memory and a dmamap from the MFI frame tag created by
+ * mrsas_create_frame_pool() and loads it. Returns a virtual memory
+ * pointer to the allocated region, or NULL on failure.
+ */
+void *mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;
+
+ if (bus_dmamem_alloc(sc->mficmd_frame_tag, (void **)&cmd->frame_mem,
+ BUS_DMA_NOWAIT, &cmd->frame_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
+ return (NULL);
+ }
+ if (bus_dmamap_load(sc->mficmd_frame_tag, cmd->frame_dmamap,
+ cmd->frame_mem, frame_size, mrsas_alloc_cb,
+ &cmd->frame_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
+ return (NULL);
+ }
+
+ return(cmd->frame_mem);
+}
+
+/*
+ * mrsas_alloc_cb: Callback function of bus_dmamap_load()
+ * input: callback argument,
+ * machine dependent type that describes DMA segments,
+ * number of segments,
+ * error code.
+ *
+ * This callback receives the mapping created by bus_dmamap_load() and
+ * stores the physical address of the first segment in the caller-supplied
+ * location; the segment count and error code are not examined.
+ */
+static void mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
+ int nsegs, int error)
+{
+ bus_addr_t *addr;
+
+ addr = arg;
+ *addr = segs[0].ds_addr;
+}
+
+/**
+ * mrsas_free_frame: Frees memory for an MFI frame
+ * input: Adapter soft state
+ *        mfi cmd packet
+ *
+ * Deallocates the MFI frame memory. Called from mrsas_free_mem() during
+ * detach and from the error path of mrsas_create_frame_pool().
+ */
+void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+ if (cmd->frame_phys_addr)
+ bus_dmamap_unload(sc->mficmd_frame_tag, cmd->frame_dmamap);
+ if (cmd->frame_mem != NULL)
+ bus_dmamem_free(sc->mficmd_frame_tag, cmd->frame_mem, cmd->frame_dmamap);
+}
diff --git a/sys/dev/mrsas/mrsas_ioctl.h b/sys/dev/mrsas/mrsas_ioctl.h
new file mode 100644
index 0000000..3604842
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_ioctl.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef MRSAS_IOCTL_H
+#define MRSAS_IOCTL_H
+
+#ifndef _IOWR
+#include <sys/ioccom.h>
+#endif /* !_IOWR */
+
+/*
+ * We need to use the same values as the mfi driver until MegaCli adds
+ * support for this (mrsas) driver:
+ * M is for MegaRAID. (This is typically the vendor or product initial.)
+ * 1 is arbitrary. (This may be used to segment kinds of commands;
+ * e.g. 1-9 status, 10-20 policy, etc.)
+ * struct mrsas_iocpacket (the sizeof() of this parameter is encoded.)
+ * These three values are encoded into a somewhat unique, 32-bit value.
+ */
+
+#define MRSAS_IOC_FIRMWARE_PASS_THROUGH _IOWR('M', 1, struct mrsas_iocpacket)
+
+#define MRSAS_IOC_SCAN_BUS _IO('M', 10)
+
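+/*
+ * For illustration, a sketch of how <sys/ioccom.h> encodes the value
+ * above: _IOWR() packs the in/out direction, the parameter size, the
+ * group character and the command number into one 32-bit word, roughly
+ *
+ *	IOC_INOUT | ((sizeof(struct mrsas_iocpacket) & IOCPARM_MASK) << 16) |
+ *	    ('M' << 8) | 1
+ */
+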
+#define MAX_IOCTL_SGE 16
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_CMD_LD_SCSI_IO 0x03
+
+#define INQUIRY_CMD 0x12
+#define INQUIRY_CMDLEN 6
+#define INQUIRY_REPLY_LEN 96
+#define INQUIRY_VENDOR 8 /* Offset in reply data to vendor name */
+#define SCSI_SENSE_BUFFERSIZE 96
+
+#define MEGAMFI_RAW_FRAME_SIZE 128
+
+#pragma pack(1)
+struct mrsas_iocpacket {
+	u_int16_t host_no;	/* adapter unit number */
+	u_int16_t __pad1;
+	u_int32_t sgl_off;	/* offset of the SGL within the MFI frame */
+	u_int32_t sge_count;	/* number of valid entries in sgl[] */
+	u_int32_t sense_off;	/* offset of the sense address in the frame */
+	u_int32_t sense_len;	/* size of the user sense buffer */
+ union {
+ u_int8_t raw[MEGAMFI_RAW_FRAME_SIZE];
+ struct mrsas_header hdr;
+ } frame;
+ struct iovec sgl[MAX_IOCTL_SGE];
+};
+#pragma pack()
+
+#endif /* MRSAS_IOCTL_H */
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index bce70b5..a644208 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -220,6 +220,7 @@ SUBDIR= \
mps \
mpt \
mqueue \
+ mrsas \
msdosfs \
msdosfs_iconv \
${_mse} \
diff --git a/sys/modules/mrsas/Makefile b/sys/modules/mrsas/Makefile
new file mode 100644
index 0000000..7ff6c81
--- /dev/null
+++ b/sys/modules/mrsas/Makefile
@@ -0,0 +1,20 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/../../dev/mrsas
+
+KMOD= mrsas
+SRCS= mrsas.c mrsas_cam.c mrsas_ioctl.c mrsas_fp.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_cam.h opt_scsi.h
+
+#CFLAGS+= -DMRSAS_DEBUG
+#CFLAGS+= -fgnu89-inline
+.include <bsd.kmod.mk>
+
+clean_cscope:
+ rm -f cscope*
+
+cscope: clean_cscope
+ /usr/local/bin/cscope -b *.[ch]
+
+cleanall: clean clean_cscope
+ rm -f '@' machine
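+
+# Illustrative usage from this directory (an assumption, not part of the build):
+#	make			# builds mrsas.ko
+#	kldload ./mrsas.ko	# load the freshly built module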