summaryrefslogtreecommitdiffstats
path: root/sys/dev/mly
diff options
context:
space:
mode:
authormsmith <msmith@FreeBSD.org>2000-08-23 03:22:41 +0000
committermsmith <msmith@FreeBSD.org>2000-08-23 03:22:41 +0000
commit19a7cf4f83591586186dbc002cbfc9f1076b9dec (patch)
tree2d6ff12b8e748962ab52a60dfcdec1c102173d1f /sys/dev/mly
parent13504cf93eec576a6d9bd497673694455a501c7e (diff)
downloadFreeBSD-src-19a7cf4f83591586186dbc002cbfc9f1076b9dec.zip
FreeBSD-src-19a7cf4f83591586186dbc002cbfc9f1076b9dec.tar.gz
This is the initial import of a new driver for the current family of
PCI:SCSI RAID controllers from Mylex.
Diffstat (limited to 'sys/dev/mly')
-rw-r--r--sys/dev/mly/mly.c1711
-rw-r--r--sys/dev/mly/mly_cam.c513
-rw-r--r--sys/dev/mly/mly_pci.c590
-rw-r--r--sys/dev/mly/mly_tables.h335
-rw-r--r--sys/dev/mly/mlyreg.h1270
-rw-r--r--sys/dev/mly/mlyvar.h423
6 files changed, 4842 insertions, 0 deletions
diff --git a/sys/dev/mly/mly.c b/sys/dev/mly/mly.c
new file mode 100644
index 0000000..02c68af
--- /dev/null
+++ b/sys/dev/mly/mly.c
@@ -0,0 +1,1711 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ctype.h>
+
+#include <machine/bus_memio.h>
+#include <machine/bus.h>
+#include <machine/clock.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <cam/scsi/scsi_all.h>
+
+#include <dev/mly/mlyreg.h>
+#include <dev/mly/mlyvar.h>
+#define MLY_DEFINE_TABLES
+#include <dev/mly/mly_tables.h>
+
+static int mly_get_controllerinfo(struct mly_softc *sc);
+static void mly_scan_devices(struct mly_softc *sc);
+static void mly_rescan_btl(struct mly_softc *sc, int bus, int target);
+static void mly_complete_rescan(struct mly_command *mc);
+static int mly_get_eventstatus(struct mly_softc *sc);
+static int mly_enable_mmbox(struct mly_softc *sc);
+static int mly_flush(struct mly_softc *sc);
+static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
+ size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
+static void mly_fetch_event(struct mly_softc *sc);
+static void mly_complete_event(struct mly_command *mc);
+static void mly_process_event(struct mly_softc *sc, struct mly_event *me);
+static void mly_periodic(void *data);
+
+static int mly_immediate_command(struct mly_command *mc);
+static int mly_start(struct mly_command *mc);
+static void mly_complete(void *context, int pending);
+
+static int mly_get_slot(struct mly_command *mc);
+static void mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
+static void mly_alloc_command_cluster(struct mly_softc *sc);
+static void mly_map_command(struct mly_command *mc);
+static void mly_unmap_command(struct mly_command *mc);
+
+static int mly_fwhandshake(struct mly_softc *sc);
+
+static void mly_describe_controller(struct mly_softc *sc);
+#ifdef MLY_DEBUG
+static void mly_printstate(struct mly_softc *sc);
+static void mly_print_command(struct mly_command *mc);
+static void mly_print_packet(struct mly_command *mc);
+static void mly_panic(struct mly_softc *sc, char *reason);
+#endif
+
+/********************************************************************************
+ ********************************************************************************
+ Device Interface
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Initialise the controller and softc
+ */
+int
+mly_attach(struct mly_softc *sc)
+{
+    int error;
+
+    debug_called(1);
+
+    /*
+     * Initialise per-controller queues.
+     */
+    TAILQ_INIT(&sc->mly_freecmds);
+    TAILQ_INIT(&sc->mly_ready);
+    TAILQ_INIT(&sc->mly_completed);
+    TAILQ_INIT(&sc->mly_clusters);
+
+#if __FreeBSD_version >= 500005
+    /*
+     * Initialise command-completion task.
+     */
+    TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
+#endif
+
+    /* disable interrupts before we start talking to the controller */
+    MLY_MASK_INTERRUPTS(sc);
+
+    /*
+     * Wait for the controller to come ready, handshake with the firmware if required.
+     * This is typically only necessary on platforms where the controller BIOS does not
+     * run.
+     */
+    if ((error = mly_fwhandshake(sc)))
+	return(error);
+
+    /*
+     * Initialise the slot allocator so that we can issue commands.
+     * Start with the compile-time maximum; this is trimmed below once the
+     * controller tells us how many parallel commands it really supports.
+     */
+    sc->mly_max_commands = MLY_SLOT_MAX;
+    sc->mly_last_slot = MLY_SLOT_START;
+
+    /*
+     * Obtain controller feature information
+     */
+    if ((error = mly_get_controllerinfo(sc)))
+	return(error);
+
+    /*
+     * Update the slot allocator limit based on the controller inquiry.
+     */
+    sc->mly_max_commands = imin(sc->mly_controllerinfo->maximum_parallel_commands, MLY_SLOT_MAX);
+
+    /*
+     * Get the current event counter for health purposes, populate the initial
+     * health status buffer.
+     */
+    if ((error = mly_get_eventstatus(sc)))
+	return(error);
+
+    /*
+     * Enable memory-mailbox mode
+     */
+    if ((error = mly_enable_mmbox(sc)))
+	return(error);
+
+    /*
+     * Attach to CAM.
+     */
+    if ((error = mly_cam_attach(sc)))
+	return(error);
+
+    /*
+     * Print a little information about the controller
+     */
+    mly_describe_controller(sc);
+
+    /*
+     * Mark all attached devices for rescan
+     */
+    mly_scan_devices(sc);
+
+    /*
+     * Instigate the first status poll immediately.  Rescan completions won't
+     * happen until interrupts are enabled, which should still be before
+     * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
+     * discovery here...)
+     */
+    mly_periodic((void *)sc);
+
+    /* enable interrupts now - completions are delivered from here on */
+    MLY_UNMASK_INTERRUPTS(sc);
+
+    return(0);
+}
+
+/********************************************************************************
+ * Bring the controller to a state where it can be safely left alone.
+ */
+void
+mly_detach(struct mly_softc *sc)
+{
+
+    debug_called(1);
+
+    /* kill the periodic event */
+    untimeout(mly_periodic, sc, sc->mly_periodic);
+
+    /* prevent any further commands from being queued behind our back */
+    sc->mly_state |= MLY_STATE_SUSPEND;
+
+    /* flush controller cache out to the physical devices before we go away */
+    mly_printf(sc, "flushing cache...");
+    printf("%s\n", mly_flush(sc) ? "failed" : "done");
+
+    /* quiesce the hardware last, after the flush command has completed */
+    MLY_MASK_INTERRUPTS(sc);
+}
+
+/********************************************************************************
+ ********************************************************************************
+ Command Wrappers
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
+ */
+static int
+mly_get_controllerinfo(struct mly_softc *sc)
+{
+    struct mly_command_ioctl mci;
+    u_int8_t status;
+    int error;
+
+    debug_called(1);
+
+    /*
+     * Discard any stale controller-info buffer, then fetch a fresh one via
+     * the GETCONTROLLERINFO ioctl; mly_ioctl allocates the result buffer
+     * for us since we pass a pointer to a NULL pointer.
+     */
+    if (sc->mly_controllerinfo != NULL)
+	free(sc->mly_controllerinfo, M_DEVBUF);
+    sc->mly_controllerinfo = NULL;
+    bzero(&mci, sizeof(mci));
+    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
+    error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo,
+		      sizeof(*sc->mly_controllerinfo), &status, NULL, NULL);
+    if (error)
+	return(error);
+    if (status != 0)
+	return(EIO);
+
+    /*
+     * Likewise for the controller parameter structure.
+     */
+    if (sc->mly_controllerparam != NULL)
+	free(sc->mly_controllerparam, M_DEVBUF);
+    sc->mly_controllerparam = NULL;
+    bzero(&mci, sizeof(mci));
+    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
+    error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam,
+		      sizeof(*sc->mly_controllerparam), &status, NULL, NULL);
+    if (error)
+	return(error);
+    return((status != 0) ? EIO : 0);
+}
+
+/********************************************************************************
+ * Schedule all possible devices for a rescan.
+ *
+ */
+static void
+mly_scan_devices(struct mly_softc *sc)
+{
+    int nchannels, b, t;
+
+    debug_called(1);
+
+    /* forget everything we previously knew about the bus/target map */
+    bzero(&sc->mly_btl, sizeof(sc->mly_btl));
+
+    /*
+     * Flag every addressable device on every channel (physical and
+     * virtual alike) as needing a rescan; the early periodic scan will
+     * pick these up and issue the actual rescan commands.
+     */
+    nchannels = sc->mly_controllerinfo->physical_channels_present +
+		sc->mly_controllerinfo->virtual_channels_present;
+    for (b = 0; b < nchannels; b++) {
+	for (t = 0; t < MLY_MAX_TARGETS; t++)
+	    sc->mly_btl[b][t].mb_flags = MLY_BTL_RESCAN;
+    }
+}
+
+/********************************************************************************
+ * Rescan a device, possibly as a consequence of getting an event which suggests
+ * that it may have changed.
+ */
+static void
+mly_rescan_btl(struct mly_softc *sc, int bus, int target)
+{
+    struct mly_command *mc;
+    struct mly_command_ioctl *mci;
+
+    debug_called(2);
+
+    /* get a command; on failure, leave the RESCAN flag set so we retry */
+    mc = NULL;
+    if (mly_alloc_command(sc, &mc))
+	return;		/* we'll be retried soon */
+
+    /* set up the data buffer; large enough for either devinfo flavour */
+    if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT)) == NULL) {
+	mly_release_command(mc);
+	return;		/* we'll get retried the next time a command completes */
+    }
+    bzero(mc->mc_data, sizeof(union mly_devinfo));
+    mc->mc_flags |= MLY_CMD_DATAIN;
+    mc->mc_complete = mly_complete_rescan;
+
+    /* the rescan is now committed; clear the request flag */
+    sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;
+
+    /*
+     * Build the ioctl.
+     *
+     * At this point we are committed to sending this request, as it
+     * will be the only one constructed for this particular update.
+     *
+     * Buses past the physical channels are logical; the logical device
+     * number is derived from (bus, target), while physical devices are
+     * addressed directly by channel/target.
+     */
+    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
+    mci->opcode = MDACMD_IOCTL;
+    mci->addr.phys.controller = 0;
+    mci->timeout.value = 30;
+    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
+    if (bus >= sc->mly_controllerinfo->physical_channels_present) {
+	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
+	mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
+	mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
+	    + target;
+	debug(2, "logical device %d", mci->addr.log.logdev);
+    } else {
+	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
+	mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
+	mci->addr.phys.lun = 0;
+	mci->addr.phys.target = target;
+	mci->addr.phys.channel = bus;
+	debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
+    }
+
+    /*
+     * Use the ready queue to get this command dispatched.
+     */
+    mly_enqueue_ready(mc);
+    mly_startio(sc);
+}
+
+/********************************************************************************
+ * Handle the completion of a rescan operation
+ */
+static void
+mly_complete_rescan(struct mly_command *mc)
+{
+    struct mly_softc *sc = mc->mc_sc;
+    struct mly_ioctl_getlogdevinfovalid *ldi;
+    struct mly_ioctl_getphysdevinfovalid *pdi;
+    int bus, target;
+
+    debug_called(2);
+
+    /* iff the command completed OK, we should use the result to update our data */
+    if (mc->mc_status == 0) {
+	/* the transfer length set in mly_rescan_btl tells us which query this was */
+	if (mc->mc_length == sizeof(*ldi)) {
+	    /* logical device: record RAID level and state */
+	    ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
+	    bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
+	    target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
+	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL;	/* clears all other flags */
+	    sc->mly_btl[bus][target].mb_type = ldi->raid_level;
+	    sc->mly_btl[bus][target].mb_state = ldi->state;
+	    debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
+		  mly_describe_code(mly_table_device_type, ldi->raid_level),
+		  mly_describe_code(mly_table_device_state, ldi->state));
+	} else if (mc->mc_length == sizeof(*pdi)) {
+	    /*
+	     * Physical device.  NOTE(review): bus/target come straight from
+	     * the controller response and index mly_btl without range checks;
+	     * presumably the controller never returns out-of-range values -
+	     * verify against the mly_btl array dimensions.
+	     */
+	    pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
+	    bus = pdi->channel;
+	    target = pdi->target;
+	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL;	/* clears all other flags */
+	    sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
+	    sc->mly_btl[bus][target].mb_state = pdi->state;
+	    if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
+		sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
+	    debug(2, "BTL rescan for %d:%d returns %s", bus, target,
+		  mly_describe_code(mly_table_device_state, pdi->state));
+	} else {
+	    mly_printf(sc, "BTL rescan result corrupted\n");
+	}
+    } else {
+	/*
+	 * A request sent for a device beyond the last device present will fail.
+	 * We don't care about this, so we do nothing about it.
+	 */
+    }
+    /* the data buffer was allocated in mly_rescan_btl; release everything */
+    free(mc->mc_data, M_DEVBUF);
+    mly_release_command(mc);
+}
+
+/********************************************************************************
+ * Get the current health status and set the 'next event' counter to suit.
+ */
+static int
+mly_get_eventstatus(struct mly_softc *sc)
+{
+    struct mly_command_ioctl mci;
+    struct mly_health_status *mh;
+    u_int8_t status;
+    int error;
+
+    /*
+     * Issue a GETHEALTHSTATUS ioctl.  mly_ioctl allocates the result
+     * buffer (mh starts out NULL); we are responsible for freeing it.
+     */
+    mh = NULL;
+    bzero(&mci, sizeof(mci));
+    mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
+    error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL);
+    if (error)
+	return(error);
+    if (status != 0)
+	return(EIO);
+
+    /* prime the event counters from the controller's current state */
+    sc->mly_event_change = mh->change_counter;
+    sc->mly_event_waiting = mh->next_event;
+    sc->mly_event_counter = mh->next_event;
+
+    /* seed the memory-mailbox copy of the health status */
+    bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));
+
+    debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);
+
+    free(mh, M_DEVBUF);
+    return(0);
+}
+
+/********************************************************************************
+ * Enable the memory mailbox mode.
+ */
+static int
+mly_enable_mmbox(struct mly_softc *sc)
+{
+    struct mly_command_ioctl mci;
+    u_int8_t *sp, status;
+    int error;
+
+    debug_called(1);
+
+    /* build the ioctl and send it */
+    bzero(&mci, sizeof(mci));
+    mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
+    /* set buffer addresses: regions are offsets within the single mmbox allocation */
+    mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_command);
+    mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_status);
+    mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_health);
+
+    /*
+     * Set buffer sizes - abuse of data_size field is revolting.
+     * Sizes are passed to the controller in units of 1k.
+     */
+    sp = (u_int8_t *)&mci.data_size;
+    sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
+    sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
+    mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;
+
+    /* fixed: format string previously had an unbalanced '(' */
+    debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d)", sc->mly_mmbox,
+	  mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
+	  mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
+	  mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size);
+
+    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
+	return(error);
+    if (status != 0)
+	return(EIO);
+    sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
+    debug(1, "memory mailbox active");
+    return(0);
+}
+
+/********************************************************************************
+ * Flush all pending I/O from the controller.
+ */
+static int
+mly_flush(struct mly_softc *sc)
+{
+    struct mly_command_ioctl mci;
+    u_int8_t status;
+    int error;
+
+    debug_called(1);
+
+    /*
+     * A FLUSHDEVICEDATA ioctl aimed at the controller itself pushes all
+     * dirty cache data out to the physical devices.
+     */
+    bzero(&mci, sizeof(mci));
+    mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
+    mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;
+
+    /* hand it to the controller and wait for completion */
+    error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL);
+    if (error != 0)
+	return(error);
+    return((status != 0) ? EIO : 0);
+}
+
+/********************************************************************************
+ * Perform an ioctl command.
+ *
+ * If (data) is not NULL, the command requires data transfer. If (*data) is NULL
+ * the command requires data transfer from the controller, and we will allocate
+ * a buffer for it. If (*data) is not NULL, the command requires data transfer
+ * to the controller.
+ *
+ * XXX passing in the whole ioctl structure is ugly. Better ideas?
+ *
+ * XXX we don't even try to handle the case where datasize > 4k. We should.
+ */
+static int
+mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
+	  u_int8_t *status, void *sense_buffer, size_t *sense_length)
+{
+    struct mly_command *mc;
+    struct mly_command_ioctl *mci;
+    int error;
+
+    debug_called(1);
+
+    mc = NULL;
+    if (mly_alloc_command(sc, &mc)) {
+	error = ENOMEM;
+	goto out;
+    }
+
+    /* copy the ioctl structure, but save some important fields and then fixup */
+    mci = &mc->mc_packet->ioctl;
+    ioctl->sense_buffer_address = mci->sense_buffer_address;
+    ioctl->maximum_sense_size = mci->maximum_sense_size;
+    *mci = *ioctl;
+    mci->opcode = MDACMD_IOCTL;
+    mci->timeout.value = 30;
+    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
+
+    /* handle the data buffer */
+    if (data != NULL) {
+	if (*data == NULL) {
+	    /* allocate a buffer for data transfer from the controller */
+	    if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
+		error = ENOMEM;
+		goto out;
+	    }
+	    mc->mc_flags |= MLY_CMD_DATAIN;
+	} else {
+	    /* caller-supplied buffer is data for the controller */
+	    mc->mc_data = *data;
+	    mc->mc_flags |= MLY_CMD_DATAOUT;
+	}
+	mc->mc_length = datasize;
+	mc->mc_packet->generic.data_size = datasize;
+    }
+
+    /* run the command */
+    if ((error = mly_immediate_command(mc)))
+	goto out;
+
+    /* clean up and return any data */
+    *status = mc->mc_status;
+    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
+	bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
+	*sense_length = mc->mc_sense;
+	goto out;
+    }
+
+    /* should we return a data pointer? */
+    if ((data != NULL) && (*data == NULL))
+	*data = mc->mc_data;
+
+    /* command completed OK */
+    error = 0;
+
+out:
+    if (mc != NULL) {
+	/*
+	 * On failure, free a DATAIN buffer that we allocated above.
+	 * Check (data != NULL) before dereferencing it: commands with no
+	 * data phase pass data == NULL, and the old unguarded *data test
+	 * would fault on their error paths.
+	 */
+	if (error && (data != NULL) && (mc->mc_data != NULL) && (*data == NULL))
+	    free(mc->mc_data, M_DEVBUF);
+	mly_release_command(mc);
+    }
+    return(error);
+}
+
+/********************************************************************************
+ * Fetch one event from the controller.
+ */
+static void
+mly_fetch_event(struct mly_softc *sc)
+{
+    struct mly_command *mc;
+    struct mly_command_ioctl *mci;
+    int s;
+    u_int32_t event;
+
+    debug_called(2);
+
+    /* get a command */
+    mc = NULL;
+    if (mly_alloc_command(sc, &mc))
+	return;		/* we'll get retried the next time a command completes */
+
+    /* set up the data buffer */
+    if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT)) == NULL) {
+	mly_release_command(mc);
+	return;		/* we'll get retried the next time a command completes */
+    }
+    bzero(mc->mc_data, sizeof(struct mly_event));
+    mc->mc_length = sizeof(struct mly_event);
+    mc->mc_flags |= MLY_CMD_DATAIN;
+    mc->mc_complete = mly_complete_event;	/* completion handler owns the buffer */
+
+    /*
+     * Get an event number to fetch.  It's possible that we've raced with another
+     * context for the last event, in which case there will be no more events.
+     */
+    s = splcam();
+    if (sc->mly_event_counter == sc->mly_event_waiting) {
+	mly_release_command(mc);
+	splx(s);
+	return;
+    }
+    event = sc->mly_event_counter++;
+    splx(s);
+
+    /*
+     * Build the ioctl.
+     *
+     * At this point we are committed to sending this request, as it
+     * will be the only one constructed for this particular event number.
+     *
+     * The 32-bit event sequence number is smeared across the command:
+     * bits 16-23 in the LUN field, bits 24-31 in the target field, and
+     * the low 16 bits in the getevent parameter block.
+     */
+    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
+    mci->opcode = MDACMD_IOCTL;
+    mci->data_size = sizeof(struct mly_event);
+    mci->addr.phys.lun = (event >> 16) & 0xff;
+    mci->addr.phys.target = (event >> 24) & 0xff;
+    mci->addr.phys.channel = 0;
+    mci->addr.phys.controller = 0;
+    mci->timeout.value = 30;
+    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
+    mci->sub_ioctl = MDACIOCTL_GETEVENT;
+    mci->param.getevent.sequence_number_low = event & 0xffff;
+
+    debug(2, "fetch event %u", event);
+
+    /*
+     * Use the ready queue to get this command dispatched.
+     */
+    mly_enqueue_ready(mc);
+    mly_startio(sc);
+}
+
+/********************************************************************************
+ * Handle the completion of an event poll.
+ *
+ * Note that we don't actually have to instigate another poll; the completion of
+ * this command will trigger that if there are any more events to poll for.
+ */
+static void
+mly_complete_event(struct mly_command *mc)
+{
+    struct mly_softc *sc = mc->mc_sc;
+    struct mly_event *me = (struct mly_event *)mc->mc_data;
+
+    debug_called(2);
+
+    /*
+     * If the event was successfully fetched, process it.
+     */
+    if (mc->mc_status == SCSI_STATUS_OK)
+	mly_process_event(sc, me);
+
+    /*
+     * The buffer was allocated in mly_fetch_event and is ours in all
+     * cases; free it unconditionally (previously it was leaked whenever
+     * the command completed with bad status).
+     *
+     * NOTE(review): on failure the event number consumed by
+     * mly_fetch_event is never re-fetched - confirm whether a retry is
+     * desirable here.
+     */
+    free(me, M_DEVBUF);
+    mly_release_command(mc);
+}
+
+/********************************************************************************
+ * Process a controller event.
+ */
+static void
+mly_process_event(struct mly_softc *sc, struct mly_event *me)
+{
+    struct scsi_sense_data *ssd = (struct scsi_sense_data *)&me->sense[0];
+    char *fp, *tp;
+    int bus, target, event, class, action;
+
+    /*
+     * Errors can be reported using vendor-unique sense data.  In this case, the
+     * event code will be 0x1c (Request sense data present), the sense key will
+     * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
+     * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
+     * and low seven bits of the ASC (low seven bits of the high byte).
+     */
+    if ((me->code == 0x1c) &&
+	((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
+	(ssd->add_sense_code & 0x80)) {
+	event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
+    } else {
+	event = me->code;
+    }
+
+    /*
+     * Look up event, get codes.  The table entry is laid out as
+     * "<class><action><text>"; mly_describe_code is assumed to return a
+     * valid entry for unknown codes (the '?' class below).
+     */
+    fp = mly_describe_code(mly_table_event, event);
+
+    debug(2, "Event %d code 0x%x", me->sequence_number, me->code);
+
+    /* quiet event?  (uppercase class = only reported when bootverbose) */
+    class = fp[0];
+    if (isupper(class) && bootverbose)
+	class = tolower(class);
+
+    /* get action code, text string */
+    action = fp[1];
+    tp = &fp[2];
+
+    /*
+     * Print some information about the event.
+     *
+     * This code uses a table derived from the corresponding portion of the Linux
+     * driver, and thus the parser is very similar.
+     */
+    switch(class) {
+    case 'p':		/* error on physical device */
+	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
+	if (action == 'r')
+	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
+	break;
+    case 'l':		/* error on logical unit */
+    case 'm':		/* message about logical unit */
+	bus = MLY_LOGDEV_BUS(sc, me->lun);
+	target = MLY_LOGDEV_TARGET(me->lun);
+	mly_name_device(sc, bus, target);
+	mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
+	if (action == 'r')
+	    sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
+	break;		/* duplicate dead 'break' removed here */
+    case 's':		/* report of sense data */
+	if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
+	    (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
+	     (ssd->add_sense_code == 0x04) &&
+	     ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
+	    break;	/* ignore NO_SENSE or NOT_READY in one case */
+
+	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
+	mly_printf(sc, " sense key %d asc %02x ascq %02x\n",
+		   ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
+	mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
+	if (action == 'r')
+	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
+	break;
+    case 'e':
+	/* table string is itself a format taking (target, lun); table-controlled, not external input */
+	mly_printf(sc, tp, me->target, me->lun);
+	break;
+    case 'c':
+	mly_printf(sc, "controller %s\n", tp);
+	break;
+    case '?':
+	mly_printf(sc, "%s - %d\n", tp, me->code);
+	break;
+    default:	/* probably a 'noisy' event being ignored */
+	break;
+    }
+}
+
+/********************************************************************************
+ * Perform periodic activities.
+ */
+static void
+mly_periodic(void *data)
+{
+    struct mly_softc *sc = (struct mly_softc *)data;
+    int nchannels, b, t;
+
+    debug_called(2);
+
+    /*
+     * Walk every bus/target pair and kick off a rescan for any device
+     * that has been flagged as needing one, skipping our own initiator ID.
+     */
+    nchannels = sc->mly_controllerinfo->physical_channels_present +
+		sc->mly_controllerinfo->virtual_channels_present;
+    for (b = 0; b < nchannels; b++) {
+	for (t = 0; t < MLY_MAX_TARGETS; t++) {
+	    /* ignore the controller in this scan */
+	    if (t == sc->mly_controllerparam->initiator_id)
+		continue;
+	    if (sc->mly_btl[b][t].mb_flags & MLY_BTL_RESCAN)
+		mly_rescan_btl(sc, b, t);
+	}
+    }
+
+    /* reschedule ourselves for one second from now */
+    sc->mly_periodic = timeout(mly_periodic, sc, hz);
+}
+
+/********************************************************************************
+ ********************************************************************************
+ Command Processing
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Run a command and wait for it to complete.
+ *
+ */
+static int
+mly_immediate_command(struct mly_command *mc)
+{
+    struct mly_softc *sc = mc->mc_sc;
+    int error, s;
+
+    debug_called(2);
+
+    /* spinning at splcam is ugly, but we're only used during controller init */
+    s = splcam();
+    if ((error = mly_start(mc))) {
+	splx(s);		/* don't leave the IPL raised on failure */
+	return(error);
+    }
+
+    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
+	/* sleep on the command; woken by mly_complete */
+	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE) {
+	    tsleep(mc, PRIBIO, "mlywait", 0);
+	}
+    } else {
+	/* interrupts not yet enabled: spin, polling for status ourselves */
+	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE)
+	    mly_done(mc->mc_sc);
+    }
+    splx(s);
+    return(0);
+}
+
+/********************************************************************************
+ * Start as much queued I/O as possible on the controller
+ */
+void
+mly_startio(struct mly_softc *sc)
+{
+    struct mly_command *mc;
+
+    debug_called(2);
+
+    /*
+     * Keep feeding commands to the controller until we run out of work
+     * or the controller refuses to accept more.
+     */
+    for (;;) {
+	/* prefer a command that is already on the ready queue */
+	if ((mc = mly_dequeue_ready(sc)) == NULL) {
+	    /* none there; try to build one from a queued CCB */
+	    mly_cam_command(sc, &mc);
+	    if (mc == NULL)
+		break;		/* no work at all */
+	}
+
+	/* hand it to the controller; if refused, defer it for later */
+	if (mly_start(mc)) {
+	    mly_requeue_ready(mc);
+	    break;
+	}
+    }
+}
+
+/********************************************************************************
+ * Deliver a command to the controller; allocate controller resources at the
+ * last moment.
+ */
+static int
+mly_start(struct mly_command *mc)
+{
+    struct mly_softc *sc = mc->mc_sc;
+    union mly_command_packet *pkt;
+    int s;
+
+    debug_called(2);
+
+    /*
+     * Set the command up for delivery to the controller.  This may fail
+     * due to resource shortages.
+     */
+    if (mly_get_slot(mc))
+	return(EBUSY);		/* no free command slots */
+    mly_map_command(mc);
+
+    s = splcam();
+    /*
+     * Do we have to use the hardware mailbox?
+     */
+    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
+	/*
+	 * Check to see if the controller is ready for us.
+	 */
+	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
+	    splx(s);
+	    return(EBUSY);
+	}
+
+	/*
+	 * It's ready, send the command.
+	 */
+	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
+	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);
+
+    } else {	/* use memory-mailbox mode */
+
+	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];
+
+	/* check to see if the next slot is free yet (nonzero flag == still owned by controller) */
+	if (pkt->mmbox.flag != 0) {
+	    splx(s);
+	    return(EBUSY);
+	}
+
+	/* copy in new command */
+	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
+	/* barrier to ensure completion of previous write before we write the flag */
+	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle? */
+	/* copy flag last; the nonzero flag marks the slot valid for the controller */
+	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
+	/* barrier to ensure completion of previous write before we notify the controller */
+	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle */
+
+	/* signal controller, update index */
+	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
+	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
+    }
+
+    splx(s);
+    return(0);
+}
+
+/********************************************************************************
+ * Pick up command status from the controller, schedule a completion event
+ */
+void
+mly_done(struct mly_softc *sc)
+{
+    struct mly_command *mc;
+    union mly_status_packet *sp;
+    u_int16_t slot;
+    int s, worked;
+
+    s = splcam();
+    worked = 0;		/* set if we retire at least one command */
+
+    /* pick up hardware-mailbox commands */
+    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
+	/* the slot number identifies which outstanding command this status is for */
+	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
+	if (slot < MLY_SLOT_MAX) {
+	    mc = sc->mly_busycmds[slot];
+	    if (mc != NULL) {
+		/* status/sense/residue live at fixed offsets after the slot number */
+		mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
+		mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
+		mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
+		mly_enqueue_completed(mc);
+		sc->mly_busycmds[slot] = NULL;
+		worked = 1;
+	    } else {
+		mly_printf(sc, "got HM completion for nonbusy slot %u\n", slot);
+	    }
+	} else {
+	    /* slot 0xffff may mean "extremely bogus command" */
+	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
+	}
+	/* unconditionally acknowledge status */
+	MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
+	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
+    }
+
+    /* pick up memory-mailbox commands */
+    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
+	for (;;) {
+	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];
+
+	    /* check for more status; a zero flag means the slot is empty */
+	    if (sp->mmbox.flag == 0)
+		break;
+
+	    /* get slot number */
+	    slot = sp->status.command_id;
+	    if (slot < MLY_SLOT_MAX) {
+		mc = sc->mly_busycmds[slot];
+		if (mc != NULL) {
+		    mc->mc_status = sp->status.status;
+		    mc->mc_sense = sp->status.sense_length;
+		    mc->mc_resid = sp->status.residue;
+		    mly_enqueue_completed(mc);
+		    sc->mly_busycmds[slot] = NULL;
+		    worked = 1;
+		} else {
+		    mly_printf(sc, "got AM completion for nonbusy slot %u\n", slot);
+		}
+	    } else {
+		/* slot 0xffff may mean "extremely bogus command" */
+		mly_printf(sc, "got AM completion for illegal slot %u at %d\n", slot, sc->mly_mmbox_status_index);
+	    }
+
+	    /* clear and move to next slot */
+	    sp->mmbox.flag = 0;
+	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
+	}
+	/* acknowledge that we have collected status value(s) */
+	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
+    }
+
+    splx(s);
+    /*
+     * If we retired anything, process completions: via the soft-interrupt
+     * taskqueue when interrupts are enabled, otherwise synchronously here
+     * (the polled path used by mly_immediate_command).
+     */
+    if (worked) {
+#if __FreeBSD_version >= 500005
+	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
+	    taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
+	else
+#endif
+	    mly_complete(sc, 0);
+    }
+}
+
+/********************************************************************************
+ * Process completed commands
+ */
+static void
+mly_complete(void *context, int pending)
+{
+ /*
+ * Drain the completed-command queue and finish each command.
+ *
+ * 'context' is the controller softc; 'pending' is the taskqueue
+ * argument and is unused here (this function serves both as the
+ * taskqueue handler and as a direct call from mly_done()).
+ */
+ struct mly_softc *sc = (struct mly_softc *)context;
+ struct mly_command *mc;
+ void (* mc_complete)(struct mly_command *mc);
+
+
+ debug_called(2);
+
+ /*
+ * Spin pulling commands off the completed queue and processing them.
+ */
+ while ((mc = mly_dequeue_completed(sc)) != NULL) {
+
+ /*
+ * Free controller resources, mark command complete.
+ *
+ * Note that as soon as we mark the command complete, it may be freed
+ * out from under us, so we need to save the mc_complete field in
+ * order to later avoid dereferencing mc. (We would not expect to
+ * have a polling/sleeping consumer with mc_complete != NULL).
+ */
+ mly_unmap_command(mc);
+ mc_complete = mc->mc_complete;
+ MLY_CMD_SETSTATE(mc, MLY_CMD_COMPLETE);
+
+ /*
+ * Call completion handler or wake up sleeping consumer.
+ */
+ if (mc_complete != NULL) {
+ mc_complete(mc);
+ } else {
+ wakeup(mc);
+ }
+ }
+
+ /*
+ * We may have freed up controller resources which would allow us
+ * to push more commands onto the controller, so we check here.
+ */
+ mly_startio(sc);
+
+ /*
+ * The controller may have updated the health status information,
+ * so check for it here.
+ *
+ * Note that we only check for health status after a completed command. It
+ * might be wise to ping the controller occasionally if it's been idle for
+ * a while just to check up on it. While a filesystem is mounted, or I/O is
+ * active this isn't really an issue.
+ */
+ if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
+ sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
+ debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
+ sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
+ sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
+ }
+ /* fetch any events we have not yet collected */
+ if (sc->mly_event_counter != sc->mly_event_waiting)
+ mly_fetch_event(sc);
+}
+
+/********************************************************************************
+ ********************************************************************************
+ Command Buffer Management
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Give a command a slot in our lookup table, so that we can recover it when
+ * the controller returns the slot number.
+ *
+ * Slots are freed in mly_done().
+ */
+static int
+mly_get_slot(struct mly_command *mc)
+{
+ /*
+ * Assign 'mc' a free slot in the busy-command lookup table so the
+ * controller's completion (which reports only a slot number) can be
+ * matched back to the command.  Returns 0 on success or EBUSY if no
+ * slot is available.
+ */
+ struct mly_softc *sc = mc->mc_sc;
+ u_int16_t slot;
+ int tries;
+
+ debug_called(3);
+
+ /* already slotted?  nothing more to do */
+ if (mc->mc_flags & MLY_CMD_SLOTTED)
+ return(0);
+
+ /*
+ * Optimisation for the controller-busy case - check to see whether
+ * we are already over the limit and stop immediately.
+ */
+ if (sc->mly_busy_count >= sc->mly_max_commands)
+ return(EBUSY);
+
+ /*
+ * Scan forward from the last slot that we assigned looking for a free
+ * slot. Don't scan more than the maximum number of commands that we
+ * support (we should never reach the limit here due to the optimisation
+ * above)
+ */
+ slot = sc->mly_last_slot;
+ for (tries = sc->mly_max_commands; tries > 0; tries--) {
+ if (sc->mly_busycmds[slot] == NULL) {
+ sc->mly_busycmds[slot] = mc;
+ mc->mc_slot = slot;
+ mc->mc_packet->generic.command_id = slot;
+ mc->mc_flags |= MLY_CMD_SLOTTED;
+ sc->mly_last_slot = slot;
+ return(0);
+ }
+ /* advance and wrap around the valid slot range */
+ slot++;
+ if (slot >= MLY_SLOT_MAX)
+ slot = MLY_SLOT_START;
+ }
+ return(EBUSY);
+}
+
+/********************************************************************************
+ * Allocate a command.
+ */
+int
+mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
+{
+ /*
+ * Allocate a command from the freelist, growing the pool by one
+ * cluster if the freelist is empty.  Returns 0 and stores the command
+ * in *mcp on success, or ENOMEM if no command could be obtained.
+ */
+ struct mly_command *mc;
+
+ debug_called(3);
+
+ if ((mc = mly_dequeue_free(sc)) == NULL) {
+ mly_alloc_command_cluster(sc);
+ mc = mly_dequeue_free(sc);
+ }
+ /*
+ * NOTE(review): if mly_dequeue_free() already unlinks the command from
+ * mly_freecmds (as a dequeue primitive normally would), this second
+ * TAILQ_REMOVE is redundant and would corrupt the freelist -- confirm
+ * against the queue macros in mlyvar.h.
+ */
+ if (mc != NULL)
+ TAILQ_REMOVE(&sc->mly_freecmds, mc, mc_link);
+
+ if (mc == NULL)
+ return(ENOMEM);
+
+ MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP);
+ *mcp = mc;
+ return(0);
+}
+
+/********************************************************************************
+ * Release a command back to the freelist.
+ */
+void
+mly_release_command(struct mly_command *mc)
+{
+ debug_called(3);
+
+ /*
+ * Reset fields that could confuse the next consumer when this
+ * command is later reallocated from the freelist.
+ */
+ MLY_CMD_SETSTATE(mc, MLY_CMD_FREE);
+ mc->mc_data = NULL;
+ mc->mc_flags = 0;
+ mc->mc_complete = NULL;
+ mc->mc_private = NULL;
+
+ /*
+ * By default, we set up to overwrite the command packet with
+ * sense information.
+ */
+ mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
+ mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);
+
+ mly_enqueue_free(mc);
+}
+
+/********************************************************************************
+ * Map helper for command cluster allocation.
+ *
+ * Note that there are never more command packets in a cluster than will fit in
+ * a page, so there is no need to look at anything other than the base of the
+ * allocation (which will be page-aligned).
+ */
+static void
+mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ /*
+ * busdma load callback for a command cluster: record the bus address
+ * of the packet allocation.  Per the comment above, the allocation
+ * never spans a page, so only segs[0] matters; 'nseg' and 'error'
+ * are deliberately ignored.
+ */
+ struct mly_command_cluster *mcc = (struct mly_command_cluster *)arg;
+
+ debug_called(2);
+
+ mcc->mcc_packetphys = segs[0].ds_addr;
+}
+
+/********************************************************************************
+ * Allocate and initialise a cluster of commands.
+ */
+static void
+mly_alloc_command_cluster(struct mly_softc *sc)
+{
+ /*
+ * Allocate and initialise a cluster of commands and add them to the
+ * freelist.  Failure is silent; the caller simply finds the freelist
+ * still empty.
+ */
+ struct mly_command_cluster *mcc;
+ struct mly_command *mc;
+ int i;
+
+ debug_called(1);
+
+ mcc = malloc(sizeof(struct mly_command_cluster), M_DEVBUF, M_NOWAIT);
+ if (mcc != NULL) {
+
+ /*
+ * Allocate enough space for all the command packets for this cluster and
+ * map them permanently into controller-visible space.
+ *
+ * NOTE(review): the return value of bus_dmamap_load() is not checked;
+ * this assumes the load completes synchronously (no BUS_DMA_WAITOK
+ * deferral) so that mcc_packetphys is valid below -- confirm.
+ */
+ if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&mcc->mcc_packet,
+ BUS_DMA_NOWAIT, &mcc->mcc_packetmap)) {
+ free(mcc, M_DEVBUF);
+ return;
+ }
+ bus_dmamap_load(sc->mly_packet_dmat, mcc->mcc_packetmap, mcc->mcc_packet,
+ MLY_CMD_CLUSTERCOUNT * sizeof(union mly_command_packet),
+ mly_alloc_command_cluster_map, mcc, 0);
+
+ mly_enqueue_cluster(sc, mcc);
+ for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++) {
+ mc = &mcc->mcc_command[i];
+ bzero(mc, sizeof(*mc));
+ mc->mc_sc = sc;
+ mc->mc_packet = mcc->mcc_packet + i;
+ mc->mc_packetphys = mcc->mcc_packetphys + (i * sizeof(union mly_command_packet));
+ /*
+ * bus_dmamap_create() returns 0 on success; a command whose
+ * data map cannot be created is simply never released to the
+ * freelist (its packet slot in the cluster goes unused).
+ */
+ if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
+ mly_release_command(mc);
+ }
+ }
+}
+
+/********************************************************************************
+ * Command-mapping helper function - populate this command slot's s/g table
+ * with the s/g entries for this command.
+ */
+static void
+mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ /*
+ * busdma load callback for a command's data buffer: build the
+ * scatter/gather list in the command packet.  'error' is ignored here.
+ */
+ struct mly_command *mc = (struct mly_command *)arg;
+ struct mly_softc *sc = mc->mc_sc;
+ struct mly_command_generic *gen = &(mc->mc_packet->generic);
+ struct mly_sg_entry *sg;
+ int i, tabofs;
+
+ debug_called(3);
+
+ /* can we use the transfer structure directly? */
+ if (nseg <= 2) {
+ /* up to two segments fit inline in the packet itself */
+ sg = &gen->transfer.direct.sg[0];
+ gen->command_control.extended_sg_table = 0;
+ } else {
+ /* otherwise use this slot's region of the shared extended s/g table */
+ tabofs = (mc->mc_slot * MLY_MAXSGENTRIES);
+ sg = sc->mly_sg_table + tabofs;
+ gen->transfer.indirect.entries[0] = nseg;
+ gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
+ gen->command_control.extended_sg_table = 1;
+ }
+
+ /* copy the s/g table */
+ for (i = 0; i < nseg; i++) {
+ sg[i].physaddr = segs[i].ds_addr;
+ sg[i].length = segs[i].ds_len;
+ }
+
+}
+
+#if 0
+/********************************************************************************
+ * Command-mapping helper function - save the cdb's physical address.
+ *
+ * We don't support 'large' SCSI commands at this time, so this is unused.
+ */
+static void
+mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct mly_command *mc = (struct mly_command *)arg;
+
+ debug_called(3);
+
+ /* XXX can we safely assume that a CDB will never cross a page boundary? */
+ /*
+ * NOTE(review): this test also fires when the CDB ends exactly on a
+ * page boundary (end offset wraps to 0), which is not a crossing --
+ * revisit if this code is ever enabled.
+ */
+ if ((segs[0].ds_addr % PAGE_SIZE) >
+ ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
+ panic("cdb crosses page boundary");
+
+ /* fix up fields in the command packet */
+ mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
+}
+#endif
+
+/********************************************************************************
+ * Map a command into controller-visible space
+ */
+static void
+mly_map_command(struct mly_command *mc)
+{
+ /*
+ * Load the command's data buffer (building the s/g list via the
+ * mly_map_command_sg callback) and perform pre-I/O DMA syncs.
+ * Idempotent: guarded by MLY_CMD_MAPPED.
+ */
+ struct mly_softc *sc = mc->mc_sc;
+
+ debug_called(2);
+
+ /* don't map more than once */
+ if (mc->mc_flags & MLY_CMD_MAPPED)
+ return;
+
+ /* does the command have a data buffer? */
+ if (mc->mc_data != NULL)
+ bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
+ mly_map_command_sg, mc, 0);
+
+ if (mc->mc_flags & MLY_CMD_DATAIN)
+ bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
+ if (mc->mc_flags & MLY_CMD_DATAOUT)
+ bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
+
+ mc->mc_flags |= MLY_CMD_MAPPED;
+}
+
+/********************************************************************************
+ * Unmap a command from controller-visible space
+ */
+static void
+mly_unmap_command(struct mly_command *mc)
+{
+ /*
+ * Reverse of mly_map_command(): post-I/O DMA syncs, then unload the
+ * data map.  Safe to call on an unmapped command (MLY_CMD_MAPPED guard).
+ */
+ struct mly_softc *sc = mc->mc_sc;
+
+ debug_called(2);
+
+ if (!(mc->mc_flags & MLY_CMD_MAPPED))
+ return;
+
+ if (mc->mc_flags & MLY_CMD_DATAIN)
+ bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
+ if (mc->mc_flags & MLY_CMD_DATAOUT)
+ bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
+
+ /* does the command have a data buffer? */
+ if (mc->mc_data != NULL)
+ bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
+
+ mc->mc_flags &= ~MLY_CMD_MAPPED;
+}
+
+/********************************************************************************
+ ********************************************************************************
+ Hardware Control
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Handshake with the firmware while the card is being initialised.
+ */
+static int
+mly_fwhandshake(struct mly_softc *sc)
+{
+ /*
+ * Handshake with the firmware while the card initialises.  Returns 0
+ * both when no initialisation was in progress and when initialisation
+ * completed; returns ENXIO on a fatal memory parity error.
+ *
+ * NOTE(review): the wait loop below has no timeout -- if the controller
+ * never clears HM_STSACK this spins forever.
+ */
+ u_int8_t error, param0, param1;
+ int spinup = 0;
+
+ debug_called(1);
+
+ /* set HM_STSACK and let the firmware initialise */
+ MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
+ DELAY(1000); /* too short? */
+
+ /* if HM_STSACK is still true, the controller is initialising */
+ if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
+ return(0);
+ mly_printf(sc, "controller initialisation started\n");
+
+ /* spin waiting for initialisation to finish, or for a message to be delivered */
+ while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
+ /* check for a message */
+ if (MLY_ERROR_VALID(sc)) {
+ /* param0/param1 are read to drain the mailbox; not otherwise used here */
+ error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
+ param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
+ param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);
+
+ switch(error) {
+ case MLY_MSG_SPINUP:
+ if (!spinup) {
+ mly_printf(sc, "drive spinup in progress\n");
+ spinup = 1; /* only print this once (should print drive being spun?) */
+ }
+ break;
+ case MLY_MSG_RACE_RECOVERY_FAIL:
+ mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
+ break;
+ case MLY_MSG_RACE_IN_PROGRESS:
+ mly_printf(sc, "mirror race recovery in progress\n");
+ break;
+ case MLY_MSG_RACE_ON_CRITICAL:
+ mly_printf(sc, "mirror race recovery on a critical drive\n");
+ break;
+ case MLY_MSG_PARITY_ERROR:
+ mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
+ return(ENXIO);
+ default:
+ mly_printf(sc, "unknown initialisation code 0x%x\n", error);
+ }
+ }
+ }
+ return(0);
+}
+
+/********************************************************************************
+ ********************************************************************************
+ Debugging and Diagnostics
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Print some information about the controller.
+ */
+static void
+mly_describe_controller(struct mly_softc *sc)
+{
+ /*
+ * Print a one-line summary of the controller; with bootverbose set,
+ * dump the full hardware inventory from the controller-info structure.
+ */
+ struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo;
+
+ mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n",
+ mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
+ mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build, /* XXX turn encoding? */
+ mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
+ mi->memory_size);
+
+ if (bootverbose) {
+ mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
+ mly_describe_code(mly_table_oemname, mi->oem_information),
+ mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
+ mi->interface_speed, mi->interface_width, mi->interface_name);
+ mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
+ mi->memory_size, mi->memory_speed, mi->memory_width,
+ mly_describe_code(mly_table_memorytype, mi->memory_type),
+ mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
+ mi->cache_size);
+ mly_printf(sc, "CPU: %s @ %dMHZ\n",
+ mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
+ if (mi->l2cache_size != 0)
+ mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
+ if (mi->exmemory_size != 0)
+ mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
+ mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
+ mly_describe_code(mly_table_memorytype, mi->exmemory_type),
+ mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
+ mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
+ mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
+ mi->maximum_block_count, mi->maximum_sg_entries);
+ mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
+ mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
+ mly_printf(sc, "physical devices present %d\n",
+ mi->physical_devices_present);
+ mly_printf(sc, "physical disks present/offline %d/%d\n",
+ mi->physical_disks_present, mi->physical_disks_offline);
+ mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
+ mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
+ mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
+ mi->virtual_channels_possible);
+ mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
+ mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
+ mi->flash_size, mi->flash_age, mi->flash_maximum_age);
+ }
+}
+
+#ifdef MLY_DEBUG
+/********************************************************************************
+ * Print some controller state
+ */
+static void
+mly_printstate(struct mly_softc *sc)
+{
+ /*
+ * Dump the hardware doorbell/mailbox registers and the softc's notion
+ * of their offsets; debug (MLY_DEBUG) only.
+ */
+ mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n",
+ MLY_GET_REG(sc, sc->mly_idbr),
+ MLY_GET_REG(sc, sc->mly_odbr),
+ MLY_GET_REG(sc, sc->mly_error_status),
+ sc->mly_idbr,
+ sc->mly_odbr,
+ sc->mly_error_status);
+ mly_printf(sc, "IMASK %02x ISTATUS %02x\n",
+ MLY_GET_REG(sc, sc->mly_interrupt_mask),
+ MLY_GET_REG(sc, sc->mly_interrupt_status));
+ mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ MLY_GET_REG(sc, sc->mly_command_mailbox),
+ MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
+ MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
+ MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
+ MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
+ MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
+ MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
+ MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
+ mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ MLY_GET_REG(sc, sc->mly_status_mailbox),
+ MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
+ MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
+ MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
+ MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
+ MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
+ MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
+ MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
+ mly_printf(sc, " %04x %08x\n",
+ MLY_GET_REG2(sc, sc->mly_status_mailbox),
+ MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
+}
+
+/*
+ * Debugger convenience hook: mly_softc0 is presumably pointed at the first
+ * attached controller elsewhere (TODO confirm where it is set), so
+ * mly_printstate0() can be called by hand from DDB.
+ */
+struct mly_softc *mly_softc0 = NULL;
+void
+mly_printstate0(void)
+{
+ if (mly_softc0 != NULL)
+ mly_printstate(mly_softc0);
+}
+
+/********************************************************************************
+ * Print a command
+ */
+static void
+mly_print_command(struct mly_command *mc)
+{
+ /*
+ * Dump a command structure, including its packet (via
+ * mly_print_packet) when one is attached; debug (MLY_DEBUG) only.
+ */
+ struct mly_softc *sc = mc->mc_sc;
+
+ mly_printf(sc, "COMMAND @ %p\n", mc);
+ mly_printf(sc, " slot %d\n", mc->mc_slot);
+ mly_printf(sc, " state %d\n", MLY_CMD_STATE(mc));
+ mly_printf(sc, " status 0x%x\n", mc->mc_status);
+ mly_printf(sc, " sense len %d\n", mc->mc_sense);
+ mly_printf(sc, " resid %d\n", mc->mc_resid);
+ mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
+ if (mc->mc_packet != NULL)
+ mly_print_packet(mc);
+ mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length);
+ mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\11slotted\12mapped\13priority\14datain\15dataout\n");
+ mly_printf(sc, " complete %p\n", mc->mc_complete);
+ mly_printf(sc, " private %p\n", mc->mc_private);
+}
+
+/********************************************************************************
+ * Print a command packet
+ */
+static void
+mly_print_packet(struct mly_command *mc)
+{
+ /*
+ * Decode and print a command packet according to its opcode; debug
+ * (MLY_DEBUG) only.  The packet union is viewed through each of the
+ * possible layouts and the right one selected per opcode/sub-ioctl.
+ */
+ struct mly_softc *sc = mc->mc_sc;
+ struct mly_command_generic *ge = (struct mly_command_generic *)mc->mc_packet;
+ struct mly_command_scsi_small *ss = (struct mly_command_scsi_small *)mc->mc_packet;
+ struct mly_command_scsi_large *sl = (struct mly_command_scsi_large *)mc->mc_packet;
+ struct mly_command_ioctl *io = (struct mly_command_ioctl *)mc->mc_packet;
+ /* set nonzero by each opcode case when the s/g transfer map should be printed */
+ int transfer;
+
+ mly_printf(sc, " command_id %d\n", ge->command_id);
+ mly_printf(sc, " opcode %d\n", ge->opcode);
+ mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n",
+ ge->command_control.force_unit_access,
+ ge->command_control.disable_page_out,
+ ge->command_control.extended_sg_table,
+ (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
+ ge->command_control.no_auto_sense,
+ ge->command_control.disable_disconnect);
+ mly_printf(sc, " data_size %d\n", ge->data_size);
+ mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
+ mly_printf(sc, " lun %d\n", ge->addr.phys.lun);
+ mly_printf(sc, " target %d\n", ge->addr.phys.target);
+ mly_printf(sc, " channel %d\n", ge->addr.phys.channel);
+ mly_printf(sc, " logical device %d\n", ge->addr.log.logdev);
+ mly_printf(sc, " controller %d\n", ge->addr.phys.controller);
+ mly_printf(sc, " timeout %d %s\n",
+ ge->timeout.value,
+ (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
+ ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
+ mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size);
+ switch(ge->opcode) {
+ case MDACMD_SCSIPT:
+ case MDACMD_SCSI:
+ mly_printf(sc, " cdb length %d\n", ss->cdb_length);
+ mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " ");
+ transfer = 1;
+ break;
+ case MDACMD_SCSILC:
+ case MDACMD_SCSILCPT:
+ mly_printf(sc, " cdb length %d\n", sl->cdb_length);
+ mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr);
+ transfer = 1;
+ break;
+ case MDACMD_IOCTL:
+ mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl);
+ switch(io->sub_ioctl) {
+ case MDACIOCTL_SETMEMORYMAILBOX:
+ mly_printf(sc, " health_buffer_size %d\n",
+ io->param.setmemorymailbox.health_buffer_size);
+ mly_printf(sc, " health_buffer_phys 0x%llx\n",
+ io->param.setmemorymailbox.health_buffer_physaddr);
+ mly_printf(sc, " command_mailbox 0x%llx\n",
+ io->param.setmemorymailbox.command_mailbox_physaddr);
+ mly_printf(sc, " status_mailbox 0x%llx\n",
+ io->param.setmemorymailbox.status_mailbox_physaddr);
+ transfer = 0;
+ break;
+
+ case MDACIOCTL_SETREALTIMECLOCK:
+ case MDACIOCTL_GETHEALTHSTATUS:
+ case MDACIOCTL_GETCONTROLLERINFO:
+ case MDACIOCTL_GETLOGDEVINFOVALID:
+ case MDACIOCTL_GETPHYSDEVINFOVALID:
+ case MDACIOCTL_GETPHYSDEVSTATISTICS:
+ case MDACIOCTL_GETLOGDEVSTATISTICS:
+ case MDACIOCTL_GETCONTROLLERSTATISTICS:
+ case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
+ case MDACIOCTL_CREATENEWCONF:
+ case MDACIOCTL_ADDNEWCONF:
+ case MDACIOCTL_GETDEVCONFINFO:
+ case MDACIOCTL_GETFREESPACELIST:
+ case MDACIOCTL_MORE:
+ case MDACIOCTL_SETPHYSDEVPARAMETER:
+ case MDACIOCTL_GETPHYSDEVPARAMETER:
+ case MDACIOCTL_GETLOGDEVPARAMETER:
+ case MDACIOCTL_SETLOGDEVPARAMETER:
+ mly_printf(sc, " param %10D\n", io->param.data.param, " ");
+ transfer = 1;
+ break;
+
+ case MDACIOCTL_GETEVENT:
+ mly_printf(sc, " event %d\n",
+ io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
+ transfer = 1;
+ break;
+
+ case MDACIOCTL_SETRAIDDEVSTATE:
+ mly_printf(sc, " state %d\n", io->param.setraiddevstate.state);
+ transfer = 0;
+ break;
+
+ case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
+ mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device);
+ mly_printf(sc, " controller %d\n", io->param.xlatephysdevtoraiddev.controller);
+ mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel);
+ mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target);
+ mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun);
+ transfer = 0;
+ break;
+
+ case MDACIOCTL_GETGROUPCONFINFO:
+ mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group);
+ transfer = 1;
+ break;
+
+ /*
+ * NOTE: the misspelled constants below (STARTDISOCVERY,
+ * SETCONTRLLERPARAMETER) follow the names declared in mlyreg.h.
+ */
+ case MDACIOCTL_GET_SUBSYSTEM_DATA:
+ case MDACIOCTL_SET_SUBSYSTEM_DATA:
+ case MDACIOCTL_STARTDISOCVERY:
+ case MDACIOCTL_INITPHYSDEVSTART:
+ case MDACIOCTL_INITPHYSDEVSTOP:
+ case MDACIOCTL_INITRAIDDEVSTART:
+ case MDACIOCTL_INITRAIDDEVSTOP:
+ case MDACIOCTL_REBUILDRAIDDEVSTART:
+ case MDACIOCTL_REBUILDRAIDDEVSTOP:
+ case MDACIOCTL_MAKECONSISTENTDATASTART:
+ case MDACIOCTL_MAKECONSISTENTDATASTOP:
+ case MDACIOCTL_CONSISTENCYCHECKSTART:
+ case MDACIOCTL_CONSISTENCYCHECKSTOP:
+ case MDACIOCTL_RESETDEVICE:
+ case MDACIOCTL_FLUSHDEVICEDATA:
+ case MDACIOCTL_PAUSEDEVICE:
+ case MDACIOCTL_UNPAUSEDEVICE:
+ case MDACIOCTL_LOCATEDEVICE:
+ case MDACIOCTL_SETMASTERSLAVEMODE:
+ case MDACIOCTL_DELETERAIDDEV:
+ case MDACIOCTL_REPLACEINTERNALDEV:
+ case MDACIOCTL_CLEARCONF:
+ case MDACIOCTL_GETCONTROLLERPARAMETER:
+ case MDACIOCTL_SETCONTRLLERPARAMETER:
+ case MDACIOCTL_CLEARCONFSUSPMODE:
+ case MDACIOCTL_STOREIMAGE:
+ case MDACIOCTL_READIMAGE:
+ case MDACIOCTL_FLASHIMAGES:
+ case MDACIOCTL_RENAMERAIDDEV:
+ default: /* no idea what to print */
+ transfer = 0;
+ break;
+ }
+ break;
+
+ case MDACMD_IOCTLCHECK:
+ case MDACMD_MEMCOPY:
+ default:
+ transfer = 0;
+ break; /* print nothing */
+ }
+ if (transfer) {
+ if (ge->command_control.extended_sg_table) {
+ mly_printf(sc, " sg table 0x%llx/%d\n",
+ ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
+ } else {
+ mly_printf(sc, " 0000 0x%llx/%lld\n",
+ ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
+ mly_printf(sc, " 0001 0x%llx/%lld\n",
+ ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
+ }
+ }
+}
+
+/********************************************************************************
+ * Panic in a slightly informative fashion
+ */
+static void
+mly_panic(struct mly_softc *sc, char *reason)
+{
+ /*
+ * Dump the controller register state, then panic.
+ *
+ * 'reason' is passed through an explicit "%s" so that a message
+ * containing '%' characters cannot be misinterpreted by panic()'s
+ * printf-style format processing (the original passed it directly
+ * as the format string).
+ */
+ mly_printstate(sc);
+ panic("%s", reason);
+}
+#endif
diff --git a/sys/dev/mly/mly_cam.c b/sys/dev/mly/mly_cam.c
new file mode 100644
index 0000000..9175b27
--- /dev/null
+++ b/sys/dev/mly/mly_cam.c
@@ -0,0 +1,513 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * CAM interface for FreeBSD
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/devicestat.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/scsi/scsi_all.h>
+
+#include <machine/resource.h>
+#include <machine/bus.h>
+
+#include <dev/mly/mlyreg.h>
+#include <dev/mly/mlyvar.h>
+#include <dev/mly/mly_tables.h>
+
+static void mly_cam_poll(struct cam_sim *sim);
+static void mly_cam_action(struct cam_sim *sim, union ccb *ccb);
+static void mly_cam_complete(struct mly_command *mc);
+static struct cam_periph *mly_find_periph(struct mly_softc *sc, int bus, int target);
+
+/********************************************************************************
+ * CAM-specific queue primitives
+ */
+static __inline void
+mly_enqueue_ccb(struct mly_softc *sc, union ccb *ccb)
+{
+ /* append a CCB to the pending-work queue, under splcam protection */
+ int s;
+
+ s = splcam();
+ TAILQ_INSERT_TAIL(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe);
+ splx(s);
+}
+
+static __inline void
+mly_requeue_ccb(struct mly_softc *sc, union ccb *ccb)
+{
+ /* put a CCB back at the head of the queue (retry path), under splcam */
+ int s;
+
+ s = splcam();
+ TAILQ_INSERT_HEAD(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe);
+ splx(s);
+}
+
+static __inline union ccb *
+mly_dequeue_ccb(struct mly_softc *sc)
+{
+ /* pop the next CCB from the queue, or return NULL if it is empty */
+ union ccb *ccb;
+ int s;
+
+ s = splcam();
+ if ((ccb = (union ccb *)TAILQ_FIRST(&sc->mly_cam_ccbq)) != NULL)
+ TAILQ_REMOVE(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe);
+ splx(s);
+ return(ccb);
+}
+
+/********************************************************************************
+ * space-fill a character string
+ */
+static __inline void
+padstr(char *targ, char *src, int len)
+{
+ /*
+ * Copy 'src' into 'targ', space-padding to exactly 'len' bytes.
+ * Note the result is deliberately NOT NUL-terminated (SCSI
+ * INQUIRY-style fixed-width field).
+ */
+ while (len-- > 0) {
+ if (*src != 0) {
+ *targ++ = *src++;
+ } else {
+ *targ++ = ' ';
+ }
+ }
+}
+
+/********************************************************************************
+ * Attach the real and virtual SCSI busses to CAM
+ */
+int
+mly_cam_attach(struct mly_softc *sc)
+{
+ /*
+ * Register one CAM SIM per physical+virtual channel, all sharing a
+ * single devq sized to the controller's parallel command limit.
+ * Returns 0 on success or an errno on failure.
+ */
+ struct cam_devq *devq;
+ int chn, nchn;
+
+ debug_called(1);
+
+ /* initialise the CCB queue */
+ TAILQ_INIT(&sc->mly_cam_ccbq);
+
+ /*
+ * Allocate a devq for all our channels combined.
+ */
+ if ((devq = cam_simq_alloc(sc->mly_controllerinfo->maximum_parallel_commands)) == NULL) {
+ mly_printf(sc, "can't allocate CAM SIM\n");
+ return(ENOMEM);
+ }
+
+ /*
+ * Iterate over channels, registering them with CAM.
+ *
+ * NOTE(review): on failure partway through, sims and buses registered
+ * on earlier iterations are not torn down here -- presumably the
+ * caller invokes mly_cam_detach() to clean up; confirm.
+ */
+ nchn = sc->mly_controllerinfo->physical_channels_present +
+ sc->mly_controllerinfo->virtual_channels_present;
+ for (chn = 0; chn < nchn; chn++) {
+
+ /* allocate a sim */
+ if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action,
+ mly_cam_poll,
+ "mly",
+ sc,
+ device_get_unit(sc->mly_dev),
+ 1,
+ sc->mly_controllerinfo->maximum_parallel_commands,
+ devq)) == NULL) {
+ cam_simq_free(devq);
+ mly_printf(sc, "CAM SIM attach failed\n");
+ return(ENOMEM);
+ }
+
+ /* register the bus ID so we can get it later */
+ if (xpt_bus_register(sc->mly_cam_sim[chn], chn)) {
+ mly_printf(sc, "CAM XPT bus registration failed\n");
+ return(ENXIO);
+ }
+ debug(1, "registered sim %p bus %d", sc->mly_cam_sim[chn], chn);
+
+ }
+
+ return(0);
+}
+
+/********************************************************************************
+ * Detach from CAM
+ */
+void
+mly_cam_detach(struct mly_softc *sc)
+{
+ /*
+ * Deregister and free every SIM that mly_cam_attach() created.  The
+ * devq is shared by all sims, so only the first cam_sim_free() call
+ * asks for the queue to be freed.
+ */
+ int chn, nchn, first;
+
+ debug_called(1);
+
+ /*
+ * Total number of channels (physical + virtual).  Computed once;
+ * the original redundantly computed this twice in succession.
+ */
+ nchn = sc->mly_controllerinfo->physical_channels_present +
+ sc->mly_controllerinfo->virtual_channels_present;
+
+ /*
+ * Iterate over channels, deregistering as we go.
+ */
+ for (chn = 0, first = 1; chn < nchn; chn++) {
+
+ /*
+ * If a sim was registered for this channel, free it.
+ */
+ if (sc->mly_cam_sim[chn] != NULL) {
+ debug(1, "deregister bus %d", chn);
+ xpt_bus_deregister(cam_sim_path(sc->mly_cam_sim[chn]));
+ debug(1, "free sim for channel %d (%sfree queue)", chn, first ? "" : "don't ");
+ cam_sim_free(sc->mly_cam_sim[chn], first ? TRUE : FALSE);
+ first = 0;
+ }
+ }
+}
+
+/********************************************************************************
+ * Handle an action requested by CAM
+ */
+static void
+mly_cam_action(struct cam_sim *sim, union ccb *ccb)
+{
+ /*
+ * CAM action entry point.  XPT_SCSI_IO requests are validated and
+ * queued for asynchronous execution (returning without xpt_done());
+ * everything else is completed synchronously via xpt_done() after
+ * the switch.
+ */
+ struct mly_softc *sc = cam_sim_softc(sim);
+
+ debug_called(2);
+
+ switch (ccb->ccb_h.func_code) {
+
+ /* perform SCSI I/O */
+ case XPT_SCSI_IO:
+ {
+ struct ccb_scsiio *csio = &ccb->csio;
+ int bus, target;
+
+ bus = cam_sim_bus(sim);
+ target = csio->ccb_h.target_id;
+
+ debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, ccb->ccb_h.target_lun);
+
+ /*
+ * Each validation check below only sets CAM_REQ_CMP_ERR; the
+ * "abandon" test after them notices any non-INPROG status and
+ * breaks out to xpt_done() instead of queueing the CCB.
+ */
+
+ /* check for I/O attempt to a protected device */
+ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PROTECTED) {
+ debug(2, " device protected");
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ }
+
+ /* check for I/O attempt to nonexistent device */
+ if (!(sc->mly_btl[bus][target].mb_flags & (MLY_BTL_LOGICAL | MLY_BTL_PHYSICAL))) {
+ debug(2, " device does not exist");
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ }
+
+ /* XXX increase if/when we support large SCSI commands */
+ if (csio->cdb_len > MLY_CMD_SCSI_SMALL_CDB) {
+ debug(2, " command too large (%d > %d)", csio->cdb_len, MLY_CMD_SCSI_SMALL_CDB);
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ }
+
+ /* check that the CDB pointer is not to a physical address */
+ if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) {
+ debug(2, " CDB pointer is to physical address");
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ }
+
+ /* if there is data transfer, it must be to/from a virtual address */
+ if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
+ if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */
+ debug(2, " data pointer is to physical address");
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ }
+ if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */
+ debug(2, " data has premature s/g setup");
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ }
+ }
+
+ /* abandon aborted ccbs or those that have failed validation */
+ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
+ debug(2, "abandoning CCB due to abort/validation failure");
+ break;
+ }
+
+ /* save the channel number in the ccb */
+ csio->ccb_h.sim_priv.entries[0].field = bus;
+
+ /* enqueue the ccb and start I/O */
+ mly_enqueue_ccb(sc, ccb);
+ mly_startio(sc);
+ return;
+ }
+
+ /* perform geometry calculations */
+ case XPT_CALC_GEOMETRY:
+ {
+ struct ccb_calc_geometry *ccg = &ccb->ccg;
+ u_int32_t secs_per_cylinder;
+
+ debug(2, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
+
+ if (sc->mly_controllerparam->bios_geometry == MLY_BIOSGEOM_8G) {
+ ccg->heads = 255;
+ ccg->secs_per_track = 63;
+ } else { /* MLY_BIOSGEOM_2G */
+ ccg->heads = 128;
+ ccg->secs_per_track = 32;
+ }
+ secs_per_cylinder = ccg->heads * ccg->secs_per_track;
+ ccg->cylinders = ccg->volume_size / secs_per_cylinder;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+
+ /* handle path attribute inquiry */
+ case XPT_PATH_INQ:
+ {
+ struct ccb_pathinq *cpi = &ccb->cpi;
+
+ debug(2, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
+
+ cpi->version_num = 1;
+ cpi->hba_inquiry = PI_TAG_ABLE; /* XXX extra flags for physical channels? */
+ cpi->target_sprt = 0;
+ cpi->hba_misc = 0;
+ cpi->max_target = MLY_MAX_TARGETS - 1;
+ cpi->max_lun = MLY_MAX_LUNS - 1;
+ cpi->initiator_id = sc->mly_controllerparam->initiator_id;
+ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(cpi->hba_vid, "BSDi", HBA_IDLEN);
+ strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->unit_number = cam_sim_unit(sim);
+ cpi->bus_id = cam_sim_bus(sim);
+ cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+
+ default: /* we can't do this */
+ /* ("unspported" typo is in the runtime debug string; left as-is here) */
+ debug(2, "unspported func_code = 0x%x", ccb->ccb_h.func_code);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ break;
+ }
+
+ xpt_done(ccb);
+}
+
+/********************************************************************************
+ * Check for possibly-completed commands.
+ */
+static void
+mly_cam_poll(struct cam_sim *sim)
+{
+    debug_called(2);
+
+    /* harvest any completed commands on the controller owning this SIM */
+    mly_done(cam_sim_softc(sim));
+}
+
+/********************************************************************************
+ * Pull a CCB off the work queue and turn it into a command.
+ */
+int
+mly_cam_command(struct mly_softc *sc, struct mly_command **mcp)
+{
+    struct mly_command *mc;
+    struct mly_command_scsi_small *ss;
+    struct ccb_scsiio *csio;
+    int error;
+
+    debug_called(2);
+
+    error = 0;
+    mc = NULL;
+    csio = NULL;
+
+    /* check for a CCB; nothing queued is not an error, *mcp is just NULL */
+    if (!(csio = (struct ccb_scsiio *)mly_dequeue_ccb(sc)))
+	goto out;
+
+    /* get a command to back it */
+    if (mly_alloc_command(sc, &mc)) {
+	error = ENOMEM;
+	goto out;
+    }
+
+    /* build the command */
+    MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP);
+    mc->mc_data = csio->data_ptr;
+    mc->mc_length = csio->dxfer_len;
+    mc->mc_complete = mly_cam_complete;
+    mc->mc_private = csio;
+
+    /* build the packet for the controller */
+    ss = &mc->mc_packet->scsi_small;
+    ss->opcode = MDACMD_SCSI;
+    /* test the flag bit with '&'; the original '*' (multiply) was true for
+     * almost any flags value and disabled disconnection on most commands */
+    if (csio->ccb_h.flags & CAM_DIS_DISCONNECT)
+	ss->command_control.disable_disconnect = 1;
+    if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+	ss->command_control.data_direction = MLY_CCB_WRITE;
+    ss->data_size = csio->dxfer_len;
+    ss->addr.phys.lun = csio->ccb_h.target_lun;
+    ss->addr.phys.target = csio->ccb_h.target_id;
+    /* channel number was stashed in the CCB by the action handler */
+    ss->addr.phys.channel = csio->ccb_h.sim_priv.entries[0].field;
+    /* convert the CAM timeout (milliseconds) to the coarsest unit that fits */
+    if (csio->ccb_h.timeout < (60 * 1000)) {
+	ss->timeout.value = csio->ccb_h.timeout / 1000;
+	ss->timeout.scale = MLY_TIMEOUT_SECONDS;
+    } else if (csio->ccb_h.timeout < (60 * 60 * 1000)) {
+	ss->timeout.value = csio->ccb_h.timeout / (60 * 1000);
+	ss->timeout.scale = MLY_TIMEOUT_MINUTES;
+    } else {
+	ss->timeout.value = csio->ccb_h.timeout / (60 * 60 * 1000);	/* overflow? */
+	ss->timeout.scale = MLY_TIMEOUT_HOURS;
+    }
+    ss->maximum_sense_size = csio->sense_len;
+    ss->cdb_length = csio->cdb_len;
+    /* the CDB may be passed by pointer or stored inline in the CCB */
+    if (csio->ccb_h.flags & CAM_CDB_POINTER) {
+	bcopy(csio->cdb_io.cdb_ptr, ss->cdb, csio->cdb_len);
+    } else {
+	bcopy(csio->cdb_io.cdb_bytes, ss->cdb, csio->cdb_len);
+    }
+
+out:
+    if (error != 0) {
+	/* back out: free the command and return the CCB to the work queue */
+	if (mc != NULL) {
+	    mly_release_command(mc);
+	    mc = NULL;
+	}
+	if (csio != NULL)
+	    mly_requeue_ccb(sc, (union ccb *)csio);
+    }
+    *mcp = mc;
+    return(error);
+}
+
+/********************************************************************************
+ * Handle completion of a command - pass results back through the CCB
+ */
+static void
+mly_cam_complete(struct mly_command *mc)
+{
+    struct mly_softc *sc = mc->mc_sc;
+    struct ccb_scsiio *csio = (struct ccb_scsiio *)mc->mc_private;
+    struct scsi_inquiry_data *inq = (struct scsi_inquiry_data *)csio->data_ptr;
+    struct mly_btl *btl;
+    u_int8_t cmd;
+    int bus, target;
+
+    debug_called(2);
+
+    /* map the controller's status into the CCB and CAM status fields */
+    csio->scsi_status = mc->mc_status;
+    switch(mc->mc_status) {
+    case SCSI_STATUS_OK:
+	/*
+	 * In order to report logical device type and status, we overwrite
+	 * the result of the INQUIRY command to logical devices.
+	 */
+	bus = csio->ccb_h.sim_priv.entries[0].field;
+	/* channels past the physical ones are logical channels */
+	if (bus >= sc->mly_controllerinfo->physical_channels_present) {
+	    /* fetch the opcode from wherever the CDB was supplied */
+	    if (csio->ccb_h.flags & CAM_CDB_POINTER) {
+		cmd = *csio->cdb_io.cdb_ptr;
+	    } else {
+		cmd = csio->cdb_io.cdb_bytes[0];
+	    }
+	    if (cmd == INQUIRY) {
+		/* substitute our device type/state strings for the vendor data */
+		target = csio->ccb_h.target_id;
+		btl = &sc->mly_btl[bus][target];
+		padstr(inq->vendor, mly_describe_code(mly_table_device_type, btl->mb_type), 8);
+		padstr(inq->product, mly_describe_code(mly_table_device_state, btl->mb_state), 16);
+		padstr(inq->revision, "", 4);
+	    }
+	}
+
+	debug(2, "SCSI_STATUS_OK");
+	csio->ccb_h.status = CAM_REQ_CMP;
+	break;
+
+    case SCSI_STATUS_CHECK_COND:
+	debug(2, "SCSI_STATUS_CHECK_COND sense %d resid %d", mc->mc_sense, mc->mc_resid);
+	csio->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+	/* autosense: copy the returned sense data out of the command packet
+	 * area (NOTE(review): assumes the controller deposits sense there —
+	 * confirm against the command packet layout) */
+	bzero(&csio->sense_data, SSD_FULL_SIZE);
+	bcopy(mc->mc_packet, &csio->sense_data, mc->mc_sense);
+	csio->sense_len = mc->mc_sense;
+	csio->ccb_h.status |= CAM_AUTOSNS_VALID;
+	csio->resid = mc->mc_resid;	/* XXX this is a signed value... */
+	break;
+
+    case SCSI_STATUS_BUSY:
+	debug(2, "SCSI_STATUS_BUSY");
+	csio->ccb_h.status = CAM_SCSI_BUSY;
+	break;
+
+    default:
+	debug(2, "unknown status 0x%x", csio->scsi_status);
+	csio->ccb_h.status = CAM_REQ_CMP_ERR;
+	break;
+    }
+    /* hand the CCB back to CAM, then recycle the command */
+    xpt_done((union ccb *)csio);
+    mly_release_command(mc);
+}
+
+/********************************************************************************
+ * Find a peripheral attached at (bus),(target)
+ */
+static struct cam_periph *
+mly_find_periph(struct mly_softc *sc, int bus, int target)
+{
+    struct cam_periph *periph;
+    struct cam_path *path;
+    int status;
+
+    /* build a path to (bus):(target), lun 0, on the SIM owning that channel */
+    status = xpt_create_path(&path, NULL, cam_sim_path(sc->mly_cam_sim[bus]), target, 0);
+    if (status == CAM_REQ_CMP) {
+	/* NULL name matches any peripheral driver attached at this path */
+	periph = cam_periph_find(path, NULL);
+	xpt_free_path(path);
+    } else {
+	periph = NULL;
+    }
+    /* returns NULL if no path could be built or nothing is attached */
+    return(periph);
+}
+
+/********************************************************************************
+ * Name the device at (bus)(target)
+ */
+int
+mly_name_device(struct mly_softc *sc, int bus, int target)
+{
+    struct cam_periph *periph;
+
+    /* record the CAM peripheral name (e.g. "da0") for this bus/target, or
+     * an empty string if nothing is attached there */
+    if ((periph = mly_find_periph(sc, bus, target)) != NULL) {
+	/* NOTE(review): unbounded sprintf — assumes mb_name is large enough
+	 * for any periph_name + unit number; confirm against mlyvar.h */
+	sprintf(sc->mly_btl[bus][target].mb_name, "%s%d", periph->periph_name, periph->unit_number);
+	return(0);
+    }
+    sc->mly_btl[bus][target].mb_name[0] = 0;
+    return(ENOENT);
+}
diff --git a/sys/dev/mly/mly_pci.c b/sys/dev/mly/mly_pci.c
new file mode 100644
index 0000000..c0dbd45
--- /dev/null
+++ b/sys/dev/mly/mly_pci.c
@@ -0,0 +1,590 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/devicestat.h>
+#include <sys/disk.h>
+
+#include <machine/bus_memio.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <pci/pcireg.h>
+#include <pci/pcivar.h>
+
+#include <dev/mly/mlyreg.h>
+#include <dev/mly/mlyvar.h>
+
+static int mly_pci_probe(device_t dev);
+static int mly_pci_attach(device_t dev);
+static int mly_pci_detach(device_t dev);
+static int mly_pci_shutdown(device_t dev);
+static int mly_pci_suspend(device_t dev);
+static int mly_pci_resume(device_t dev);
+static void mly_pci_intr(void *arg);
+
+static int mly_sg_map(struct mly_softc *sc);
+static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
+static int mly_mmbox_map(struct mly_softc *sc);
+static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
+static void mly_free_command_cluster(struct mly_command_cluster *mcc);
+
+/* newbus device method dispatch table for the mly PCI driver */
+static device_method_t mly_methods[] = {
+    /* Device interface */
+    DEVMETHOD(device_probe,	mly_pci_probe),
+    DEVMETHOD(device_attach,	mly_pci_attach),
+    DEVMETHOD(device_detach,	mly_pci_detach),
+    DEVMETHOD(device_shutdown,	mly_pci_shutdown),
+    DEVMETHOD(device_suspend,	mly_pci_suspend),
+    DEVMETHOD(device_resume,	mly_pci_resume),
+    { 0, 0 }
+};
+
+/* driver description: name, methods, and per-device softc size */
+static driver_t mly_pci_driver = {
+	"mly",
+	mly_methods,
+	sizeof(struct mly_softc)
+};
+
+/* register the driver on the PCI bus */
+static devclass_t	mly_devclass;
+DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0);
+
+/*
+ * Supported controller table.  A subvendor of 0 acts as a wildcard in
+ * mly_pci_probe() (subvendor/subdevice are only compared when nonzero);
+ * hwif selects the register layout in mly_pci_attach().
+ */
+struct mly_ident
+{
+    u_int16_t		vendor;
+    u_int16_t		device;
+    u_int16_t		subvendor;
+    u_int16_t		subdevice;
+    int			hwif;
+    char		*desc;
+} mly_identifiers[] = {
+    {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"},
+    {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"},
+    {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX,    "Mylex AcceleRAID 352"},
+    {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX,    "Mylex AcceleRAID 170"},
+    {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX,    "Mylex AcceleRAID 160"},
+    {0, 0, 0, 0, 0, 0}
+};
+
+/********************************************************************************
+ ********************************************************************************
+ Bus Interface
+ ********************************************************************************
+ ********************************************************************************/
+
+static int
+mly_pci_probe(device_t dev)
+{
+    struct mly_ident	*entry;
+    u_int16_t		vendor, device, subvendor, subdevice;
+
+    debug_called(1);
+
+    /* fetch the device's PCI identity once, up front */
+    vendor    = pci_get_vendor(dev);
+    device    = pci_get_device(dev);
+    subvendor = pci_get_subvendor(dev);
+    subdevice = pci_get_subdevice(dev);
+
+    /* scan the identifier table; subvendor 0 acts as a wildcard */
+    for (entry = mly_identifiers; entry->vendor != 0; entry++) {
+	if (entry->vendor != vendor)
+	    continue;
+	if (entry->device != device)
+	    continue;
+	if ((entry->subvendor != 0) &&
+	    ((entry->subvendor != subvendor) || (entry->subdevice != subdevice)))
+	    continue;
+	device_set_desc(dev, entry->desc);
+	return(-10);	/* allow room to be overridden */
+    }
+    return(ENXIO);
+}
+
+static int
+mly_pci_attach(device_t dev)
+{
+    struct mly_softc	*sc;
+    int			i, error;
+    u_int32_t		command;
+
+    debug_called(1);
+
+    /*
+     * Initialise softc.
+     */
+    sc = device_get_softc(dev);
+    bzero(sc, sizeof(*sc));
+    sc->mly_dev = dev;
+
+#ifdef MLY_DEBUG
+    /* expose unit 0's softc to the debugger */
+    if (device_get_unit(sc->mly_dev) == 0)
+	mly_softc0 = sc;
+#endif
+
+    /* assume failure is 'not configured' */
+    error = ENXIO;
+
+    /*
+     * Verify that the adapter is correctly set up in PCI space.
+     */
+    /* enable busmastering, then read back to confirm the write stuck */
+    command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
+    command |= PCIM_CMD_BUSMASTEREN;
+    pci_write_config(dev, PCIR_COMMAND, command, 2);
+    command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
+    if (!(command & PCIM_CMD_BUSMASTEREN)) {
+	mly_printf(sc, "can't enable busmaster feature\n");
+	goto fail;
+    }
+    if ((command & PCIM_CMD_MEMEN) == 0) {
+	mly_printf(sc, "memory window not available\n");
+	goto fail;
+    }
+
+    /*
+     * Allocate the PCI register window.
+     */
+    sc->mly_regs_rid = PCIR_MAPS;	/* first base address register */
+    if ((sc->mly_regs_resource = bus_alloc_resource(sc->mly_dev, SYS_RES_MEMORY, &sc->mly_regs_rid,
+						    0, ~0, 1, RF_ACTIVE)) == NULL) {
+	mly_printf(sc, "can't allocate register window\n");
+	goto fail;
+    }
+    /* cache the tag/handle used by the register access macros */
+    sc->mly_btag = rman_get_bustag(sc->mly_regs_resource);
+    sc->mly_bhandle = rman_get_bushandle(sc->mly_regs_resource);
+
+    /*
+     * Allocate and connect our interrupt.
+     */
+    sc->mly_irq_rid = 0;
+    if ((sc->mly_irq = bus_alloc_resource(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid,
+					  0, ~0, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
+	mly_printf(sc, "can't allocate interrupt\n");
+	goto fail;
+    }
+    if (bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM, mly_pci_intr, sc, &sc->mly_intr)) {
+	mly_printf(sc, "can't set up interrupt\n");
+	goto fail;
+    }
+
+    /* assume failure is 'out of memory' */
+    error = ENOMEM;
+
+    /*
+     * Allocate the parent bus DMA tag appropriate for our PCI interface.
+     *
+     * Note that all of these controllers are 64-bit capable.
+     */
+    if (bus_dma_tag_create(NULL, 			/* parent */
+			   1, 0, 			/* alignment, boundary */
+			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
+			   BUS_SPACE_MAXADDR, 		/* highaddr */
+			   NULL, NULL, 			/* filter, filterarg */
+			   MAXBSIZE, MLY_MAXSGENTRIES,	/* maxsize, nsegments */
+			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
+			   BUS_DMA_ALLOCNOW,		/* flags */
+			   &sc->mly_parent_dmat)) {
+	mly_printf(sc, "can't allocate parent DMA tag\n");
+	goto fail;
+    }
+
+    /*
+     * Create DMA tag for mapping buffers into controller-addressable space.
+     */
+    if (bus_dma_tag_create(sc->mly_parent_dmat, 	/* parent */
+			   1, 0, 			/* alignment, boundary */
+			   BUS_SPACE_MAXADDR,		/* lowaddr */
+			   BUS_SPACE_MAXADDR, 		/* highaddr */
+			   NULL, NULL, 			/* filter, filterarg */
+			   MAXBSIZE, MLY_MAXSGENTRIES,	/* maxsize, nsegments */
+			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
+			   0,				/* flags */
+			   &sc->mly_buffer_dmat)) {
+	mly_printf(sc, "can't allocate buffer DMA tag\n");
+	goto fail;
+    }
+
+    /*
+     * Initialise the DMA tag for command packets.
+     */
+    /* one contiguous region holds a whole cluster of command packets */
+    if (bus_dma_tag_create(sc->mly_parent_dmat,		/* parent */
+			   1, 0, 			/* alignment, boundary */
+			   BUS_SPACE_MAXADDR,		/* lowaddr */
+			   BUS_SPACE_MAXADDR, 		/* highaddr */
+			   NULL, NULL, 			/* filter, filterarg */
+			   sizeof(union mly_command_packet) * MLY_CMD_CLUSTERCOUNT, 1,	/* maxsize, nsegments */
+			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
+			   0,				/* flags */
+			   &sc->mly_packet_dmat)) {
+	mly_printf(sc, "can't allocate command packet DMA tag\n");
+	goto fail;
+    }
+
+    /*
+     * Detect the hardware interface version
+     */
+    /* re-scan the ident table (vendor/device only) to pick the register map */
+    for (i = 0; mly_identifiers[i].vendor != 0; i++) {
+	if ((mly_identifiers[i].vendor == pci_get_vendor(dev)) &&
+	    (mly_identifiers[i].device == pci_get_device(dev))) {
+	    sc->mly_hwif = mly_identifiers[i].hwif;
+	    switch(sc->mly_hwif) {
+	    case MLY_HWIF_I960RX:
+		debug(2, "set hardware up for i960RX");
+		sc->mly_doorbell_true = 0x00;
+		sc->mly_command_mailbox =  MLY_I960RX_COMMAND_MAILBOX;
+		sc->mly_status_mailbox =   MLY_I960RX_STATUS_MAILBOX;
+		sc->mly_idbr =             MLY_I960RX_IDBR;
+		sc->mly_odbr =             MLY_I960RX_ODBR;
+		sc->mly_error_status =     MLY_I960RX_ERROR_STATUS;
+		sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
+		sc->mly_interrupt_mask =   MLY_I960RX_INTERRUPT_MASK;
+		break;
+	    case MLY_HWIF_STRONGARM:
+		debug(2, "set hardware up for StrongARM");
+		sc->mly_doorbell_true = 0xff;		/* doorbell 'true' is 0 */
+		sc->mly_command_mailbox =  MLY_STRONGARM_COMMAND_MAILBOX;
+		sc->mly_status_mailbox =   MLY_STRONGARM_STATUS_MAILBOX;
+		sc->mly_idbr =             MLY_STRONGARM_IDBR;
+		sc->mly_odbr =             MLY_STRONGARM_ODBR;
+		sc->mly_error_status =     MLY_STRONGARM_ERROR_STATUS;
+		sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
+		sc->mly_interrupt_mask =   MLY_STRONGARM_INTERRUPT_MASK;
+		break;
+	    }
+	    break;
+	}
+    }
+
+    /*
+     * Create the scatter/gather mappings.
+     */
+    if ((error = mly_sg_map(sc)))
+	goto fail;
+
+    /*
+     * Allocate and map the memory mailbox
+     */
+    if ((error = mly_mmbox_map(sc)))
+	goto fail;
+
+    /*
+     * Do bus-independent initialisation.
+     */
+    if ((error = mly_attach(sc)))
+	goto fail;
+
+    return(0);
+
+fail:
+    /* mly_free() tolerates partially-initialised state (NULL checks) */
+    mly_free(sc);
+    return(error);
+}
+
+/********************************************************************************
+ * Disconnect from the controller completely, in preparation for unload.
+ */
+static int
+mly_pci_detach(device_t dev)
+{
+    struct mly_softc	*sc = device_get_softc(dev);
+    int			error;
+
+    debug_called(1);
+
+    /* refuse to detach while the control device is open */
+    if (sc->mly_state & MLY_STATE_OPEN)
+	return(EBUSY);
+
+    /* quiesce the controller first; bail if that fails */
+    error = mly_pci_shutdown(dev);
+    if (error != 0)
+	return(error);
+
+    /* release all resources */
+    mly_free(sc);
+    return(0);
+}
+
+/********************************************************************************
+ * Bring the controller down to a dormant state and detach all child devices.
+ *
+ * This function is called before detach or system shutdown.
+ *
+ * Note that we can assume that the camq on the controller is empty, as we won't
+ * allow shutdown if any device is open.
+ */
+static int
+mly_pci_shutdown(device_t dev)
+{
+    debug_called(1);
+
+    /* bring the controller down to a dormant state */
+    mly_detach(device_get_softc(dev));
+    return(0);
+}
+
+/********************************************************************************
+ * Bring the controller to a quiescent state, ready for system suspend.
+ *
+ * We can't assume that the controller is not active at this point, so we need
+ * to mask interrupts.
+ */
+static int
+mly_pci_suspend(device_t dev)
+{
+    struct mly_softc	*sc = device_get_softc(dev);
+    int			s;
+
+    debug_called(1);
+    /* block CAM interrupts while we take the controller down */
+    s = splcam();
+    mly_detach(sc);
+    splx(s);
+    return(0);
+}
+
+/********************************************************************************
+ * Bring the controller back to a state ready for operation.
+ */
+static int
+mly_pci_resume(device_t dev)
+{
+    struct mly_softc	*sc = device_get_softc(dev);
+
+    debug_called(1);
+    /* clear the suspend flag (presumably set during suspend/detach —
+     * NOTE(review): the setter is not visible here) and re-enable interrupts */
+    sc->mly_state &= ~MLY_STATE_SUSPEND;
+    MLY_UNMASK_INTERRUPTS(sc);
+    return(0);
+}
+
+/*******************************************************************************
+ * Take an interrupt, or be poked by other code to look for interrupt-worthy
+ * status.
+ */
+static void
+mly_pci_intr(void *arg)
+{
+    struct mly_softc	*sc = (struct mly_softc *)arg;
+
+    debug_called(3);
+
+    /* collect finished commands, queue anything waiting */
+    mly_done(sc);
+}
+
+/********************************************************************************
+ ********************************************************************************
+  Bus-dependent Resource Management
+ ********************************************************************************
+ ********************************************************************************/
+
+/********************************************************************************
+ * Allocate memory for the scatter/gather tables
+ */
+static int
+mly_sg_map(struct mly_softc *sc)
+{
+    size_t	segsize;
+
+    debug_called(1);
+
+    /*
+     * Create a single tag describing a region large enough to hold all of
+     * the s/g lists we will need.
+     */
+    segsize = sizeof(struct mly_sg_entry) * MLY_MAXCOMMANDS * MLY_MAXSGENTRIES;
+    if (bus_dma_tag_create(sc->mly_parent_dmat,		/* parent */
+			   1, 0, 			/* alignment, boundary */
+			   BUS_SPACE_MAXADDR,		/* lowaddr */
+			   BUS_SPACE_MAXADDR, 		/* highaddr */
+			   NULL, NULL, 			/* filter, filterarg */
+			   segsize, 1,			/* maxsize, nsegments */
+			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
+			   0,				/* flags */
+			   &sc->mly_sg_dmat)) {
+	mly_printf(sc, "can't allocate scatter/gather DMA tag\n");
+	return(ENOMEM);
+    }
+
+    /*
+     * Allocate enough s/g maps for all commands and permanently map them into
+     * controller-visible space.
+     *
+     * XXX this assumes we can get enough space for all the s/g maps in one
+     * contiguous slab.
+     */
+    if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table, BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) {
+	mly_printf(sc, "can't allocate s/g table\n");
+	return(ENOMEM);
+    }
+    /* the callback records mly_sg_busaddr; NOTE(review): the load's return
+     * value is not checked — with a single bounceable segment this load
+     * should complete synchronously, but confirm */
+    bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table, segsize, mly_sg_map_helper, sc, 0);
+    return(0);
+}
+
+/********************************************************************************
+ * Save the physical address of the base of the s/g table.
+ */
+static void
+mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+    struct mly_softc	*sc = (struct mly_softc *)arg;
+
+    debug_called(2);
+
+    /* table was allocated as one segment; record its bus-space address */
+    sc->mly_sg_busaddr = segs[0].ds_addr;
+}
+
+/********************************************************************************
+ * Allocate memory for the memory-mailbox interface
+ */
+static int
+mly_mmbox_map(struct mly_softc *sc)
+{
+
+    /*
+     * Create a DMA tag for a single contiguous region large enough for the
+     * memory mailbox structure.
+     */
+    if (bus_dma_tag_create(sc->mly_parent_dmat,		/* parent */
+			   1, 0, 			/* alignment, boundary */
+			   BUS_SPACE_MAXADDR,		/* lowaddr */
+			   BUS_SPACE_MAXADDR, 		/* highaddr */
+			   NULL, NULL, 			/* filter, filterarg */
+			   sizeof(struct mly_mmbox), 1,	/* maxsize, nsegments */
+			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
+			   0,				/* flags */
+			   &sc->mly_mmbox_dmat)) {
+	mly_printf(sc, "can't allocate memory mailbox DMA tag\n");
+	return(ENOMEM);
+    }
+
+    /*
+     * Allocate the buffer
+     */
+    if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) {
+	mly_printf(sc, "can't allocate memory mailbox\n");
+	return(ENOMEM);
+    }
+    /* the helper only records the bus address, so zeroing the mailbox after
+     * the load is safe; NOTE(review): load return value is unchecked */
+    bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox, sizeof(struct mly_mmbox),
+		    mly_mmbox_map_helper, sc, 0);
+    bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox));
+    return(0);
+
+}
+
+/********************************************************************************
+ * Save the physical address of the memory mailbox
+ */
+static void
+mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+    struct mly_softc	*sc = (struct mly_softc *)arg;
+
+    debug_called(2);
+
+    /* single-segment load: save the mailbox's controller-visible address */
+    sc->mly_mmbox_busaddr = segs[0].ds_addr;
+}
+
+/********************************************************************************
+ * Free all of the resources associated with (sc)
+ *
+ * Should not be called if the controller is active.
+ */
+void
+mly_free(struct mly_softc *sc)
+{
+    struct mly_command_cluster	*mcc;
+
+    debug_called(1);
+
+    /*
+     * Teardown proceeds roughly in reverse of attach order; every step
+     * checks for NULL/unset state so this is safe after a partial attach.
+     */
+
+    /* detach from CAM */
+    mly_cam_detach(sc);
+
+    /* throw away any command buffers */
+    while ((mcc = mly_dequeue_cluster(sc)) != NULL)
+	mly_free_command_cluster(mcc);
+
+    /* throw away the controllerinfo structure */
+    if (sc->mly_controllerinfo != NULL)
+	free(sc->mly_controllerinfo, M_DEVBUF);
+
+    /* throw away the controllerparam structure */
+    if (sc->mly_controllerparam != NULL)
+	free(sc->mly_controllerparam, M_DEVBUF);
+
+    /* destroy data-transfer DMA tag */
+    if (sc->mly_buffer_dmat)
+	bus_dma_tag_destroy(sc->mly_buffer_dmat);
+
+    /* free and destroy DMA memory and tag for s/g lists */
+    if (sc->mly_sg_table) {
+	bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap);
+	bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap);
+    }
+    if (sc->mly_sg_dmat)
+	bus_dma_tag_destroy(sc->mly_sg_dmat);
+
+    /* free and destroy DMA memory and tag for memory mailbox */
+    if (sc->mly_mmbox) {
+	bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap);
+	bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap);
+    }
+    if (sc->mly_mmbox_dmat)
+	bus_dma_tag_destroy(sc->mly_mmbox_dmat);
+
+    /* disconnect the interrupt handler */
+    if (sc->mly_intr)
+	bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr);
+    if (sc->mly_irq != NULL)
+	bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq);
+
+    /* destroy the parent DMA tag */
+    if (sc->mly_parent_dmat)
+	bus_dma_tag_destroy(sc->mly_parent_dmat);
+
+    /* release the register window mapping */
+    if (sc->mly_regs_resource != NULL)
+	bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource);
+}
+
+/********************************************************************************
+ * Free a command cluster.
+ */
+static void
+mly_free_command_cluster(struct mly_command_cluster *mcc)
+{
+    /* every command in the cluster carries a back-pointer to the softc */
+    struct mly_softc	*sc = mcc->mcc_command[0].mc_sc;
+    int			i;
+
+    debug_called(1);
+
+    /* destroy each command's data-transfer map */
+    for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++)
+	bus_dmamap_destroy(sc->mly_buffer_dmat, mcc->mcc_command[i].mc_datamap);
+
+    /* release the cluster's shared packet memory, then the cluster itself */
+    bus_dmamap_unload(sc->mly_packet_dmat, mcc->mcc_packetmap);
+    bus_dmamem_free(sc->mly_packet_dmat, mcc->mcc_packet, mcc->mcc_packetmap);
+    free(mcc, M_DEVBUF);
+}
+
diff --git a/sys/dev/mly/mly_tables.h b/sys/dev/mly/mly_tables.h
new file mode 100644
index 0000000..5a83f17
--- /dev/null
+++ b/sys/dev/mly/mly_tables.h
@@ -0,0 +1,335 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Lookup table for code-to-text translations.
+ */
+/* one code-to-string mapping; tables end with a {NULL, 0} sentinel entry */
+struct mly_code_lookup {
+    char	*string;
+    u_int32_t	code;
+};
+
+extern char *mly_describe_code(struct mly_code_lookup *table, u_int32_t code);
+
+#ifndef MLY_DEFINE_TABLES
+extern struct mly_code_lookup mly_table_bustype[];
+extern struct mly_code_lookup mly_table_controllertype[];
+extern struct mly_code_lookup mly_table_oemname[];
+extern struct mly_code_lookup mly_table_memorytype[];
+extern struct mly_code_lookup mly_table_cputype[];
+extern struct mly_code_lookup mly_table_event[];
+extern struct mly_code_lookup mly_table_device_state[];
+extern struct mly_code_lookup mly_table_device_type[];
+extern struct mly_code_lookup mly_table_stripe_size[];
+extern struct mly_code_lookup mly_table_cacheline_size[];
+
+#else /* MLY_DEFINE_TABLES */
+
+/********************************************************************************
+ * Look up a text description of a numeric code and return a pointer to same.
+ */
+char *
+mly_describe_code(struct mly_code_lookup *table, u_int32_t code)
+{
+    int		i;
+
+    /* scan up to the NULL-string sentinel, returning on a code match */
+    for (i = 0; table[i].string != NULL; i++)
+	if (table[i].code == code)
+	    return(table[i].string);
+    /*
+     * No match: the loop stopped on the {NULL, 0} sentinel, and every table
+     * below places an "unknown ..." fallback entry immediately after it,
+     * so table[i+1] is that fallback string.
+     */
+    return(table[i+1].string);
+}
+
+/* bus type codes; entry after the sentinel is the no-match fallback */
+struct mly_code_lookup mly_table_bustype[] = {
+    {"SCSI",		0x00},
+    {"FC-AL",		0x01},
+    {"PCI",		0x03},
+    {NULL,		0},
+    {"unknown bus",	0}
+};
+
+/* controller model codes; entry after the sentinel is the no-match fallback */
+struct mly_code_lookup mly_table_controllertype[] = {
+#if 0	/* not supported by this driver */
+    {"DAC960E",		0x01},	/* EISA */
+    {"DAC960M",		0x08},	/* MCA */
+    {"DAC960PD",	0x10},	/* PCI Dual */
+    {"DAC960PL",	0x11},	/* PCU low-cost */
+    {"DAC960PDU",	0x12},	/* PD Ultra */
+    {"DAC960PE",	0x13},	/* Peregrine low-cost */
+    {"DAC960PG",	0x14},	/* Peregrine high-performance */
+    {"DAC960PJ",	0x15},	/* Road Runner */
+    {"DAC960PTL0",	0x16},	/* Jaguar */
+    {"DAC960PR",	0x17},	/* Road Runner (again?) */
+    {"DAC960PRL",	0x18},	/* Tomcat */
+    {"DAC960PT",	0x19},	/* Road Runner (yet again?) */
+    {"DAC1164P",	0x1a},	/* Little Apple */
+    {"DAC960PTL1",	0x1b},	/* Jaguar+ */
+#endif
+    {"EXR2000P",	0x1c},	/* Big Apple */
+    {"EXR3000P",	0x1d},	/* Fibre Apple */
+    {"AcceleRAID 352",	0x1e},	/* Leopard */
+    {"AcceleRAID 170",	0x1f},	/* Lynx */
+    {"AcceleRAID 160",	0x20},	/* Bobcat */
+    {NULL,		0},
+    {"unknown adapter",	0}
+};
+
+/* OEM branding codes; entry after the sentinel is the no-match fallback */
+struct mly_code_lookup mly_table_oemname[] = {
+    {"Mylex",		MLY_OEM_MYLEX},
+    {"IBM",		MLY_OEM_IBM},
+    {"Hewlett-Packard",	MLY_OEM_HP},
+    {"DEC/Compaq",	MLY_OEM_DEC},
+    {"Siemens",		MLY_OEM_SIEMENS},
+    {"Intel",		MLY_OEM_INTEL},
+    {NULL,		0},
+    {"unknown OEM",	0}
+};
+
+/* cache memory type codes; entry after the sentinel is the no-match fallback */
+struct mly_code_lookup mly_table_memorytype[] = {
+    {"DRAM",		0x01},
+    {"EDRAM",		0x02},
+    {"EDO RAM",		0x03},
+    {"SDRAM",		0x04},
+    {NULL,		0},
+    {"unknown memory",	0}
+};
+
+/* onboard CPU type codes; entry after the sentinel is the no-match fallback */
+struct mly_code_lookup mly_table_cputype[] = {
+    {"i960CA",		0x01},
+    {"i960RD",		0x02},
+    {"i960RN",		0x03},
+    {"i960RP",		0x04},
+    {"NorthBay(?)",	0x05},
+    {"StrongArm",	0x06},
+    {"i960RM",		0x07},
+    {NULL,		0},
+    {"unknown CPU",	0}
+};
+
+/*
+ * This table is directly derived from the corresponding table in the
+ * Linux driver, and uses a derivative encoding for simplicity's sake.
+ *
+ * The first character of the string determines the format of the message.
+ *
+ * p "physical device <channel>:<target> <text>" (physical device status)
+ * s "physical device <channel>:<target> <text>" (scsi message or error)
+ * " sense key <key> asc <asc> ascq <ascq>"
+ * " info <info> csi <csi>"
+ * l "logical drive <unit>: <text>" (logical device status)
+ * m "logical drive <unit>: <text>" (logical device message)
+ *
+ * Messages which are typically suppressed have the first character capitalised.
+ * These messages will only be printed if bootverbose is set.
+ *
+ * The second character in the string indicates an action to be taken as a
+ * result of the event.
+ *
+ * r rescan the device for possible state change
+ *
+ */
+struct mly_code_lookup mly_table_event[] = {
+ /* physical device events (0x0000 - 0x007f) */
+ {"pr online", 0x0001},
+ {"pr standby", 0x0002},
+ {"p automatic rebuild started", 0x0005},
+ {"p manual rebuild started", 0x0006},
+ {"pr rebuild completed", 0x0007},
+ {"pr rebuild cancelled", 0x0008},
+ {"pr rebuild failed for unknown reasons", 0x0009},
+ {"pr rebuild failed due to new physical device", 0x000a},
+ {"pr rebuild failed due to logical drive failure", 0x000b},
+ {"sr offline", 0x000c},
+ {"pr found", 0x000d},
+ {"pr gone", 0x000e},
+ {"p unconfigured", 0x000f},
+ {"p expand capacity started", 0x0010},
+ {"pr expand capacity completed", 0x0011},
+ {"pr expand capacity failed", 0x0012},
+ {"p parity error", 0x0016},
+ {"p soft error", 0x0017},
+ {"p miscellaneous error", 0x0018},
+ {"p reset", 0x0019},
+ {"p active spare found", 0x001a},
+ {"p warm spare found", 0x001b},
+ {"s sense data received", 0x001c},
+ {"p initialization started", 0x001d},
+ {"pr initialization completed", 0x001e},
+ {"pr initialization failed", 0x001f},
+ {"pr initialization cancelled", 0x0020},
+ {"P write recovery failed", 0x0021},
+ {"p scsi bus reset failed", 0x0022},
+ {"p double check condition", 0x0023},
+ {"p device cannot be accessed", 0x0024},
+ {"p gross error on scsi processor", 0x0025},
+ {"p bad tag from device", 0x0026},
+ {"p command timeout", 0x0027},
+ {"pr system reset", 0x0028},
+ {"p busy status or parity error", 0x0029},
+ {"pr host set device to failed state", 0x002a},
+ {"pr selection timeout", 0x002b},
+ {"p scsi bus phase error", 0x002c},
+ {"pr device returned unknown status", 0x002d},
+ {"pr device not ready", 0x002e},
+ {"p device not found at startup", 0x002f},
+ {"p COD write operation failed", 0x0030},
+ {"p BDT write operation failed", 0x0031},
+ {"p missing at startup", 0x0039},
+ {"p start rebuild failed due to physical drive too small", 0x003a},
+ /* logical device events (0x0080 - 0x00ff) */
+ {"m consistency check started", 0x0080},
+ {"mr consistency check completed", 0x0081},
+ {"mr consistency check cancelled", 0x0082},
+ {"mr consistency check completed with errors", 0x0083},
+ {"mr consistency check failed due to logical drive failure", 0x0084},
+ {"mr consistency check failed due to physical device failure", 0x0085},
+ {"lr offline", 0x0086},
+ {"lr critical", 0x0087},
+ {"lr online", 0x0088},
+ {"m automatic rebuild started", 0x0089},
+ {"m manual rebuild started", 0x008a},
+ {"mr rebuild completed", 0x008b},
+ {"mr rebuild cancelled", 0x008c},
+ {"mr rebuild failed for unknown reasons", 0x008d},
+ {"mr rebuild failed due to new physical device", 0x008e},
+ {"mr rebuild failed due to logical drive failure", 0x008f},
+ {"l initialization started", 0x0090},
+ {"lr initialization completed", 0x0091},
+ {"lr initialization cancelled", 0x0092},
+ {"lr initialization failed", 0x0093},
+ {"lr found", 0x0094},
+ {"lr gone", 0x0095},
+ {"l expand capacity started", 0x0096},
+ {"lr expand capacity completed", 0x0097},
+ {"lr expand capacity failed", 0x0098},
+ {"l bad block found", 0x0099},
+ {"lr size changed", 0x009a},
+ {"lr type changed", 0x009b},
+ {"l bad data block found", 0x009c},
+ {"l read of data block in bdt", 0x009e},
+ {"l write back data for disk block lost", 0x009f},
+ /* enclosure management events (0x0100 - 0x017f) */
+ {"e enclosure %d fan %d failed", 0x0140},
+ {"e enclosure %d fan %d ok", 0x0141},
+ {"e enclosure %d fan %d not present", 0x0142},
+ {"e enclosure %d power supply %d failed", 0x0143},
+ {"e enclosure %d power supply %d ok", 0x0144},
+ {"e enclosure %d power supply %d not present", 0x0145},
+ {"e enclosure %d temperature sensor %d failed", 0x0146},
+ {"e enclosure %d temperature sensor %d critical", 0x0147},
+ {"e enclosure %d temperature sensor %d ok", 0x0148},
+ {"e enclosure %d temperature sensor %d not present", 0x0149},
+ {"e enclosure %d unit %d access critical", 0x014a},
+ {"e enclosure %d unit %d access ok", 0x014b},
+ {"e enclosure %d unit %d access offline", 0x014c},
+ /* controller events (0x0180 - 0x01ff) */
+ {"c cache write back error", 0x0181},
+ {"c battery backup unit found", 0x0188},
+ {"c battery backup unit charge level low", 0x0189},
+ {"c battery backup unit charge level ok", 0x018a},
+ {"c installation aborted", 0x0193},
+ {"c mirror race recovery in progress", 0x0195},
+ {"c mirror race on critical drive", 0x0196},
+ {"c memory soft ecc error", 0x019e},
+ {"c memory hard ecc error", 0x019f},
+ {"c battery backup unit failed", 0x01a2},
+ {NULL, 0},
+ {"? unknown event code", 0}
+};
+
+/*
+ * Values here must be 16 characters or less, as they are packed into
+ * the 'product' field in the SCSI inquiry data.
+ */
+struct mly_code_lookup mly_table_device_state[] = {
+ {"offline", MLY_DEVICE_STATE_OFFLINE},
+ {"unconfigured", MLY_DEVICE_STATE_UNCONFIGURED},
+ {"online", MLY_DEVICE_STATE_ONLINE},
+ {"critical", MLY_DEVICE_STATE_CRITICAL},
+ {"writeonly", MLY_DEVICE_STATE_WRITEONLY},
+ {"standby", MLY_DEVICE_STATE_STANDBY},
+ {"missing", MLY_DEVICE_STATE_MISSING},
+ {NULL, 0},
+ {"unknown state", 0}
+};
+
+/*
+ * Values here must be 8 characters or less, as they are packed into
+ * the 'vendor' field in the SCSI inquiry data.
+ */
+struct mly_code_lookup mly_table_device_type[] = {
+ {"RAID 0", MLY_DEVICE_TYPE_RAID0},
+ {"RAID 1", MLY_DEVICE_TYPE_RAID1},
+ {"RAID 3", MLY_DEVICE_TYPE_RAID3}, /* right asymmetric parity */
+ {"RAID 5", MLY_DEVICE_TYPE_RAID5}, /* right asymmetric parity */
+ {"RAID 6", MLY_DEVICE_TYPE_RAID6}, /* Mylex RAID 6 */
+ {"RAID 7", MLY_DEVICE_TYPE_RAID7}, /* JBOD */
+ {"SPAN", MLY_DEVICE_TYPE_NEWSPAN}, /* New Mylex SPAN */
+ {"RAID 3", MLY_DEVICE_TYPE_RAID3F}, /* fixed parity */
+ {"RAID 3", MLY_DEVICE_TYPE_RAID3L}, /* left symmetric parity */
+ {"SPAN", MLY_DEVICE_TYPE_SPAN}, /* current spanning implementation */
+ {"RAID 5", MLY_DEVICE_TYPE_RAID5L}, /* left symmetric parity */
+ {"RAID E", MLY_DEVICE_TYPE_RAIDE}, /* concatenation */
+ {"PHYSICAL", MLY_DEVICE_TYPE_PHYSICAL}, /* physical device */
+ {NULL, 0},
+ {"UNKNOWN", 0}
+};
+
+struct mly_code_lookup mly_table_stripe_size[] = {
+ {"NONE", MLY_STRIPE_ZERO},
+ {"512B", MLY_STRIPE_512b},
+ {"1k", MLY_STRIPE_1k},
+ {"2k", MLY_STRIPE_2k},
+ {"4k", MLY_STRIPE_4k},
+ {"8k", MLY_STRIPE_8k},
+ {"16k", MLY_STRIPE_16k},
+ {"32k", MLY_STRIPE_32k},
+ {"64k", MLY_STRIPE_64k},
+ {"128k", MLY_STRIPE_128k},
+ {"256k", MLY_STRIPE_256k},
+ {"512k", MLY_STRIPE_512k},
+ {"1M", MLY_STRIPE_1m},
+ {NULL, 0},
+ {"unknown", 0}
+};
+
+struct mly_code_lookup mly_table_cacheline_size[] = {
+ {"NONE", MLY_CACHELINE_ZERO},
+ {"512B", MLY_CACHELINE_512b},
+ {"1k", MLY_CACHELINE_1k},
+ {"2k", MLY_CACHELINE_2k},
+ {"4k", MLY_CACHELINE_4k},
+ {"8k", MLY_CACHELINE_8k},
+ {"16k", MLY_CACHELINE_16k},
+ {"32k", MLY_CACHELINE_32k},
+ {"64k", MLY_CACHELINE_64k},
+ {NULL, 0},
+ {"unknown", 0}
+};
+
+#endif /* MLY_DEFINE_TABLES */
diff --git a/sys/dev/mly/mlyreg.h b/sys/dev/mly/mlyreg.h
new file mode 100644
index 0000000..2809eb5
--- /dev/null
+++ b/sys/dev/mly/mlyreg.h
@@ -0,0 +1,1270 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Section numbers in this document refer to the Mylex "Firmware Software Interface"
+ * document ('FSI'), revision 0.11 04/11/00 unless otherwise qualified.
+ *
+ * Reference is made to the Mylex "Programming Guide for 6.x Controllers" document
+ * ('PG6'), document #771242 revision 0.02, 04/11/00
+ *
+ * Note that fields marked N/A are not supported by the PCI controllers, but are
+ * defined here to hold place in datastructures that are shared with the SCSI
+ * controllers. Items not relevant to PCI controllers are not described here.
+ *
+ * Ordering of items in this file is a little odd due to the constraints of
+ * nested declarations.
+ */
+
+/*
+ * 2.1 (Scatter Gather List Format)
+ */
+struct mly_sg_entry {
+ u_int64_t physaddr;
+ u_int64_t length;
+} __attribute__ ((packed));
+
+/*
+ * 5.2 System Device Access
+ *
+ * This is corroborated by the layout of the MDACIOCTL_GETCONTROLLERINFO data
+ * in 21.8
+ */
+#define MLY_MAX_CHANNELS 6
+#define MLY_MAX_TARGETS 16
+#define MLY_MAX_LUNS 1
+
+/*
+ * 8.1 Different Device States
+ */
+#define MLY_DEVICE_STATE_OFFLINE 0x08 /* DEAD/OFFLINE */
+#define MLY_DEVICE_STATE_UNCONFIGURED 0x00
+#define MLY_DEVICE_STATE_ONLINE 0x01
+#define MLY_DEVICE_STATE_CRITICAL 0x09
+#define MLY_DEVICE_STATE_WRITEONLY 0x03
+#define MLY_DEVICE_STATE_STANDBY 0x21
+#define MLY_DEVICE_STATE_MISSING 0x04 /* or-ed with (ONLINE or WRITEONLY or STANDBY) */
+
+/*
+ * 8.2 Device Type Field definitions
+ */
+#define MLY_DEVICE_TYPE_RAID0 0x0 /* RAID 0 */
+#define MLY_DEVICE_TYPE_RAID1 0x1 /* RAID 1 */
+#define MLY_DEVICE_TYPE_RAID3 0x3 /* RAID 3 right asymmetric parity */
+#define MLY_DEVICE_TYPE_RAID5 0x5 /* RAID 5 right asymmetric parity */
+#define MLY_DEVICE_TYPE_RAID6 0x6 /* RAID 6 (Mylex RAID 6) */
+#define MLY_DEVICE_TYPE_RAID7 0x7 /* RAID 7 (JBOD) */
+#define MLY_DEVICE_TYPE_NEWSPAN 0x8 /* New Mylex SPAN */
+#define MLY_DEVICE_TYPE_RAID3F 0x9 /* RAID 3 fixed parity */
+#define MLY_DEVICE_TYPE_RAID3L 0xb /* RAID 3 left symmetric parity */
+#define MLY_DEVICE_TYPE_SPAN 0xc /* current spanning implementation */
+#define MLY_DEVICE_TYPE_RAID5L 0xd /* RAID 5 left symmetric parity */
+#define MLY_DEVICE_TYPE_RAIDE 0xe /* RAID E (concatenation) */
+#define MLY_DEVICE_TYPE_PHYSICAL 0xf /* physical device */
+
+/*
+ * 8.3 Stripe Size
+ */
+#define MLY_STRIPE_ZERO 0x0 /* no stripe (RAID 1, RAID 7, etc) */
+#define MLY_STRIPE_512b 0x1
+#define MLY_STRIPE_1k 0x2
+#define MLY_STRIPE_2k 0x3
+#define MLY_STRIPE_4k 0x4
+#define MLY_STRIPE_8k 0x5
+#define MLY_STRIPE_16k 0x6
+#define MLY_STRIPE_32k 0x7
+#define MLY_STRIPE_64k 0x8
+#define MLY_STRIPE_128k 0x9
+#define MLY_STRIPE_256k 0xa
+#define MLY_STRIPE_512k 0xb
+#define MLY_STRIPE_1m 0xc
+
+/*
+ * 8.4 Cacheline Size
+ */
+#define MLY_CACHELINE_ZERO 0x0 /* caching cannot be enabled */
+#define MLY_CACHELINE_512b 0x1
+#define MLY_CACHELINE_1k 0x2
+#define MLY_CACHELINE_2k 0x3
+#define MLY_CACHELINE_4k 0x4
+#define MLY_CACHELINE_8k 0x5
+#define MLY_CACHELINE_16k 0x6
+#define MLY_CACHELINE_32k 0x7
+#define MLY_CACHELINE_64k 0x8
+
+/*
+ * 8.5 Read/Write control
+ */
+#define MLY_RWCtl_INITTED (1<<7) /* if set, the logical device is initialised */
+ /* write control */
+#define MLY_RWCtl_WCD (0) /* write cache disabled */
+#define MLY_RWCtl_WDISABLE (1<<3) /* writing disabled */
+#define MLY_RWCtl_WCE (2<<3) /* write cache enabled */
+#define MLY_RWCtl_IWCE (3<<3) /* intelligent write cache enabled */
+ /* read control */
+#define MLY_RWCtl_RCD (0) /* read cache is disabled */
+#define MLY_RWCtl_RCE (1) /* read cache enabled */
+#define MLY_RWCtl_RAHEAD (2) /* readahead enabled */
+#define MLY_RWCtl_IRAHEAD (3) /* intelligent readahead enabled */
+
+/*
+ * 9.0 LUN Map Format
+ */
+struct mly_lun_map {
+ u_int8_t res1:4;
+ u_int8_t host_port_mapped:1; /* this system drive visible to host on this controller/port combination */
+ u_int8_t tid_valid:1; /* target ID valid */
+ u_int8_t hid_valid:1; /* host ID valid */
+ u_int8_t lun_valid:1; /* LUN valid */
+ u_int8_t res2;
+ u_int8_t lun; /* LUN */
+ u_int8_t tid; /* TID */
+ u_int8_t hid[32]; /* HID (one bit for each host) */
+} __attribute__ ((packed));
+
+/*
+ * 10.1 Controller Parameters
+ */
+struct mly_param_controller {
+ u_int8_t rdahen:1; /* N/A */
+ u_int8_t bilodly:1; /* N/A */
+ u_int8_t fua_disable:1;
+ u_int8_t reass1s:1; /* N/A */
+ u_int8_t truvrfy:1; /* N/A */
+ u_int8_t dwtvrfy:1; /* N/A */
+ u_int8_t background_initialisation:1;
+ u_int8_t clustering:1; /* N/A */
+
+ u_int8_t bios_disable:1;
+ u_int8_t boot_from_cdrom:1;
+ u_int8_t drive_coercion:1;
+ u_int8_t write_same_disable:1;
+ u_int8_t hba_mode:1; /* N/A */
+ u_int8_t bios_geometry:2;
+#define MLY_BIOSGEOM_2G 0x0
+#define MLY_BIOSGEOM_8G 0x1
+ u_int8_t res1:1; /* N/A */
+
+ u_int8_t res2[2]; /* N/A */
+
+ u_int8_t v_dec:1;
+ u_int8_t safte:1; /* N/A */
+ u_int8_t ses:1; /* N/A */
+ u_int8_t res3:2; /* N/A */
+ u_int8_t v_arm:1;
+ u_int8_t v_ofm:1;
+ u_int8_t res4:1; /* N/A */
+
+ u_int8_t rebuild_check_rate;
+ u_int8_t cache_line_size; /* see 8.4 */
+ u_int8_t oem_code;
+#define MLY_OEM_MYLEX 0x00
+#define MLY_OEM_IBM 0x08
+#define MLY_OEM_HP 0x0a
+#define MLY_OEM_DEC 0x0c
+#define MLY_OEM_SIEMENS 0x10
+#define MLY_OEM_INTEL 0x12
+ u_int8_t spinup_mode;
+#define MLY_SPIN_AUTO 0
+#define MLY_SPIN_PWRSPIN 1
+#define MLY_SPIN_WSSUSPIN 2
+ u_int8_t spinup_devices;
+ u_int8_t spinup_interval;
+ u_int8_t spinup_wait_time;
+
+ u_int8_t res5:3; /* N/A */
+ u_int8_t vutursns:1; /* N/A */
+ u_int8_t dccfil:1; /* N/A */
+ u_int8_t nopause:1; /* N/A */
+ u_int8_t disqfull:1; /* N/A */
+ u_int8_t disbusy:1; /* N/A */
+
+ u_int8_t res6:2; /* N/A */
+ u_int8_t failover_node_name; /* N/A */
+ u_int8_t res7:1; /* N/A */
+ u_int8_t ftopo:3; /* N/A */
+ u_int8_t disable_ups:1; /* N/A */
+
+ u_int8_t res8:1; /* N/A */
+ u_int8_t propagate_reset:1; /* N/A */
+ u_int8_t nonstd_mp_reset:1; /* N/A */
+ u_int8_t res9:5; /* N/A */
+
+ u_int8_t res10; /* N/A */
+ u_int8_t serial_port_baud_rate; /* N/A */
+ u_int8_t serial_port_control; /* N/A */
+ u_int8_t change_stripe_ok_developer_flag_only; /* N/A */
+
+ u_int8_t small_large_host_transfers:2; /* N/A */
+ u_int8_t frame_control:2; /* N/A */
+ u_int8_t pci_latency_control:2; /* N/A */
+ u_int8_t treat_lip_as_reset:1; /* N/A */
+ u_int8_t res11:1; /* N/A */
+
+ u_int8_t ms_autorest:1; /* N/A */
+ u_int8_t res12:7; /* N/A */
+
+ u_int8_t ms_aa_fsim:1; /* N/A */
+ u_int8_t ms_aa_ccach:1; /* N/A */
+ u_int8_t ms_aa_fault_signals:1; /* N/A */
+ u_int8_t ms_aa_c4_faults:1; /* N/A */
+ u_int8_t ms_aa_host_reset_delay_mask:4; /* N/A */
+
+ u_int8_t ms_flg_simplex_no_rstcom:1; /* N/A */
+ u_int8_t res13:7; /* N/A */
+
+ u_int8_t res14; /* N/A */
+ u_int8_t hardloopid[2][2]; /* N/A */
+ u_int8_t ctrlname[2][16+1]; /* N/A */
+ u_int8_t initiator_id;
+ u_int8_t startup_option;
+#define MLY_STARTUP_IF_NO_CHANGE 0x0
+#define MLY_STARTUP_IF_NO_LUN_CHANGE 0x1
+#define MLY_STARTUP_IF_NO_LUN_OFFLINE 0x2
+#define MLY_STARTUP_IF_LUN0_NO_CHANGE 0x3
+#define MLY_STARTUP_IF_LUN0_NOT_OFFLINE 0x4
+#define MLY_STARTUP_ALWAYS 0x5
+
+ u_int8_t res15[62];
+} __attribute__ ((packed));
+
+/*
+ * 10.2 Physical Device Parameters
+ */
+struct mly_param_physical_device {
+ u_int16_t tags;
+ u_int16_t speed;
+ u_int8_t width;
+ u_int8_t combing:1;
+ u_int8_t res1:7;
+ u_int8_t res2[3];
+} __attribute__ ((packed));
+
+/*
+ * 10.3 Logical Device Parameters
+ */
+struct mly_param_logical_device {
+ u_int8_t type; /* see 8.2 */
+ u_int8_t state; /* see 8.1 */
+ u_int16_t raid_device;
+ u_int8_t res1;
+ u_int8_t bios_geometry; /* BIOS control word? */
+ u_int8_t stripe_size; /* see 8.3 */
+ u_int8_t read_write_control; /* see 8.5 */
+ u_int8_t res2[8];
+} __attribute__ ((packed));
+
+/*
+ * 12.3 Health Status Buffer
+ *
+ * Pad to 128 bytes.
+ */
+struct mly_health_status {
+ u_int32_t uptime_us; /* N/A */
+ u_int32_t uptime_ms; /* N/A */
+ u_int32_t realtime; /* N/A */
+ u_int32_t res1; /* N/A */
+ u_int32_t change_counter;
+ u_int32_t res2; /* N/A */
+ u_int32_t debug_message_index; /* N/A */
+ u_int32_t bios_message_index; /* N/A */
+ u_int32_t trace_page; /* N/A */
+ u_int32_t profiler_page; /* N/A */
+ u_int32_t next_event;
+ u_int8_t res3[4 + 16 + 64]; /* N/A */
+} __attribute__ ((packed));
+
+/*
+ * 14.2 Timeout Bit Format
+ */
+struct mly_timeout {
+ u_int8_t value:6;
+ u_int8_t scale:2;
+#define MLY_TIMEOUT_SECONDS 0x0
+#define MLY_TIMEOUT_MINUTES 0x1
+#define MLY_TIMEOUT_HOURS 0x2
+} __attribute__ ((packed));
+
+/*
+ * 14.3 Operation Device
+ */
+#define MLY_OPDEVICE_PHYSICAL_DEVICE 0x0
+#define MLY_OPDEVICE_RAID_DEVICE 0x1
+#define MLY_OPDEVICE_PHYSICAL_CHANNEL 0x2
+#define MLY_OPDEVICE_RAID_CHANNEL 0x3
+#define MLY_OPDEVICE_PHYSICAL_CONTROLLER 0x4
+#define MLY_OPDEVICE_RAID_CONTROLLER 0x5
+#define MLY_OPDEVICE_CONFIGURATION_GROUP 0x10
+
+/*
+ * 14.4 Status Bit Format
+ *
+ * AKA Status Mailbox Format
+ *
+ * XXX format conflict between FSI and PG6 over the ordering of the
+ * status and sense length fields.
+ */
+struct mly_status {
+ u_int16_t command_id;
+ u_int8_t status;
+ u_int8_t sense_length;
+ int32_t residue;
+} __attribute__ ((packed));
+
+/*
+ * 14.5 Command Control Bit (CCB) format
+ *
+ * This byte is unfortunately named.
+ */
+struct mly_command_control {
+ u_int8_t force_unit_access:1;
+ u_int8_t disable_page_out:1;
+ u_int8_t res1:1;
+ u_int8_t extended_sg_table:1;
+ u_int8_t data_direction:1;
+#define MLY_CCB_WRITE 1
+#define MLY_CCB_READ 0
+ u_int8_t res2:1;
+ u_int8_t no_auto_sense:1;
+ u_int8_t disable_disconnect:1;
+} __attribute__ ((packed));
+
+/*
+ * 15.0 Commands
+ *
+ * We use the command names as given by Mylex
+ */
+#define MDACMD_MEMCOPY 0x1 /* memory to memory copy */
+#define MDACMD_SCSIPT 0x2 /* SCSI passthrough (small command) */
+#define MDACMD_SCSILCPT 0x3 /* SCSI passthrough (large command) */
+#define MDACMD_SCSI 0x4 /* SCSI command for logical/physical device (small command) */
+#define MDACMD_SCSILC 0x5 /* SCSI command for logical/physical device (large command) */
+#define MDACMD_IOCTL 0x20 /* Management command */
+#define MDACMD_IOCTLCHECK 0x23 /* Validate management command (not implemented) */
+
+/*
+ * 16.0 IOCTL command
+ *
+ * We use the IOCTL names as given by Mylex
+ * Note that only ioctls supported by the PCI controller family are listed
+ */
+#define MDACIOCTL_GETCONTROLLERINFO 0x1
+#define MDACIOCTL_GETLOGDEVINFOVALID 0x3
+#define MDACIOCTL_GETPHYSDEVINFOVALID 0x5
+#define MDACIOCTL_GETCONTROLLERSTATISTICS 0xb
+#define MDACIOCTL_GETLOGDEVSTATISTICS 0xd
+#define MDACIOCTL_GETPHYSDEVSTATISTICS 0xf
+#define MDACIOCTL_GETHEALTHSTATUS 0x11
+#define MDACIOCTL_GETEVENT 0x15
+/* flash update */
+#define MDACIOCTL_STOREIMAGE 0x2c
+#define MDACIOCTL_READIMAGE 0x2d
+#define MDACIOCTL_FLASHIMAGES 0x2e
+/* battery backup unit */
+#define MDACIOCTL_GET_SUBSYSTEM_DATA 0x70
+#define MDACIOCTL_SET_SUBSYSTEM_DATA 0x71
+/* non-data commands */
+#define MDACIOCTL_STARTDISOCVERY 0x81
+#define MDACIOCTL_SETRAIDDEVSTATE 0x82
+#define MDACIOCTL_INITPHYSDEVSTART 0x84
+#define MDACIOCTL_INITPHYSDEVSTOP 0x85
+#define MDACIOCTL_INITRAIDDEVSTART 0x86
+#define MDACIOCTL_INITRAIDDEVSTOP 0x87
+#define MDACIOCTL_REBUILDRAIDDEVSTART 0x88
+#define MDACIOCTL_REBUILDRAIDDEVSTOP 0x89
+#define MDACIOCTL_MAKECONSISTENTDATASTART 0x8a
+#define MDACIOCTL_MAKECONSISTENTDATASTOP 0x8b
+#define MDACIOCTL_CONSISTENCYCHECKSTART 0x8c
+#define MDACIOCTL_CONSISTENCYCHECKSTOP 0x8d
+#define MDACIOCTL_SETMEMORYMAILBOX 0x8e
+#define MDACIOCTL_RESETDEVICE 0x90
+#define MDACIOCTL_FLUSHDEVICEDATA 0x91
+#define MDACIOCTL_PAUSEDEVICE 0x92
+#define MDACIOCTL_UNPAUSEDEVICE 0x93
+#define MDACIOCTL_LOCATEDEVICE 0x94
+#define MDACIOCTL_SETMASTERSLAVEMODE 0x95
+#define MDACIOCTL_SETREALTIMECLOCK 0xac
+/* RAID configuration */
+#define MDACIOCTL_CREATENEWCONF 0xc0
+#define MDACIOCTL_DELETERAIDDEV 0xc1
+#define MDACIOCTL_REPLACEINTERNALDEV 0xc2
+#define MDACIOCTL_RENAMERAIDDEV 0xc3
+#define MDACIOCTL_ADDNEWCONF 0xc4
+#define MDACIOCTL_XLATEPHYSDEVTORAIDDEV 0xc5
+#define MDACIOCTL_MORE 0xc6
+#define MDACIOCTL_SETPHYSDEVPARAMETER 0xc8
+#define MDACIOCTL_GETPHYSDEVPARAMETER 0xc9
+#define MDACIOCTL_CLEARCONF 0xca
+#define MDACIOCTL_GETDEVCONFINFO 0xcb
+#define MDACIOCTL_GETGROUPCONFINFO 0xcc
+#define MDACIOCTL_GETFREESPACELIST 0xcd
+#define MDACIOCTL_GETLOGDEVPARAMETER 0xce
+#define MDACIOCTL_SETLOGDEVPARAMETER 0xcf
+#define MDACIOCTL_GETCONTROLLERPARAMETER 0xd0
+#define MDACIOCTL_SETCONTRLLERPARAMETER 0xd1
+#define MDACIOCTL_CLEARCONFSUSPMODE 0xd2
+#define MDACIOCTL_GETBDT_FOR_SYSDRIVE 0xe0
+
+/*
+ * 17.1.4 Data Transfer Memory Address Without SG List
+ */
+struct mly_short_transfer {
+ struct mly_sg_entry sg[2];
+} __attribute__ ((packed));
+
+/*
+ * 17.1.5 Data Transfer Memory Address With SG List
+ *
+ * Note that only the first s/g table is currently used.
+ */
+struct mly_sg_transfer {
+ u_int16_t entries[3];
+ u_int16_t res1;
+ u_int64_t table_physaddr[3];
+} __attribute__ ((packed));
+
+/*
+ * 17.1.3 Data Transfer Memory Address Format
+ */
+union mly_command_transfer {
+ struct mly_short_transfer direct;
+ struct mly_sg_transfer indirect;
+};
+
+/*
+ * 21.1 MDACIOCTL_SETREALTIMECLOCK
+ * 21.7 MDACIOCTL_GETHEALTHSTATUS
+ * 21.8 MDACIOCTL_GETCONTROLLERINFO
+ * 21.9 MDACIOCTL_GETLOGDEVINFOVALID
+ * 21.10 MDACIOCTL_GETPHYSDEVINFOVALID
+ * 21.11 MDACIOCTL_GETPHYSDEVSTATISTICS
+ * 21.12 MDACIOCTL_GETLOGDEVSTATISTICS
+ * 21.13 MDACIOCTL_GETCONTROLLERSTATISTICS
+ * 21.27 MDACIOCTL_GETBDT_FOR_SYSDRIVE
+ * 23.4 MDACIOCTL_CREATENEWCONF
+ * 23.5 MDACIOCTL_ADDNEWCONF
+ * 23.8 MDACIOCTL_GETDEVCONFINFO
+ * 23.9 MDACIOCTL_GETFREESPACELIST
+ * 24.1 MDACIOCTL_MORE
+ * 25.1 MDACIOCTL_GETPHYSDEVPARAMETER
+ * 25.2 MDACIOCTL_SETPHYSDEVPARAMETER
+ * 25.3 MDACIOCTL_GETLOGDEVPARAMETER
+ * 25.4 MDACIOCTL_SETLOGDEVPARAMETER
+ * 25.5 MDACIOCTL_GETCONTROLLERPARAMETER
+ * 25.6 MDACIOCTL_SETCONTROLLERPARAMETER
+ *
+ * These commands just transfer data
+ */
+struct mly_ioctl_param_data {
+ u_int8_t param[10];
+ union mly_command_transfer transfer;
+} __attribute__ ((packed));
+
+/*
+ * 21.2 MDACIOCTL_SETMEMORYMAILBOX
+ */
+struct mly_ioctl_param_setmemorymailbox {
+ u_int8_t health_buffer_size;
+ u_int8_t res1;
+ u_int64_t health_buffer_physaddr;
+ u_int64_t command_mailbox_physaddr;
+ u_int64_t status_mailbox_physaddr;
+ u_int64_t res2[2];
+} __attribute__ ((packed));
+
+/*
+ * 21.8.2 MDACIOCTL_GETCONTROLLERINFO: Data Format
+ */
+struct mly_ioctl_getcontrollerinfo {
+ u_int8_t res1; /* N/A */
+ u_int8_t interface_type;
+ u_int8_t controller_type;
+ u_int8_t res2; /* N/A */
+ u_int16_t interface_speed;
+ u_int8_t interface_width;
+ u_int8_t res3[9]; /* N/A */
+ char interface_name[16];
+ char controller_name[16];
+ u_int8_t res4[16]; /* N/A */
+ /* firmware release information */
+ u_int8_t fw_major;
+ u_int8_t fw_minor;
+ u_int8_t fw_turn;
+ u_int8_t fw_build;
+ u_int8_t fw_day;
+ u_int8_t fw_month;
+ u_int8_t fw_century;
+ u_int8_t fw_year;
+ /* hardware release information */
+ u_int8_t hw_revision; /* N/A */
+ u_int8_t res5[3]; /* N/A */
+ u_int8_t hw_release_day; /* N/A */
+ u_int8_t hw_release_month; /* N/A */
+ u_int8_t hw_release_century; /* N/A */
+ u_int8_t hw_release_year; /* N/A */
+ /* hardware manufacturing information */
+ u_int8_t batch_number; /* N/A */
+ u_int8_t res6; /* N/A */
+ u_int8_t plant_number;
+ u_int8_t res7;
+ u_int8_t hw_manuf_day;
+ u_int8_t hw_manuf_month;
+ u_int8_t hw_manuf_century;
+ u_int8_t hw_manuf_year;
+ u_int8_t max_pdd_per_xldd;
+ u_int8_t max_ildd_per_xldd;
+ u_int16_t nvram_size;
+ u_int8_t max_number_of_xld; /* N/A */
+ u_int8_t res8[3]; /* N/A */
+ /* unique information per controller */
+ char serial_number[16];
+ u_int8_t res9[16]; /* N/A */
+ /* vendor information */
+ u_int8_t res10[3]; /* N/A */
+ u_int8_t oem_information;
+ char vendor_name[16]; /* N/A */
+ /* other physical/controller/operation information */
+ u_int8_t bbu_present:1;
+ u_int8_t active_clustering:1;
+ u_int8_t res11:6; /* N/A */
+ u_int8_t res12[3]; /* N/A */
+ /* physical device scan information */
+ u_int8_t physical_scan_active:1;
+ u_int8_t res13:7; /* N/A */
+ u_int8_t physical_scan_channel;
+ u_int8_t physical_scan_target;
+ u_int8_t physical_scan_lun;
+ /* maximum command data transfer size */
+ u_int16_t maximum_block_count;
+ u_int16_t maximum_sg_entries;
+ /* logical/physical device counts */
+ u_int16_t logical_devices_present;
+ u_int16_t logical_devices_critical;
+ u_int16_t logical_devices_offline;
+ u_int16_t physical_devices_present;
+ u_int16_t physical_disks_present;
+ u_int16_t physical_disks_critical; /* N/A */
+ u_int16_t physical_disks_offline;
+ u_int16_t maximum_parallel_commands;
+ /* channel and target ID information */
+ u_int8_t physical_channels_present;
+ u_int8_t virtual_channels_present;
+ u_int8_t physical_channels_possible;
+ u_int8_t virtual_channels_possible;
+ u_int8_t maximum_targets_possible[16]; /* N/A (6 and up) */
+ u_int8_t res14[12]; /* N/A */
+ /* memory/cache information */
+ u_int16_t memory_size;
+ u_int16_t cache_size;
+ u_int32_t valid_cache_size; /* N/A */
+ u_int32_t dirty_cache_size; /* N/A */
+ u_int16_t memory_speed;
+ u_int8_t memory_width;
+ u_int8_t memory_type:5;
+ u_int8_t res15:1; /* N/A */
+ u_int8_t memory_parity:1;
+ u_int8_t memory_ecc:1;
+ char memory_information[16]; /* N/A */
+ /* execution memory information */
+ u_int16_t exmemory_size;
+ u_int16_t l2cache_size; /* N/A */
+ u_int8_t res16[8]; /* N/A */
+ u_int16_t exmemory_speed;
+ u_int8_t exmemory_width;
+ u_int8_t exmemory_type:5;
+ u_int8_t res17:1; /* N/A */
+ u_int8_t exmemory_parity:1;
+ u_int8_t exmemory_ecc:1;
+ char exmemory_name[16]; /* N/A */
+ /* CPU information */
+ struct {
+ u_int16_t speed;
+ u_int8_t type;
+ u_int8_t number;
+ u_int8_t res1[12]; /* N/A */
+ char name[16]; /* N/A */
+ } cpu[2] __attribute__ ((packed));
+ /* debugging/profiling/command time tracing information */
+ u_int16_t profiling_page; /* N/A */
+ u_int16_t profiling_programs; /* N/A */
+ u_int16_t time_trace_page; /* N/A */
+ u_int16_t time_trace_programs; /* N/A */
+ u_int8_t res18[8]; /* N/A */
+ /* error counters on physical devices */
+ u_int16_t physical_device_bus_resets; /* N/A */
+ u_int16_t physical_device_parity_errors; /* N/A */
+ u_int16_t physical_device_soft_errors; /* N/A */
+ u_int16_t physical_device_commands_failed; /* N/A */
+ u_int16_t physical_device_miscellaneous_errors; /* N/A */
+ u_int16_t physical_device_command_timeouts; /* N/A */
+ u_int16_t physical_device_selection_timeouts; /* N/A */
+ u_int16_t physical_device_retries; /* N/A */
+ u_int16_t physical_device_aborts; /* N/A */
+ u_int16_t physical_device_host_command_aborts; /* N/A */
+ u_int16_t physical_device_PFAs_detected; /* N/A */
+ u_int16_t physical_device_host_commands_failed; /* N/A */
+ u_int8_t res19[8]; /* N/A */
+ /* error counters on logical devices */
+ u_int16_t logical_device_soft_errors; /* N/A */
+ u_int16_t logical_device_commands_failed; /* N/A */
+ u_int16_t logical_device_host_command_aborts; /* N/A */
+ u_int16_t res20; /* N/A */
+ /* error counters on controller */
+ u_int16_t controller_parity_ecc_errors;
+ u_int16_t controller_host_command_aborts; /* N/A */
+ u_int8_t res21[4]; /* N/A */
+ /* long duration activity information */
+ u_int16_t background_inits_active;
+ u_int16_t logical_inits_active;
+ u_int16_t physical_inits_active;
+ u_int16_t consistency_checks_active;
+ u_int16_t rebuilds_active;
+ u_int16_t MORE_active;
+ u_int16_t patrol_active; /* N/A */
+ u_int8_t long_operation_status; /* N/A */
+ u_int8_t res22; /* N/A */
+ /* flash ROM information */
+ u_int8_t flash_type; /* N/A */
+ u_int8_t res23; /* N/A */
+ u_int16_t flash_size;
+ u_int32_t flash_maximum_age;
+ u_int32_t flash_age;
+ u_int8_t res24[4]; /* N/A */
+ char flash_name[16]; /* N/A */
+ /* firmware runtime information */
+ u_int8_t rebuild_rate;
+ u_int8_t background_init_rate;
+ u_int8_t init_rate;
+ u_int8_t consistency_check_rate;
+ u_int8_t res25[4]; /* N/A */
+ u_int32_t maximum_dp;
+ u_int32_t free_dp;
+ u_int32_t maximum_iop;
+ u_int32_t free_iop;
+ u_int16_t maximum_comb_length;
+ u_int16_t maximum_configuration_groups;
+ u_int8_t installation_abort:1;
+ u_int8_t maintenance:1;
+ u_int8_t res26:6; /* N/A */
+ u_int8_t res27[3]; /* N/A */
+ u_int8_t res28[32 + 512]; /* N/A */
+} __attribute__ ((packed));
+
+/*
+ * 21.9.2 MDACIOCTL_GETLOGDEVINFOVALID
+ */
+struct mly_ioctl_getlogdevinfovalid {
+ u_int8_t res1; /* N/A */
+ u_int8_t channel;
+ u_int8_t target;
+ u_int8_t lun;
+ u_int8_t state; /* see 8.1 */
+ u_int8_t raid_level; /* see 8.2 */
+ u_int8_t stripe_size; /* see 8.3 */
+ u_int8_t cache_line_size; /* see 8.4 */
+ u_int8_t read_write_control; /* see 8.5 */
+ u_int8_t consistency_check:1;
+ u_int8_t rebuild:1;
+ u_int8_t make_consistent:1;
+ u_int8_t initialisation:1;
+ u_int8_t migration:1;
+ u_int8_t patrol:1;
+ u_int8_t res2:2; /* N/A */
+ u_int8_t ar5_limit;
+ u_int8_t ar5_algo;
+ u_int16_t logical_device_number;
+ u_int16_t bios_control;
+ /* error counters */
+ u_int16_t soft_errors; /* N/A */
+ u_int16_t commands_failed; /* N/A */
+ u_int16_t host_command_aborts; /* N/A */
+ u_int16_t deferred_write_errors; /* N/A */
+ u_int8_t res3[8]; /* N/A */
+ /* device size information */
+ u_int8_t res4[2]; /* N/A */
+ u_int16_t device_block_size;
+ u_int32_t original_device_size; /* N/A */
+ u_int32_t device_size; /* XXX "blocks or MB" Huh? */
+ u_int8_t res5[4]; /* N/A */
+ char device_name[32]; /* N/A */
+ u_int8_t inquiry[36];
+ u_int8_t res6[12]; /* N/A */
+ u_int64_t last_read_block; /* N/A */
+ u_int64_t last_written_block; /* N/A */
+ u_int64_t consistency_check_block;
+ u_int64_t rebuild_block;
+ u_int64_t make_consistent_block;
+ u_int64_t initialisation_block;
+ u_int64_t migration_block;
+ u_int64_t patrol_block; /* N/A */
+ u_int8_t res7[64]; /* N/A */
+} __attribute__ ((packed));
+
+/*
+ * 21.10.2 MDACIOCTL_GETPHYSDEVINFOVALID: Data Format
+ */
+struct mly_ioctl_getphysdevinfovalid {
+ u_int8_t res1;
+ u_int8_t channel;
+ u_int8_t target;
+ u_int8_t lun;
+ u_int8_t raid_ft:1; /* configuration status */
+ u_int8_t res2:1; /* N/A */
+ u_int8_t local:1;
+ u_int8_t res3:5;
+ u_int8_t host_dead:1; /* multiple host/controller status *//* N/A */
+ u_int8_t host_connection_dead:1; /* N/A */
+ u_int8_t res4:6; /* N/A */
+ u_int8_t state; /* see 8.1 */
+ u_int8_t width;
+ u_int16_t speed;
+ /* multiported physical device information */
+ u_int8_t ports_available; /* N/A */
+ u_int8_t ports_inuse; /* N/A */
+ u_int8_t res5[4];
+ u_int8_t ether_address[16]; /* N/A */
+ u_int16_t command_tags;
+ u_int8_t consistency_check:1; /* N/A */
+ u_int8_t rebuild:1; /* N/A */
+ u_int8_t make_consistent:1; /* N/A */
+ u_int8_t initialisation:1;
+ u_int8_t migration:1; /* N/A */
+ u_int8_t patrol:1; /* N/A */
+ u_int8_t res6:2;
+ u_int8_t long_operation_status; /* N/A */
+ u_int8_t parity_errors;
+ u_int8_t soft_errors;
+ u_int8_t hard_errors;
+ u_int8_t miscellaneous_errors;
+ u_int8_t command_timeouts; /* N/A */
+ u_int8_t retries; /* N/A */
+ u_int8_t aborts; /* N/A */
+ u_int8_t PFAs_detected; /* N/A */
+ u_int8_t res7[6];
+ u_int16_t block_size;
+ u_int32_t original_device_size; /* XXX "blocks or MB" Huh? */
+ u_int32_t device_size; /* XXX "blocks or MB" Huh? */
+ u_int8_t res8[4];
+ char name[16]; /* N/A */
+ u_int8_t res9[16 + 32];
+ u_int8_t inquiry[36];
+ u_int8_t res10[12 + 16];
+ u_int64_t last_read_block; /* N/A */
+ u_int64_t last_written_block; /* N/A */
+ u_int64_t consistency_check_block; /* N/A */
+ u_int64_t rebuild_block; /* N/A */
+ u_int64_t make_consistent_block; /* N/A */
+ u_int64_t initialisation_block; /* N/A */
+ u_int64_t migration_block; /* N/A */
+ u_int64_t patrol_block; /* N/A */
+ u_int8_t res11[256];
+} __attribute__ ((packed));
+
+union mly_devinfo {
+ struct mly_ioctl_getlogdevinfovalid logdev;
+ struct mly_ioctl_getphysdevinfovalid physdev;
+};
+
+/*
+ * 21.11.2 MDACIOCTL_GETPHYSDEVSTATISTICS: Data Format
+ * 21.12.2 MDACIOCTL_GETLOGDEVSTATISTICS: Data Format
+ */
+struct mly_ioctl_getdevstatistics {
+ u_int32_t uptime_ms; /* getphysedevstatistics only */
+ u_int8_t res1[5]; /* N/A */
+ u_int8_t channel;
+ u_int8_t target;
+ u_int8_t lun;
+ u_int16_t raid_device; /* getlogdevstatistics only */
+ u_int8_t res2[2]; /* N/A */
+ /* total read/write performance including cache data */
+ u_int32_t total_reads;
+ u_int32_t total_writes;
+ u_int32_t total_read_size;
+ u_int32_t total_write_size;
+ /* cache read/write performance */
+ u_int32_t cache_reads; /* N/A */
+ u_int32_t cache_writes; /* N/A */
+ u_int32_t cache_read_size; /* N/A */
+ u_int32_t cache_write_size; /* N/A */
+ /* commands active/wait information */
+ u_int32_t command_waits_done; /* N/A */
+ u_int16_t active_commands; /* N/A */
+ u_int16_t waiting_commands; /* N/A */
+ u_int8_t res3[8]; /* N/A */
+} __attribute__ ((packed));
+
+/*
+ * 21.13.2 MDACIOCTL_GETCONTROLLERSTATISTICS: Data Format
+ */
+struct mly_ioctl_getcontrollerstatistics {
+ u_int32_t uptime_ms; /* N/A */
+ u_int8_t res1[12]; /* N/A */
+ /* target physical device performance data information */
+ u_int32_t target_physical_device_interrupts; /* N/A */
+ u_int32_t target_physical_device_stray_interrupts; /* N/A */
+ u_int8_t res2[8]; /* N/A */
+ u_int32_t target_physical_device_reads; /* N/A */
+ u_int32_t target_physical_device_writes; /* N/A */
+ u_int32_t target_physical_device_read_size; /* N/A */
+ u_int32_t target_physical_device_write_size; /* N/A */
+ /* host system performance data information */
+ u_int32_t host_system_interrupts; /* N/A */
+ u_int32_t host_system_stray_interrupts; /* N/A */
+ u_int32_t host_system_sent_interrupts; /* N/A */
+ u_int8_t res3[4]; /* N/A */
+ u_int32_t physical_device_reads; /* N/A */
+ u_int32_t physical_device_writes; /* N/A */
+ u_int32_t physical_device_read_size; /* N/A */
+ u_int32_t physical_device_write_size; /* N/A */
+ u_int32_t physical_device_cache_reads; /* N/A */
+ u_int32_t physical_device_cache_writes; /* N/A */
+ u_int32_t physical_device_cache_read_size; /* N/A */
+ u_int32_t physical_device_cache_write_size; /* N/A */
+ u_int32_t logical_device_reads; /* N/A */
+ u_int32_t logical_device_writes; /* N/A */
+ u_int32_t logical_device_read_size; /* N/A */
+ u_int32_t logical_device_write_size; /* N/A */
+ u_int32_t logical_device_cache_reads; /* N/A */
+ u_int32_t logical_device_cache_writes; /* N/A */
+ u_int32_t logical_device_cache_read_size; /* N/A */
+ u_int32_t logical_device_cache_write_size; /* N/A */
+ u_int16_t target_physical_device_commands_active; /* N/A */
+ u_int16_t target_physical_device_commands_waiting; /* N/A */
+ u_int16_t host_system_commands_active; /* N/A */
+ u_int16_t host_system_commands_waiting; /* N/A */
+ u_int8_t res4[48 + 64]; /* N/A */
+} __attribute__ ((packed));
+
+/*
+ * 21.2 MDACIOCTL_SETRAIDDEVSTATE
+ */
+struct mly_ioctl_param_setraiddevstate {
+ u_int8_t state;
+} __attribute__ ((packed));
+
+/*
+ * 21.27.2 MDACIOCTL_GETBDT_FOR_SYSDRIVE: Data Format
+ */
+#define MLY_MAX_BDT_ENTRIES 1022
+struct mly_ioctl_getbdt_for_sysdrive {
+ u_int32_t num_of_bdt_entries;
+ u_int32_t bad_data_block_address[MLY_MAX_BDT_ENTRIES];
+} __attribute__ ((packed));
+
+/*
+ * 22.1 Physical Device Definition (PDD)
+ */
+struct mly_pdd {
+ u_int8_t type; /* see 8.2 */
+ u_int8_t state; /* see 8.1 */
+ u_int16_t raid_device;
+ u_int32_t device_size; /* XXX "block or MB" Huh? */
+ u_int8_t controller;
+ u_int8_t channel;
+ u_int8_t target;
+ u_int8_t lun;
+ u_int32_t start_address;
+} __attribute__ ((packed));
+
+/*
+ * 22.2 RAID Device Use Definition (UDD)
+ */
+struct mly_udd {
+ u_int8_t res1;
+ u_int8_t state; /* see 8.1 */
+ u_int16_t raid_device;
+ u_int32_t start_address;
+} __attribute__ ((packed));
+
+/*
+ * RAID Device Definition (LDD)
+ */
+struct mly_ldd {
+ u_int8_t type; /* see 8.2 */
+ u_int8_t state; /* see 8.1 */
+ u_int16_t raid_device;
+ u_int32_t device_size; /* XXX "block or MB" Huh? */
+ u_int8_t devices_used_count;
+ u_int8_t stripe_size; /* see 8.3 */
+ u_int8_t cache_line_size; /* see 8.4 */
+ u_int8_t read_write_control; /* see 8.5 */
+ u_int32_t devices_used_size; /* XXX "block or MB" Huh? */
+ u_int16_t devices_used[32]; /* XXX actual size of this field unknown! */
+} __attribute__ ((packed));
+
+/*
+ * Define a datastructure giving the smallest allocation that will hold
+ * a PDD, UDD or LDD for MDACIOCTL_GETDEVCONFINFO.
+ */
+struct mly_devconf_hdr {
+ u_int8_t type; /* see 8.2 */
+ u_int8_t state; /* see 8.1 */
+ u_int16_t raid_device;
+};
+
+union mly_ioctl_devconfinfo {
+ struct mly_pdd pdd;
+ struct mly_udd udd;
+ struct mly_ldd ldd;
+ struct mly_devconf_hdr hdr;
+};
+
+/*
+ * 22.3 MDACIOCTL_RENAMERAIDDEV
+ *
+ * XXX this command is listed as transferring data, but does not define the data.
+ */
+struct mly_ioctl_param_renameraiddev {
+ u_int8_t new_raid_device;
+} __attribute__ ((packed));
+
+/*
+ * 23.6.2 MDACIOCTL_XLATEPHYSDEVTORAIDDEV
+ *
+ * XXX documentation suggests this format will change
+ */
+struct mly_ioctl_param_xlatephysdevtoraiddev {
+ u_int16_t raid_device;
+ u_int8_t res1[2];
+ u_int8_t controller;
+ u_int8_t channel;
+ u_int8_t target;
+ u_int8_t lun;
+} __attribute__ ((packed));
+
+/*
+ * 23.7 MDACIOCTL_GETGROUPCONFINFO
+ */
+struct mly_ioctl_param_getgroupconfinfo {
+ u_int16_t group;
+ u_int8_t res1[8];
+ union mly_command_transfer transfer;
+} __attribute__ ((packed));
+
+/*
+ * 23.9.2 MDACIOCTL_GETFREESPACELIST: Data Format
+ *
+ * The controller will populate as much of this structure as is provided,
+ * or as is required to fully list the free space available.
+ */
+struct mly_ioctl_getfreespacelist_entry {
+ u_int16_t raid_device;
+ u_int8_t res1[6];
+ u_int32_t address; /* XXX "blocks or MB" Huh? */
+ u_int32_t size; /* XXX "blocks or MB" Huh? */
+} __attribute__ ((packed));
+
+/*
+ * NOTE(review): the struct tag is misspelled ("getfrespacelist", missing an
+ * 'e'); kept as-is since renaming would break existing references.
+ * space[0] is the GNU zero-length-array extension (pre-C99 flexible array
+ * member); the caller sizes the allocation to hold as many entries as needed.
+ */
+struct mly_ioctl_getfrespacelist {
+    u_int16_t		returned_entries;
+    u_int16_t		total_entries;
+    u_int8_t		res1[12];
+    struct mly_ioctl_getfreespacelist_entry	space[0];	/* expand to suit */
+} __attribute__ ((packed));
+
+/*
+ * 27.1 MDACIOCTL_GETSUBSYSTEMDATA
+ * 27.2 MDACIOCTL_SETSUBSYSTEMDATA
+ *
+ * PCI controller only supports a limited subset of the possible operations.
+ *
+ * XXX where does the status end up? (the command transfers no data)
+ */
+struct mly_ioctl_param_subsystemdata {
+    /* NOTE(review): bitfield packing order is implementation-defined; this
+     * layout presumably matches the firmware on little-endian gcc — confirm. */
+    u_int8_t	operation:4;
+#define MLY_BBU_GETSTATUS	0x00
+/* NOTE(review): SET_THRESHOLD shares the value 0x00 with GETSTATUS — looks
+ * like a copy/paste slip; verify the correct opcode against the Mylex spec. */
+#define MLY_BBU_SET_THRESHOLD	0x00 	/* minutes in param[0,1] */
+    u_int8_t	subsystem:4;
+#define MLY_SUBSYSTEM_BBU	0x01
+    u_int	parameter[3];		/* only for SETSUBSYSTEMDATA */
+} __attribute__ ((packed));
+
+struct mly_ioctl_getsubsystemdata_bbustatus {
+ u_int16_t current_power;
+ u_int16_t maximum_power;
+ u_int16_t power_threshold;
+ u_int8_t charge_level;
+ u_int8_t hardware_version;
+ u_int8_t battery_type;
+#define MLY_BBU_TYPE_UNKNOWN 0x00
+#define MLY_BBU_TYPE_NICAD 0x01
+#define MLY_BBU_TYPE_MISSING 0xfe
+ u_int8_t res1;
+ u_int8_t operation_status;
+#define MLY_BBU_STATUS_NO_SYNC 0x01
+#define MLY_BBU_STATUS_OUT_OF_SYNC 0x02
+#define MLY_BBU_STATUS_FIRST_WARNING 0x04
+#define MLY_BBU_STATUS_SECOND_WARNING 0x08
+#define MLY_BBU_STATUS_RECONDITIONING 0x10
+#define MLY_BBU_STATUS_DISCHARGING 0x20
+#define MLY_BBU_STATUS_FASTCHARGING 0x40
+ u_int8_t res2;
+} __attribute__ ((packed));
+
+/*
+ * 28.9 MDACIOCTL_RESETDEVICE
+ * 28.10 MDACIOCTL_FLUSHDEVICEDATA
+ * 28.11 MDACIOCTL_PAUSEDEVICE
+ * 28.12 MDACIOCTL_UNPAUSEDEVICE
+ */
+struct mly_ioctl_param_deviceoperation {
+ u_int8_t operation_device; /* see 14.3 */
+} __attribute__ ((packed));
+
+/*
+ * 31.1 Event Data Format
+ */
+struct mly_event {
+ u_int32_t sequence_number;
+ u_int32_t timestamp;
+ u_int32_t code;
+ u_int8_t controller;
+ u_int8_t channel;
+ u_int8_t target; /* also enclosure */
+ u_int8_t lun; /* also enclosure unit */
+ u_int8_t res1[4];
+ u_int32_t param;
+ u_int8_t sense[40];
+} __attribute__ ((packed));
+
+/*
+ * 31.2 MDACIOCTL_GETEVENT
+ */
+struct mly_ioctl_param_getevent {
+ u_int16_t sequence_number_low;
+ u_int8_t res1[8];
+ union mly_command_transfer transfer;
+} __attribute__ ((packed));
+
+union mly_ioctl_param {
+ struct mly_ioctl_param_data data;
+ struct mly_ioctl_param_setmemorymailbox setmemorymailbox;
+ struct mly_ioctl_param_setraiddevstate setraiddevstate;
+ struct mly_ioctl_param_renameraiddev renameraiddev;
+ struct mly_ioctl_param_xlatephysdevtoraiddev xlatephysdevtoraiddev;
+ struct mly_ioctl_param_getgroupconfinfo getgroupconfinfo;
+ struct mly_ioctl_param_subsystemdata subsystemdata;
+ struct mly_ioctl_param_deviceoperation deviceoperation;
+ struct mly_ioctl_param_getevent getevent;
+};
+
+/*
+ * 19 SCSI Command Format
+ */
+struct mly_command_address_physical {
+ u_int8_t lun;
+ u_int8_t target;
+ u_int8_t channel:3;
+ u_int8_t controller:5;
+} __attribute__ ((packed));
+
+struct mly_command_address_logical {
+ u_int16_t logdev;
+ u_int8_t res1:3;
+ u_int8_t controller:5;
+} __attribute__ ((packed));
+
+union mly_command_address {
+ struct mly_command_address_physical phys;
+ struct mly_command_address_logical log;
+};
+
+struct mly_command_generic {
+ u_int16_t command_id;
+ u_int8_t opcode;
+ struct mly_command_control command_control;
+ u_int32_t data_size;
+ u_int64_t sense_buffer_address;
+ union mly_command_address addr;
+ struct mly_timeout timeout;
+ u_int8_t maximum_sense_size;
+ u_int8_t res1[11];
+ union mly_command_transfer transfer;
+} __attribute__ ((packed));
+
+
+/*
+ * 19.1 MDACMD_SCSI & MDACMD_SCSIPT
+ */
+#define MLY_CMD_SCSI_SMALL_CDB 10
+struct mly_command_scsi_small {
+ u_int16_t command_id;
+ u_int8_t opcode;
+ struct mly_command_control command_control;
+ u_int32_t data_size;
+ u_int64_t sense_buffer_address;
+ union mly_command_address addr;
+ struct mly_timeout timeout;
+ u_int8_t maximum_sense_size;
+ u_int8_t cdb_length;
+ u_int8_t cdb[MLY_CMD_SCSI_SMALL_CDB];
+ union mly_command_transfer transfer;
+} __attribute__ ((packed));
+
+/*
+ * 19.2 MDACMD_SCSILC & MDACMD_SCSILCPT
+ */
+struct mly_command_scsi_large {
+ u_int16_t command_id;
+ u_int8_t opcode;
+ struct mly_command_control command_control;
+ u_int32_t data_size;
+ u_int64_t sense_buffer_address;
+ union mly_command_address addr;
+ struct mly_timeout timeout;
+ u_int8_t maximum_sense_size;
+ u_int8_t cdb_length;
+ u_int16_t res1;
+ u_int64_t cdb_physaddr;
+ union mly_command_transfer transfer;
+} __attribute__ ((packed));
+
+/*
+ * 20.1 IOCTL Command Format: Internal Bus
+ */
+struct mly_command_ioctl {
+ u_int16_t command_id;
+ u_int8_t opcode;
+ struct mly_command_control command_control;
+ u_int32_t data_size;
+ u_int64_t sense_buffer_address;
+ union mly_command_address addr;
+ struct mly_timeout timeout;
+ u_int8_t maximum_sense_size;
+ u_int8_t sub_ioctl;
+ union mly_ioctl_param param;
+} __attribute__ ((packed));
+
+/*
+ * PG6: 8.2.2
+ */
+struct mly_command_mmbox {
+ u_int32_t flag;
+ u_int8_t data[60];
+} __attribute__ ((packed));
+
+union mly_command_packet {
+ struct mly_command_generic generic;
+ struct mly_command_scsi_small scsi_small;
+ struct mly_command_scsi_large scsi_large;
+ struct mly_command_ioctl ioctl;
+ struct mly_command_mmbox mmbox;
+};
+
+/*
+ * PG6: 5.3
+ */
+#define MLY_I960RX_COMMAND_MAILBOX 0x10
+#define MLY_I960RX_STATUS_MAILBOX 0x18
+#define MLY_I960RX_IDBR 0x20
+#define MLY_I960RX_ODBR 0x2c
+#define MLY_I960RX_ERROR_STATUS 0x2e
+#define MLY_I960RX_INTERRUPT_STATUS 0x30
+#define MLY_I960RX_INTERRUPT_MASK 0x34
+
+#define MLY_STRONGARM_COMMAND_MAILBOX 0x50
+#define MLY_STRONGARM_STATUS_MAILBOX 0x58
+#define MLY_STRONGARM_IDBR 0x60
+#define MLY_STRONGARM_ODBR 0x61
+#define MLY_STRONGARM_ERROR_STATUS 0x63
+#define MLY_STRONGARM_INTERRUPT_STATUS 0x30
+#define MLY_STRONGARM_INTERRUPT_MASK 0x34
+
+/*
+ * PG6: 5.4.3 Doorbell 0
+ */
+#define MLY_HM_CMDSENT (1<<0)
+#define MLY_HM_STSACK (1<<1)
+#define MLY_SOFT_RST (1<<3)
+#define MLY_AM_CMDSENT (1<<4)
+
+/*
+ * PG6: 5.4.4 Doorbell 1
+ *
+ * Note that the documentation claims that these bits are set when the
+ * status queue(s) are empty, whereas the Linux driver and experience
+ * suggest they are set when there is status available.
+ */
+#define MLY_HM_STSREADY (1<<0)
+#define MLY_AM_STSREADY (1<<1)
+
+/*
+ * PG6: 5.4.6 Doorbell 3
+ */
+#define MLY_MSG_EMPTY (1<<3)
+#define MLY_MSG_SPINUP 0x08
+#define MLY_MSG_RACE_RECOVERY_FAIL 0x60
+#define MLY_MSG_RACE_IN_PROGRESS 0x70
+#define MLY_MSG_RACE_ON_CRITICAL 0xb0
+#define MLY_MSG_PARITY_ERROR 0xf0
+
+/*
+ * PG6: 5.4.8 Outbound Interrupt Mask
+ */
+#define MLY_INTERRUPT_MASK_DISABLE 0xff
+#define MLY_INTERRUPT_MASK_ENABLE (0xff & ~(1<<2))
+
+/*
+ * PG6: 8.2 Advanced Mailbox Scheme
+ *
+ * Note that this must be allocated on a 4k boundary, and all internal
+ * fields must also reside on a 4k boundary.
+ * We could dynamically size this structure, but the extra effort
+ * is probably unjustified. Note that these buffers do not need to be
+ * adjacent - we just group them to simplify allocation of the bus-visible
+ * buffer.
+ *
+ * XXX Note that for some reason, if MLY_MMBOX_COMMANDS is > 64, the controller
+ * fails to respond to the command at (MLY_MMBOX_COMMANDS - 64). It's not
+ * wrapping to 0 at this point (determined by experimentation). This is not
+ * consistent with the Linux driver's implementation.
+ * Whilst it's handy to have lots of room for status returns in case we end up
+ * being slow getting back to completed commands, it seems unlikely that we
+ * would get 64 commands ahead of the controller on the submissions side, so
+ * the current workaround is to simply limit the command ring to 64 entries.
+ */
+union mly_status_packet {
+ struct mly_status status;
+ struct {
+ u_int32_t flag;
+ u_int8_t data[4];
+ } __attribute__ ((packed)) mmbox;
+};
+union mly_health_region {
+ struct mly_health_status status;
+ u_int8_t pad[1024];
+};
+
+#define MLY_MMBOX_COMMANDS 64
+#define MLY_MMBOX_STATUS 512
+struct mly_mmbox {
+ union mly_command_packet mmm_command[MLY_MMBOX_COMMANDS];
+ union mly_status_packet mmm_status[MLY_MMBOX_STATUS];
+ union mly_health_region mmm_health;
+} __attribute__ ((packed));
diff --git a/sys/dev/mly/mlyvar.h b/sys/dev/mly/mlyvar.h
new file mode 100644
index 0000000..00a9c9b
--- /dev/null
+++ b/sys/dev/mly/mlyvar.h
@@ -0,0 +1,423 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2000 BSDi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/********************************************************************************
+ ********************************************************************************
+ Driver Parameter Definitions
+ ********************************************************************************
+ ********************************************************************************/
+
+/*
+ * The firmware interface allows for a 16-bit command identifier. A lookup
+ * table this size (256k) would be too expensive, so we cap ourselves at a
+ * reasonable limit.
+ */
+#define MLY_MAXCOMMANDS 256 /* max outstanding commands per controller, limit 65535 */
+
+/*
+ * The firmware interface allows for a 16-bit s/g list length. We limit
+ * ourselves to a reasonable maximum and ensure alignment.
+ */
+#define MLY_MAXSGENTRIES 64 /* max S/G entries, limit 65535 */
+
+/********************************************************************************
+ ********************************************************************************
+ Driver Variable Definitions
+ ********************************************************************************
+ ********************************************************************************/
+
+#if __FreeBSD_version >= 500005
+# include <sys/taskqueue.h>
+#endif
+
+/*
+ * Debugging levels:
+ * 0 - quiet, only emit warnings
+ * 1 - noisy, emit major function points and things done
+ * 2 - extremely noisy, emit trace items in loops, etc.
+ */
+/*
+ * Debugging macros; compiled out entirely when MLY_DEBUG is not defined.
+ * All five helpers must have empty non-debug definitions so that code
+ * using any of them still builds without MLY_DEBUG.
+ */
+#ifdef MLY_DEBUG
+# define debug(level, fmt, args...)	do { if (level <= MLY_DEBUG) printf("%s: " fmt "\n", __FUNCTION__ , ##args); } while(0)
+/* use "%s": __FUNCTION__ is a predeclared identifier, not a string literal,
+ * so literal concatenation with it is not valid standard C */
+# define debug_called(level)		do { if (level <= MLY_DEBUG) printf("%s: called\n", __FUNCTION__); } while(0)
+# define debug_struct(s)		printf("  SIZE %s: %d\n", #s, sizeof(struct s))
+# define debug_union(s)			printf("  SIZE %s: %d\n", #s, sizeof(union s))
+# define debug_field(s, f)		printf("  OFFSET %s.%s: %d\n", #s, #f, ((int)&(((struct s *)0)->f)))
+extern void	mly_printstate0(void);
+extern struct mly_softc	*mly_softc0;
+#else
+# define debug(level, fmt, args...)
+# define debug_called(level)
+# define debug_struct(s)
+# define debug_union(s)			/* was missing: broke non-debug builds */
+# define debug_field(s, f)		/* was missing: broke non-debug builds */
+#endif
+
+#define mly_printf(sc, fmt, args...) device_printf(sc->mly_dev, fmt , ##args)
+
+/*
+ * Per-device structure, used to save persistent state on devices.
+ *
+ * Note that this isn't really Bus/Target/Lun since we don't support
+ * lun != 0 at this time.
+ */
+struct mly_btl {
+ int mb_flags;
+#define MLY_BTL_PHYSICAL (1<<0) /* physical device */
+#define MLY_BTL_LOGICAL (1<<1) /* logical device */
+#define MLY_BTL_PROTECTED (1<<2) /* device is protected - I/O not allowed */
+#define MLY_BTL_RESCAN (1<<3) /* device needs to be rescanned */
+ char mb_name[16]; /* peripheral attached to this device */
+ int mb_state; /* see 8.1 */
+ int mb_type; /* see 8.2 */
+};
+
+/*
+ * Per-command control structure.
+ */
+/*
+ * Per-command control structure: tracks one controller command from
+ * allocation through submission to completion.
+ */
+struct mly_command {
+    TAILQ_ENTRY(mly_command) mc_link;	/* list linkage */
+
+    struct mly_softc	*mc_sc;		/* controller that owns us */
+    u_int16_t		mc_slot;	/* command slot we occupy */
+    int			mc_flags;
+#define MLY_CMD_STATEMASK	((1<<8)-1)
+#define MLY_CMD_STATE(mc)	((mc)->mc_flags & MLY_CMD_STATEMASK)
+/* use '&', not '&=': the compound assignment modified mc_flags twice in one
+ * unsequenced expression, which is undefined behaviour */
+#define MLY_CMD_SETSTATE(mc, s)	((mc)->mc_flags = ((mc)->mc_flags & ~MLY_CMD_STATEMASK) | (s))
+#define MLY_CMD_FREE		0	/* command is on the free list */
+#define MLY_CMD_SETUP		1	/* command is being built */
+#define MLY_CMD_BUSY		2	/* command is being run, or ready to run, or not completed */
+#define MLY_CMD_COMPLETE	3	/* command has been completed */
+#define MLY_CMD_SLOTTED		(1<<8)	/* command has a slot number */
+#define MLY_CMD_MAPPED		(1<<9)	/* command has had its data mapped */
+#define MLY_CMD_PRIORITY	(1<<10)	/* allow use of "priority" slots */
+#define MLY_CMD_DATAIN		(1<<11)	/* data moves controller->system */
+#define MLY_CMD_DATAOUT		(1<<12)	/* data moves system->controller */
+    u_int16_t		mc_status;	/* command completion status */
+    u_int8_t		mc_sense;	/* sense data length */
+    int32_t		mc_resid;	/* I/O residual count */
+
+    union mly_command_packet *mc_packet;	/* our controller command */
+    u_int64_t		mc_packetphys;	/* physical address of the mapped packet */
+
+    void		*mc_data;	/* data buffer */
+    size_t		mc_length;	/* data length */
+    bus_dmamap_t	mc_datamap;	/* DMA map for data */
+
+    void		(* mc_complete)(struct mly_command *mc);	/* completion handler */
+    void		*mc_private;	/* caller-private data */
+
+};
+
+/*
+ * Command slot regulation.
+ *
+ * We can't use slot 0 due to the memory mailbox implementation.
+ */
+#define MLY_SLOT_START 1
+#define MLY_SLOT_MAX (MLY_SLOT_START + MLY_MAXCOMMANDS)
+
+/*
+ * Command/command packet cluster.
+ *
+ * Due to the difficulty of using the zone allocator to create a new
+ * zone from within a module, we use our own clustering to reduce
+ * memory wastage caused by allocating lots of these small structures.
+ *
+ * Note that it is possible to require more than MLY_MAXCOMMANDS
+ * command structures.
+ *
+ * Since we may need to allocate extra clusters at any time, and since this
+ * process needs to allocate a physically contiguous slab of controller
+ * addressable memory in which to place the command packets, do not allow more
+ * command packets in a cluster than will fit in a page.
+ */
+#define MLY_CMD_CLUSTERCOUNT (PAGE_SIZE / sizeof(union mly_command_packet))
+
+struct mly_command_cluster {
+ TAILQ_ENTRY(mly_command_cluster) mcc_link;
+ union mly_command_packet *mcc_packet;
+ bus_dmamap_t mcc_packetmap;
+ u_int64_t mcc_packetphys;
+ struct mly_command mcc_command[MLY_CMD_CLUSTERCOUNT];
+};
+
+/*
+ * Per-controller structure.
+ */
+struct mly_softc {
+    /* bus connections */
+    device_t		mly_dev;
+    struct resource	*mly_regs_resource;	/* register interface window */
+    int			mly_regs_rid;		/* resource ID */
+    bus_space_handle_t	mly_bhandle;		/* bus space handle */
+    bus_space_tag_t	mly_btag;		/* bus space tag */
+    bus_dma_tag_t	mly_parent_dmat;	/* parent DMA tag */
+    bus_dma_tag_t	mly_buffer_dmat;	/* data buffer/command DMA tag */
+    struct resource	*mly_irq;		/* interrupt */
+    int			mly_irq_rid;
+    void		*mly_intr;		/* interrupt handle */
+
+    /* scatter/gather lists and their controller-visible mappings */
+    struct mly_sg_entry	*mly_sg_table;		/* s/g lists */
+    u_int32_t		mly_sg_busaddr;		/* s/g table base address in bus space */
+    bus_dma_tag_t	mly_sg_dmat;		/* s/g buffer DMA tag */
+    bus_dmamap_t	mly_sg_dmamap;		/* map for s/g buffers */
+
+    /* controller hardware interface */
+    int			mly_hwif;
+#define MLY_HWIF_I960RX		0
+#define MLY_HWIF_STRONGARM	1
+    u_int8_t		mly_doorbell_true;	/* xor map to make hardware doorbell 'true' bits into 1s */
+    u_int8_t		mly_command_mailbox;	/* register offsets */
+    u_int8_t		mly_status_mailbox;
+    u_int8_t		mly_idbr;
+    u_int8_t		mly_odbr;
+    u_int8_t		mly_error_status;
+    u_int8_t		mly_interrupt_status;
+    u_int8_t		mly_interrupt_mask;
+    struct mly_mmbox	*mly_mmbox;		/* kernel-space address of memory mailbox */
+    u_int64_t		mly_mmbox_busaddr;	/* bus-space address of memory mailbox */
+    bus_dma_tag_t	mly_mmbox_dmat;		/* memory mailbox DMA tag */
+    bus_dmamap_t	mly_mmbox_dmamap;	/* memory mailbox DMA map */
+    u_int32_t		mly_mmbox_command_index;	/* next slot to use */
+    u_int32_t		mly_mmbox_status_index;	/* slot we next expect status in */
+
+    /* controller features, limits and status */
+    int			mly_state;
+#define MLY_STATE_SUSPEND	(1<<0)
+#define	MLY_STATE_OPEN		(1<<1)
+#define MLY_STATE_INTERRUPTS_ON	(1<<2)
+#define MLY_STATE_MMBOX_ACTIVE	(1<<3)
+    int			mly_max_commands;	/* max parallel commands we allow */
+    struct mly_ioctl_getcontrollerinfo	*mly_controllerinfo;
+    struct mly_param_controller		*mly_controllerparam;
+    struct mly_btl	mly_btl[MLY_MAX_CHANNELS][MLY_MAX_TARGETS];
+
+    /* command management */
+    struct mly_command	*mly_busycmds[MLY_SLOT_MAX];	/* busy commands */
+    int			mly_busy_count;
+    int			mly_last_slot;
+    TAILQ_HEAD(,mly_command)	mly_freecmds;	/* commands available for reuse */
+    TAILQ_HEAD(,mly_command)	mly_ready;	/* commands ready to be submitted */
+    TAILQ_HEAD(,mly_command)	mly_completed;	/* commands which have been returned by the controller */
+    TAILQ_HEAD(,mly_command_cluster) mly_clusters;	/* command memory blocks */
+    bus_dma_tag_t	mly_packet_dmat;	/* command packet DMA tag */
+
+    /* health monitoring */
+    u_int32_t		mly_event_change;	/* event status change indicator */
+    u_int32_t		mly_event_counter;	/* next event for which we anticipate status */
+    u_int32_t		mly_event_waiting;	/* next event the controller will post status for */
+    struct callout_handle	mly_periodic;	/* periodic event handling */
+
+    /* CAM connection */
+    TAILQ_HEAD(,ccb_hdr)	mly_cam_ccbq;	/* outstanding I/O from CAM */
+    struct cam_sim	*mly_cam_sim[MLY_MAX_CHANNELS];
+    int			mly_cam_lowbus;
+
+#if __FreeBSD_version >= 500005
+    /* command-completion task */
+    struct task		mly_task_complete;	/* deferred-completion task */
+#endif
+};
+
+/*
+ * Register access helpers.
+ */
+#define MLY_SET_REG(sc, reg, val) bus_space_write_1(sc->mly_btag, sc->mly_bhandle, reg, val)
+#define MLY_GET_REG(sc, reg) bus_space_read_1 (sc->mly_btag, sc->mly_bhandle, reg)
+#define MLY_GET_REG2(sc, reg) bus_space_read_2 (sc->mly_btag, sc->mly_bhandle, reg)
+#define MLY_GET_REG4(sc, reg) bus_space_read_4 (sc->mly_btag, sc->mly_bhandle, reg)
+
+/*
+ * Copy a 16-byte command mailbox to/from the controller as four 32-bit
+ * register accesses.
+ *
+ * No trailing semicolon after while(0): the caller supplies it, and a
+ * doubled semicolon breaks use in unbraced if/else bodies.
+ */
+#define MLY_SET_MBOX(sc, mbox, ptr)					\
+	do {								\
+	    bus_space_write_4(sc->mly_btag, sc->mly_bhandle, mbox,      *((u_int32_t *)ptr));	  \
+	    bus_space_write_4(sc->mly_btag, sc->mly_bhandle, mbox +  4, *((u_int32_t *)ptr + 1)); \
+	    bus_space_write_4(sc->mly_btag, sc->mly_bhandle, mbox +  8, *((u_int32_t *)ptr + 2)); \
+	    bus_space_write_4(sc->mly_btag, sc->mly_bhandle, mbox + 12, *((u_int32_t *)ptr + 3)); \
+	} while(0)
+#define MLY_GET_MBOX(sc, mbox, ptr)					\
+	do {								\
+	    *((u_int32_t *)ptr)     = bus_space_read_4(sc->mly_btag, sc->mly_bhandle, mbox);	  \
+	    *((u_int32_t *)ptr + 1) = bus_space_read_4(sc->mly_btag, sc->mly_bhandle, mbox + 4);  \
+	    *((u_int32_t *)ptr + 2) = bus_space_read_4(sc->mly_btag, sc->mly_bhandle, mbox + 8);  \
+	    *((u_int32_t *)ptr + 3) = bus_space_read_4(sc->mly_btag, sc->mly_bhandle, mbox + 12); \
+	} while(0)
+
+#define MLY_IDBR_TRUE(sc, mask) \
+ ((((MLY_GET_REG((sc), (sc)->mly_idbr)) ^ (sc)->mly_doorbell_true) & (mask)) == (mask))
+#define MLY_ODBR_TRUE(sc, mask) \
+ ((MLY_GET_REG((sc), (sc)->mly_odbr) & (mask)) == (mask))
+#define MLY_ERROR_VALID(sc) \
+ ((((MLY_GET_REG((sc), (sc)->mly_error_status)) ^ (sc)->mly_doorbell_true) & (MLY_MSG_EMPTY)) == 0)
+
+/*
+ * Mask/unmask controller interrupts, mirroring the state in mly_state.
+ * No trailing semicolon after while(0): the caller supplies it, and a
+ * doubled semicolon breaks use in unbraced if/else bodies.
+ */
+#define MLY_MASK_INTERRUPTS(sc)						\
+	do {								\
+	    MLY_SET_REG((sc), (sc)->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);	\
+	    sc->mly_state &= ~MLY_STATE_INTERRUPTS_ON;			\
+	} while(0)
+#define MLY_UNMASK_INTERRUPTS(sc)					\
+	do {								\
+	    MLY_SET_REG((sc), (sc)->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);	\
+	    sc->mly_state |= MLY_STATE_INTERRUPTS_ON;			\
+	} while(0)
+
+/*
+ * Logical device number -> bus/target translation
+ */
+#define MLY_LOGDEV_BUS(sc, x) (((x) / MLY_MAX_TARGETS) + (sc)->mly_controllerinfo->physical_channels_present)
+#define MLY_LOGDEV_TARGET(x) ((x) % MLY_MAX_TARGETS)
+
+/*
+ * Public functions/variables
+ */
+/* mly.c */
+extern int mly_attach(struct mly_softc *sc);
+extern void mly_detach(struct mly_softc *sc);
+extern void mly_free(struct mly_softc *sc);
+extern void mly_startio(struct mly_softc *sc);
+extern void mly_done(struct mly_softc *sc);
+extern int mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp);
+extern void mly_release_command(struct mly_command *mc);
+
+/* mly_cam.c */
+extern int mly_cam_attach(struct mly_softc *sc);
+extern void mly_cam_detach(struct mly_softc *sc);
+extern int mly_cam_command(struct mly_softc *sc, struct mly_command **mcp);
+extern int mly_name_device(struct mly_softc *sc, int bus, int target);
+
+/********************************************************************************
+ * Queue primitives
+ *
+ * These are broken out individually to make statistics gathering easier.
+ */
+
+/*
+ * Append a command to the tail of the ready queue and mark it busy; the
+ * splcam critical section keeps the queue consistent with the interrupt path.
+ */
+static __inline void
+mly_enqueue_ready(struct mly_command *mc)
+{
+    int		s;
+
+    s = splcam();
+    TAILQ_INSERT_TAIL(&mc->mc_sc->mly_ready, mc, mc_link);
+    MLY_CMD_SETSTATE(mc, MLY_CMD_BUSY);
+    splx(s);
+}
+
+/*
+ * Re-insert a command at the head of the ready queue so it is picked up
+ * first on the next submission pass (state is left unchanged).
+ */
+static __inline void
+mly_requeue_ready(struct mly_command *mc)
+{
+    int		s;
+
+    s = splcam();
+    TAILQ_INSERT_HEAD(&mc->mc_sc->mly_ready, mc, mc_link);
+    splx(s);
+}
+
+/*
+ * Remove and return the command at the head of the ready queue, or NULL
+ * if the queue is empty.
+ */
+static __inline struct mly_command *
+mly_dequeue_ready(struct mly_softc *sc)
+{
+    struct mly_command	*mc;
+    int			s;
+
+    s = splcam();
+    if ((mc = TAILQ_FIRST(&sc->mly_ready)) != NULL)
+	TAILQ_REMOVE(&sc->mly_ready, mc, mc_link);
+    splx(s);
+    return(mc);
+}
+
+/*
+ * Append a command to the tail of the completed queue for deferred handling.
+ */
+static __inline void
+mly_enqueue_completed(struct mly_command *mc)
+{
+    int		s;
+
+    s = splcam();
+    TAILQ_INSERT_TAIL(&mc->mc_sc->mly_completed, mc, mc_link);
+    /* don't set MLY_CMD_COMPLETE here to avoid wakeup race */
+    splx(s);
+}
+
+/*
+ * Remove and return the command at the head of the completed queue, or
+ * NULL if the queue is empty.
+ */
+static __inline struct mly_command *
+mly_dequeue_completed(struct mly_softc *sc)
+{
+    struct mly_command	*mc;
+    int			s;
+
+    s = splcam();
+    if ((mc = TAILQ_FIRST(&sc->mly_completed)) != NULL)
+	TAILQ_REMOVE(&sc->mly_completed, mc, mc_link);
+    splx(s);
+    return(mc);
+}
+
+/*
+ * Return a command to the free list (inserted at the head for cache warmth)
+ * and mark it free.
+ */
+static __inline void
+mly_enqueue_free(struct mly_command *mc)
+{
+    int		s;
+
+    s = splcam();
+    TAILQ_INSERT_HEAD(&mc->mc_sc->mly_freecmds, mc, mc_link);
+    MLY_CMD_SETSTATE(mc, MLY_CMD_FREE);
+    splx(s);
+}
+
+/*
+ * Take a command off the free list, or return NULL if none are available.
+ */
+static __inline struct mly_command *
+mly_dequeue_free(struct mly_softc *sc)
+{
+    struct mly_command	*mc;
+    int			s;
+
+    s = splcam();
+    if ((mc = TAILQ_FIRST(&sc->mly_freecmds)) != NULL)
+	TAILQ_REMOVE(&sc->mly_freecmds, mc, mc_link);
+    splx(s);
+    return(mc);
+}
+
+/*
+ * Track a newly-allocated command cluster on the per-controller list.
+ */
+static __inline void
+mly_enqueue_cluster(struct mly_softc *sc, struct mly_command_cluster *mcc)
+{
+    int		s;
+
+    s = splcam();
+    TAILQ_INSERT_HEAD(&sc->mly_clusters, mcc, mcc_link);
+    splx(s);
+}
+
+/*
+ * Remove and return the first command cluster, or NULL if none remain
+ * (used when tearing down the controller).
+ */
+static __inline struct mly_command_cluster *
+mly_dequeue_cluster(struct mly_softc *sc)
+{
+    struct mly_command_cluster	*mcc;
+    int				s;
+
+    s = splcam();
+    if ((mcc = TAILQ_FIRST(&sc->mly_clusters)) != NULL)
+	TAILQ_REMOVE(&sc->mly_clusters, mcc, mcc_link);
+    splx(s);
+    return(mcc);
+}
+
+
OpenPOWER on IntegriCloud