From 10ab6671008c9ae3ac81b60ae8fde39d2aa133ad Mon Sep 17 00:00:00 2001
From: achim
Date: Fri, 24 May 2013 09:22:43 +0000
Subject: Driver 'aacraid' added. Supports Adaptec by PMC RAID controller
 families Series 6, 7, 8 and upcoming products. Older Adaptec RAID
 controller families are supported by the 'aac' driver.

Approved by: scottl (mentor)
---
 sys/dev/aacraid/aacraid.c       | 3501 +++++++++++++++++++++++++++++++++++++++
 sys/dev/aacraid/aacraid_cam.c   | 1400 ++++++++++++++++
 sys/dev/aacraid/aacraid_debug.c |  715 ++++++++
 sys/dev/aacraid/aacraid_debug.h |   64 +
 sys/dev/aacraid/aacraid_linux.c |   97 ++
 sys/dev/aacraid/aacraid_pci.c   |  265 +++
 sys/dev/aacraid/aacraid_reg.h   | 1598 ++++++++++++++++++
 sys/dev/aacraid/aacraid_var.h   |  663 ++++++++
 8 files changed, 8303 insertions(+)
 create mode 100644 sys/dev/aacraid/aacraid.c
 create mode 100644 sys/dev/aacraid/aacraid_cam.c
 create mode 100644 sys/dev/aacraid/aacraid_debug.c
 create mode 100644 sys/dev/aacraid/aacraid_debug.h
 create mode 100644 sys/dev/aacraid/aacraid_linux.c
 create mode 100644 sys/dev/aacraid/aacraid_pci.c
 create mode 100644 sys/dev/aacraid/aacraid_reg.h
 create mode 100644 sys/dev/aacraid/aacraid_var.h

diff --git a/sys/dev/aacraid/aacraid.c b/sys/dev/aacraid/aacraid.c
new file mode 100644
index 0000000..6f52a25
--- /dev/null
+++ b/sys/dev/aacraid/aacraid.c
@@ -0,0 +1,3501 @@
+/*-
+ * Copyright (c) 2000 Michael Smith
+ * Copyright (c) 2001 Scott Long
+ * Copyright (c) 2000 BSDi
+ * Copyright (c) 2001-2010 Adaptec, Inc.
+ * Copyright (c) 2010-2012 PMC-Sierra, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include
+__FBSDID("$FreeBSD$");
+
+/*
+ * Driver for the Adaptec by PMC Series 6,7,8,...
families of RAID controllers + */ +#define AAC_DRIVERNAME "aacraid" + +#include "opt_aacraid.h" + +/* #include */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#ifndef FILTER_HANDLED +#define FILTER_HANDLED 0x02 +#endif + +static void aac_add_container(struct aac_softc *sc, + struct aac_mntinforesp *mir, int f, + u_int32_t uid); +static void aac_get_bus_info(struct aac_softc *sc); +static void aac_container_bus(struct aac_softc *sc); +static void aac_daemon(void *arg); +static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw, + int pages, int nseg, int nseg_new); + +/* Command Processing */ +static void aac_timeout(struct aac_softc *sc); +static void aac_command_thread(struct aac_softc *sc); +static int aac_sync_fib(struct aac_softc *sc, u_int32_t command, + u_int32_t xferstate, struct aac_fib *fib, + u_int16_t datasize); +/* Command Buffer Management */ +static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, + int nseg, int error); +static int aac_alloc_commands(struct aac_softc *sc); +static void aac_free_commands(struct aac_softc *sc); +static void aac_unmap_command(struct aac_command *cm); + +/* Hardware Interface */ +static int aac_alloc(struct aac_softc *sc); +static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, + int error); +static int aac_check_firmware(struct aac_softc *sc); +static int aac_init(struct aac_softc *sc); +static int aac_setup_intr(struct aac_softc *sc); + +/* PMC SRC interface */ +static int aac_src_get_fwstatus(struct aac_softc *sc); +static void aac_src_qnotify(struct aac_softc *sc, int qbit); +static int aac_src_get_istatus(struct aac_softc *sc); +static void aac_src_clear_istatus(struct aac_softc *sc, int mask); +static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, + u_int32_t arg0, u_int32_t arg1, + u_int32_t arg2, u_int32_t arg3); +static int aac_src_get_mailbox(struct aac_softc *sc, int mb); +static void aac_src_set_interrupts(struct aac_softc *sc, int enable); +static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm); +static int aac_src_get_outb_queue(struct aac_softc *sc); +static void aac_src_set_outb_queue(struct aac_softc *sc, int index); + +struct aac_interface aacraid_src_interface = { + aac_src_get_fwstatus, + aac_src_qnotify, + aac_src_get_istatus, + aac_src_clear_istatus, + aac_src_set_mailbox, + aac_src_get_mailbox, + aac_src_set_interrupts, + aac_src_send_command, + aac_src_get_outb_queue, + aac_src_set_outb_queue +}; + +/* PMC SRCv interface */ +static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, + u_int32_t arg0, u_int32_t arg1, + u_int32_t arg2, u_int32_t arg3); +static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb); + +struct aac_interface aacraid_srcv_interface = { + aac_src_get_fwstatus, + aac_src_qnotify, + aac_src_get_istatus, + aac_src_clear_istatus, + aac_srcv_set_mailbox, + aac_srcv_get_mailbox, + aac_src_set_interrupts, + aac_src_send_command, + aac_src_get_outb_queue, + aac_src_set_outb_queue +}; + +/* Debugging and Diagnostics */ +static struct aac_code_lookup aac_cpu_variant[] = { + {"i960JX", CPUI960_JX}, + {"i960CX", CPUI960_CX}, + {"i960HX", CPUI960_HX}, + {"i960RX", CPUI960_RX}, + {"i960 80303", CPUI960_80303}, + {"StrongARM SA110", CPUARM_SA110}, + {"PPC603e", CPUPPC_603e}, + {"XScale 80321", CPU_XSCALE_80321}, 
+ {"MIPS 4KC", CPU_MIPS_4KC}, + {"MIPS 5KC", CPU_MIPS_5KC}, + {"Unknown StrongARM", CPUARM_xxx}, + {"Unknown PowerPC", CPUPPC_xxx}, + {NULL, 0}, + {"Unknown processor", 0} +}; + +static struct aac_code_lookup aac_battery_platform[] = { + {"required battery present", PLATFORM_BAT_REQ_PRESENT}, + {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT}, + {"optional battery present", PLATFORM_BAT_OPT_PRESENT}, + {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT}, + {"no battery support", PLATFORM_BAT_NOT_SUPPORTED}, + {NULL, 0}, + {"unknown battery platform", 0} +}; +static void aac_describe_controller(struct aac_softc *sc); +static char *aac_describe_code(struct aac_code_lookup *table, + u_int32_t code); + +/* Management Interface */ +static d_open_t aac_open; +static d_ioctl_t aac_ioctl; +static d_poll_t aac_poll; +#if __FreeBSD_version >= 702000 +static void aac_cdevpriv_dtor(void *arg); +#else +static d_close_t aac_close; +#endif +static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); +static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg); +static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib); +static void aac_request_aif(struct aac_softc *sc); +static int aac_rev_check(struct aac_softc *sc, caddr_t udata); +static int aac_open_aif(struct aac_softc *sc, caddr_t arg); +static int aac_close_aif(struct aac_softc *sc, caddr_t arg); +static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); +static int aac_return_aif(struct aac_softc *sc, + struct aac_fib_context *ctx, caddr_t uptr); +static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); +static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); +static int aac_supported_features(struct aac_softc *sc, caddr_t uptr); +static void aac_ioctl_event(struct aac_softc *sc, + struct aac_event *event, void *arg); +static int aac_reset_adapter(struct aac_softc *sc); +static int aac_get_container_info(struct aac_softc *sc, + struct aac_fib *fib, int cid, + struct aac_mntinforesp *mir, + u_int32_t *uid); +static u_int32_t + aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled); + +static struct cdevsw aacraid_cdevsw = { + .d_version = D_VERSION, + .d_flags = D_NEEDGIANT, + .d_open = aac_open, +#if __FreeBSD_version < 702000 + .d_close = aac_close, +#endif + .d_ioctl = aac_ioctl, + .d_poll = aac_poll, + .d_name = "aacraid", +}; + +MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver"); + +/* sysctl node */ +SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters"); + +/* + * Device Interface + */ + +/* + * Initialize the controller and softc + */ +int +aacraid_attach(struct aac_softc *sc) +{ + int error, unit; + struct aac_fib *fib; + struct aac_mntinforesp mir; + int count = 0, i = 0; + u_int32_t uid; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + sc->hint_flags = device_get_flags(sc->aac_dev); + /* + * Initialize per-controller queues. + */ + aac_initq_free(sc); + aac_initq_ready(sc); + aac_initq_busy(sc); + + /* mark controller as suspended until we get ourselves organised */ + sc->aac_state |= AAC_STATE_SUSPEND; + + /* + * Check that the firmware on the card is supported. + */ + if ((error = aac_check_firmware(sc)) != 0) + return(error); + + /* + * Initialize locks + */ + mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF); + TAILQ_INIT(&sc->aac_container_tqh); + TAILQ_INIT(&sc->aac_ev_cmfree); + +#if __FreeBSD_version >= 800000 + /* Initialize the clock daemon callout. 
*/ + callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0); +#endif + /* + * Initialize the adapter. + */ + if ((error = aac_alloc(sc)) != 0) + return(error); + if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) { + if ((error = aac_init(sc)) != 0) + return(error); + } + + /* + * Allocate and connect our interrupt. + */ + if ((error = aac_setup_intr(sc)) != 0) + return(error); + + /* + * Print a little information about the controller. + */ + aac_describe_controller(sc); + + /* + * Make the control device. + */ + unit = device_get_unit(sc->aac_dev); + sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR, + 0640, "aacraid%d", unit); + sc->aac_dev_t->si_drv1 = sc; + + /* Create the AIF thread */ + if (aac_kthread_create((void(*)(void *))aac_command_thread, sc, + &sc->aifthread, 0, 0, "aacraid%daif", unit)) + panic("Could not create AIF thread"); + + /* Register the shutdown method to only be called post-dump */ + if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown, + sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) + device_printf(sc->aac_dev, + "shutdown event registration failed\n"); + + /* Find containers */ + mtx_lock(&sc->aac_io_lock); + aac_alloc_sync_fib(sc, &fib); + /* loop over possible containers */ + do { + if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0) + continue; + if (i == 0) + count = mir.MntRespCount; + aac_add_container(sc, &mir, 0, uid); + i++; + } while ((i < count) && (i < AAC_MAX_CONTAINERS)); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + + /* Register with CAM for the containers */ + TAILQ_INIT(&sc->aac_sim_tqh); + aac_container_bus(sc); + /* Register with CAM for the non-DASD devices */ + if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) + aac_get_bus_info(sc); + + /* poke the bus to actually attach the child devices */ + bus_generic_attach(sc->aac_dev); + + /* mark the controller up */ + sc->aac_state &= ~AAC_STATE_SUSPEND; + + /* enable interrupts now */ + AAC_UNMASK_INTERRUPTS(sc); + +#if __FreeBSD_version >= 800000 + mtx_lock(&sc->aac_io_lock); + callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc); + mtx_unlock(&sc->aac_io_lock); +#else + { + struct timeval tv; + tv.tv_sec = 60; + tv.tv_usec = 0; + sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv)); + } +#endif + + return(0); +} + +static void +aac_daemon(void *arg) +{ + struct aac_softc *sc; + struct timeval tv; + struct aac_command *cm; + struct aac_fib *fib; + + sc = arg; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + +#if __FreeBSD_version >= 800000 + mtx_assert(&sc->aac_io_lock, MA_OWNED); + if (callout_pending(&sc->aac_daemontime) || + callout_active(&sc->aac_daemontime) == 0) + return; +#else + mtx_lock(&sc->aac_io_lock); +#endif + getmicrotime(&tv); + + if (!aacraid_alloc_command(sc, &cm)) { + fib = cm->cm_fib; + cm->cm_timestamp = time_uptime; + cm->cm_datalen = 0; + cm->cm_flags |= AAC_CMD_WAIT; + + fib->Header.Size = + sizeof(struct aac_fib_header) + sizeof(u_int32_t); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC | + AAC_FIBSTATE_FAST_RESPONSE; + fib->Header.Command = SendHostTime; + *(uint32_t *)fib->data = tv.tv_sec; + + aacraid_map_command_sg(cm, NULL, 0, 0); + aacraid_release_command(cm); + } + +#if __FreeBSD_version >= 800000 + callout_schedule(&sc->aac_daemontime, 30 * 60 * hz); +#else + mtx_unlock(&sc->aac_io_lock); + tv.tv_sec = 30 * 60; + tv.tv_usec = 0; + 
sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv)); +#endif +} + +void +aacraid_add_event(struct aac_softc *sc, struct aac_event *event) +{ + + switch (event->ev_type & AAC_EVENT_MASK) { + case AAC_EVENT_CMFREE: + TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); + break; + default: + device_printf(sc->aac_dev, "aac_add event: unknown event %d\n", + event->ev_type); + break; + } + + return; +} + +/* + * Request information of container #cid + */ +static int +aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid, + struct aac_mntinforesp *mir, u_int32_t *uid) +{ + struct aac_command *cm; + struct aac_fib *fib; + struct aac_mntinfo *mi; + struct aac_cnt_config *ccfg; + + if (sync_fib == NULL) { + if (aacraid_alloc_command(sc, &cm)) { + device_printf(sc->aac_dev, + "Warning, no free command available\n"); + return (-1); + } + fib = cm->cm_fib; + } else { + fib = sync_fib; + } + + mi = (struct aac_mntinfo *)&fib->data[0]; + /* 4KB support?, 64-bit LBA? */ + if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE) + mi->Command = VM_NameServeAllBlk; + else if (sc->flags & AAC_FLAGS_LBA_64BIT) + mi->Command = VM_NameServe64; + else + mi->Command = VM_NameServe; + mi->MntType = FT_FILESYS; + mi->MntCount = cid; + + if (sync_fib) { + if (aac_sync_fib(sc, ContainerCommand, 0, fib, + sizeof(struct aac_mntinfo))) { + device_printf(sc->aac_dev, "Error probing container %d\n", cid); + return (-1); + } + } else { + cm->cm_timestamp = time_uptime; + cm->cm_datalen = 0; + + fib->Header.Size = + sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC | + AAC_FIBSTATE_FAST_RESPONSE; + fib->Header.Command = ContainerCommand; + if (aacraid_wait_command(cm) != 0) { + device_printf(sc->aac_dev, "Error probing container %d\n", cid); + aacraid_release_command(cm); + return (-1); + } + } + bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp)); + + /* UID */ + *uid = cid; + if (mir->MntTable[0].VolType != CT_NONE && + !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) { + if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) + mir->MntTable[0].ObjExtension.BlockSize = 0x200; + + ccfg = (struct aac_cnt_config *)&fib->data[0]; + bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); + ccfg->Command = VM_ContainerConfig; + ccfg->CTCommand.command = CT_CID_TO_32BITS_UID; + ccfg->CTCommand.param[0] = cid; + + if (sync_fib) { + if (aac_sync_fib(sc, ContainerCommand, 0, fib, + sizeof(struct aac_cnt_config) == 0) && + ccfg->CTCommand.param[0] == ST_OK && + mir->MntTable[0].VolType != CT_PASSTHRU) + *uid = ccfg->CTCommand.param[1]; + } else { + fib->Header.Size = + sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC | + AAC_FIBSTATE_FAST_RESPONSE; + fib->Header.Command = ContainerCommand; + if (aacraid_wait_command(cm) == 0 && + ccfg->CTCommand.param[0] == ST_OK && + mir->MntTable[0].VolType != CT_PASSTHRU) + *uid = ccfg->CTCommand.param[1]; + aacraid_release_command(cm); + } + } + + return (0); +} + +/* + * Create a device to represent a new container + */ +static void +aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f, + u_int32_t uid) 
+{ + struct aac_container *co; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + /* + * Check container volume type for validity. Note that many of + * the possible types may never show up. + */ + if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { + co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF, + M_NOWAIT | M_ZERO); + if (co == NULL) { + panic("Out of memory?!"); + } + + co->co_found = f; + bcopy(&mir->MntTable[0], &co->co_mntobj, + sizeof(struct aac_mntobj)); + co->co_uid = uid; + TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); + } +} + +/* + * Allocate resources associated with (sc) + */ +static int +aac_alloc(struct aac_softc *sc) +{ + bus_size_t maxsize; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + /* + * Create DMA tag for mapping buffers into controller-addressable space. + */ + if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ + 1, 0, /* algnmnt, boundary */ + (sc->flags & AAC_FLAGS_SG_64BIT) ? + BUS_SPACE_MAXADDR : + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MAXBSIZE, /* maxsize */ + sc->aac_sg_tablesize, /* nsegments */ + MAXBSIZE, /* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + busdma_lock_mutex, /* lockfunc */ + &sc->aac_io_lock, /* lockfuncarg */ + &sc->aac_buffer_dmat)) { + device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); + return (ENOMEM); + } + + /* + * Create DMA tag for mapping FIBs into controller-addressable space.. + */ + if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) + maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + + sizeof(struct aac_fib_xporthdr) + 31); + else + maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31); + if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ + 1, 0, /* algnmnt, boundary */ + (sc->flags & AAC_FLAGS_4GB_WINDOW) ? + BUS_SPACE_MAXADDR_32BIT : + 0x7fffffff, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + maxsize, /* maxsize */ + 1, /* nsegments */ + maxsize, /* maxsize */ + 0, /* flags */ + NULL, NULL, /* No locking needed */ + &sc->aac_fib_dmat)) { + device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); + return (ENOMEM); + } + + /* + * Create DMA tag for the common structure and allocate it. + */ + maxsize = sizeof(struct aac_common); + maxsize += sc->aac_max_fibs * sizeof(u_int32_t); + if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ + 1, 0, /* algnmnt, boundary */ + (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 
+ BUS_SPACE_MAXADDR_32BIT : + 0x7fffffff, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + maxsize, /* maxsize */ + 1, /* nsegments */ + maxsize, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* No locking needed */ + &sc->aac_common_dmat)) { + device_printf(sc->aac_dev, + "can't allocate common structure DMA tag\n"); + return (ENOMEM); + } + if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, + BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { + device_printf(sc->aac_dev, "can't allocate common structure\n"); + return (ENOMEM); + } + + (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, + sc->aac_common, maxsize, + aac_common_map, sc, 0); + bzero(sc->aac_common, maxsize); + + /* Allocate some FIBs and associated command structs */ + TAILQ_INIT(&sc->aac_fibmap_tqh); + sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), + M_AACRAIDBUF, M_WAITOK|M_ZERO); + mtx_lock(&sc->aac_io_lock); + while (sc->total_fibs < sc->aac_max_fibs) { + if (aac_alloc_commands(sc) != 0) + break; + } + mtx_unlock(&sc->aac_io_lock); + if (sc->total_fibs == 0) + return (ENOMEM); + + return (0); +} + +/* + * Free all of the resources associated with (sc) + * + * Should not be called if the controller is active. + */ +void +aacraid_free(struct aac_softc *sc) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + /* remove the control device */ + if (sc->aac_dev_t != NULL) + destroy_dev(sc->aac_dev_t); + + /* throw away any FIB buffers, discard the FIB DMA tag */ + aac_free_commands(sc); + if (sc->aac_fib_dmat) + bus_dma_tag_destroy(sc->aac_fib_dmat); + + free(sc->aac_commands, M_AACRAIDBUF); + + /* destroy the common area */ + if (sc->aac_common) { + bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); + bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, + sc->aac_common_dmamap); + } + if (sc->aac_common_dmat) + bus_dma_tag_destroy(sc->aac_common_dmat); + + /* disconnect the interrupt handler */ + if (sc->aac_intr) + bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr); + if (sc->aac_irq != NULL) + bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid, + sc->aac_irq); + + /* destroy data-transfer DMA tag */ + if (sc->aac_buffer_dmat) + bus_dma_tag_destroy(sc->aac_buffer_dmat); + + /* destroy the parent DMA tag */ + if (sc->aac_parent_dmat) + bus_dma_tag_destroy(sc->aac_parent_dmat); + + /* release the register window mapping */ + if (sc->aac_regs_res0 != NULL) + bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, + sc->aac_regs_rid0, sc->aac_regs_res0); + if (sc->aac_regs_res1 != NULL) + bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, + sc->aac_regs_rid1, sc->aac_regs_res1); +} + +/* + * Disconnect from the controller completely, in preparation for unload. 
+ */ +int +aacraid_detach(device_t dev) +{ + struct aac_softc *sc; + struct aac_container *co; + struct aac_sim *sim; + int error; + + sc = device_get_softc(dev); + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + +#if __FreeBSD_version >= 800000 + callout_drain(&sc->aac_daemontime); +#else + untimeout(aac_daemon, (void *)sc, sc->timeout_id); +#endif + /* Remove the child containers */ + while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) { + TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); + free(co, M_AACRAIDBUF); + } + + /* Remove the CAM SIMs */ + while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) { + TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link); + error = device_delete_child(dev, sim->sim_dev); + if (error) + return (error); + free(sim, M_AACRAIDBUF); + } + + if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { + sc->aifflags |= AAC_AIFFLAGS_EXIT; + wakeup(sc->aifthread); + tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz); + } + + if (sc->aifflags & AAC_AIFFLAGS_RUNNING) + panic("Cannot shutdown AIF thread"); + + if ((error = aacraid_shutdown(dev))) + return(error); + + EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh); + + aacraid_free(sc); + + mtx_destroy(&sc->aac_io_lock); + + return(0); +} + +/* + * Bring the controller down to a dormant state and detach all child devices. + * + * This function is called before detach or system shutdown. + * + * Note that we can assume that the bioq on the controller is empty, as we won't + * allow shutdown if any device is open. + */ +int +aacraid_shutdown(device_t dev) +{ + struct aac_softc *sc; + struct aac_fib *fib; + struct aac_close_command *cc; + + sc = device_get_softc(dev); + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + sc->aac_state |= AAC_STATE_SUSPEND; + + /* + * Send a Container shutdown followed by a HostShutdown FIB to the + * controller to convince it that we don't want to talk to it anymore. + * We've been closed and all I/O completed already + */ + device_printf(sc->aac_dev, "shutting down controller..."); + + mtx_lock(&sc->aac_io_lock); + aac_alloc_sync_fib(sc, &fib); + cc = (struct aac_close_command *)&fib->data[0]; + + bzero(cc, sizeof(struct aac_close_command)); + cc->Command = VM_CloseAll; + cc->ContainerId = 0xffffffff; + if (aac_sync_fib(sc, ContainerCommand, 0, fib, + sizeof(struct aac_close_command))) + printf("FAILED.\n"); + else + printf("done\n"); + + AAC_MASK_INTERRUPTS(sc); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + + return(0); +} + +/* + * Bring the controller to a quiescent state, ready for system suspend. + */ +int +aacraid_suspend(device_t dev) +{ + struct aac_softc *sc; + + sc = device_get_softc(dev); + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + sc->aac_state |= AAC_STATE_SUSPEND; + + AAC_MASK_INTERRUPTS(sc); + return(0); +} + +/* + * Bring the controller back to a state ready for operation. + */ +int +aacraid_resume(device_t dev) +{ + struct aac_softc *sc; + + sc = device_get_softc(dev); + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + sc->aac_state &= ~AAC_STATE_SUSPEND; + AAC_UNMASK_INTERRUPTS(sc); + return(0); +} + +/* + * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface. 
+ */ +void +aacraid_new_intr_type1(void *arg) +{ + struct aac_softc *sc; + struct aac_command *cm; + struct aac_fib *fib; + u_int32_t bellbits, bellbits_shifted, index, handle; + int isFastResponse, isAif, noMoreAif; + + sc = (struct aac_softc *)arg; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + mtx_lock(&sc->aac_io_lock); + bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R); + if (bellbits & AAC_DB_RESPONSE_SENT_NS) { + bellbits = AAC_DB_RESPONSE_SENT_NS; + AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits); + AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R); /* ODR readback,Prep #238630 */ + /* handle async. status */ + index = sc->aac_host_rrq_idx; + for (;;) { + isFastResponse = isAif = noMoreAif = 0; + /* remove toggle bit (31) */ + handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff); + /* check fast response bit (30) */ + if (handle & 0x40000000) + isFastResponse = 1; + /* check AIF bit (23) */ + else if (handle & 0x00800000) + isAif = TRUE; + handle &= 0x0000ffff; + if (handle == 0) + break; + + cm = sc->aac_commands + (handle - 1); + fib = cm->cm_fib; + if (isAif) { + noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0; + if (!noMoreAif) + aac_handle_aif(sc, fib); + aac_remove_busy(cm); + aacraid_release_command(cm); + } else { + if (isFastResponse) { + fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; + *((u_int32_t *)(fib->data)) = ST_OK; + cm->cm_flags |= AAC_CMD_FASTRESP; + } + aac_remove_busy(cm); + aac_unmap_command(cm); + cm->cm_flags |= AAC_CMD_COMPLETED; + + /* is there a completion handler? */ + if (cm->cm_complete != NULL) { + cm->cm_complete(cm); + } else { + /* assume that someone is sleeping on this command */ + wakeup(cm); + } + sc->flags &= ~AAC_QUEUE_FRZN; + } + + sc->aac_common->ac_host_rrq[index++] = 0; + if (index == sc->aac_max_fibs) + index = 0; + sc->aac_host_rrq_idx = index; + + if ((isAif && !noMoreAif) || sc->aif_pending) + aac_request_aif(sc); + } + } else { + bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT); + AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits); + if (bellbits_shifted & AAC_DB_AIF_PENDING) { + /* handle AIF */ + aac_request_aif(sc); + } else if (bellbits_shifted & AAC_DB_SYNC_COMMAND) { + if (sc->aac_sync_cm) { + cm = sc->aac_sync_cm; + cm->cm_flags |= AAC_CMD_COMPLETED; + /* is there a completion handler? */ + if (cm->cm_complete != NULL) { + cm->cm_complete(cm); + } else { + /* assume that someone is sleeping on this command */ + wakeup(cm); + } + sc->flags &= ~AAC_QUEUE_FRZN; + sc->aac_sync_cm = NULL; + } + } + } + + /* see if we can start some more I/O */ + if ((sc->flags & AAC_QUEUE_FRZN) == 0) + aacraid_startio(sc); + mtx_unlock(&sc->aac_io_lock); +} + +/* + * Handle notification of one or more FIBs coming from the controller. + */ +static void +aac_command_thread(struct aac_softc *sc) +{ + int retval; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + mtx_lock(&sc->aac_io_lock); + sc->aifflags = AAC_AIFFLAGS_RUNNING; + + while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { + + retval = 0; + if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) + retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, + "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz); + + /* + * First see if any FIBs need to be allocated. This needs + * to be called without the driver lock because contigmalloc + * will grab Giant, and would result in an LOR. 
+ */ + if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { + aac_alloc_commands(sc); + sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; + aacraid_startio(sc); + } + + /* + * While we're here, check to see if any commands are stuck. + * This is pretty low-priority, so it's ok if it doesn't + * always fire. + */ + if (retval == EWOULDBLOCK) + aac_timeout(sc); + + /* Check the hardware printf message buffer */ + if (sc->aac_common->ac_printf[0] != 0) + aac_print_printf(sc); + } + sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; + mtx_unlock(&sc->aac_io_lock); + wakeup(sc->aac_dev); + + aac_kthread_exit(0); +} + +/* + * Submit a command to the controller, return when it completes. + * XXX This is very dangerous! If the card has gone out to lunch, we could + * be stuck here forever. At the same time, signals are not caught + * because there is a risk that a signal could wakeup the sleep before + * the card has a chance to complete the command. Since there is no way + * to cancel a command that is in progress, we can't protect against the + * card completing a command late and spamming the command and data + * memory. So, we are held hostage until the command completes. + */ +int +aacraid_wait_command(struct aac_command *cm) +{ + struct aac_softc *sc; + int error; + + sc = cm->cm_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + /* Put the command on the ready queue and get things going */ + aac_enqueue_ready(cm); + aacraid_startio(sc); + error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0); + return(error); +} + +/* + *Command Buffer Management + */ + +/* + * Allocate a command. + */ +int +aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp) +{ + struct aac_command *cm; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + if ((cm = aac_dequeue_free(sc)) == NULL) { + if (sc->total_fibs < sc->aac_max_fibs) { + sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS; + wakeup(sc->aifthread); + } + return (EBUSY); + } + + *cmp = cm; + return(0); +} + +/* + * Release a command back to the freelist. + */ +void +aacraid_release_command(struct aac_command *cm) +{ + struct aac_event *event; + struct aac_softc *sc; + + sc = cm->cm_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + /* (re)initialize the command/FIB */ + cm->cm_sgtable = NULL; + cm->cm_flags = 0; + cm->cm_complete = NULL; + cm->cm_ccb = NULL; + cm->cm_passthr_dmat = 0; + cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; + cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; + cm->cm_fib->Header.Unused = 0; + cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; + + /* + * These are duplicated in aac_start to cover the case where an + * intermediate stage may have destroyed them. They're left + * initialized here for debugging purposes only. + */ + cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; + cm->cm_fib->Header.Handle = 0; + + aac_enqueue_free(cm); + + /* + * Dequeue all events so that there's no risk of events getting + * stranded. + */ + while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { + TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); + event->ev_callback(sc, event, event->ev_arg); + } +} + +/* + * Map helper for command/FIB allocation. + */ +static void +aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + uint64_t *fibphys; + + fibphys = (uint64_t *)arg; + + *fibphys = segs[0].ds_addr; +} + +/* + * Allocate and initialize commands/FIBs for this adapter. 
+ */ +static int +aac_alloc_commands(struct aac_softc *sc) +{ + struct aac_command *cm; + struct aac_fibmap *fm; + uint64_t fibphys; + int i, error; + u_int32_t maxsize; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) + return (ENOMEM); + + fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO); + if (fm == NULL) + return (ENOMEM); + + mtx_unlock(&sc->aac_io_lock); + /* allocate the FIBs in DMAable memory and load them */ + if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, + BUS_DMA_NOWAIT, &fm->aac_fibmap)) { + device_printf(sc->aac_dev, + "Not enough contiguous memory available.\n"); + free(fm, M_AACRAIDBUF); + mtx_lock(&sc->aac_io_lock); + return (ENOMEM); + } + + maxsize = sc->aac_max_fib_size + 31; + if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) + maxsize += sizeof(struct aac_fib_xporthdr); + /* Ignore errors since this doesn't bounce */ + (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, + sc->aac_max_fibs_alloc * maxsize, + aac_map_command_helper, &fibphys, 0); + mtx_lock(&sc->aac_io_lock); + + /* initialize constant fields in the command structure */ + bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize); + for (i = 0; i < sc->aac_max_fibs_alloc; i++) { + cm = sc->aac_commands + sc->total_fibs; + fm->aac_commands = cm; + cm->cm_sc = sc; + cm->cm_fib = (struct aac_fib *) + ((u_int8_t *)fm->aac_fibs + i * maxsize); + cm->cm_fibphys = fibphys + i * maxsize; + if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) { + u_int64_t fibphys_aligned; + fibphys_aligned = + (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31; + cm->cm_fib = (struct aac_fib *) + ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys)); + cm->cm_fibphys = fibphys_aligned; + } else { + u_int64_t fibphys_aligned; + fibphys_aligned = (cm->cm_fibphys + 31) & ~31; + cm->cm_fib = (struct aac_fib *) + ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys)); + cm->cm_fibphys = fibphys_aligned; + } + cm->cm_index = sc->total_fibs; + + if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, + &cm->cm_datamap)) != 0) + break; + if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1) + aacraid_release_command(cm); + sc->total_fibs++; + } + + if (i > 0) { + TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs); + return (0); + } + + bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); + bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); + free(fm, M_AACRAIDBUF); + return (ENOMEM); +} + +/* + * Free FIBs owned by this adapter. + */ +static void +aac_free_commands(struct aac_softc *sc) +{ + struct aac_fibmap *fm; + struct aac_command *cm; + int i; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { + + TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); + /* + * We check against total_fibs to handle partially + * allocated blocks. + */ + for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { + cm = fm->aac_commands + i; + bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); + } + bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); + bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); + free(fm, M_AACRAIDBUF); + } +} + +/* + * Command-mapping helper function - populate this command's s/g table. 
+ */ +void +aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + struct aac_softc *sc; + struct aac_command *cm; + struct aac_fib *fib; + int i; + + cm = (struct aac_command *)arg; + sc = cm->cm_sc; + fib = cm->cm_fib; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg); + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + /* copy into the FIB */ + if (cm->cm_sgtable != NULL) { + if (fib->Header.Command == RawIo2) { + struct aac_raw_io2 *raw; + struct aac_sge_ieee1212 *sg; + u_int32_t min_size = PAGE_SIZE, cur_size; + int conformable = TRUE; + + raw = (struct aac_raw_io2 *)&fib->data[0]; + sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable; + raw->sgeCnt = nseg; + + for (i = 0; i < nseg; i++) { + cur_size = segs[i].ds_len; + sg[i].addrHigh = 0; + *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr; + sg[i].length = cur_size; + sg[i].flags = 0; + if (i == 0) { + raw->sgeFirstSize = cur_size; + } else if (i == 1) { + raw->sgeNominalSize = cur_size; + min_size = cur_size; + } else if ((i+1) < nseg && + cur_size != raw->sgeNominalSize) { + conformable = FALSE; + if (cur_size < min_size) + min_size = cur_size; + } + } + + /* not conformable: evaluate required sg elements */ + if (!conformable) { + int j, err_found, nseg_new = nseg; + for (i = min_size / PAGE_SIZE; i >= 1; --i) { + err_found = FALSE; + nseg_new = 2; + for (j = 1; j < nseg - 1; ++j) { + if (sg[j].length % (i*PAGE_SIZE)) { + err_found = TRUE; + break; + } + nseg_new += (sg[j].length / (i*PAGE_SIZE)); + } + if (!err_found) + break; + } + if (i>0 && nseg_new<=sc->aac_sg_tablesize && + !(sc->hint_flags & 4)) + nseg = aac_convert_sgraw2(sc, + raw, i, nseg, nseg_new); + } else { + raw->flags |= RIO2_SGL_CONFORMANT; + } + + /* update the FIB size for the s/g count */ + fib->Header.Size += nseg * + sizeof(struct aac_sge_ieee1212); + + } else if (fib->Header.Command == RawIo) { + struct aac_sg_tableraw *sg; + sg = (struct aac_sg_tableraw *)cm->cm_sgtable; + sg->SgCount = nseg; + for (i = 0; i < nseg; i++) { + sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; + sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; + sg->SgEntryRaw[i].Next = 0; + sg->SgEntryRaw[i].Prev = 0; + sg->SgEntryRaw[i].Flags = 0; + } + /* update the FIB size for the s/g count */ + fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); + } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { + struct aac_sg_table *sg; + sg = cm->cm_sgtable; + sg->SgCount = nseg; + for (i = 0; i < nseg; i++) { + sg->SgEntry[i].SgAddress = segs[i].ds_addr; + sg->SgEntry[i].SgByteCount = segs[i].ds_len; + } + /* update the FIB size for the s/g count */ + fib->Header.Size += nseg*sizeof(struct aac_sg_entry); + } else { + struct aac_sg_table64 *sg; + sg = (struct aac_sg_table64 *)cm->cm_sgtable; + sg->SgCount = nseg; + for (i = 0; i < nseg; i++) { + sg->SgEntry64[i].SgAddress = segs[i].ds_addr; + sg->SgEntry64[i].SgByteCount = segs[i].ds_len; + } + /* update the FIB size for the s/g count */ + fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); + } + } + + /* Fix up the address values in the FIB. Use the command array index + * instead of a pointer since these fields are only 32 bits. 
Shift + * the SenderFibAddress over to make room for the fast response bit + * and for the AIF bit + */ + cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); + cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; + + /* save a pointer to the command for speedy reverse-lookup */ + cm->cm_fib->Header.Handle += cm->cm_index + 1; + + if (cm->cm_passthr_dmat == 0) { + if (cm->cm_flags & AAC_CMD_DATAIN) + bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, + BUS_DMASYNC_PREREAD); + if (cm->cm_flags & AAC_CMD_DATAOUT) + bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, + BUS_DMASYNC_PREWRITE); + } + + cm->cm_flags |= AAC_CMD_MAPPED; + + if (sc->flags & AAC_FLAGS_SYNC_MODE) { + u_int32_t wait = 0; + aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL); + } else if (cm->cm_flags & AAC_CMD_WAIT) { + aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL); + } else { + int count = 10000000L; + while (AAC_SEND_COMMAND(sc, cm) != 0) { + if (--count == 0) { + aac_unmap_command(cm); + sc->flags |= AAC_QUEUE_FRZN; + aac_requeue_ready(cm); + } + DELAY(5); /* wait 5 usec. */ + } + } +} + + +static int +aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw, + int pages, int nseg, int nseg_new) +{ + struct aac_sge_ieee1212 *sge; + int i, j, pos; + u_int32_t addr_low; + + sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212), + M_AACRAIDBUF, M_NOWAIT|M_ZERO); + if (sge == NULL) + return nseg; + + for (i = 1, pos = 1; i < nseg - 1; ++i) { + for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) { + addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE; + sge[pos].addrLow = addr_low; + sge[pos].addrHigh = raw->sge[i].addrHigh; + if (addr_low < raw->sge[i].addrLow) + sge[pos].addrHigh++; + sge[pos].length = pages * PAGE_SIZE; + sge[pos].flags = 0; + pos++; + } + } + sge[pos] = raw->sge[nseg-1]; + for (i = 1; i < nseg_new; ++i) + raw->sge[i] = sge[i]; + + free(sge, M_AACRAIDBUF); + raw->sgeCnt = nseg_new; + raw->flags |= RIO2_SGL_CONFORMANT; + raw->sgeNominalSize = pages * PAGE_SIZE; + return nseg_new; +} + + +/* + * Unmap a command from controller-visible space. + */ +static void +aac_unmap_command(struct aac_command *cm) +{ + struct aac_softc *sc; + + sc = cm->cm_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + if (!(cm->cm_flags & AAC_CMD_MAPPED)) + return; + + if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) { + if (cm->cm_flags & AAC_CMD_DATAIN) + bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, + BUS_DMASYNC_POSTREAD); + if (cm->cm_flags & AAC_CMD_DATAOUT) + bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, + BUS_DMASYNC_POSTWRITE); + + bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); + } + cm->cm_flags &= ~AAC_CMD_MAPPED; +} + +/* + * Hardware Interface + */ + +/* + * Initialize the adapter. + */ +static void +aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + struct aac_softc *sc; + + sc = (struct aac_softc *)arg; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + sc->aac_common_busaddr = segs[0].ds_addr; +} + +static int +aac_check_firmware(struct aac_softc *sc) +{ + u_int32_t code, major, minor, maxsize; + u_int32_t options = 0, atu_size = 0, status; + time_t then; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + /* + * Wait for the adapter to come ready. 
+ */ + then = time_uptime; + do { + code = AAC_GET_FWSTATUS(sc); + if (code & AAC_SELF_TEST_FAILED) { + device_printf(sc->aac_dev, "FATAL: selftest failed\n"); + return(ENXIO); + } + if (code & AAC_KERNEL_PANIC) { + device_printf(sc->aac_dev, + "FATAL: controller kernel panic"); + return(ENXIO); + } + if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { + device_printf(sc->aac_dev, + "FATAL: controller not coming ready, " + "status %x\n", code); + return(ENXIO); + } + } while (!(code & AAC_UP_AND_RUNNING)); + + /* + * Retrieve the firmware version numbers. Dell PERC2/QC cards with + * firmware version 1.x are not compatible with this driver. + */ + if (sc->flags & AAC_FLAGS_PERC2QC) { + if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, + NULL, NULL)) { + device_printf(sc->aac_dev, + "Error reading firmware version\n"); + return (EIO); + } + + /* These numbers are stored as ASCII! */ + major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; + minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; + if (major == 1) { + device_printf(sc->aac_dev, + "Firmware version %d.%d is not supported.\n", + major, minor); + return (EINVAL); + } + } + /* + * Retrieve the capabilities/supported options word so we know what + * work-arounds to enable. Some firmware revs don't support this + * command. + */ + if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) { + if (status != AAC_SRB_STS_INVALID_REQUEST) { + device_printf(sc->aac_dev, + "RequestAdapterInfo failed\n"); + return (EIO); + } + } else { + options = AAC_GET_MAILBOX(sc, 1); + atu_size = AAC_GET_MAILBOX(sc, 2); + sc->supported_options = options; + + if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && + (sc->flags & AAC_FLAGS_NO4GB) == 0) + sc->flags |= AAC_FLAGS_4GB_WINDOW; + if (options & AAC_SUPPORTED_NONDASD) + sc->flags |= AAC_FLAGS_ENABLE_CAM; + if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 + && (sizeof(bus_addr_t) > 4) + && (sc->hint_flags & 0x1)) { + device_printf(sc->aac_dev, + "Enabling 64-bit address support\n"); + sc->flags |= AAC_FLAGS_SG_64BIT; + } + if (sc->aac_if.aif_send_command) { + if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) || + (options & AAC_SUPPORTED_NEW_COMM_TYPE4)) + sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34; + else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1) + sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1; + else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2) + sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2; + } + if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) + sc->flags |= AAC_FLAGS_ARRAY_64BIT; + } + + if (!(sc->flags & AAC_FLAGS_NEW_COMM)) { + device_printf(sc->aac_dev, "Communication interface not supported!\n"); + return (ENXIO); + } + + if (sc->hint_flags & 2) { + device_printf(sc->aac_dev, + "Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n"); + sc->flags |= AAC_FLAGS_SYNC_MODE; + } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) { + device_printf(sc->aac_dev, + "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n"); + sc->flags |= AAC_FLAGS_SYNC_MODE; + } + + /* Check for broken hardware that does a lower number of commands */ + sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); + + /* Remap mem. 
resource, if required */ + if (atu_size > rman_get_size(sc->aac_regs_res0)) { + bus_release_resource( + sc->aac_dev, SYS_RES_MEMORY, + sc->aac_regs_rid0, sc->aac_regs_res0); + sc->aac_regs_res0 = bus_alloc_resource( + sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0, + 0ul, ~0ul, atu_size, RF_ACTIVE); + if (sc->aac_regs_res0 == NULL) { + sc->aac_regs_res0 = bus_alloc_resource_any( + sc->aac_dev, SYS_RES_MEMORY, + &sc->aac_regs_rid0, RF_ACTIVE); + if (sc->aac_regs_res0 == NULL) { + device_printf(sc->aac_dev, + "couldn't allocate register window\n"); + return (ENXIO); + } + } + sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0); + sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0); + } + + /* Read preferred settings */ + sc->aac_max_fib_size = sizeof(struct aac_fib); + sc->aac_max_sectors = 128; /* 64KB */ + sc->aac_max_aif = 1; + if (sc->flags & AAC_FLAGS_SG_64BIT) + sc->aac_sg_tablesize = (AAC_FIB_DATASIZE + - sizeof(struct aac_blockwrite64)) + / sizeof(struct aac_sg_entry64); + else + sc->aac_sg_tablesize = (AAC_FIB_DATASIZE + - sizeof(struct aac_blockwrite)) + / sizeof(struct aac_sg_entry); + + if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) { + options = AAC_GET_MAILBOX(sc, 1); + sc->aac_max_fib_size = (options & 0xFFFF); + sc->aac_max_sectors = (options >> 16) << 1; + options = AAC_GET_MAILBOX(sc, 2); + sc->aac_sg_tablesize = (options >> 16); + options = AAC_GET_MAILBOX(sc, 3); + sc->aac_max_fibs = (options & 0xFFFF); + options = AAC_GET_MAILBOX(sc, 4); + sc->aac_max_aif = (options & 0xFFFF); + } + + maxsize = sc->aac_max_fib_size + 31; + if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) + maxsize += sizeof(struct aac_fib_xporthdr); + if (maxsize > PAGE_SIZE) { + sc->aac_max_fib_size -= (maxsize - PAGE_SIZE); + maxsize = PAGE_SIZE; + } + sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize; + + if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { + sc->flags |= AAC_FLAGS_RAW_IO; + device_printf(sc->aac_dev, "Enable Raw I/O\n"); + } + if ((sc->flags & AAC_FLAGS_RAW_IO) && + (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { + sc->flags |= AAC_FLAGS_LBA_64BIT; + device_printf(sc->aac_dev, "Enable 64-bit array\n"); + } + + aacraid_get_fw_debug_buffer(sc); + return (0); +} + +static int +aac_init(struct aac_softc *sc) +{ + struct aac_adapter_init *ip; + int error; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + /* reset rrq index */ + sc->aac_host_rrq_idx = 0; + + /* + * Fill in the init structure. This tells the adapter about the + * physical location of various important shared data structures. + */ + ip = &sc->aac_common->ac_init; + ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; + if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { + ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; + sc->flags |= AAC_FLAGS_RAW_IO; + } + ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION; + + ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + + offsetof(struct aac_common, ac_fibs); + ip->AdapterFibsVirtualAddress = 0; + ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); + ip->AdapterFibAlign = sizeof(struct aac_fib); + + ip->PrintfBufferAddress = sc->aac_common_busaddr + + offsetof(struct aac_common, ac_printf); + ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; + + /* + * The adapter assumes that pages are 4K in size, except on some + * broken firmware versions that do the page->byte conversion twice, + * therefore 'assuming' that this value is in 16MB units (2^24). + * Round up since the granularity is so high. 
+ */ + ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; + if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { + ip->HostPhysMemPages = + (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; + } + ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ + + ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED; + if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) { + ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6; + ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | + AAC_INITFLAGS_FAST_JBOD_SUPPORTED); + ip->MiniPortRevision = 0L; + device_printf(sc->aac_dev, "New comm. interface type1 enabled\n"); + } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { + ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7; + ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | + AAC_INITFLAGS_FAST_JBOD_SUPPORTED); + device_printf(sc->aac_dev, "New comm. interface type2 enabled\n"); + } + ip->MaxNumAif = sc->aac_max_aif; + ip->HostRRQ_AddrLow = + sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq); + /* always 32-bit address */ + ip->HostRRQ_AddrHigh = 0; + + if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { + ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM; + ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME; + device_printf(sc->aac_dev, "Power Management enabled\n"); + } + + ip->MaxIoCommands = sc->aac_max_fibs; + ip->MaxIoSize = sc->aac_max_sectors << 9; + ip->MaxFibSize = sc->aac_max_fib_size; + + /* + * Do controller-type-specific initialisation + */ + AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0); + + /* + * Give the init structure to the controller. + */ + if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT, + sc->aac_common_busaddr + + offsetof(struct aac_common, ac_init), 0, 0, 0, + NULL, NULL)) { + device_printf(sc->aac_dev, + "error establishing init structure\n"); + error = EIO; + goto out; + } + + error = 0; +out: + return(error); +} + +static int +aac_setup_intr(struct aac_softc *sc) +{ + sc->aac_irq_rid = 0; + if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, + &sc->aac_irq_rid, + RF_SHAREABLE | + RF_ACTIVE)) == NULL) { + device_printf(sc->aac_dev, "can't allocate interrupt\n"); + return (EINVAL); + } + if (aac_bus_setup_intr(sc->aac_dev, sc->aac_irq, + INTR_MPSAFE|INTR_TYPE_BIO, NULL, + aacraid_new_intr_type1, sc, &sc->aac_intr)) { + device_printf(sc->aac_dev, "can't set up interrupt\n"); + return (EINVAL); + } + return (0); +} + +/* + * Send a synchronous command to the controller and wait for a result. + * Indicate if the controller completed the command with an error status. 
+ */ +int +aacraid_sync_command(struct aac_softc *sc, u_int32_t command, + u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, + u_int32_t *sp, u_int32_t *r1) +{ + time_t then; + u_int32_t status; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + /* populate the mailbox */ + AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); + + /* ensure the sync command doorbell flag is cleared */ + AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); + + /* then set it to signal the adapter */ + AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); + + if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) { + /* spin waiting for the command to complete */ + then = time_uptime; + do { + if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { + fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); + return(EIO); + } + } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); + + /* clear the completion flag */ + AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); + + /* get the command status */ + status = AAC_GET_MAILBOX(sc, 0); + if (sp != NULL) + *sp = status; + + /* return parameter */ + if (r1 != NULL) + *r1 = AAC_GET_MAILBOX(sc, 1); + + if (status != AAC_SRB_STS_SUCCESS) + return (-1); + } + return(0); +} + +static int +aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, + struct aac_fib *fib, u_int16_t datasize) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + if (datasize > AAC_FIB_DATASIZE) + return(EINVAL); + + /* + * Set up the sync FIB + */ + fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY; + fib->Header.XferState |= xferstate; + fib->Header.Command = command; + fib->Header.StructType = AAC_FIBTYPE_TFIB; + fib->Header.Size = sizeof(struct aac_fib_header) + datasize; + fib->Header.SenderSize = sizeof(struct aac_fib); + fib->Header.SenderFibAddress = 0; /* Not needed */ + fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr + + offsetof(struct aac_common, + ac_sync_fib); + + /* + * Give the FIB to the controller, wait for a response. + */ + if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, + fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) { + fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); + return(EIO); + } + + return (0); +} + +/* + * Check for commands that have been outstanding for a suspiciously long time, + * and complain about them. + */ +static void +aac_timeout(struct aac_softc *sc) +{ + struct aac_command *cm; + time_t deadline; + int timedout, code; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + /* + * Traverse the busy command list, bitch about late commands once + * only. + */ + timedout = 0; + deadline = time_uptime - AAC_CMD_TIMEOUT; + TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { + if ((cm->cm_timestamp < deadline) + /* && !(cm->cm_flags & AAC_CMD_TIMEDOUT) */) { + cm->cm_flags |= AAC_CMD_TIMEDOUT; + device_printf(sc->aac_dev, + "COMMAND %p TIMEOUT AFTER %d SECONDS\n", + cm, (int)(time_uptime-cm->cm_timestamp)); + AAC_PRINT_FIB(sc, cm->cm_fib); + timedout++; + } + } + + if (timedout) { + code = AAC_GET_FWSTATUS(sc); + if (code != AAC_UP_AND_RUNNING) { + device_printf(sc->aac_dev, "WARNING! Controller is no " + "longer running! code= 0x%x\n", code); + aac_reset_adapter(sc); + } + } + aacraid_print_queues(sc); +} + +/* + * Interface Function Vectors + */ + +/* + * Read the current firmware status word. 
+ */ +static int +aac_src_get_fwstatus(struct aac_softc *sc) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR)); +} + +/* + * Notify the controller of a change in a given queue + */ +static void +aac_src_qnotify(struct aac_softc *sc, int qbit) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT); +} + +/* + * Get the interrupt reason bits + */ +static int +aac_src_get_istatus(struct aac_softc *sc) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + return(AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT); +} + +/* + * Clear some interrupt reason bits + */ +static void +aac_src_clear_istatus(struct aac_softc *sc, int mask) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT); +} + +/* + * Populate the mailbox and set the command word + */ +static void +aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, + u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command); + AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0); + AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1); + AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2); + AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3); +} + +static void +aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, + u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command); + AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0); + AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1); + AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2); + AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3); +} + +/* + * Fetch the immediate command status word + */ +static int +aac_src_get_mailbox(struct aac_softc *sc, int mb) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4))); +} + +static int +aac_srcv_get_mailbox(struct aac_softc *sc, int mb) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4))); +} + +/* + * Set/clear interrupt masks + */ +static void +aac_src_set_interrupts(struct aac_softc *sc, int enable) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); + + if (enable) { + AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, ~AAC_DB_INT_NEW_COMM_TYPE1); + } else { + AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, ~0); + } +} + +/* + * New comm. interface: Send command functions + */ +static int +aac_src_send_command(struct aac_softc *sc, struct aac_command *cm) +{ + struct aac_fib_xporthdr *pFibX; + u_int32_t fibsize, high_addr; + u_int64_t address; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. 
type1)"); + + if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { + /* Calculate the amount to the fibsize bits */ + fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1; + /* Fill new FIB header */ + address = cm->cm_fibphys; + high_addr = (u_int32_t)(address >> 32); + if (high_addr == 0L) { + cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2; + cm->cm_fib->Header.u.TimeStamp = 0L; + } else { + cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64; + cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr; + } + cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address; + } else { + /* Calculate the amount to the fibsize bits */ + fibsize = (sizeof(struct aac_fib_xporthdr) + + cm->cm_fib->Header.Size + 127) / 128 - 1; + /* Fill XPORT header */ + pFibX = (struct aac_fib_xporthdr *) + ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr)); + pFibX->Handle = cm->cm_fib->Header.Handle; + pFibX->HostAddress = cm->cm_fibphys; + pFibX->Size = cm->cm_fib->Header.Size; + address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr); + high_addr = (u_int32_t)(address >> 32); + } + + if (fibsize > 31) + fibsize = 31; + aac_enqueue_busy(cm); + if (high_addr) { + AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr); + AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize); + } else { + AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize); + } + return 0; +} + +/* + * New comm. interface: get, set outbound queue index + */ +static int +aac_src_get_outb_queue(struct aac_softc *sc) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + return(-1); +} + +static void +aac_src_set_outb_queue(struct aac_softc *sc, int index) +{ + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); +} + +/* + * Debugging and Diagnostics + */ + +/* + * Print some information about the controller. 
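
One detail of aac_src_send_command() above is easy to miss: the word written to the inbound queue register is the FIB's bus address with the FIB size folded into its low bits, where the size is expressed in 128-byte units minus one and clamped to 31 so it fits in five bits. Assuming the FIB address is at least 128-byte aligned (so those low bits are zero), adding and OR-ing the size are equivalent. A minimal sketch of the encoding, with illustrative names:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack a FIB bus address and its byte size into one inbound-queue word. */
    static uint32_t pack_inbound_word(uint64_t fib_phys, uint32_t fib_bytes)
    {
        uint32_t fibsize = (fib_bytes + 127) / 128 - 1;  /* 128-byte units, 0-based */

        if (fibsize > 31)
            fibsize = 31;                /* only five low bits are available */
        return (uint32_t)fib_phys + fibsize;  /* '+' equals '|' when aligned */
    }

    int main(void)
    {
        /* A 512-byte FIB at bus address 0x10000 encodes as 0x10003. */
        assert(pack_inbound_word(0x10000, 512) == 0x10003);
        printf("0x%x\n", (unsigned)pack_inbound_word(0x10000, 512));
        return 0;
    }
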
+ */ +static void +aac_describe_controller(struct aac_softc *sc) +{ + struct aac_fib *fib; + struct aac_adapter_info *info; + char *adapter_type = "Adaptec RAID controller"; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + mtx_lock(&sc->aac_io_lock); + aac_alloc_sync_fib(sc, &fib); + + if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { + fib->data[0] = 0; + if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) + device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n"); + else { + struct aac_supplement_adapter_info *supp_info; + + supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]); + adapter_type = (char *)supp_info->AdapterTypeText; + sc->aac_feature_bits = supp_info->FeatureBits; + sc->aac_support_opt2 = supp_info->SupportedOptions2; + } + } + device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n", + adapter_type, + AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, + AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); + + fib->data[0] = 0; + if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { + device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + return; + } + + /* save the kernel revision structure for later use */ + info = (struct aac_adapter_info *)&fib->data[0]; + sc->aac_revision = info->KernelRevision; + + if (bootverbose) { + device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " + "(%dMB cache, %dMB execution), %s\n", + aac_describe_code(aac_cpu_variant, info->CpuVariant), + info->ClockSpeed, info->TotalMem / (1024 * 1024), + info->BufferMem / (1024 * 1024), + info->ExecutionMem / (1024 * 1024), + aac_describe_code(aac_battery_platform, + info->batteryPlatform)); + + device_printf(sc->aac_dev, + "Kernel %d.%d-%d, Build %d, S/N %6X\n", + info->KernelRevision.external.comp.major, + info->KernelRevision.external.comp.minor, + info->KernelRevision.external.comp.dash, + info->KernelRevision.buildNumber, + (u_int32_t)(info->SerialNumber & 0xffffff)); + + device_printf(sc->aac_dev, "Supported Options=%b\n", + sc->supported_options, + "\20" + "\1SNAPSHOT" + "\2CLUSTERS" + "\3WCACHE" + "\4DATA64" + "\5HOSTTIME" + "\6RAID50" + "\7WINDOW4GB" + "\10SCSIUPGD" + "\11SOFTERR" + "\12NORECOND" + "\13SGMAP64" + "\14ALARM" + "\15NONDASD" + "\16SCSIMGT" + "\17RAIDSCSI" + "\21ADPTINFO" + "\22NEWCOMM" + "\23ARRAY64BIT" + "\24HEATSENSOR"); + } + + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); +} + +/* + * Look up a text description of a numeric error code and return a pointer to + * same. 
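
aac_describe_code(), defined just below, depends on a table convention rather than an explicit bounds check: each lookup table ends with a NULL-string sentinel, and the entry stored after the sentinel is the fallback text returned when no code matches. A self-contained model of that convention (the table contents here are made up):

    #include <stdint.h>
    #include <stdio.h>

    struct code_lookup {
        const char *string;
        uint32_t    code;
    };

    static struct code_lookup cpu_table[] = {
        { "i960JX",  1 },
        { "PPC603e", 7 },
        { NULL,      0 },    /* sentinel terminates the search            */
        { "unknown", 0 },    /* entry past the sentinel is the default    */
    };

    static const char *
    describe_code(struct code_lookup *table, uint32_t code)
    {
        int i;

        for (i = 0; table[i].string != NULL; i++)
            if (table[i].code == code)
                return (table[i].string);
        return (table[i + 1].string);    /* i now indexes the sentinel */
    }

    int main(void)
    {
        printf("%s %s\n", describe_code(cpu_table, 7),
            describe_code(cpu_table, 42));       /* "PPC603e unknown" */
        return 0;
    }
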
+ */ +static char * +aac_describe_code(struct aac_code_lookup *table, u_int32_t code) +{ + int i; + + for (i = 0; table[i].string != NULL; i++) + if (table[i].code == code) + return(table[i].string); + return(table[i + 1].string); +} + +/* + * Management Interface + */ + +static int +aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) +{ + struct aac_softc *sc; + + sc = dev->si_drv1; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); +#if __FreeBSD_version >= 702000 + device_busy(sc->aac_dev); + devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); +#endif + return 0; +} + +static int +aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) +{ + union aac_statrequest *as; + struct aac_softc *sc; + int error = 0; + + as = (union aac_statrequest *)arg; + sc = dev->si_drv1; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + switch (cmd) { + case AACIO_STATS: + switch (as->as_item) { + case AACQ_FREE: + case AACQ_READY: + case AACQ_BUSY: + bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, + sizeof(struct aac_qstat)); + break; + default: + error = ENOENT; + break; + } + break; + + case FSACTL_SENDFIB: + case FSACTL_SEND_LARGE_FIB: + arg = *(caddr_t*)arg; + case FSACTL_LNX_SENDFIB: + case FSACTL_LNX_SEND_LARGE_FIB: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); + error = aac_ioctl_sendfib(sc, arg); + break; + case FSACTL_SEND_RAW_SRB: + arg = *(caddr_t*)arg; + case FSACTL_LNX_SEND_RAW_SRB: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); + error = aac_ioctl_send_raw_srb(sc, arg); + break; + case FSACTL_AIF_THREAD: + case FSACTL_LNX_AIF_THREAD: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); + error = EINVAL; + break; + case FSACTL_OPEN_GET_ADAPTER_FIB: + arg = *(caddr_t*)arg; + case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); + error = aac_open_aif(sc, arg); + break; + case FSACTL_GET_NEXT_ADAPTER_FIB: + arg = *(caddr_t*)arg; + case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); + error = aac_getnext_aif(sc, arg); + break; + case FSACTL_CLOSE_GET_ADAPTER_FIB: + arg = *(caddr_t*)arg; + case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); + error = aac_close_aif(sc, arg); + break; + case FSACTL_MINIPORT_REV_CHECK: + arg = *(caddr_t*)arg; + case FSACTL_LNX_MINIPORT_REV_CHECK: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); + error = aac_rev_check(sc, arg); + break; + case FSACTL_QUERY_DISK: + arg = *(caddr_t*)arg; + case FSACTL_LNX_QUERY_DISK: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); + error = aac_query_disk(sc, arg); + break; + case FSACTL_DELETE_DISK: + case FSACTL_LNX_DELETE_DISK: + /* + * We don't trust the underland to tell us when to delete a + * container, rather we rely on an AIF coming from the + * controller + */ + error = 0; + break; + case FSACTL_GET_PCI_INFO: + arg = *(caddr_t*)arg; + case FSACTL_LNX_GET_PCI_INFO: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); + error = aac_get_pci_info(sc, arg); + break; + case FSACTL_GET_FEATURES: + arg = *(caddr_t*)arg; + case FSACTL_LNX_GET_FEATURES: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); + error = aac_supported_features(sc, arg); + break; + default: + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); + error = EINVAL; 
+ break; + } + return(error); +} + +static int +aac_poll(struct cdev *dev, int poll_events, struct thread *td) +{ + struct aac_softc *sc; + struct aac_fib_context *ctx; + int revents; + + sc = dev->si_drv1; + revents = 0; + + mtx_lock(&sc->aac_io_lock); + if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { + for (ctx = sc->fibctx; ctx; ctx = ctx->next) { + if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { + revents |= poll_events & (POLLIN | POLLRDNORM); + break; + } + } + } + mtx_unlock(&sc->aac_io_lock); + + if (revents == 0) { + if (poll_events & (POLLIN | POLLRDNORM)) + selrecord(td, &sc->rcv_select); + } + + return (revents); +} + +static void +aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) +{ + + switch (event->ev_type) { + case AAC_EVENT_CMFREE: + mtx_assert(&sc->aac_io_lock, MA_OWNED); + if (aacraid_alloc_command(sc, (struct aac_command **)arg)) { + aacraid_add_event(sc, event); + return; + } + free(event, M_AACRAIDBUF); + wakeup(arg); + break; + default: + break; + } +} + +/* + * Send a FIB supplied from userspace + */ +static int +aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) +{ + struct aac_command *cm; + int size, error; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + cm = NULL; + + /* + * Get a command + */ + mtx_lock(&sc->aac_io_lock); + if (aacraid_alloc_command(sc, &cm)) { + struct aac_event *event; + + event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, + M_NOWAIT | M_ZERO); + if (event == NULL) { + error = EBUSY; + mtx_unlock(&sc->aac_io_lock); + goto out; + } + event->ev_type = AAC_EVENT_CMFREE; + event->ev_callback = aac_ioctl_event; + event->ev_arg = &cm; + aacraid_add_event(sc, event); + msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0); + } + mtx_unlock(&sc->aac_io_lock); + + /* + * Fetch the FIB header, then re-copy to get data as well. + */ + if ((error = copyin(ufib, cm->cm_fib, + sizeof(struct aac_fib_header))) != 0) + goto out; + size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); + if (size > sc->aac_max_fib_size) { + device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", + size, sc->aac_max_fib_size); + size = sc->aac_max_fib_size; + } + if ((error = copyin(ufib, cm->cm_fib, size)) != 0) + goto out; + cm->cm_fib->Header.Size = size; + cm->cm_timestamp = time_uptime; + cm->cm_datalen = 0; + + /* + * Pass the FIB to the controller, wait for it to complete. + */ + mtx_lock(&sc->aac_io_lock); + error = aacraid_wait_command(cm); + mtx_unlock(&sc->aac_io_lock); + if (error != 0) { + device_printf(sc->aac_dev, + "aacraid_wait_command return %d\n", error); + goto out; + } + + /* + * Copy the FIB and data back out to the caller. 
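
aac_ioctl_sendfib() above uses a common two-pass copyin pattern: fetch only the FIB header first to learn the caller's claimed size, clamp that size against the adapter's maximum, and only then copy the full FIB. A userland sketch of the same idea, with memcpy() standing in for copyin() and made-up sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_FIB_SIZE 512              /* stand-in for sc->aac_max_fib_size */

    struct fib_header { uint16_t size; }; /* only the field the sketch needs */

    /* memcpy() stands in for copyin(); 'user' is pretend user memory. */
    static int fetch_fib(const uint8_t *user, uint8_t *kfib)
    {
        struct fib_header hdr;
        size_t size;

        /* Pass 1: header only, to learn how much the caller is sending. */
        memcpy(&hdr, user, sizeof(hdr));
        size = hdr.size + sizeof(hdr);

        /* Never trust a user-supplied length: clamp before the full copy. */
        if (size > MAX_FIB_SIZE) {
            printf("incoming FIB oversized (%zu > %d)\n", size, MAX_FIB_SIZE);
            size = MAX_FIB_SIZE;
        }

        /* Pass 2: the whole (clamped) FIB. */
        memcpy(kfib, user, size);
        return 0;
    }

    int main(void)
    {
        uint8_t user[1024] = { 0 }, kfib[MAX_FIB_SIZE];
        uint16_t claimed = 1000;          /* deliberately oversized */

        memcpy(user, &claimed, sizeof(claimed));
        return fetch_fib(user, kfib);
    }
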
+ */ + size = cm->cm_fib->Header.Size; + if (size > sc->aac_max_fib_size) { + device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", + size, sc->aac_max_fib_size); + size = sc->aac_max_fib_size; + } + error = copyout(cm->cm_fib, ufib, size); + +out: + if (cm != NULL) { + mtx_lock(&sc->aac_io_lock); + aacraid_release_command(cm); + mtx_unlock(&sc->aac_io_lock); + } + return(error); +} + +/* + * Send a passthrough FIB supplied from userspace + */ +static int +aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) +{ + struct aac_command *cm; + struct aac_fib *fib; + struct aac_srb *srbcmd; + struct aac_srb *user_srb = (struct aac_srb *)arg; + void *user_reply; + int error, transfer_data = 0; + bus_dmamap_t orig_map = 0; + u_int32_t fibsize = 0; + u_int64_t srb_sg_address; + u_int32_t srb_sg_bytecount; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + cm = NULL; + + mtx_lock(&sc->aac_io_lock); + if (aacraid_alloc_command(sc, &cm)) { + struct aac_event *event; + + event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, + M_NOWAIT | M_ZERO); + if (event == NULL) { + error = EBUSY; + mtx_unlock(&sc->aac_io_lock); + goto out; + } + event->ev_type = AAC_EVENT_CMFREE; + event->ev_callback = aac_ioctl_event; + event->ev_arg = &cm; + aacraid_add_event(sc, event); + msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0); + } + mtx_unlock(&sc->aac_io_lock); + + cm->cm_data = NULL; + /* save original dma map */ + orig_map = cm->cm_datamap; + + fib = cm->cm_fib; + srbcmd = (struct aac_srb *)fib->data; + if ((error = copyin((void *)&user_srb->data_len, &fibsize, + sizeof (u_int32_t)) != 0)) + goto out; + if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) { + error = EINVAL; + goto out; + } + if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0)) + goto out; + + srbcmd->function = 0; /* SRBF_ExecuteScsi */ + srbcmd->retry_limit = 0; /* obsolete */ + + /* only one sg element from userspace supported */ + if (srbcmd->sg_map.SgCount > 1) { + error = EINVAL; + goto out; + } + /* check fibsize */ + if (fibsize == (sizeof(struct aac_srb) + + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { + struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry; + srb_sg_bytecount = sgp->SgByteCount; + srb_sg_address = (u_int64_t)sgp->SgAddress; + } else if (fibsize == (sizeof(struct aac_srb) + + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { +#ifdef __amd64__ + struct aac_sg_entry64 *sgp = + (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; + srb_sg_bytecount = sgp->SgByteCount; + srb_sg_address = sgp->SgAddress; + if (srb_sg_address > 0xffffffffull && + !(sc->flags & AAC_FLAGS_SG_64BIT)) +#endif + { + error = EINVAL; + goto out; + } + } else { + error = EINVAL; + goto out; + } + user_reply = (char *)arg + fibsize; + srbcmd->data_len = srb_sg_bytecount; + if (srbcmd->sg_map.SgCount == 1) + transfer_data = 1; + + if (transfer_data) { + /* + * Create DMA tag for the passthr. data buffer and allocate it. + */ + if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ + 1, 0, /* algnmnt, boundary */ + (sc->flags & AAC_FLAGS_SG_64BIT) ? 
+ BUS_SPACE_MAXADDR_32BIT : + 0x7fffffff, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + srb_sg_bytecount, /* size */ + sc->aac_sg_tablesize, /* nsegments */ + srb_sg_bytecount, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* No locking needed */ + &cm->cm_passthr_dmat)) { + error = ENOMEM; + goto out; + } + if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data, + BUS_DMA_NOWAIT, &cm->cm_datamap)) { + error = ENOMEM; + goto out; + } + /* fill some cm variables */ + cm->cm_datalen = srb_sg_bytecount; + if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) + cm->cm_flags |= AAC_CMD_DATAIN; + if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) + cm->cm_flags |= AAC_CMD_DATAOUT; + + if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { + if ((error = copyin( +#ifdef __amd64__ + (void *)srb_sg_address, +#else + (void *)(u_int32_t)srb_sg_address, +#endif + cm->cm_data, cm->cm_datalen)) != 0) + goto out; + /* sync required for bus_dmamem_alloc() alloc. mem.? */ + bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, + BUS_DMASYNC_PREWRITE); + } + } + + /* build the FIB */ + fib->Header.Size = sizeof(struct aac_fib_header) + + sizeof(struct aac_srb); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC; + + fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ? + ScsiPortCommandU64 : ScsiPortCommand; + cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; + + /* send command */ + if (transfer_data) { + bus_dmamap_load(cm->cm_passthr_dmat, + cm->cm_datamap, cm->cm_data, + cm->cm_datalen, + aacraid_map_command_sg, cm, 0); + } else { + aacraid_map_command_sg(cm, NULL, 0, 0); + } + + /* wait for completion */ + mtx_lock(&sc->aac_io_lock); + while (!(cm->cm_flags & AAC_CMD_COMPLETED)) + msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0); + mtx_unlock(&sc->aac_io_lock); + + /* copy data */ + if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) { + if ((error = copyout(cm->cm_data, +#ifdef __amd64__ + (void *)srb_sg_address, +#else + (void *)(u_int32_t)srb_sg_address, +#endif + cm->cm_datalen)) != 0) + goto out; + /* sync required for bus_dmamem_alloc() allocated mem.? */ + bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, + BUS_DMASYNC_POSTREAD); + } + + /* status */ + error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response)); + +out: + if (cm && cm->cm_data) { + if (transfer_data) + bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap); + bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap); + cm->cm_datamap = orig_map; + } + if (cm && cm->cm_passthr_dmat) + bus_dma_tag_destroy(cm->cm_passthr_dmat); + if (cm) { + mtx_lock(&sc->aac_io_lock); + aacraid_release_command(cm); + mtx_unlock(&sc->aac_io_lock); + } + return(error); +} + +/* + * Request an AIF from the controller (new comm. 
type1) + */ +static void +aac_request_aif(struct aac_softc *sc) +{ + struct aac_command *cm; + struct aac_fib *fib; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + if (aacraid_alloc_command(sc, &cm)) { + sc->aif_pending = 1; + return; + } + sc->aif_pending = 0; + + /* build the FIB */ + fib = cm->cm_fib; + fib->Header.Size = sizeof(struct aac_fib); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC; + /* set AIF marker */ + fib->Header.Handle = 0x00800000; + fib->Header.Command = AifRequest; + ((struct aac_aif_command *)fib->data)->command = AifReqEvent; + + aacraid_map_command_sg(cm, NULL, 0, 0); +} + + +#if __FreeBSD_version >= 702000 +/* + * cdevpriv interface private destructor. + */ +static void +aac_cdevpriv_dtor(void *arg) +{ + struct aac_softc *sc; + + sc = arg; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + mtx_lock(&Giant); + device_unbusy(sc->aac_dev); + mtx_unlock(&Giant); +} +#else +static int +aac_close(struct cdev *dev, int flags, int fmt, struct thread *td) +{ + struct aac_softc *sc; + + sc = dev->si_drv1; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + return 0; +} +#endif + +/* + * Handle an AIF sent to us by the controller; queue it for later reference. + * If the queue fills up, then drop the older entries. + */ +static void +aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) +{ + struct aac_aif_command *aif; + struct aac_container *co, *co_next; + struct aac_fib_context *ctx; + struct aac_fib *sync_fib; + struct aac_mntinforesp mir; + int next, current, found; + int count = 0, changed = 0, i = 0; + u_int32_t channel, uid; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + aif = (struct aac_aif_command*)&fib->data[0]; + aacraid_print_aif(sc, aif); + + /* Is it an event that we should care about? */ + switch (aif->command) { + case AifCmdEventNotify: + switch (aif->data.EN.type) { + case AifEnAddContainer: + case AifEnDeleteContainer: + /* + * A container was added or deleted, but the message + * doesn't tell us anything else! Re-enumerate the + * containers and sort things out. + */ + aac_alloc_sync_fib(sc, &sync_fib); + do { + /* + * Ask the controller for its containers one at + * a time. + * XXX What if the controller's list changes + * midway through this enumaration? + * XXX This should be done async. + */ + if (aac_get_container_info(sc, sync_fib, i, + &mir, &uid) != 0) + continue; + if (i == 0) + count = mir.MntRespCount; + /* + * Check the container against our list. + * co->co_found was already set to 0 in a + * previous run. + */ + if ((mir.Status == ST_OK) && + (mir.MntTable[0].VolType != CT_NONE)) { + found = 0; + TAILQ_FOREACH(co, + &sc->aac_container_tqh, + co_link) { + if (co->co_mntobj.ObjectId == + mir.MntTable[0].ObjectId) { + co->co_found = 1; + found = 1; + break; + } + } + /* + * If the container matched, continue + * in the list. + */ + if (found) { + i++; + continue; + } + + /* + * This is a new container. Do all the + * appropriate things to set it up. + */ + aac_add_container(sc, &mir, 1, uid); + changed = 1; + } + i++; + } while ((i < count) && (i < AAC_MAX_CONTAINERS)); + aac_release_sync_fib(sc); + + /* + * Go through our list of containers and see which ones + * were not marked 'found'. Since the controller didn't + * list them they must have been deleted. Do the + * appropriate steps to destroy the device. Also reset + * the co->co_found field. 
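
The add/delete container handling above, together with the sweep that follows, is a small mark-and-sweep reconciliation: every container the firmware still reports gets its found flag set, and anything left unmarked afterwards is assumed deleted and torn down. A compact model of the deletion side only (the driver keeps a TAILQ and also attaches genuinely new containers, which this sketch omits):

    #include <stdio.h>

    #define MAX_CONT 8

    struct container {
        int id;
        int present;    /* still in the driver's list       */
        int found;      /* reported by this re-enumeration  */
    };

    static void
    reconcile(struct container *list, int n, const int *reported, int nrep)
    {
        int i, j;

        /* Mark phase: flag every container the controller still reports. */
        for (j = 0; j < nrep; j++)
            for (i = 0; i < n; i++)
                if (list[i].present && list[i].id == reported[j])
                    list[i].found = 1;

        /* Sweep phase: anything unmarked was deleted on the controller. */
        for (i = 0; i < n; i++) {
            if (list[i].present && !list[i].found)
                list[i].present = 0;     /* the driver would free it    */
            list[i].found = 0;           /* reset for the next AIF      */
        }
    }

    int main(void)
    {
        struct container list[MAX_CONT] = {
            { 0, 1, 0 }, { 1, 1, 0 }, { 2, 1, 0 },
        };
        int reported[] = { 0, 2 };       /* container 1 disappeared */

        reconcile(list, 3, reported, 2);
        printf("container 1 present: %d\n", list[1].present);   /* 0 */
        return 0;
    }
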
+ */ + co = TAILQ_FIRST(&sc->aac_container_tqh); + while (co != NULL) { + if (co->co_found == 0) { + co_next = TAILQ_NEXT(co, co_link); + TAILQ_REMOVE(&sc->aac_container_tqh, co, + co_link); + free(co, M_AACRAIDBUF); + changed = 1; + co = co_next; + } else { + co->co_found = 0; + co = TAILQ_NEXT(co, co_link); + } + } + + /* Attach the newly created containers */ + if (changed) { + if (sc->cam_rescan_cb != NULL) + sc->cam_rescan_cb(sc, 0, + AAC_CAM_TARGET_WILDCARD); + } + + break; + + case AifEnEnclosureManagement: + switch (aif->data.EN.data.EEE.eventType) { + case AIF_EM_DRIVE_INSERTION: + case AIF_EM_DRIVE_REMOVAL: + channel = aif->data.EN.data.EEE.unitID; + if (sc->cam_rescan_cb != NULL) + sc->cam_rescan_cb(sc, + ((channel>>24) & 0xF) + 1, + (channel & 0xFFFF)); + break; + } + break; + + case AifEnAddJBOD: + case AifEnDeleteJBOD: + case AifRawDeviceRemove: + channel = aif->data.EN.data.ECE.container; + if (sc->cam_rescan_cb != NULL) + sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1, + AAC_CAM_TARGET_WILDCARD); + break; + + default: + break; + } + + default: + break; + } + + /* Copy the AIF data to the AIF queue for ioctl retrieval */ + current = sc->aifq_idx; + next = (current + 1) % AAC_AIFQ_LENGTH; + if (next == 0) + sc->aifq_filled = 1; + bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); + /* modify AIF contexts */ + if (sc->aifq_filled) { + for (ctx = sc->fibctx; ctx; ctx = ctx->next) { + if (next == ctx->ctx_idx) + ctx->ctx_wrap = 1; + else if (current == ctx->ctx_idx && ctx->ctx_wrap) + ctx->ctx_idx = next; + } + } + sc->aifq_idx = next; + /* On the off chance that someone is sleeping for an aif... */ + if (sc->aac_state & AAC_STATE_AIF_SLEEPER) + wakeup(sc->aac_aifq); + /* Wakeup any poll()ers */ + selwakeuppri(&sc->rcv_select, PRIBIO); + + return; +} + +/* + * Return the Revision of the driver to userspace and check to see if the + * userspace app is possibly compatible. This is extremely bogus since + * our driver doesn't follow Adaptec's versioning system. Cheat by just + * returning what the card reported. + */ +static int +aac_rev_check(struct aac_softc *sc, caddr_t udata) +{ + struct aac_rev_check rev_check; + struct aac_rev_check_resp rev_check_resp; + int error = 0; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + /* + * Copyin the revision struct from userspace + */ + if ((error = copyin(udata, (caddr_t)&rev_check, + sizeof(struct aac_rev_check))) != 0) { + return error; + } + + fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", + rev_check.callingRevision.buildNumber); + + /* + * Doctor up the response struct. 
+ */ + rev_check_resp.possiblyCompatible = 1; + rev_check_resp.adapterSWRevision.external.comp.major = + AAC_DRIVER_MAJOR_VERSION; + rev_check_resp.adapterSWRevision.external.comp.minor = + AAC_DRIVER_MINOR_VERSION; + rev_check_resp.adapterSWRevision.external.comp.type = + AAC_DRIVER_TYPE; + rev_check_resp.adapterSWRevision.external.comp.dash = + AAC_DRIVER_BUGFIX_LEVEL; + rev_check_resp.adapterSWRevision.buildNumber = + AAC_DRIVER_BUILD; + + return(copyout((caddr_t)&rev_check_resp, udata, + sizeof(struct aac_rev_check_resp))); +} + +/* + * Pass the fib context to the caller + */ +static int +aac_open_aif(struct aac_softc *sc, caddr_t arg) +{ + struct aac_fib_context *fibctx, *ctx; + int error = 0; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO); + if (fibctx == NULL) + return (ENOMEM); + + mtx_lock(&sc->aac_io_lock); + /* all elements are already 0, add to queue */ + if (sc->fibctx == NULL) + sc->fibctx = fibctx; + else { + for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) + ; + ctx->next = fibctx; + fibctx->prev = ctx; + } + + /* evaluate unique value */ + fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); + ctx = sc->fibctx; + while (ctx != fibctx) { + if (ctx->unique == fibctx->unique) { + fibctx->unique++; + ctx = sc->fibctx; + } else { + ctx = ctx->next; + } + } + + error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); + mtx_unlock(&sc->aac_io_lock); + if (error) + aac_close_aif(sc, (caddr_t)ctx); + return error; +} + +/* + * Close the caller's fib context + */ +static int +aac_close_aif(struct aac_softc *sc, caddr_t arg) +{ + struct aac_fib_context *ctx; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + mtx_lock(&sc->aac_io_lock); + for (ctx = sc->fibctx; ctx; ctx = ctx->next) { + if (ctx->unique == *(uint32_t *)&arg) { + if (ctx == sc->fibctx) + sc->fibctx = NULL; + else { + ctx->prev->next = ctx->next; + if (ctx->next) + ctx->next->prev = ctx->prev; + } + break; + } + } + if (ctx) + free(ctx, M_AACRAIDBUF); + + mtx_unlock(&sc->aac_io_lock); + return 0; +} + +/* + * Pass the caller the next AIF in their queue + */ +static int +aac_getnext_aif(struct aac_softc *sc, caddr_t arg) +{ + struct get_adapter_fib_ioctl agf; + struct aac_fib_context *ctx; + int error; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + mtx_lock(&sc->aac_io_lock); + if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { + for (ctx = sc->fibctx; ctx; ctx = ctx->next) { + if (agf.AdapterFibContext == ctx->unique) + break; + } + if (!ctx) { + mtx_unlock(&sc->aac_io_lock); + return (EFAULT); + } + + error = aac_return_aif(sc, ctx, agf.AifFib); + if (error == EAGAIN && agf.Wait) { + fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); + sc->aac_state |= AAC_STATE_AIF_SLEEPER; + while (error == EAGAIN) { + mtx_unlock(&sc->aac_io_lock); + error = tsleep(sc->aac_aifq, PRIBIO | + PCATCH, "aacaif", 0); + mtx_lock(&sc->aac_io_lock); + if (error == 0) + error = aac_return_aif(sc, ctx, agf.AifFib); + } + sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; + } + } + mtx_unlock(&sc->aac_io_lock); + return(error); +} + +/* + * Hand the next AIF off the top of the queue out to userspace. 
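
The AIF plumbing here is a fixed-size ring with one writer and any number of readers: aac_handle_aif() above advances a single write index, each open fib context keeps its own read index plus a wrap flag that records when the writer lapped it, and aac_return_aif() below consumes from that per-context cursor. A small model with illustrative sizes and names:

    #include <stdio.h>

    #define AIFQ_LENGTH 4

    struct reader {             /* one per open fib context */
        int idx;                /* next entry this reader will consume */
        int wrapped;            /* writer caught up to / lapped this reader */
    };

    static int aifq[AIFQ_LENGTH];
    static int widx;            /* writer's next slot */

    static void produce(int aif, struct reader *r, int nreaders)
    {
        int next = (widx + 1) % AIFQ_LENGTH;
        int i;

        aifq[widx] = aif;
        for (i = 0; i < nreaders; i++) {
            if (next == r[i].idx)
                r[i].wrapped = 1;        /* ring is now full for this reader */
            else if (widx == r[i].idx && r[i].wrapped)
                r[i].idx = next;         /* lapped: drop its oldest entry */
        }
        widx = next;
    }

    static int consume(struct reader *r, int *aif)
    {
        if (r->idx == widx && !r->wrapped)
            return -1;                   /* empty: EAGAIN in the driver */
        *aif = aifq[r->idx];
        r->idx = (r->idx + 1) % AIFQ_LENGTH;
        r->wrapped = 0;
        return 0;
    }

    int main(void)
    {
        struct reader r = { 0, 0 };
        int v, i;

        for (i = 1; i <= 3; i++)
            produce(i, &r, 1);
        while (consume(&r, &v) == 0)
            printf("%d ", v);            /* 1 2 3 */
        printf("\n");
        return 0;
    }
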
+ */ +static int +aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) +{ + int current, error; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + current = ctx->ctx_idx; + if (current == sc->aifq_idx && !ctx->ctx_wrap) { + /* empty */ + return (EAGAIN); + } + error = + copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); + if (error) + device_printf(sc->aac_dev, + "aac_return_aif: copyout returned %d\n", error); + else { + ctx->ctx_wrap = 0; + ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; + } + return(error); +} + +static int +aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) +{ + struct aac_pci_info { + u_int32_t bus; + u_int32_t slot; + } pciinf; + int error; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + pciinf.bus = pci_get_bus(sc->aac_dev); + pciinf.slot = pci_get_slot(sc->aac_dev); + + error = copyout((caddr_t)&pciinf, uptr, + sizeof(struct aac_pci_info)); + + return (error); +} + +static int +aac_supported_features(struct aac_softc *sc, caddr_t uptr) +{ + struct aac_features f; + int error; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + if ((error = copyin(uptr, &f, sizeof (f))) != 0) + return (error); + + /* + * When the management driver receives FSACTL_GET_FEATURES ioctl with + * ALL zero in the featuresState, the driver will return the current + * state of all the supported features, the data field will not be + * valid. + * When the management driver receives FSACTL_GET_FEATURES ioctl with + * a specific bit set in the featuresState, the driver will return the + * current state of this specific feature and whatever data that are + * associated with the feature in the data field or perform whatever + * action needed indicates in the data field. + */ + if (f.feat.fValue == 0) { + f.feat.fBits.largeLBA = + (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; + f.feat.fBits.JBODSupport = 1; + /* TODO: In the future, add other features state here as well */ + } else { + if (f.feat.fBits.largeLBA) + f.feat.fBits.largeLBA = + (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; + /* TODO: Add other features state and data in the future */ + } + + error = copyout(&f, uptr, sizeof (f)); + return (error); +} + +/* + * Give the userland some information about the container. The AAC arch + * expects the driver to be a SCSI passthrough type driver, so it expects + * the containers to have b:t:l numbers. Fake it. + */ +static int +aac_query_disk(struct aac_softc *sc, caddr_t uptr) +{ + struct aac_query_disk query_disk; + struct aac_container *co; + int error, id; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + mtx_lock(&sc->aac_io_lock); + error = copyin(uptr, (caddr_t)&query_disk, + sizeof(struct aac_query_disk)); + if (error) { + mtx_unlock(&sc->aac_io_lock); + return (error); + } + + id = query_disk.ContainerNumber; + if (id == -1) { + mtx_unlock(&sc->aac_io_lock); + return (EINVAL); + } + + TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { + if (co->co_mntobj.ObjectId == id) + break; + } + + if (co == NULL) { + query_disk.Valid = 0; + query_disk.Locked = 0; + query_disk.Deleted = 1; /* XXX is this right? 
*/ + } else { + query_disk.Valid = 1; + query_disk.Locked = 1; + query_disk.Deleted = 0; + query_disk.Bus = device_get_unit(sc->aac_dev); + query_disk.Target = 0; + query_disk.Lun = 0; + query_disk.UnMapped = 0; + } + + error = copyout((caddr_t)&query_disk, uptr, + sizeof(struct aac_query_disk)); + + mtx_unlock(&sc->aac_io_lock); + return (error); +} + +static void +aac_container_bus(struct aac_softc *sc) +{ + struct aac_sim *sim; + device_t child; + + sim =(struct aac_sim *)malloc(sizeof(struct aac_sim), + M_AACRAIDBUF, M_NOWAIT | M_ZERO); + if (sim == NULL) { + device_printf(sc->aac_dev, + "No memory to add container bus\n"); + panic("Out of memory?!"); + }; + child = device_add_child(sc->aac_dev, "aacraidp", -1); + if (child == NULL) { + device_printf(sc->aac_dev, + "device_add_child failed for container bus\n"); + free(sim, M_AACRAIDBUF); + panic("Out of memory?!"); + } + + sim->TargetsPerBus = AAC_MAX_CONTAINERS; + sim->BusNumber = 0; + sim->BusType = CONTAINER_BUS; + sim->InitiatorBusId = -1; + sim->aac_sc = sc; + sim->sim_dev = child; + sim->aac_cam = NULL; + + device_set_ivars(child, sim); + device_set_desc(child, "Container Bus"); + TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link); + /* + device_set_desc(child, aac_describe_code(aac_container_types, + mir->MntTable[0].VolType)); + */ + bus_generic_attach(sc->aac_dev); +} + +static void +aac_get_bus_info(struct aac_softc *sc) +{ + struct aac_fib *fib; + struct aac_ctcfg *c_cmd; + struct aac_ctcfg_resp *c_resp; + struct aac_vmioctl *vmi; + struct aac_vmi_businf_resp *vmi_resp; + struct aac_getbusinf businfo; + struct aac_sim *caminf; + device_t child; + int i, error; + + mtx_lock(&sc->aac_io_lock); + aac_alloc_sync_fib(sc, &fib); + c_cmd = (struct aac_ctcfg *)&fib->data[0]; + bzero(c_cmd, sizeof(struct aac_ctcfg)); + + c_cmd->Command = VM_ContainerConfig; + c_cmd->cmd = CT_GET_SCSI_METHOD; + c_cmd->param = 0; + + error = aac_sync_fib(sc, ContainerCommand, 0, fib, + sizeof(struct aac_ctcfg)); + if (error) { + device_printf(sc->aac_dev, "Error %d sending " + "VM_ContainerConfig command\n", error); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + return; + } + + c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; + if (c_resp->Status != ST_OK) { + device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", + c_resp->Status); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + return; + } + + sc->scsi_method_id = c_resp->param; + + vmi = (struct aac_vmioctl *)&fib->data[0]; + bzero(vmi, sizeof(struct aac_vmioctl)); + + vmi->Command = VM_Ioctl; + vmi->ObjType = FT_DRIVE; + vmi->MethId = sc->scsi_method_id; + vmi->ObjId = 0; + vmi->IoctlCmd = GetBusInfo; + + error = aac_sync_fib(sc, ContainerCommand, 0, fib, + sizeof(struct aac_vmi_businf_resp)); + if (error) { + device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", + error); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + return; + } + + vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; + if (vmi_resp->Status != ST_OK) { + device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", + vmi_resp->Status); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + return; + } + + bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); + aac_release_sync_fib(sc); + mtx_unlock(&sc->aac_io_lock); + + for (i = 0; i < businfo.BusCount; i++) { + if (businfo.BusValid[i] != AAC_BUS_VALID) + continue; + + caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), + M_AACRAIDBUF, M_NOWAIT | M_ZERO); + if (caminf == NULL) { + 
device_printf(sc->aac_dev, + "No memory to add passthrough bus %d\n", i); + break; + }; + + child = device_add_child(sc->aac_dev, "aacraidp", -1); + if (child == NULL) { + device_printf(sc->aac_dev, + "device_add_child failed for passthrough bus %d\n", + i); + free(caminf, M_AACRAIDBUF); + break; + } + + caminf->TargetsPerBus = businfo.TargetsPerBus; + caminf->BusNumber = i+1; + caminf->BusType = PASSTHROUGH_BUS; + caminf->InitiatorBusId = businfo.InitiatorBusId[i]; + caminf->aac_sc = sc; + caminf->sim_dev = child; + caminf->aac_cam = NULL; + + device_set_ivars(child, caminf); + device_set_desc(child, "SCSI Passthrough Bus"); + TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); + } +} + +/* + * Check to see if the kernel is up and running. If we are in a + * BlinkLED state, return the BlinkLED code. + */ +static u_int32_t +aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled) +{ + u_int32_t ret; + + ret = AAC_GET_FWSTATUS(sc); + + if (ret & AAC_UP_AND_RUNNING) + ret = 0; + else if (ret & AAC_KERNEL_PANIC && bled) + *bled = (ret >> 16) & 0xff; + + return (ret); +} + +/* + * Once do an IOP reset, basically have to re-initialize the card as + * if coming up from a cold boot, and the driver is responsible for + * any IO that was outstanding to the adapter at the time of the IOP + * RESET. And prepare the driver for IOP RESET by making the init code + * modular with the ability to call it from multiple places. + */ +static int +aac_reset_adapter(struct aac_softc *sc) +{ + struct aac_command *cm; + struct aac_fib *fib; + struct aac_pause_command *pc; + u_int32_t status, old_flags, reset_mask, waitCount; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + if (sc->aac_state & AAC_STATE_RESET) { + device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n"); + return (EINVAL); + } + sc->aac_state |= AAC_STATE_RESET; + + /* disable interrupt */ + AAC_MASK_INTERRUPTS(sc); + + /* + * Abort all pending commands: + * a) on the controller + */ + while ((cm = aac_dequeue_busy(sc)) != NULL) { + cm->cm_flags |= AAC_CMD_RESET; + + /* is there a completion handler? */ + if (cm->cm_complete != NULL) { + cm->cm_complete(cm); + } else { + /* assume that someone is sleeping on this + * command + */ + wakeup(cm); + } + } + + /* b) in the waiting queues */ + while ((cm = aac_dequeue_ready(sc)) != NULL) { + cm->cm_flags |= AAC_CMD_RESET; + + /* is there a completion handler? 
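
aac_check_adapter_health() above reduces the firmware status word to three cases: healthy, panicked with a BlinkLED code packed into bits 23:16, or some other non-running state. A sketch of that decode; the bit positions used here are placeholders, the real definitions live in aacraid_reg.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit assignments; the real values live in aacraid_reg.h. */
    #define FW_UP_AND_RUNNING  (1u << 11)
    #define FW_KERNEL_PANIC    (1u << 2)

    /* Returns 0 when healthy; otherwise the raw status, with the BlinkLED
     * code (bits 23:16) extracted for the caller when it panicked. */
    static uint32_t check_health(uint32_t fwstatus, uint8_t *bled)
    {
        if (fwstatus & FW_UP_AND_RUNNING)
            return 0;
        if ((fwstatus & FW_KERNEL_PANIC) && bled != NULL)
            *bled = (fwstatus >> 16) & 0xff;
        return fwstatus;
    }

    int main(void)
    {
        uint8_t bled = 0;
        uint32_t st = FW_KERNEL_PANIC | (0x3c << 16);  /* panicked, code 0x3c */

        if (check_health(st, &bled) != 0)
            printf("controller dead, BlinkLED code 0x%02x\n", (unsigned)bled);
        return 0;
    }
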
*/ + if (cm->cm_complete != NULL) { + cm->cm_complete(cm); + } else { + /* assume that someone is sleeping on this + * command + */ + wakeup(cm); + } + } + + /* flush drives */ + if (aac_check_adapter_health(sc, NULL) == 0) { + mtx_unlock(&sc->aac_io_lock); + (void) aacraid_shutdown(sc->aac_dev); + mtx_lock(&sc->aac_io_lock); + } + + /* execute IOP reset */ + if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) { + AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST); + + /* We need to wait for 5 seconds before accessing the MU again + * 10000 * 100us = 1000,000us = 1000ms = 1s + */ + waitCount = 5 * 10000; + while (waitCount) { + DELAY(100); /* delay 100 microseconds */ + waitCount--; + } + } else if ((aacraid_sync_command(sc, + AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) { + /* call IOP_RESET for older firmware */ + if ((aacraid_sync_command(sc, + AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) { + + if (status == AAC_SRB_STS_INVALID_REQUEST) + device_printf(sc->aac_dev, "IOP_RESET not supported\n"); + else + /* probably timeout */ + device_printf(sc->aac_dev, "IOP_RESET failed\n"); + + /* unwind aac_shutdown() */ + aac_alloc_sync_fib(sc, &fib); + pc = (struct aac_pause_command *)&fib->data[0]; + pc->Command = VM_ContainerConfig; + pc->Type = CT_PAUSE_IO; + pc->Timeout = 1; + pc->Min = 1; + pc->NoRescan = 1; + + (void) aac_sync_fib(sc, ContainerCommand, 0, fib, + sizeof (struct aac_pause_command)); + aac_release_sync_fib(sc); + + goto finish; + } + } else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) { + AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask); + /* We need to wait for 5 seconds before accessing the doorbell again + * 10000 * 100us = 1000,000us = 1000ms = 1s + */ + waitCount = 5 * 10000; + while (waitCount) { + DELAY(100); /* delay 100 microseconds */ + waitCount--; + } + } + + /* + * Re-read and renegotiate the FIB parameters, as one of the actions + * that can result from an IOP reset is the running of a new firmware + * image. + */ + old_flags = sc->flags; + /* + * Initialize the adapter. + */ + if (aac_check_firmware(sc) != 0) + goto finish; + if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) { + if (aac_init(sc) != 0) + goto finish; + } + +finish: + sc->aac_state &= ~AAC_STATE_RESET; + AAC_UNMASK_INTERRUPTS(sc); + aacraid_startio(sc); + return (0); +} diff --git a/sys/dev/aacraid/aacraid_cam.c b/sys/dev/aacraid/aacraid_cam.c new file mode 100644 index 0000000..602e961 --- /dev/null +++ b/sys/dev/aacraid/aacraid_cam.c @@ -0,0 +1,1400 @@ +/*- + * Copyright (c) 2002-2010 Adaptec, Inc. + * Copyright (c) 2010-2012 PMC-Sierra, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * CAM front-end for communicating with non-DASD devices + */ + +#include "opt_aacraid.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#if __FreeBSD_version < 801000 +#include +#endif +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#if __FreeBSD_version >= 700025 +#ifndef CAM_NEW_TRAN_CODE +#define CAM_NEW_TRAN_CODE 1 +#endif +#endif + +#ifndef SVPD_SUPPORTED_PAGE_LIST +struct scsi_vpd_supported_page_list +{ + u_int8_t device; + u_int8_t page_code; +#define SVPD_SUPPORTED_PAGE_LIST 0x00 + u_int8_t reserved; + u_int8_t length; /* number of VPD entries */ +#define SVPD_SUPPORTED_PAGES_SIZE 251 + u_int8_t list[SVPD_SUPPORTED_PAGES_SIZE]; +}; +#endif + +/************************** Version Compatibility *************************/ +#if __FreeBSD_version < 700031 +#define aac_sim_alloc(a,b,c,d,e,f,g,h,i) cam_sim_alloc(a,b,c,d,e,g,h,i) +#else +#define aac_sim_alloc cam_sim_alloc +#endif + +struct aac_cam { + device_t dev; + struct aac_sim *inf; + struct cam_sim *sim; + struct cam_path *path; +}; + +static int aac_cam_probe(device_t dev); +static int aac_cam_attach(device_t dev); +static int aac_cam_detach(device_t dev); +static void aac_cam_action(struct cam_sim *, union ccb *); +static void aac_cam_poll(struct cam_sim *); +static void aac_cam_complete(struct aac_command *); +static void aac_container_complete(struct aac_command *); +#if __FreeBSD_version >= 700000 +static void aac_cam_rescan(struct aac_softc *sc, uint32_t channel, + uint32_t target_id); +#endif +static void aac_set_scsi_error(struct aac_softc *sc, union ccb *ccb, + u_int8_t status, u_int8_t key, u_int8_t asc, u_int8_t ascq); +static int aac_load_map_command_sg(struct aac_softc *, struct aac_command *); +static u_int64_t aac_eval_blockno(u_int8_t *); +static void aac_container_rw_command(struct cam_sim *, union ccb *, u_int8_t *); +static void aac_container_special_command(struct cam_sim *, union ccb *, + u_int8_t *); +static void aac_passthrough_command(struct cam_sim *, union ccb *); + +static u_int32_t aac_cam_reset_bus(struct cam_sim *, union ccb *); +static u_int32_t aac_cam_abort_ccb(struct cam_sim *, union ccb *); +static u_int32_t aac_cam_term_io(struct cam_sim *, union ccb *); + +static devclass_t aacraid_pass_devclass; + +static device_method_t aacraid_pass_methods[] = { + DEVMETHOD(device_probe, aac_cam_probe), + DEVMETHOD(device_attach, aac_cam_attach), + DEVMETHOD(device_detach, aac_cam_detach), + { 0, 0 } +}; + +static driver_t aacraid_pass_driver = { + "aacraidp", + aacraid_pass_methods, + sizeof(struct aac_cam) +}; + +DRIVER_MODULE(aacraidp, aacraid, aacraid_pass_driver, aacraid_pass_devclass, 0, 0); +MODULE_DEPEND(aacraidp, cam, 1, 1, 1); + +MALLOC_DEFINE(M_AACRAIDCAM, "aacraidcam", "AACRAID CAM info"); + +static void +aac_set_scsi_error(struct 
aac_softc *sc, union ccb *ccb, u_int8_t status, + u_int8_t key, u_int8_t asc, u_int8_t ascq) +{ +#if __FreeBSD_version >= 900000 + struct scsi_sense_data_fixed *sense = + (struct scsi_sense_data_fixed *)&ccb->csio.sense_data; +#else + struct scsi_sense_data *sense = &ccb->csio.sense_data; +#endif + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "Error %d!", status); + + ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; + ccb->csio.scsi_status = status; + if (status == SCSI_STATUS_CHECK_COND) { + ccb->ccb_h.status |= CAM_AUTOSNS_VALID; + bzero(&ccb->csio.sense_data, ccb->csio.sense_len); + ccb->csio.sense_data.error_code = + SSD_CURRENT_ERROR | SSD_ERRCODE_VALID; + sense->flags = key; + if (ccb->csio.sense_len >= 14) { + sense->extra_len = 6; + sense->add_sense_code = asc; + sense->add_sense_code_qual = ascq; + } + } +} + +#if __FreeBSD_version >= 700000 +static void +aac_cam_rescan(struct aac_softc *sc, uint32_t channel, uint32_t target_id) +{ + union ccb *ccb; + struct aac_sim *sim; + struct aac_cam *camsc; + + if (target_id == AAC_CAM_TARGET_WILDCARD) + target_id = CAM_TARGET_WILDCARD; + + TAILQ_FOREACH(sim, &sc->aac_sim_tqh, sim_link) { + camsc = sim->aac_cam; + if (camsc == NULL || camsc->inf == NULL || + camsc->inf->BusNumber != channel) + continue; + + ccb = xpt_alloc_ccb_nowait(); + if (ccb == NULL) { + device_printf(sc->aac_dev, + "Cannot allocate ccb for bus rescan.\n"); + return; + } + + if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, + cam_sim_path(camsc->sim), + target_id, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + xpt_free_ccb(ccb); + device_printf(sc->aac_dev, + "Cannot create path for bus rescan.\n"); + return; + } + xpt_rescan(ccb); + break; + } +} +#endif + +static void +aac_cam_event(struct aac_softc *sc, struct aac_event *event, void *arg) +{ + union ccb *ccb; + struct aac_cam *camsc; + + switch (event->ev_type) { + case AAC_EVENT_CMFREE: + ccb = arg; + camsc = ccb->ccb_h.sim_priv.entries[0].ptr; + free(event, M_AACRAIDCAM); + xpt_release_simq(camsc->sim, 1); + ccb->ccb_h.status = CAM_REQUEUE_REQ; + xpt_done(ccb); + break; + default: + device_printf(sc->aac_dev, "unknown event %d in aac_cam\n", + event->ev_type); + break; + } + + return; +} + +static int +aac_cam_probe(device_t dev) +{ + struct aac_softc *sc; + struct aac_cam *camsc; + + camsc = (struct aac_cam *)device_get_softc(dev); + if (!camsc->inf) + return (0); + sc = camsc->inf->aac_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + return (0); +} + +static int +aac_cam_detach(device_t dev) +{ + struct aac_softc *sc; + struct aac_cam *camsc; + + camsc = (struct aac_cam *)device_get_softc(dev); + if (!camsc->inf) + return (0); + sc = camsc->inf->aac_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + camsc->inf->aac_cam = NULL; + + mtx_lock(&sc->aac_io_lock); + + xpt_async(AC_LOST_DEVICE, camsc->path, NULL); + xpt_free_path(camsc->path); + xpt_bus_deregister(cam_sim_path(camsc->sim)); + cam_sim_free(camsc->sim, /*free_devq*/TRUE); + + sc->cam_rescan_cb = NULL; + + mtx_unlock(&sc->aac_io_lock); + + return (0); +} + +/* + * Register the driver as a CAM SIM + */ +static int +aac_cam_attach(device_t dev) +{ + struct cam_devq *devq; + struct cam_sim *sim; + struct cam_path *path; + struct aac_cam *camsc; + struct aac_sim *inf; + + camsc = (struct aac_cam *)device_get_softc(dev); + inf = (struct aac_sim *)device_get_ivars(dev); + if (!inf) + return (EIO); + fwprintf(inf->aac_sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + camsc->inf = inf; + camsc->inf->aac_cam = camsc; + + devq = cam_simq_alloc(inf->TargetsPerBus); + if 
(devq == NULL) + return (EIO); + + sim = aac_sim_alloc(aac_cam_action, aac_cam_poll, "aacraidp", camsc, + device_get_unit(dev), &inf->aac_sc->aac_io_lock, 1, 1, devq); + if (sim == NULL) { + cam_simq_free(devq); + return (EIO); + } + + /* Since every bus has it's own sim, every bus 'appears' as bus 0 */ + mtx_lock(&inf->aac_sc->aac_io_lock); + if (aac_xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) { + cam_sim_free(sim, TRUE); + mtx_unlock(&inf->aac_sc->aac_io_lock); + return (EIO); + } + + if (xpt_create_path(&path, NULL, cam_sim_path(sim), + CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + xpt_bus_deregister(cam_sim_path(sim)); + cam_sim_free(sim, TRUE); + mtx_unlock(&inf->aac_sc->aac_io_lock); + return (EIO); + } + +#if __FreeBSD_version >= 700000 + inf->aac_sc->cam_rescan_cb = aac_cam_rescan; +#endif + mtx_unlock(&inf->aac_sc->aac_io_lock); + + camsc->sim = sim; + camsc->path = path; + + return (0); +} + +static u_int64_t +aac_eval_blockno(u_int8_t *cmdp) +{ + u_int64_t blockno; + + switch (cmdp[0]) { + case READ_6: + case WRITE_6: + blockno = scsi_3btoul(((struct scsi_rw_6 *)cmdp)->addr); + break; + case READ_10: + case WRITE_10: + blockno = scsi_4btoul(((struct scsi_rw_10 *)cmdp)->addr); + break; + case READ_12: + case WRITE_12: + blockno = scsi_4btoul(((struct scsi_rw_12 *)cmdp)->addr); + break; + case READ_16: + case WRITE_16: + blockno = scsi_8btou64(((struct scsi_rw_16 *)cmdp)->addr); + break; + default: + blockno = 0; + break; + } + return(blockno); +} + +static void +aac_container_rw_command(struct cam_sim *sim, union ccb *ccb, u_int8_t *cmdp) +{ + struct aac_cam *camsc; + struct aac_softc *sc; + struct aac_command *cm; + struct aac_fib *fib; + u_int64_t blockno; + + camsc = (struct aac_cam *)cam_sim_softc(sim); + sc = camsc->inf->aac_sc; + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + if (aacraid_alloc_command(sc, &cm)) { + struct aac_event *event; + + xpt_freeze_simq(sim, 1); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + ccb->ccb_h.sim_priv.entries[0].ptr = camsc; + event = malloc(sizeof(struct aac_event), M_AACRAIDCAM, + M_NOWAIT | M_ZERO); + if (event == NULL) { + device_printf(sc->aac_dev, + "Warning, out of memory for event\n"); + return; + } + event->ev_callback = aac_cam_event; + event->ev_arg = ccb; + event->ev_type = AAC_EVENT_CMFREE; + aacraid_add_event(sc, event); + return; + } + + fib = cm->cm_fib; + switch (ccb->ccb_h.flags & CAM_DIR_MASK) { + case CAM_DIR_IN: + cm->cm_flags |= AAC_CMD_DATAIN; + break; + case CAM_DIR_OUT: + cm->cm_flags |= AAC_CMD_DATAOUT; + break; + case CAM_DIR_NONE: + break; + default: + cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT; + break; + } + + blockno = aac_eval_blockno(cmdp); + + cm->cm_complete = aac_container_complete; + cm->cm_ccb = ccb; + cm->cm_timestamp = time_uptime; + cm->cm_data = (void *)ccb->csio.data_ptr; + cm->cm_datalen = ccb->csio.dxfer_len; + + fib->Header.Size = sizeof(struct aac_fib_header); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC | + AAC_FIBSTATE_FAST_RESPONSE; + + if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { + struct aac_raw_io2 *raw; + raw = (struct aac_raw_io2 *)&fib->data[0]; + bzero(raw, sizeof(struct aac_raw_io2)); + fib->Header.Command = RawIo2; + raw->strtBlkLow = (u_int32_t)blockno; + raw->strtBlkHigh = (u_int32_t)(blockno >> 32); + raw->byteCnt = cm->cm_datalen; + raw->ldNum = ccb->ccb_h.target_id; + fib->Header.Size += sizeof(struct 
aac_raw_io2); + cm->cm_sgtable = (struct aac_sg_table *)raw->sge; + if (cm->cm_flags & AAC_CMD_DATAIN) + raw->flags = RIO2_IO_TYPE_READ | RIO2_SG_FORMAT_IEEE1212; + else + raw->flags = RIO2_IO_TYPE_WRITE | RIO2_SG_FORMAT_IEEE1212; + } else if (sc->flags & AAC_FLAGS_RAW_IO) { + struct aac_raw_io *raw; + raw = (struct aac_raw_io *)&fib->data[0]; + bzero(raw, sizeof(struct aac_raw_io)); + fib->Header.Command = RawIo; + raw->BlockNumber = blockno; + raw->ByteCount = cm->cm_datalen; + raw->ContainerId = ccb->ccb_h.target_id; + fib->Header.Size += sizeof(struct aac_raw_io); + cm->cm_sgtable = (struct aac_sg_table *) + &raw->SgMapRaw; + if (cm->cm_flags & AAC_CMD_DATAIN) + raw->Flags = 1; + } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) { + fib->Header.Command = ContainerCommand; + if (cm->cm_flags & AAC_CMD_DATAIN) { + struct aac_blockread *br; + br = (struct aac_blockread *)&fib->data[0]; + br->Command = VM_CtBlockRead; + br->ContainerId = ccb->ccb_h.target_id; + br->BlockNumber = blockno; + br->ByteCount = cm->cm_datalen; + fib->Header.Size += sizeof(struct aac_blockread); + cm->cm_sgtable = &br->SgMap; + } else { + struct aac_blockwrite *bw; + bw = (struct aac_blockwrite *)&fib->data[0]; + bw->Command = VM_CtBlockWrite; + bw->ContainerId = ccb->ccb_h.target_id; + bw->BlockNumber = blockno; + bw->ByteCount = cm->cm_datalen; + bw->Stable = CUNSTABLE; + fib->Header.Size += sizeof(struct aac_blockwrite); + cm->cm_sgtable = &bw->SgMap; + } + } else { + fib->Header.Command = ContainerCommand64; + if (cm->cm_flags & AAC_CMD_DATAIN) { + struct aac_blockread64 *br; + br = (struct aac_blockread64 *)&fib->data[0]; + br->Command = VM_CtHostRead64; + br->ContainerId = ccb->ccb_h.target_id; + br->SectorCount = cm->cm_datalen/AAC_BLOCK_SIZE; + br->BlockNumber = blockno; + br->Pad = 0; + br->Flags = 0; + fib->Header.Size += sizeof(struct aac_blockread64); + cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64; + } else { + struct aac_blockwrite64 *bw; + bw = (struct aac_blockwrite64 *)&fib->data[0]; + bw->Command = VM_CtHostWrite64; + bw->ContainerId = ccb->ccb_h.target_id; + bw->SectorCount = cm->cm_datalen/AAC_BLOCK_SIZE; + bw->BlockNumber = blockno; + bw->Pad = 0; + bw->Flags = 0; + fib->Header.Size += sizeof(struct aac_blockwrite64); + cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64; + } + } + aac_enqueue_ready(cm); + aacraid_startio(cm->cm_sc); +} + +static void +aac_container_special_command(struct cam_sim *sim, union ccb *ccb, + u_int8_t *cmdp) +{ + struct aac_cam *camsc; + struct aac_softc *sc; + struct aac_container *co; + + camsc = (struct aac_cam *)cam_sim_softc(sim); + sc = camsc->inf->aac_sc; + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { + fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "found container %d search for %d", co->co_mntobj.ObjectId, ccb->ccb_h.target_id); + if (co->co_mntobj.ObjectId == ccb->ccb_h.target_id) + break; + } + if (co == NULL || ccb->ccb_h.target_lun != 0) { + fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, + "Container not present: cmd 0x%x id %d lun %d len %d", + *cmdp, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + ccb->ccb_h.status = CAM_DEV_NOT_THERE; + xpt_done(ccb); + return; + } + + if (ccb->csio.dxfer_len) + bzero(ccb->csio.data_ptr, ccb->csio.dxfer_len); + + switch (*cmdp) { + case INQUIRY: + { + struct scsi_inquiry *inq = (struct scsi_inquiry *)cmdp; + + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container INQUIRY id %d lun %d len %d VPD 0x%x Page 0x%x", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + 
ccb->csio.dxfer_len, inq->byte2, inq->page_code); + if (!(inq->byte2 & SI_EVPD)) { + struct scsi_inquiry_data *p = + (struct scsi_inquiry_data *)ccb->csio.data_ptr; + if (inq->page_code != 0) { + aac_set_scsi_error(sc, ccb, + SCSI_STATUS_CHECK_COND, + SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00); + xpt_done(ccb); + return; + } + p->device = T_DIRECT; + p->version = SCSI_REV_SPC2; + p->response_format = 2; + if (ccb->csio.dxfer_len >= 36) { + p->additional_length = 31; + p->flags = SID_WBus16|SID_Sync|SID_CmdQue; + /* OEM Vendor defines */ + strcpy(p->vendor,"Adaptec "); + strcpy(p->product,"Array "); + strcpy(p->revision,"V1.0"); + } + } else { + if (inq->page_code == SVPD_SUPPORTED_PAGE_LIST) { + struct scsi_vpd_supported_page_list *p = + (struct scsi_vpd_supported_page_list *) + ccb->csio.data_ptr; + p->device = T_DIRECT; + p->page_code = SVPD_SUPPORTED_PAGE_LIST; + p->length = 2; + p->list[0] = SVPD_SUPPORTED_PAGE_LIST; + p->list[1] = SVPD_UNIT_SERIAL_NUMBER; + } else if (inq->page_code == SVPD_UNIT_SERIAL_NUMBER) { + struct scsi_vpd_unit_serial_number *p = + (struct scsi_vpd_unit_serial_number *) + ccb->csio.data_ptr; + p->device = T_DIRECT; + p->page_code = SVPD_UNIT_SERIAL_NUMBER; + p->length = sprintf((char *)p->serial_num, + "%08X%02X", co->co_uid, + ccb->ccb_h.target_id); + } else { + aac_set_scsi_error(sc, ccb, + SCSI_STATUS_CHECK_COND, + SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00); + xpt_done(ccb); + return; + } + } + ccb->ccb_h.status = CAM_REQ_CMP; + break; + } + + case REPORT_LUNS: + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container REPORT_LUNS id %d lun %d len %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + ccb->ccb_h.status = CAM_REQ_CMP; + break; + + case START_STOP: + { + struct scsi_start_stop_unit *ss = + (struct scsi_start_stop_unit *)cmdp; + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container START_STOP id %d lun %d len %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { + struct aac_command *cm; + struct aac_fib *fib; + struct aac_cnt_config *ccfg; + + if (aacraid_alloc_command(sc, &cm)) { + struct aac_event *event; + + xpt_freeze_simq(sim, 1); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + ccb->ccb_h.sim_priv.entries[0].ptr = camsc; + event = malloc(sizeof(struct aac_event), M_AACRAIDCAM, + M_NOWAIT | M_ZERO); + if (event == NULL) { + device_printf(sc->aac_dev, + "Warning, out of memory for event\n"); + return; + } + event->ev_callback = aac_cam_event; + event->ev_arg = ccb; + event->ev_type = AAC_EVENT_CMFREE; + aacraid_add_event(sc, event); + return; + } + + fib = cm->cm_fib; + cm->cm_timestamp = time_uptime; + cm->cm_datalen = 0; + + fib->Header.Size = + sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC | + AAC_FIBSTATE_FAST_RESPONSE; + fib->Header.Command = ContainerCommand; + + /* Start unit */ + ccfg = (struct aac_cnt_config *)&fib->data[0]; + bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); + ccfg->Command = VM_ContainerConfig; + ccfg->CTCommand.command = CT_PM_DRIVER_SUPPORT; + ccfg->CTCommand.param[0] = (ss->how & SSS_START ? 
+ AAC_PM_DRIVERSUP_START_UNIT : + AAC_PM_DRIVERSUP_STOP_UNIT); + ccfg->CTCommand.param[1] = co->co_mntobj.ObjectId; + ccfg->CTCommand.param[2] = 0; /* 1 - immediate */ + + if (aacraid_wait_command(cm) != 0 || + *(u_int32_t *)&fib->data[0] != 0) { + printf("Power Management: Error start/stop container %d\n", + co->co_mntobj.ObjectId); + } + aacraid_release_command(cm); + } + ccb->ccb_h.status = CAM_REQ_CMP; + break; + } + + case TEST_UNIT_READY: + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container TEST_UNIT_READY id %d lun %d len %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + ccb->ccb_h.status = CAM_REQ_CMP; + break; + + case REQUEST_SENSE: + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container REQUEST_SENSE id %d lun %d len %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + ccb->ccb_h.status = CAM_REQ_CMP; + break; + + case READ_CAPACITY: + { + struct scsi_read_capacity_data *p = + (struct scsi_read_capacity_data *)ccb->csio.data_ptr; + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container READ_CAPACITY id %d lun %d len %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + scsi_ulto4b(co->co_mntobj.ObjExtension.BlockSize, p->length); + /* check if greater than 2TB */ + if (co->co_mntobj.CapacityHigh) { + if (sc->flags & AAC_FLAGS_LBA_64BIT) + scsi_ulto4b(0xffffffff, p->addr); + } else { + scsi_ulto4b(co->co_mntobj.Capacity-1, p->addr); + } + ccb->ccb_h.status = CAM_REQ_CMP; + break; + } + + case SERVICE_ACTION_IN: + { + struct scsi_read_capacity_data_long *p = + (struct scsi_read_capacity_data_long *) + ccb->csio.data_ptr; + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container SERVICE_ACTION_IN id %d lun %d len %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + if (((struct scsi_read_capacity_16 *)cmdp)->service_action != + SRC16_SERVICE_ACTION) { + aac_set_scsi_error(sc, ccb, SCSI_STATUS_CHECK_COND, + SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00); + xpt_done(ccb); + return; + } + scsi_ulto4b(co->co_mntobj.ObjExtension.BlockSize, p->length); + scsi_ulto4b(co->co_mntobj.CapacityHigh, p->addr); + scsi_ulto4b(co->co_mntobj.Capacity-1, &p->addr[4]); + ccb->ccb_h.status = CAM_REQ_CMP; + break; + } + + case MODE_SENSE_6: + { + struct scsi_mode_sense_6 *msp =(struct scsi_mode_sense_6 *)cmdp; + struct ms6_data { + struct scsi_mode_hdr_6 hd; + struct scsi_mode_block_descr bd; + char pages; + } *p = (struct ms6_data *)ccb->csio.data_ptr; + char *pagep; + int return_all_pages = FALSE; + + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container MODE_SENSE id %d lun %d len %d page %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len, msp->page); + p->hd.datalen = sizeof(struct scsi_mode_hdr_6) - 1; + if (co->co_mntobj.ContentState & AAC_FSCS_READONLY) + p->hd.dev_specific = 0x80; /* WP */ + p->hd.dev_specific |= 0x10; /* DPOFUA */ + if (msp->byte2 & SMS_DBD) { + p->hd.block_descr_len = 0; + } else { + p->hd.block_descr_len = + sizeof(struct scsi_mode_block_descr); + p->hd.datalen += p->hd.block_descr_len; + scsi_ulto3b(co->co_mntobj.ObjExtension.BlockSize, p->bd.block_len); + if (co->co_mntobj.Capacity > 0xffffff || + co->co_mntobj.CapacityHigh) { + p->bd.num_blocks[0] = 0xff; + p->bd.num_blocks[1] = 0xff; + p->bd.num_blocks[2] = 0xff; + } else { + p->bd.num_blocks[0] = (u_int8_t) + (co->co_mntobj.Capacity >> 16); + p->bd.num_blocks[1] = (u_int8_t) + (co->co_mntobj.Capacity >> 8); + p->bd.num_blocks[2] = (u_int8_t) + (co->co_mntobj.Capacity); + } + } + pagep = &p->pages; + switch (msp->page & SMS_PAGE_CODE) { 
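
The READ_CAPACITY and SERVICE_ACTION_IN cases above follow the usual SCSI convention for volumes with more than 2^32 blocks: READ CAPACITY(10) reports 0xFFFFFFFF as the last LBA, which tells the initiator to retry with READ CAPACITY(16) for the full 64-bit value. A sketch of that arithmetic, with an assumed 512-byte block size:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* READ CAPACITY(10) can only report a 32-bit last LBA; larger volumes
     * return 0xFFFFFFFF so the initiator retries with READ CAPACITY(16). */
    static uint32_t last_lba_rc10(uint64_t nblocks)
    {
        uint64_t last = nblocks - 1;

        return (last > 0xffffffffULL) ? 0xffffffffU : (uint32_t)last;
    }

    int main(void)
    {
        uint64_t blocks_4tb = 4ULL * 1024 * 1024 * 1024 * 1024 / 512;

        printf("RC10 last LBA: 0x%" PRIx32 "\n", last_lba_rc10(blocks_4tb));
        printf("RC16 last LBA: 0x%" PRIx64 "\n", blocks_4tb - 1);
        return 0;
    }
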
+ case SMS_ALL_PAGES_PAGE: + return_all_pages = TRUE; + case SMS_CONTROL_MODE_PAGE: + { + struct scsi_control_page *cp = + (struct scsi_control_page *)pagep; + + if (ccb->csio.dxfer_len <= p->hd.datalen + 8) { + aac_set_scsi_error(sc, ccb, + SCSI_STATUS_CHECK_COND, + SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00); + xpt_done(ccb); + return; + } + cp->page_code = SMS_CONTROL_MODE_PAGE; + cp->page_length = 6; + p->hd.datalen += 8; + pagep += 8; + if (!return_all_pages) + break; + } + case SMS_VENDOR_SPECIFIC_PAGE: + break; + default: + aac_set_scsi_error(sc, ccb, SCSI_STATUS_CHECK_COND, + SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00); + xpt_done(ccb); + return; + } + ccb->ccb_h.status = CAM_REQ_CMP; + break; + } + + case SYNCHRONIZE_CACHE: + fwprintf(sc, HBA_FLAGS_DBG_COMM_B, + "Container SYNCHRONIZE_CACHE id %d lun %d len %d", + ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + ccb->ccb_h.status = CAM_REQ_CMP; + break; + + default: + fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, + "Container unsupp. cmd 0x%x id %d lun %d len %d", + *cmdp, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, + ccb->csio.dxfer_len); + ccb->ccb_h.status = CAM_REQ_CMP; /*CAM_REQ_INVALID*/ + break; + } + xpt_done(ccb); +} + +static void +aac_passthrough_command(struct cam_sim *sim, union ccb *ccb) +{ + struct aac_cam *camsc; + struct aac_softc *sc; + struct aac_command *cm; + struct aac_fib *fib; + struct aac_srb *srb; + + camsc = (struct aac_cam *)cam_sim_softc(sim); + sc = camsc->inf->aac_sc; + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + if (aacraid_alloc_command(sc, &cm)) { + struct aac_event *event; + + xpt_freeze_simq(sim, 1); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + ccb->ccb_h.sim_priv.entries[0].ptr = camsc; + event = malloc(sizeof(struct aac_event), M_AACRAIDCAM, + M_NOWAIT | M_ZERO); + if (event == NULL) { + device_printf(sc->aac_dev, + "Warning, out of memory for event\n"); + return; + } + event->ev_callback = aac_cam_event; + event->ev_arg = ccb; + event->ev_type = AAC_EVENT_CMFREE; + aacraid_add_event(sc, event); + return; + } + + fib = cm->cm_fib; + switch (ccb->ccb_h.flags & CAM_DIR_MASK) { + case CAM_DIR_IN: + cm->cm_flags |= AAC_CMD_DATAIN; + break; + case CAM_DIR_OUT: + cm->cm_flags |= AAC_CMD_DATAOUT; + break; + case CAM_DIR_NONE: + break; + default: + cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT; + break; + } + + srb = (struct aac_srb *)&fib->data[0]; + srb->function = AAC_SRB_FUNC_EXECUTE_SCSI; + if (cm->cm_flags & (AAC_CMD_DATAIN|AAC_CMD_DATAOUT)) + srb->flags = AAC_SRB_FLAGS_UNSPECIFIED_DIRECTION; + if (cm->cm_flags & AAC_CMD_DATAIN) + srb->flags = AAC_SRB_FLAGS_DATA_IN; + else if (cm->cm_flags & AAC_CMD_DATAOUT) + srb->flags = AAC_SRB_FLAGS_DATA_OUT; + else + srb->flags = AAC_SRB_FLAGS_NO_DATA_XFER; + + /* + * Copy the CDB into the SRB. It's only 6-16 bytes, + * so a copy is not too expensive. + */ + srb->cdb_len = ccb->csio.cdb_len; + if (ccb->ccb_h.flags & CAM_CDB_POINTER) + bcopy(ccb->csio.cdb_io.cdb_ptr, (u_int8_t *)&srb->cdb[0], + srb->cdb_len); + else + bcopy(ccb->csio.cdb_io.cdb_bytes, (u_int8_t *)&srb->cdb[0], + srb->cdb_len); + + /* Set command */ + fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ? 
+ ScsiPortCommandU64 : ScsiPortCommand; + fib->Header.Size = sizeof(struct aac_fib_header) + + sizeof(struct aac_srb); + + /* Map the s/g list */ + cm->cm_sgtable = &srb->sg_map; + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { + /* + * Arrange things so that the S/G + * map will get set up automagically + */ + cm->cm_data = (void *)ccb->csio.data_ptr; + cm->cm_datalen = ccb->csio.dxfer_len; + srb->data_len = ccb->csio.dxfer_len; + } else { + cm->cm_data = NULL; + cm->cm_datalen = 0; + srb->data_len = 0; + } + + srb->bus = camsc->inf->BusNumber - 1; /* Bus no. rel. to the card */ + srb->target = ccb->ccb_h.target_id; + srb->lun = ccb->ccb_h.target_lun; + srb->timeout = ccb->ccb_h.timeout; /* XXX */ + srb->retry_limit = 0; + + cm->cm_complete = aac_cam_complete; + cm->cm_ccb = ccb; + cm->cm_timestamp = time_uptime; + + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC | + AAC_FIBSTATE_FAST_RESPONSE; + + aac_enqueue_ready(cm); + aacraid_startio(cm->cm_sc); +} + +static void +aac_cam_action(struct cam_sim *sim, union ccb *ccb) +{ + struct aac_cam *camsc; + struct aac_softc *sc; + + camsc = (struct aac_cam *)cam_sim_softc(sim); + sc = camsc->inf->aac_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + mtx_assert(&sc->aac_io_lock, MA_OWNED); + + /* Synchronous ops, and ops that don't require communication with the + * controller */ + switch(ccb->ccb_h.func_code) { + case XPT_SCSI_IO: + /* This is handled down below */ + break; + case XPT_CALC_GEOMETRY: + { + struct ccb_calc_geometry *ccg; + u_int32_t size_mb; + u_int32_t secs_per_cylinder; + + ccg = &ccb->ccg; + size_mb = ccg->volume_size / + ((1024L * 1024L) / ccg->block_size); + if (size_mb >= (2 * 1024)) { /* 2GB */ + ccg->heads = 255; + ccg->secs_per_track = 63; + } else if (size_mb >= (1 * 1024)) { /* 1GB */ + ccg->heads = 128; + ccg->secs_per_track = 32; + } else { + ccg->heads = 64; + ccg->secs_per_track = 32; + } + secs_per_cylinder = ccg->heads * ccg->secs_per_track; + ccg->cylinders = ccg->volume_size / secs_per_cylinder; + + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_done(ccb); + return; + } + case XPT_PATH_INQ: + { + struct ccb_pathinq *cpi = &ccb->cpi; + + cpi->version_num = 1; + cpi->target_sprt = 0; + cpi->hba_eng_cnt = 0; + cpi->max_target = camsc->inf->TargetsPerBus; + cpi->max_lun = 8; /* Per the controller spec */ + cpi->initiator_id = camsc->inf->InitiatorBusId; + cpi->bus_id = camsc->inf->BusNumber; +#if __FreeBSD_version >= 800000 + cpi->maxio = sc->aac_max_sectors << 9; +#endif + + /* + * Resetting via the passthrough or parallel bus scan + * causes problems. 
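The XPT_CALC_GEOMETRY case above synthesizes a C/H/S geometry from the volume size alone; the same heuristic, pulled out as a self-contained sketch with one worked example (fake_geometry is a hypothetical helper; 512-byte blocks are assumed for the example):

#include <stdint.h>
#include <stdio.h>

/*
 * Pick a heads/sectors-per-track pair from the volume size in MB,
 * then derive the cylinder count, exactly as the CAM handler does.
 */
static void
fake_geometry(uint64_t volume_size, uint32_t block_size)
{
    uint32_t size_mb, heads, secs_per_track, secs_per_cylinder, cylinders;

    size_mb = volume_size / ((1024L * 1024L) / block_size);
    if (size_mb >= 2 * 1024) {          /* >= 2GB */
        heads = 255;
        secs_per_track = 63;
    } else if (size_mb >= 1 * 1024) {   /* >= 1GB */
        heads = 128;
        secs_per_track = 32;
    } else {
        heads = 64;
        secs_per_track = 32;
    }
    secs_per_cylinder = heads * secs_per_track;
    cylinders = volume_size / secs_per_cylinder;
    printf("%uMB -> C/H/S %u/%u/%u\n", size_mb, cylinders, heads,
        secs_per_track);
}

int
main(void)
{
    /* a 100GB volume with 512-byte blocks -> 102400MB -> 13053/255/63 */
    fake_geometry(100ULL * 1024 * 1024 * 2, 512);
    return (0);
}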
+ */ + cpi->hba_misc = PIM_NOBUSRESET; + cpi->hba_inquiry = PI_TAG_ABLE; + cpi->base_transfer_speed = 300000; +#ifdef CAM_NEW_TRAN_CODE + cpi->hba_misc |= PIM_SEQSCAN; + cpi->protocol = PROTO_SCSI; + cpi->transport = XPORT_SAS; + cpi->transport_version = 0; + cpi->protocol_version = SCSI_REV_SPC2; +#endif + strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); + strncpy(cpi->hba_vid, "PMC-Sierra", HBA_IDLEN); + strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); + cpi->unit_number = cam_sim_unit(sim); + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_done(ccb); + return; + } + case XPT_GET_TRAN_SETTINGS: + { +#ifdef CAM_NEW_TRAN_CODE + struct ccb_trans_settings_scsi *scsi = + &ccb->cts.proto_specific.scsi; + struct ccb_trans_settings_spi *spi = + &ccb->cts.xport_specific.spi; + ccb->cts.protocol = PROTO_SCSI; + ccb->cts.protocol_version = SCSI_REV_SPC2; + ccb->cts.transport = XPORT_SAS; + ccb->cts.transport_version = 0; + scsi->valid = CTS_SCSI_VALID_TQ; + scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; + spi->valid |= CTS_SPI_VALID_DISC; + spi->flags |= CTS_SPI_FLAGS_DISC_ENB; +#else + ccb->cts.flags = ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); + ccb->cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; +#endif + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_done(ccb); + return; + } + case XPT_SET_TRAN_SETTINGS: + ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; + xpt_done(ccb); + return; + case XPT_RESET_BUS: + if (!(sc->flags & AAC_FLAGS_CAM_NORESET) && + camsc->inf->BusType != CONTAINER_BUS) { + ccb->ccb_h.status = aac_cam_reset_bus(sim, ccb); + } else { + ccb->ccb_h.status = CAM_REQ_CMP; + } + xpt_done(ccb); + return; + case XPT_RESET_DEV: + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_done(ccb); + return; + case XPT_ABORT: + ccb->ccb_h.status = aac_cam_abort_ccb(sim, ccb); + xpt_done(ccb); + return; + case XPT_TERM_IO: + ccb->ccb_h.status = aac_cam_term_io(sim, ccb); + xpt_done(ccb); + return; + default: + device_printf(sc->aac_dev, "Unsupported command 0x%x\n", + ccb->ccb_h.func_code); + ccb->ccb_h.status = CAM_PROVIDE_FAIL; + xpt_done(ccb); + return; + } + + /* Async ops that require communcation with the controller */ + if (camsc->inf->BusType == CONTAINER_BUS) { + u_int8_t *cmdp; + + if (ccb->ccb_h.flags & CAM_CDB_POINTER) + cmdp = ccb->csio.cdb_io.cdb_ptr; + else + cmdp = &ccb->csio.cdb_io.cdb_bytes[0]; + + if (*cmdp==READ_6 || *cmdp==WRITE_6 || *cmdp==READ_10 || + *cmdp==WRITE_10 || *cmdp==READ_12 || *cmdp==WRITE_12 || + *cmdp==READ_16 || *cmdp==WRITE_16) + aac_container_rw_command(sim, ccb, cmdp); + else + aac_container_special_command(sim, ccb, cmdp); + } else { + aac_passthrough_command(sim, ccb); + } +} + +static void +aac_cam_poll(struct cam_sim *sim) +{ + /* + * Pinging the interrupt routine isn't very safe, nor is it + * really necessary. Do nothing. 
+ */ +} + +static void +aac_container_complete(struct aac_command *cm) +{ + struct aac_softc *sc; + union ccb *ccb; + u_int32_t status; + + sc = cm->cm_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + ccb = cm->cm_ccb; + status = ((u_int32_t *)cm->cm_fib->data)[0]; + + if (cm->cm_flags & AAC_CMD_RESET) { + ccb->ccb_h.status = CAM_SCSI_BUS_RESET; + } else if (status == ST_OK) { + ccb->ccb_h.status = CAM_REQ_CMP; + } else if (status == ST_NOT_READY) { + ccb->ccb_h.status = CAM_BUSY; + } else { + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + } + + aacraid_release_command(cm); + xpt_done(ccb); +} + +static void +aac_cam_complete(struct aac_command *cm) +{ + union ccb *ccb; + struct aac_srb_response *srbr; + struct aac_softc *sc; + + sc = cm->cm_sc; + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + ccb = cm->cm_ccb; + srbr = (struct aac_srb_response *)&cm->cm_fib->data[0]; + + if (cm->cm_flags & AAC_CMD_FASTRESP) { + /* fast response */ + srbr->srb_status = CAM_REQ_CMP; + srbr->scsi_status = SCSI_STATUS_OK; + srbr->sense_len = 0; + } + + if (cm->cm_flags & AAC_CMD_RESET) { + ccb->ccb_h.status = CAM_SCSI_BUS_RESET; + } else if (srbr->fib_status != 0) { + device_printf(sc->aac_dev, "Passthru FIB failed!\n"); + ccb->ccb_h.status = CAM_REQ_ABORTED; + } else { + /* + * The SRB error codes just happen to match the CAM error + * codes. How convienient! + */ + ccb->ccb_h.status = srbr->srb_status; + + /* Take care of SCSI_IO ops. */ + if (ccb->ccb_h.func_code == XPT_SCSI_IO) { + u_int8_t command, device; + + ccb->csio.scsi_status = srbr->scsi_status; + + /* Take care of autosense */ + if (srbr->sense_len) { + int sense_len, scsi_sense_len; + + scsi_sense_len = sizeof(struct scsi_sense_data); + bzero(&ccb->csio.sense_data, scsi_sense_len); + sense_len = (srbr->sense_len > + scsi_sense_len) ? scsi_sense_len : + srbr->sense_len; + bcopy(&srbr->sense[0], &ccb->csio.sense_data, + srbr->sense_len); + ccb->csio.sense_len = sense_len; + ccb->ccb_h.status |= CAM_AUTOSNS_VALID; + // scsi_sense_print(&ccb->csio); + } + + /* If this is an inquiry command, fake things out */ + if (ccb->ccb_h.flags & CAM_CDB_POINTER) + command = ccb->csio.cdb_io.cdb_ptr[0]; + else + command = ccb->csio.cdb_io.cdb_bytes[0]; + + if (command == INQUIRY) { + if (ccb->ccb_h.status == CAM_REQ_CMP) { + device = ccb->csio.data_ptr[0] & 0x1f; + /* + * We want DASD and PROC devices to only be + * visible through the pass device. 
+ */ + if ((device == T_DIRECT && + !(sc->aac_feature_bits & AAC_SUPPL_SUPPORTED_JBOD)) || + (device == T_PROCESSOR) || + (sc->flags & AAC_FLAGS_CAM_PASSONLY)) + ccb->csio.data_ptr[0] = + ((device & 0xe0) | T_NODEVICE); + + } else if (ccb->ccb_h.status == CAM_SEL_TIMEOUT && + ccb->ccb_h.target_lun != 0) { + /* fix for INQUIRYs on Lun>0 */ + ccb->ccb_h.status = CAM_DEV_NOT_THERE; + } + } + } + } + + aacraid_release_command(cm); + xpt_done(ccb); +} + +static u_int32_t +aac_cam_reset_bus(struct cam_sim *sim, union ccb *ccb) +{ + struct aac_command *cm; + struct aac_fib *fib; + struct aac_softc *sc; + struct aac_cam *camsc; + struct aac_vmioctl *vmi; + struct aac_resetbus *rbc; + u_int32_t rval; + + camsc = (struct aac_cam *)cam_sim_softc(sim); + sc = camsc->inf->aac_sc; + + if (sc == NULL) { + printf("aac: Null sc?\n"); + return (CAM_REQ_ABORTED); + } + + if (aacraid_alloc_command(sc, &cm)) { + struct aac_event *event; + + xpt_freeze_simq(sim, 1); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + ccb->ccb_h.sim_priv.entries[0].ptr = camsc; + event = malloc(sizeof(struct aac_event), M_AACRAIDCAM, + M_NOWAIT | M_ZERO); + if (event == NULL) { + device_printf(sc->aac_dev, + "Warning, out of memory for event\n"); + return (CAM_REQ_ABORTED); + } + event->ev_callback = aac_cam_event; + event->ev_arg = ccb; + event->ev_type = AAC_EVENT_CMFREE; + aacraid_add_event(sc, event); + return (CAM_REQ_ABORTED); + } + + fib = cm->cm_fib; + cm->cm_timestamp = time_uptime; + cm->cm_datalen = 0; + + fib->Header.Size = + sizeof(struct aac_fib_header) + sizeof(struct aac_vmioctl); + fib->Header.XferState = + AAC_FIBSTATE_HOSTOWNED | + AAC_FIBSTATE_INITIALISED | + AAC_FIBSTATE_EMPTY | + AAC_FIBSTATE_FROMHOST | + AAC_FIBSTATE_REXPECTED | + AAC_FIBSTATE_NORM | + AAC_FIBSTATE_ASYNC | + AAC_FIBSTATE_FAST_RESPONSE; + fib->Header.Command = ContainerCommand; + + vmi = (struct aac_vmioctl *)&fib->data[0]; + bzero(vmi, sizeof(struct aac_vmioctl)); + + vmi->Command = VM_Ioctl; + vmi->ObjType = FT_DRIVE; + vmi->MethId = sc->scsi_method_id; + vmi->ObjId = 0; + vmi->IoctlCmd = ResetBus; + + rbc = (struct aac_resetbus *)&vmi->IoctlBuf[0]; + rbc->BusNumber = camsc->inf->BusNumber - 1; + + if (aacraid_wait_command(cm) != 0) { + device_printf(sc->aac_dev,"Error sending ResetBus command\n"); + rval = CAM_REQ_ABORTED; + } else { + rval = CAM_REQ_CMP; + } + aacraid_release_command(cm); + return (rval); +} + +static u_int32_t +aac_cam_abort_ccb(struct cam_sim *sim, union ccb *ccb) +{ + return (CAM_UA_ABORT); +} + +static u_int32_t +aac_cam_term_io(struct cam_sim *sim, union ccb *ccb) +{ + return (CAM_UA_TERMIO); +} + +static int +aac_load_map_command_sg(struct aac_softc *sc, struct aac_command *cm) +{ + int error; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + error = bus_dmamap_load(sc->aac_buffer_dmat, + cm->cm_datamap, cm->cm_data, cm->cm_datalen, + aacraid_map_command_sg, cm, 0); + if (error == EINPROGRESS) { + fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "freezing queue\n"); + sc->flags |= AAC_QUEUE_FRZN; + error = 0; + } else if (error != 0) { + panic("aac_load_map_command_sg: unexpected error %d from " + "busdma", error); + } + return(error); +} + +/* + * Start as much queued I/O as possible on the controller + */ +void +aacraid_startio(struct aac_softc *sc) +{ + struct aac_command *cm; + + fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + for (;;) { + if (sc->aac_state & AAC_STATE_RESET) { + fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "AAC_STATE_RESET"); + break; + } + /* + * This flag might be set if the card is out of resources. 
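The autosense path in aac_cam_complete above computes a clamped sense_len for csio.sense_len but hands the unclamped srbr->sense_len to bcopy; a defensive variant of that copy, shown as an illustrative plain-C sketch (copy_autosense is a made-up helper, not part of the driver), clamps both the copied and the reported length:

#include <stdint.h>
#include <string.h>

/*
 * Never copy more sense bytes than the destination buffer can hold,
 * and report the clamped length.  "fw_sense"/"fw_sense_len" stand in
 * for the firmware SRB response fields.
 */
static size_t
copy_autosense(void *dst, size_t dst_size,
    const uint8_t *fw_sense, size_t fw_sense_len)
{
    size_t n = fw_sense_len < dst_size ? fw_sense_len : dst_size;

    memset(dst, 0, dst_size);
    memcpy(dst, fw_sense, n);   /* copy only the clamped length */
    return (n);                 /* value to place in csio.sense_len */
}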
+ * Checking it here prevents an infinite loop of deferrals. + */ + if (sc->flags & AAC_QUEUE_FRZN) { + fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "AAC_QUEUE_FRZN"); + break; + } + + /* + * Try to get a command that's been put off for lack of + * resources + */ + if (sc->flags & AAC_FLAGS_SYNC_MODE) { + /* sync. transfer mode */ + if (sc->aac_sync_cm) + break; + cm = aac_dequeue_ready(sc); + sc->aac_sync_cm = cm; + } else { + cm = aac_dequeue_ready(sc); + } + + /* nothing to do? */ + if (cm == NULL) + break; + + /* don't map more than once */ + if (cm->cm_flags & AAC_CMD_MAPPED) + panic("aac: command %p already mapped", cm); + + /* + * Set up the command to go to the controller. If there are no + * data buffers associated with the command then it can bypass + * busdma. + */ + if (cm->cm_datalen) + aac_load_map_command_sg(sc, cm); + else + aacraid_map_command_sg(cm, NULL, 0, 0); + } +} diff --git a/sys/dev/aacraid/aacraid_debug.c b/sys/dev/aacraid/aacraid_debug.c new file mode 100644 index 0000000..6062d72 --- /dev/null +++ b/sys/dev/aacraid/aacraid_debug.c @@ -0,0 +1,715 @@ + +/*- + * Copyright (c) 2006-2010 Adaptec, Inc. + * Copyright (c) 2010-2012 PMC-Sierra, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Debugging support. 
+ */ +#include "opt_aacraid.h" + +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#ifdef AACRAID_DEBUG +/* + * Dump the command queue indices + */ +void +aacraid_print_queues(struct aac_softc *sc) +{ + device_printf(sc->aac_dev, "AACQ_FREE %d/%d\n", + sc->aac_qstat[AACQ_FREE].q_length, sc->aac_qstat[AACQ_FREE].q_max); + device_printf(sc->aac_dev, "AACQ_READY %d/%d\n", + sc->aac_qstat[AACQ_READY].q_length, + sc->aac_qstat[AACQ_READY].q_max); + device_printf(sc->aac_dev, "AACQ_BUSY %d/%d\n", + sc->aac_qstat[AACQ_BUSY].q_length, sc->aac_qstat[AACQ_BUSY].q_max); +} + +/* + * Print a FIB + */ +void +aacraid_print_fib(struct aac_softc *sc, struct aac_fib *fib, const char *caller) +{ + if (fib == NULL) { + device_printf(sc->aac_dev, + "aac_print_fib called with NULL fib\n"); + return; + } + device_printf(sc->aac_dev, "%s: FIB @ %p\n", caller, fib); + device_printf(sc->aac_dev, " XferState %b\n", fib->Header.XferState, + "\20" + "\1HOSTOWNED" + "\2ADAPTEROWNED" + "\3INITIALISED" + "\4EMPTY" + "\5FROMPOOL" + "\6FROMHOST" + "\7FROMADAP" + "\10REXPECTED" + "\11RNOTEXPECTED" + "\12DONEADAP" + "\13DONEHOST" + "\14HIGH" + "\15NORM" + "\16ASYNC" + "\17PAGEFILEIO" + "\20SHUTDOWN" + "\21LAZYWRITE" + "\22ADAPMICROFIB" + "\23BIOSFIB" + "\24FAST_RESPONSE" + "\25APIFIB\n"); + device_printf(sc->aac_dev, " Command %d\n", fib->Header.Command); + device_printf(sc->aac_dev, " StructType %d\n", + fib->Header.StructType); + device_printf(sc->aac_dev, " Size %d\n", fib->Header.Size); + device_printf(sc->aac_dev, " SenderSize %d\n", + fib->Header.SenderSize); + device_printf(sc->aac_dev, " SenderAddress 0x%x\n", + fib->Header.SenderFibAddress); + device_printf(sc->aac_dev, " RcvrAddress 0x%x\n", + fib->Header.u.ReceiverFibAddress); + device_printf(sc->aac_dev, " Handle 0x%x\n", + fib->Header.Handle); + switch(fib->Header.Command) { + case ContainerCommand: + { + struct aac_blockread *br; + struct aac_blockwrite *bw; + struct aac_sg_table *sg; + int i; + + br = (struct aac_blockread*)fib->data; + bw = (struct aac_blockwrite*)fib->data; + sg = NULL; + + if (br->Command == VM_CtBlockRead) { + device_printf(sc->aac_dev, + " BlockRead: container %d 0x%x/%d\n", + br->ContainerId, br->BlockNumber, + br->ByteCount); + sg = &br->SgMap; + } + if (bw->Command == VM_CtBlockWrite) { + device_printf(sc->aac_dev, + " BlockWrite: container %d 0x%x/%d " + "(%s)\n", bw->ContainerId, + bw->BlockNumber, bw->ByteCount, + bw->Stable == CSTABLE ? "stable" : + "unstable"); + sg = &bw->SgMap; + } + if (sg != NULL) { + device_printf(sc->aac_dev, + " %d s/g entries\n", sg->SgCount); + for (i = 0; i < sg->SgCount; i++) + device_printf(sc->aac_dev, " 0x%08x/%d\n", + sg->SgEntry[i].SgAddress, + sg->SgEntry[i].SgByteCount); + } + break; + } + default: + device_printf(sc->aac_dev, " %16D\n", fib->data, " "); + device_printf(sc->aac_dev, " %16D\n", fib->data + 16, " "); + break; + } +} + +/* + * Describe an AIF we have received. 
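aacraid_print_fib above relies on the FreeBSD kernel printf(9) %b conversion to decode XferState: the first byte of the pattern ("\20" == 16) is the base used to print the raw value, and each following byte is a 1-based bit number whose name is printed inside <...> when that bit is set. A userland approximation of the same decoding (decode_bits is a hypothetical helper):

#include <stdio.h>

static void
decode_bits(unsigned int val, const char *fmt)
{
    int base = *fmt++;
    int any = 0;

    printf(base == 8 ? "%o" : "%x", val);
    while (*fmt) {
        int bit = *fmt++;
        const char *name = fmt;

        while (*fmt > ' ')      /* a name runs until the next bit byte */
            fmt++;
        if (val & (1u << (bit - 1))) {
            printf("%c%.*s", any ? ',' : '<',
                (int)(fmt - name), name);
            any = 1;
        }
    }
    if (any)
        printf(">");
    printf("\n");
}

int
main(void)
{
    /* bits 0 and 2 set -> prints "5<HOSTOWNED,INITIALISED>" */
    decode_bits(0x5, "\20" "\1HOSTOWNED" "\2ADAPTEROWNED" "\3INITIALISED");
    return (0);
}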
+ */ +void +aacraid_print_aif(struct aac_softc *sc, struct aac_aif_command *aif) +{ + switch(aif->command) { + case AifCmdEventNotify: + device_printf(sc->aac_dev, "EventNotify(%d)\n", aif->seqNumber); + switch(aif->data.EN.type) { + case AifEnGeneric: /* Generic notification */ + device_printf(sc->aac_dev, "(Generic) %.*s\n", + (int)sizeof(aif->data.EN.data.EG), + aif->data.EN.data.EG.text); + break; + case AifEnTaskComplete: /* Task has completed */ + device_printf(sc->aac_dev, "(TaskComplete)\n"); + break; + case AifEnConfigChange: /* Adapter configuration change + * occurred */ + device_printf(sc->aac_dev, "(ConfigChange)\n"); + break; + case AifEnContainerChange: /* Adapter specific container + * configuration change */ + device_printf(sc->aac_dev, "(ContainerChange) " + "container %d,%d\n", + aif->data.EN.data.ECC.container[0], + aif->data.EN.data.ECC.container[1]); + break; + case AifEnDeviceFailure: /* SCSI device failed */ + device_printf(sc->aac_dev, "(DeviceFailure) " + "handle %d\n", + aif->data.EN.data.EDF.deviceHandle); + break; + case AifEnMirrorFailover: /* Mirror failover started */ + device_printf(sc->aac_dev, "(MirrorFailover) " + "container %d failed, " + "migrating from slice %d to %d\n", + aif->data.EN.data.EMF.container, + aif->data.EN.data.EMF.failedSlice, + aif->data.EN.data.EMF.creatingSlice); + break; + case AifEnContainerEvent: /* Significant container + * event */ + device_printf(sc->aac_dev, "(ContainerEvent) " + "container %d event " + "%d\n", aif->data.EN.data.ECE.container, + aif->data.EN.data.ECE.eventType); + break; + case AifEnFileSystemChange: /* File system changed */ + device_printf(sc->aac_dev, "(FileSystemChange)\n"); + break; + case AifEnConfigPause: /* Container pause event */ + device_printf(sc->aac_dev, "(ConfigPause)\n"); + break; + case AifEnConfigResume: /* Container resume event */ + device_printf(sc->aac_dev, "(ConfigResume)\n"); + break; + case AifEnFailoverChange: /* Failover space assignment + * changed */ + device_printf(sc->aac_dev, "(FailoverChange)\n"); + break; + case AifEnRAID5RebuildDone: /* RAID5 rebuild finished */ + device_printf(sc->aac_dev, "(RAID5RebuildDone)\n"); + break; + case AifEnEnclosureManagement: /* Enclosure management event */ + device_printf(sc->aac_dev, "(EnclosureManagement) " + "EMPID %d unit %d " + "event %d\n", aif->data.EN.data.EEE.empID, + aif->data.EN.data.EEE.unitID, + aif->data.EN.data.EEE.eventType); + break; + case AifEnBatteryEvent: /* Significant NV battery + * event */ + device_printf(sc->aac_dev, "(BatteryEvent) %d " + "(state was %d, is %d\n", + aif->data.EN.data.EBE.transition_type, + aif->data.EN.data.EBE.current_state, + aif->data.EN.data.EBE.prior_state); + break; + case AifEnAddContainer: /* A new container was + * created. */ + device_printf(sc->aac_dev, "(AddContainer)\n"); + break; + case AifEnDeleteContainer: /* A container was deleted. */ + device_printf(sc->aac_dev, "(DeleteContainer)\n"); + break; + case AifEnBatteryNeedsRecond: /* The battery needs + * reconditioning */ + device_printf(sc->aac_dev, "(BatteryNeedsRecond)\n"); + break; + case AifEnClusterEvent: /* Some cluster event */ + device_printf(sc->aac_dev, "(ClusterEvent) event %d\n", + aif->data.EN.data.ECLE.eventType); + break; + case AifEnDiskSetEvent: /* A disk set event occured. 
*/ + device_printf(sc->aac_dev, "(DiskSetEvent) event %d " + "diskset %jd creator %jd\n", + aif->data.EN.data.EDS.eventType, + (intmax_t)aif->data.EN.data.EDS.DsNum, + (intmax_t)aif->data.EN.data.EDS.CreatorId); + break; + case AifDenMorphComplete: /* A morph operation + * completed */ + device_printf(sc->aac_dev, "(MorphComplete)\n"); + break; + case AifDenVolumeExtendComplete: /* A volume expand operation + * completed */ + device_printf(sc->aac_dev, "(VolumeExtendComplete)\n"); + break; + default: + device_printf(sc->aac_dev, "(%d)\n", aif->data.EN.type); + break; + } + break; + case AifCmdJobProgress: + { + char *status; + switch(aif->data.PR[0].status) { + case AifJobStsSuccess: + status = "success"; break; + case AifJobStsFinished: + status = "finished"; break; + case AifJobStsAborted: + status = "aborted"; break; + case AifJobStsFailed: + status = "failed"; break; + case AifJobStsSuspended: + status = "suspended"; break; + case AifJobStsRunning: + status = "running"; break; + default: + status = "unknown status"; break; + } + + device_printf(sc->aac_dev, "JobProgress (%d) - %s (%d, %d)\n", + aif->seqNumber, status, + aif->data.PR[0].currentTick, + aif->data.PR[0].finalTick); + switch(aif->data.PR[0].jd.type) { + case AifJobScsiZero: /* SCSI dev clear operation */ + device_printf(sc->aac_dev, "(ScsiZero) handle %d\n", + aif->data.PR[0].jd.client.scsi_dh); + break; + case AifJobScsiVerify: /* SCSI device Verify operation + * NO REPAIR */ + device_printf(sc->aac_dev, "(ScsiVerify) handle %d\n", + aif->data.PR[0].jd.client.scsi_dh); + break; + case AifJobScsiExercise: /* SCSI device Exercise + * operation */ + device_printf(sc->aac_dev, "(ScsiExercise) handle %d\n", + aif->data.PR[0].jd.client.scsi_dh); + break; + case AifJobScsiVerifyRepair: /* SCSI device Verify operation + * WITH repair */ + device_printf(sc->aac_dev, + "(ScsiVerifyRepair) handle %d\n", + aif->data.PR[0].jd.client.scsi_dh); + break; + case AifJobCtrZero: /* Container clear operation */ + device_printf(sc->aac_dev, + "(ContainerZero) container %d\n", + aif->data.PR[0].jd.client.container.src); + break; + case AifJobCtrCopy: /* Container copy operation */ + device_printf(sc->aac_dev, + "(ContainerCopy) container %d to %d\n", + aif->data.PR[0].jd.client.container.src, + aif->data.PR[0].jd.client.container.dst); + break; + case AifJobCtrCreateMirror: /* Container Create Mirror + * operation */ + device_printf(sc->aac_dev, + "(ContainerCreateMirror) container %d\n", + aif->data.PR[0].jd.client.container.src); + /* XXX two containers? */ + break; + case AifJobCtrMergeMirror: /* Container Merge Mirror + * operation */ + device_printf(sc->aac_dev, + "(ContainerMergeMirror) container %d\n", + aif->data.PR[0].jd.client.container.src); + /* XXX two containers? 
*/ + break; + case AifJobCtrScrubMirror: /* Container Scrub Mirror + * operation */ + device_printf(sc->aac_dev, + "(ContainerScrubMirror) container %d\n", + aif->data.PR[0].jd.client.container.src); + break; + case AifJobCtrRebuildRaid5: /* Container Rebuild Raid5 + * operation */ + device_printf(sc->aac_dev, + "(ContainerRebuildRaid5) container %d\n", + aif->data.PR[0].jd.client.container.src); + break; + case AifJobCtrScrubRaid5: /* Container Scrub Raid5 + * operation */ + device_printf(sc->aac_dev, + "(ContainerScrubRaid5) container %d\n", + aif->data.PR[0].jd.client.container.src); + break; + case AifJobCtrMorph: /* Container morph operation */ + device_printf(sc->aac_dev, + "(ContainerMorph) container %d\n", + aif->data.PR[0].jd.client.container.src); + /* XXX two containers? */ + break; + case AifJobCtrPartCopy: /* Container Partition copy + * operation */ + device_printf(sc->aac_dev, + "(ContainerPartCopy) container %d to " + "%d\n", + aif->data.PR[0].jd.client.container.src, + aif->data.PR[0].jd.client.container.dst); + break; + case AifJobCtrRebuildMirror: /* Container Rebuild Mirror + * operation */ + device_printf(sc->aac_dev, + "(ContainerRebuildMirror) container " + "%d\n", + aif->data.PR[0].jd.client.container.src); + break; + case AifJobCtrCrazyCache: /* crazy cache */ + device_printf(sc->aac_dev, + "(ContainerCrazyCache) container %d\n", + aif->data.PR[0].jd.client.container.src); + /* XXX two containers? */ + break; + case AifJobFsCreate: /* File System Create + * operation */ + device_printf(sc->aac_dev, "(FsCreate)\n"); + break; + case AifJobFsVerify: /* File System Verify + * operation */ + device_printf(sc->aac_dev, "(FsVerivy)\n"); + break; + case AifJobFsExtend: /* File System Extend + * operation */ + device_printf(sc->aac_dev, "(FsExtend)\n"); + break; + case AifJobApiFormatNTFS: /* Format a drive to NTFS */ + device_printf(sc->aac_dev, "(FormatNTFS)\n"); + break; + case AifJobApiFormatFAT: /* Format a drive to FAT */ + device_printf(sc->aac_dev, "(FormatFAT)\n"); + break; + case AifJobApiUpdateSnapshot: /* update the read/write half + * of a snapshot */ + device_printf(sc->aac_dev, "(UpdateSnapshot)\n"); + break; + case AifJobApiFormatFAT32: /* Format a drive to FAT32 */ + device_printf(sc->aac_dev, "(FormatFAT32)\n"); + break; + case AifJobCtlContinuousCtrVerify: /* Adapter operation */ + device_printf(sc->aac_dev, "(ContinuousCtrVerify)\n"); + break; + default: + device_printf(sc->aac_dev, "(%d)\n", + aif->data.PR[0].jd.type); + break; + } + break; + } + case AifCmdAPIReport: + device_printf(sc->aac_dev, "APIReport (%d)\n", aif->seqNumber); + break; + case AifCmdDriverNotify: + device_printf(sc->aac_dev, "DriverNotify (%d)\n", + aif->seqNumber); + break; + default: + device_printf(sc->aac_dev, "AIF %d (%d)\n", aif->command, + aif->seqNumber); + break; + } +} +#endif /* AACRAID_DEBUG */ + +/* + * Debug flags to be put into the HBA flags field when initialized + */ +const unsigned long aacraid_debug_flags = /* Variable to setup with above flags. 
*/ +/* HBA_FLAGS_DBG_KERNEL_PRINT_B | */ + HBA_FLAGS_DBG_FW_PRINT_B | +/* HBA_FLAGS_DBG_FUNCTION_ENTRY_B | */ + HBA_FLAGS_DBG_FUNCTION_EXIT_B | + HBA_FLAGS_DBG_ERROR_B | + HBA_FLAGS_DBG_INIT_B | +/* HBA_FLAGS_DBG_OS_COMMANDS_B | */ +/* HBA_FLAGS_DBG_SCAN_B | */ +/* HBA_FLAGS_DBG_COALESCE_B | */ +/* HBA_FLAGS_DBG_IOCTL_COMMANDS_B | */ +/* HBA_FLAGS_DBG_SYNC_COMMANDS_B | */ + HBA_FLAGS_DBG_COMM_B | +/* HBA_FLAGS_DBG_AIF_B | */ +/* HBA_FLAGS_DBG_CSMI_COMMANDS_B | */ + HBA_FLAGS_DBG_DEBUG_B | +/* HBA_FLAGS_DBG_FLAGS_MASK | */ +0; + +int aacraid_get_fw_debug_buffer(struct aac_softc *sc) +{ + u_int32_t MonDriverBufferPhysAddrLow = 0; + u_int32_t MonDriverBufferPhysAddrHigh = 0; + u_int32_t MonDriverBufferSize = 0; + u_int32_t MonDriverHeaderSize = 0; + + /* + * Get the firmware print buffer parameters from the firmware + * If the command was successful map in the address. + */ + if (!aacraid_sync_command(sc, AAC_MONKER_GETDRVPROP, 0, 0, 0, 0, NULL, NULL)) { + MonDriverBufferPhysAddrLow = AAC_GET_MAILBOX(sc, 1); + MonDriverBufferPhysAddrHigh = AAC_GET_MAILBOX(sc, 2); + MonDriverBufferSize = AAC_GET_MAILBOX(sc, 3); + MonDriverHeaderSize = AAC_GET_MAILBOX(sc, 4); + if (MonDriverBufferSize) { + unsigned long Offset = MonDriverBufferPhysAddrLow + - rman_get_start(sc->aac_regs_res1); + + /* + * See if the address is already mapped in and if so set it up + * from the base address + */ + if ((MonDriverBufferPhysAddrHigh == 0) && + (Offset + MonDriverBufferSize < + rman_get_size(sc->aac_regs_res1))) { + sc->DebugOffset = Offset; + sc->DebugHeaderSize = MonDriverHeaderSize; + sc->FwDebugBufferSize = MonDriverBufferSize; + sc->FwDebugFlags = 0; + sc->DebugFlags = aacraid_debug_flags; + return 1; + } + } + } + + /* + * The GET_DRIVER_BUFFER_PROPERTIES command failed + */ + return 0; +} + +#define PRINT_TIMEOUT 250000 /* 1/4 second */ + +void aacraid_fw_printf(struct aac_softc *sc, unsigned long PrintFlags, const char * fmt, ...) +{ + va_list args; + u_int32_t Count, i; + char PrintBuffer_P[PRINT_BUFFER_SIZE]; + unsigned long PrintType; + + PrintType = PrintFlags & + ~(HBA_FLAGS_DBG_KERNEL_PRINT_B|HBA_FLAGS_DBG_FW_PRINT_B); + if (((PrintType!=0) && (sc!=NULL) && ((sc->DebugFlags & PrintType)==0)) + || ((sc!=NULL) && (sc->DebugFlags + & (HBA_FLAGS_DBG_KERNEL_PRINT_B|HBA_FLAGS_DBG_FW_PRINT_B)) == 0)) + return; + + /* + * Set up parameters and call sprintf function to format the data + */ + va_start(args, fmt); + vsprintf(PrintBuffer_P, fmt, args); + va_end(args); + + /* + * Make sure the HBA structure has been passed in for this section + */ + if ((sc != NULL) && (sc->FwDebugBufferSize)) { + /* + * If we are set up for a Firmware print + */ + if ((sc->DebugFlags & HBA_FLAGS_DBG_FW_PRINT_B) + && ((PrintFlags + & (HBA_FLAGS_DBG_KERNEL_PRINT_B|HBA_FLAGS_DBG_FW_PRINT_B)) + != HBA_FLAGS_DBG_KERNEL_PRINT_B)) { + /* + * Make sure the string size is within boundaries + */ + Count = strlen(PrintBuffer_P); + if (Count > sc->FwDebugBufferSize) + Count = (u_int16_t)sc->FwDebugBufferSize; + + /* + * Wait for no more than PRINT_TIMEOUT for the previous + * message length to clear (the handshake). + */ + for (i = 0; i < PRINT_TIMEOUT; ++i) { + if (!AAC_MEM1_GETREG4(sc, + sc->DebugOffset + FW_DEBUG_STR_LENGTH_OFFSET)) { + break; + } + DELAY(1); + } + + /* + * If the Length is clear, copy over the message, the + * flags, and the length. Make sure the length is the + * last because that is the signal for the Firmware to + * pick it up. 
+ */ + if (!AAC_MEM1_GETREG4(sc, + sc->DebugOffset + FW_DEBUG_STR_LENGTH_OFFSET)) { + for (i = 0; i < Count; ++i) { + AAC_MEM1_SETREG1(sc, sc->DebugOffset + sc->DebugHeaderSize + i, + PrintBuffer_P[i]); + } + AAC_MEM1_SETREG4(sc, sc->DebugOffset + FW_DEBUG_FLAGS_OFFSET, + sc->FwDebugFlags); + AAC_MEM1_SETREG4(sc, sc->DebugOffset + FW_DEBUG_STR_LENGTH_OFFSET, + Count); + } else + sc->DebugFlags &= ~HBA_FLAGS_DBG_FW_PRINT_B; + } + + /* + * If the Kernel Debug Print flag is set, send it off to the + * Kernel debugger + */ + if ((sc->DebugFlags & HBA_FLAGS_DBG_KERNEL_PRINT_B) + && ((PrintFlags + & (HBA_FLAGS_DBG_KERNEL_PRINT_B|HBA_FLAGS_DBG_FW_PRINT_B)) + != HBA_FLAGS_DBG_FW_PRINT_B)) { + if (sc->FwDebugFlags & FW_DEBUG_FLAGS_NO_HEADERS_B) + printf ("%s\n", PrintBuffer_P); + else + device_printf (sc->aac_dev, "%s\n", PrintBuffer_P); + } + + } else { + /* + * No HBA structure passed in so it has to be for the Kernel Debugger + */ + if ((sc != NULL) && (sc->FwDebugFlags & FW_DEBUG_FLAGS_NO_HEADERS_B)) + printf ("%s\n", PrintBuffer_P); + else if (sc != NULL) + device_printf (sc->aac_dev, "%s\n", PrintBuffer_P); + else + printf("%s\n", PrintBuffer_P); + } +} + +void aacraid_fw_print_mem(struct aac_softc *sc, unsigned long PrintFlags, u_int8_t *Addr, int Count) +{ + int Offset, i; + u_int32_t DebugFlags = 0; + char Buffer[100]; + char *LineBuffer_P; + + /* + * If we have an HBA structure, save off the flags and set the no + * headers flag so we don't have garbage between our lines of data + */ + if (sc != NULL) { + DebugFlags = sc->FwDebugFlags; + sc->FwDebugFlags |= FW_DEBUG_FLAGS_NO_HEADERS_B; + } + + Offset = 0; + + /* + * Loop through all the data + */ + while (Offset < Count) { + /* + * We will format each line into a buffer and then print out + * the entire line so set the pointer to the beginning of the + * buffer + */ + LineBuffer_P = Buffer; + + /* + * Set up the address in HEX + */ + sprintf(LineBuffer_P, "\n%04x ", Offset); + LineBuffer_P += 6; + + /* + * Set up 16 bytes in HEX format + */ + for (i = 0; i < 16; ++i) { + /* + * If we are past the count of data bytes to output, + * pad with blanks + */ + if ((Offset + i) >= Count) + sprintf (LineBuffer_P, " "); + else + sprintf (LineBuffer_P, "%02x ", Addr[Offset+i]); + LineBuffer_P += 3; + + /* + * At the mid point we will put in a divider + */ + if (i == 7) { + sprintf (LineBuffer_P, "- "); + LineBuffer_P += 2; + } + } + /* + * Now do the same 16 bytes at the end of the line in ASCII + * format + */ + sprintf (LineBuffer_P, " "); + LineBuffer_P += 2; + for (i = 0; i < 16; ++i) { + /* + * If all data processed, OUT-O-HERE + */ + if ((Offset + i) >= Count) + break; + + /* + * If this is a printable ASCII character, convert it + */ + if ((Addr[Offset+i] > 0x1F) && (Addr[Offset+i] < 0x7F)) + sprintf (LineBuffer_P, "%c", Addr[Offset+i]); + else + sprintf (LineBuffer_P, "."); + ++LineBuffer_P; + } + /* + * The line is now formatted, so print it out + */ + aacraid_fw_printf(sc, PrintFlags, "%s", Buffer); + + /* + * Bump the offset by 16 for the next line + */ + Offset += 16; + + } + + /* + * Restore the saved off flags + */ + if (sc != NULL) + sc->FwDebugFlags = DebugFlags; +} + diff --git a/sys/dev/aacraid/aacraid_debug.h b/sys/dev/aacraid/aacraid_debug.h new file mode 100644 index 0000000..d79bb92 --- /dev/null +++ b/sys/dev/aacraid/aacraid_debug.h @@ -0,0 +1,64 @@ +/*- + * Copyright (c) 2010 Adaptec, Inc. + * Copyright (c) 2010-2012 PMC-Sierra, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef PRINT_BUFFER_SIZE + +#define PRINT_BUFFER_SIZE 512 + +#define HBA_FLAGS_DBG_FLAGS_MASK 0x0000ffff +#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00000001 +#define HBA_FLAGS_DBG_FW_PRINT_B 0x00000002 +#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00000004 +#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00000008 +#define HBA_FLAGS_DBG_ERROR_B 0x00000010 +#define HBA_FLAGS_DBG_INIT_B 0x00000020 +#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00000040 +#define HBA_FLAGS_DBG_SCAN_B 0x00000080 +#define HBA_FLAGS_DBG_COALESCE_B 0x00000100 +#define HBA_FLAGS_DBG_IOCTL_COMMANDS_B 0x00000200 +#define HBA_FLAGS_DBG_SYNC_COMMANDS_B 0x00000400 +#define HBA_FLAGS_DBG_COMM_B 0x00000800 +#define HBA_FLAGS_DBG_CSMI_COMMANDS_B 0x00001000 +#define HBA_FLAGS_DBG_AIF_B 0x00001000 +#define HBA_FLAGS_DBG_DEBUG_B 0x00002000 + +#define FW_DEBUG_STR_LENGTH_OFFSET 0x00 +#define FW_DEBUG_FLAGS_OFFSET 0x04 +#define FW_DEBUG_BLED_OFFSET 0x08 +#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01 + +struct aac_softc; +extern int aacraid_get_fw_debug_buffer(struct aac_softc *); +extern void aacraid_fw_printf(struct aac_softc *, unsigned long, const char *, ...); +extern void aacraid_fw_print_mem(struct aac_softc *, unsigned long, u_int8_t *,int); +extern int aacraid_sync_command(struct aac_softc *sc, u_int32_t command, + u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, + u_int32_t arg3, u_int32_t *sp, u_int32_t *r1); + +#endif diff --git a/sys/dev/aacraid/aacraid_linux.c b/sys/dev/aacraid/aacraid_linux.c new file mode 100644 index 0000000..3d85445 --- /dev/null +++ b/sys/dev/aacraid/aacraid_linux.c @@ -0,0 +1,97 @@ +/*- + * Copyright (c) 2002 Scott Long + * Copyright (c) 2002-2010 Adaptec, Inc. + * Copyright (c) 2010-2012 PMC-Sierra, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Linux ioctl handler for the aac device driver + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef __amd64__ +#include +#include +#else +#include +#include +#endif +#include + +/* There are multiple ioctl number ranges that need to be handled */ +#define AAC_LINUX_IOCTL_MIN 0x0000 +#define AAC_LINUX_IOCTL_MAX 0x21ff + +static linux_ioctl_function_t aacraid_linux_ioctl; +static struct linux_ioctl_handler aacraid_linux_handler = {aacraid_linux_ioctl, + AAC_LINUX_IOCTL_MIN, + AAC_LINUX_IOCTL_MAX}; + +SYSINIT (aacraid_linux_register, SI_SUB_KLD, SI_ORDER_MIDDLE, + linux_ioctl_register_handler, &aacraid_linux_handler); +SYSUNINIT(aacraid_linux_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, + linux_ioctl_unregister_handler, &aacraid_linux_handler); + +static int +aacraid_linux_modevent(module_t mod, int type, void *data) +{ + /* Do we care about any specific load/unload actions? */ + return (0); +} + +DEV_MODULE(aacraid_linux, aacraid_linux_modevent, NULL); +MODULE_DEPEND(aacraid_linux, linux, 1, 1, 1); + +static int +aacraid_linux_ioctl(struct thread *td, struct linux_ioctl_args *args) +{ + struct file *fp; + u_long cmd; + int error; + +#if __FreeBSD_version >= 900000 + if ((error = fget(td, args->fd, 0, &fp)) != 0) +#else + if ((error = fget(td, args->fd, &fp)) != 0) +#endif + return (error); + cmd = args->cmd; + + /* + * Pass the ioctl off to our standard handler. + */ + error = (fo_ioctl(fp, cmd, (caddr_t)args->arg, td->td_ucred, td)); + fdrop(fp, td); + return (error); +} diff --git a/sys/dev/aacraid/aacraid_pci.c b/sys/dev/aacraid/aacraid_pci.c new file mode 100644 index 0000000..6204fdf --- /dev/null +++ b/sys/dev/aacraid/aacraid_pci.c @@ -0,0 +1,265 @@ +/*- + * Copyright (c) 2000 Michael Smith + * Copyright (c) 2001 Scott Long + * Copyright (c) 2000 BSDi + * Copyright (c) 2001-2010 Adaptec, Inc. + * Copyright (c) 2010-2012 PMC-Sierra, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * PCI bus interface and resource allocation. + */ + +#include "opt_aacraid.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +static int aacraid_pci_probe(device_t dev); +static int aacraid_pci_attach(device_t dev); + +static device_method_t aacraid_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, aacraid_pci_probe), + DEVMETHOD(device_attach, aacraid_pci_attach), + DEVMETHOD(device_detach, aacraid_detach), + DEVMETHOD(device_suspend, aacraid_suspend), + DEVMETHOD(device_resume, aacraid_resume), + + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_driver_added, bus_generic_driver_added), + { 0, 0 } +}; + +static driver_t aacraid_pci_driver = { + "aacraid", + aacraid_methods, + sizeof(struct aac_softc) +}; + +static devclass_t aacraid_devclass; + +DRIVER_MODULE(aacraid, pci, aacraid_pci_driver, aacraid_devclass, 0, 0); +MODULE_DEPEND(aacraid, pci, 1, 1, 1); + +struct aac_ident +{ + u_int16_t vendor; + u_int16_t device; + u_int16_t subvendor; + u_int16_t subdevice; + int hwif; + int quirks; + char *desc; +} aacraid_family_identifiers[] = { + {0x9005, 0x028b, 0, 0, AAC_HWIF_SRC, 0, + "Adaptec RAID Controller"}, + {0x9005, 0x028c, 0, 0, AAC_HWIF_SRCV, 0, + "Adaptec RAID Controller"}, + {0x9005, 0x028d, 0, 0, AAC_HWIF_SRCV, 0, + "Adaptec RAID Controller"}, + {0x9005, 0x028f, 0, 0, AAC_HWIF_SRCV, 0, + "Adaptec RAID Controller"}, + {0, 0, 0, 0, 0, 0, 0} +}; + +static struct aac_ident * +aac_find_ident(device_t dev) +{ + struct aac_ident *m; + u_int16_t vendid, devid, sub_vendid, sub_devid; + + vendid = pci_get_vendor(dev); + devid = pci_get_device(dev); + sub_vendid = pci_get_subvendor(dev); + sub_devid = pci_get_subdevice(dev); + + for (m = aacraid_family_identifiers; m->vendor != 0; m++) { + if ((m->vendor == vendid) && (m->device == devid)) + return (m); + } + + return (NULL); +} + +/* + * Determine whether this is one of our supported adapters. + */ +static int +aacraid_pci_probe(device_t dev) +{ + struct aac_ident *id; + + fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + if ((id = aac_find_ident(dev)) != NULL) { + device_set_desc(dev, id->desc); + return(BUS_PROBE_DEFAULT); + } + return(ENXIO); +} + +/* + * Allocate resources for our device, set up the bus interface. + */ +static int +aacraid_pci_attach(device_t dev) +{ + struct aac_softc *sc; + struct aac_ident *id; + int error; + u_int32_t command; + + fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + + /* + * Initialise softc. 
+ */ + sc = device_get_softc(dev); + bzero(sc, sizeof(*sc)); + sc->aac_dev = dev; + + /* assume failure is 'not configured' */ + error = ENXIO; + + /* + * Verify that the adapter is correctly set up in PCI space. + */ + command = pci_read_config(sc->aac_dev, PCIR_COMMAND, 2); + command |= PCIM_CMD_BUSMASTEREN; + pci_write_config(dev, PCIR_COMMAND, command, 2); + command = pci_read_config(sc->aac_dev, PCIR_COMMAND, 2); + if (!(command & PCIM_CMD_BUSMASTEREN)) { + device_printf(sc->aac_dev, "can't enable bus-master feature\n"); + goto out; + } + if ((command & PCIM_CMD_MEMEN) == 0) { + device_printf(sc->aac_dev, "memory window not available\n"); + goto out; + } + + /* + * Detect the hardware interface version, set up the bus interface + * indirection. + */ + id = aac_find_ident(dev); + sc->aac_hwif = id->hwif; + switch(sc->aac_hwif) { + case AAC_HWIF_SRC: + fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "set hardware up for PMC SRC"); + sc->aac_if = aacraid_src_interface; + break; + case AAC_HWIF_SRCV: + fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "set hardware up for PMC SRCv"); + sc->aac_if = aacraid_srcv_interface; + break; + default: + sc->aac_hwif = AAC_HWIF_UNKNOWN; + device_printf(sc->aac_dev, "unknown hardware type\n"); + error = ENXIO; + goto out; + } + + /* assume failure is 'out of memory' */ + error = ENOMEM; + + /* + * Allocate the PCI register window. + */ + sc->aac_regs_rid0 = PCIR_BAR(0); + if ((sc->aac_regs_res0 = bus_alloc_resource_any(sc->aac_dev, + SYS_RES_MEMORY, &sc->aac_regs_rid0, RF_ACTIVE)) == NULL) { + device_printf(sc->aac_dev, + "couldn't allocate register window 0\n"); + goto out; + } + sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0); + sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0); + + sc->aac_regs_rid1 = PCIR_BAR(2); + if ((sc->aac_regs_res1 = bus_alloc_resource_any(sc->aac_dev, + SYS_RES_MEMORY, &sc->aac_regs_rid1, RF_ACTIVE)) == NULL) { + device_printf(sc->aac_dev, + "couldn't allocate register window 1\n"); + goto out; + } + sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1); + sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1); + + /* + * Allocate the parent bus DMA tag appropriate for our PCI interface. + * + * Note that some of these controllers are 64-bit capable. + */ + if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + PAGE_SIZE, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ + BUS_SPACE_UNRESTRICTED, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* No locking needed */ + &sc->aac_parent_dmat)) { + device_printf(sc->aac_dev, "can't allocate parent DMA tag\n"); + goto out; + } + + /* Set up quirks */ + sc->flags = id->quirks; + + /* + * Do bus-independent initialisation. + */ + error = aacraid_attach(sc); + +out: + if (error) + aacraid_free(sc); + return(error); +} diff --git a/sys/dev/aacraid/aacraid_reg.h b/sys/dev/aacraid/aacraid_reg.h new file mode 100644 index 0000000..988520c --- /dev/null +++ b/sys/dev/aacraid/aacraid_reg.h @@ -0,0 +1,1598 @@ +/*- + * Copyright (c) 2000 Michael Smith + * Copyright (c) 2000-2001 Scott Long + * Copyright (c) 2000 BSDi + * Copyright (c) 2001-2010 Adaptec, Inc. + * Copyright (c) 2010-2012 PMC-Sierra, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * Data structures defining the interface between the driver and the Adaptec + * 'FSA' adapters. Note that many field names and comments here are taken + * verbatim from the Adaptec driver source in order to make comparing the + * two slightly easier. + */ + +/* + * Misc. magic numbers. + */ +#define AAC_MAX_CONTAINERS 64 +#define AAC_BLOCK_SIZE 512 + +/* + * Communications interface. + * + * Where datastructure layouts are closely parallel to the Adaptec sample code, + * retain their naming conventions (for now) to aid in cross-referencing. + */ + +/* transport FIB header (PMC) */ +struct aac_fib_xporthdr { + u_int64_t HostAddress; /* FIB host address w/o xport header */ + u_int32_t Size; /* FIB size excluding xport header */ + u_int32_t Handle; /* driver handle to reference the FIB */ + u_int64_t Reserved[2]; +} __packed; + +/* + * List structure used to chain FIBs (used by the adapter - we hang FIBs off + * our private command structure and don't touch these) + */ +struct aac_fib_list_entry { + u_int32_t Flink; + u_int32_t Blink; +} __packed; + +/* + * FIB (FSA Interface Block?); this is the datastructure passed between the host + * and adapter. 
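These structures are shared with the firmware, so their packed sizes must not drift; a compile-time sanity check (C11 static_assert, assuming the declarations above are in scope) might look like:

#include <assert.h>     /* static_assert (C11) */

/* 8 + 4 + 4 + 2*8 bytes, per the __packed layout above */
static_assert(sizeof(struct aac_fib_xporthdr) == 32,
    "aac_fib_xporthdr layout changed");
/* two 32-bit links */
static_assert(sizeof(struct aac_fib_list_entry) == 8,
    "aac_fib_list_entry layout changed");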
+ */ +struct aac_fib_header { + u_int32_t XferState; + u_int16_t Command; + u_int8_t StructType; + u_int8_t Unused; + u_int16_t Size; + u_int16_t SenderSize; + u_int32_t SenderFibAddress; + union { + u_int32_t ReceiverFibAddress; + u_int32_t SenderFibAddressHigh; + u_int32_t TimeStamp; + } u; + u_int32_t Handle; + u_int32_t Previous; + u_int32_t Next; +} __packed; + +#define AAC_FIB_DATASIZE (512 - sizeof(struct aac_fib_header)) + +struct aac_fib { + struct aac_fib_header Header; + u_int8_t data[AAC_FIB_DATASIZE]; +} __packed; + +/* + * FIB commands + */ +typedef enum { + TestCommandResponse = 1, + TestAdapterCommand = 2, + + /* lowlevel and comm commands */ + LastTestCommand = 100, + ReinitHostNormCommandQueue = 101, + ReinitHostHighCommandQueue = 102, + ReinitHostHighRespQueue = 103, + ReinitHostNormRespQueue = 104, + ReinitAdapNormCommandQueue = 105, + ReinitAdapHighCommandQueue = 107, + ReinitAdapHighRespQueue = 108, + ReinitAdapNormRespQueue = 109, + InterfaceShutdown = 110, + DmaCommandFib = 120, + StartProfile = 121, + TermProfile = 122, + SpeedTest = 123, + TakeABreakPt = 124, + RequestPerfData = 125, + SetInterruptDefTimer= 126, + SetInterruptDefCount= 127, + GetInterruptDefStatus= 128, + LastCommCommand = 129, + + /* filesystem commands */ + NuFileSystem = 300, + UFS = 301, + HostFileSystem = 302, + LastFileSystemCommand = 303, + + /* Container Commands */ + ContainerCommand = 500, + ContainerCommand64 = 501, + RawIo = 502, + RawIo2 = 503, + + /* Cluster Commands */ + ClusterCommand = 550, + + /* Scsi Port commands (scsi passthrough) */ + ScsiPortCommand = 600, + ScsiPortCommandU64 = 601, + SataPortCommandU64 = 602, + SasSmpPassThrough = 603, + SasRequestPhyInfo = 612, + + /* misc house keeping and generic adapter initiated commands */ + AifRequest = 700, + CheckRevision = 701, + FsaHostShutdown = 702, + RequestAdapterInfo = 703, + IsAdapterPaused = 704, + SendHostTime = 705, + RequestSupplementAdapterInfo = 706, /* Supp. Info for set in UCC + * use only if supported + * (RequestAdapterInfo first) */ + LastMiscCommand = 707, + + OnLineDiagnostic = 800, + FduAdapterTest = 801, + RequestCompatibilityId = 802, + AdapterEnvironmentInfo = 803, /* temp. 
sensors */ + NvsramEventLog = 900, + ResetNvsramEventLogPointers = 901, + EnableEventLog = 902, + DisableEventLog = 903, + EncryptedKeyTransportFIB= 904, + KeyableFeaturesFIB= 905 +} AAC_FibCommands; + +/* + * FIB types + */ +#define AAC_FIBTYPE_TFIB 1 +#define AAC_FIBTYPE_TQE 2 +#define AAC_FIBTYPE_TCTPERF 3 +#define AAC_FIBTYPE_TFIB2 4 +#define AAC_FIBTYPE_TFIB2_64 5 + +/* + * FIB transfer state + */ +#define AAC_FIBSTATE_HOSTOWNED (1<<0) /* owned by the host */ +#define AAC_FIBSTATE_ADAPTEROWNED (1<<1) /* owned by the adapter */ +#define AAC_FIBSTATE_INITIALISED (1<<2) /* initialised */ +#define AAC_FIBSTATE_EMPTY (1<<3) /* empty */ +#define AAC_FIBSTATE_FROMPOOL (1<<4) /* allocated from pool */ +#define AAC_FIBSTATE_FROMHOST (1<<5) /* sent from the host */ +#define AAC_FIBSTATE_FROMADAP (1<<6) /* sent from the adapter */ +#define AAC_FIBSTATE_REXPECTED (1<<7) /* response is expected */ +#define AAC_FIBSTATE_RNOTEXPECTED (1<<8) /* response is not expected */ +#define AAC_FIBSTATE_DONEADAP (1<<9) /* processed by the adapter */ +#define AAC_FIBSTATE_DONEHOST (1<<10) /* processed by the host */ +#define AAC_FIBSTATE_HIGH (1<<11) /* high priority */ +#define AAC_FIBSTATE_NORM (1<<12) /* normal priority */ +#define AAC_FIBSTATE_ASYNC (1<<13) +#define AAC_FIBSTATE_ASYNCIO (1<<13) /* to be removed */ +#define AAC_FIBSTATE_PAGEFILEIO (1<<14) /* to be removed */ +#define AAC_FIBSTATE_SHUTDOWN (1<<15) +#define AAC_FIBSTATE_LAZYWRITE (1<<16) /* to be removed */ +#define AAC_FIBSTATE_ADAPMICROFIB (1<<17) +#define AAC_FIBSTATE_BIOSFIB (1<<18) +#define AAC_FIBSTATE_FAST_RESPONSE (1<<19) /* fast response capable */ +#define AAC_FIBSTATE_APIFIB (1<<20) +#define AAC_FIBSTATE_NOMOREAIF (1<<21) + +/* + * FIB error values + */ +#define AAC_ERROR_NORMAL 0x00 +#define AAC_ERROR_PENDING 0x01 +#define AAC_ERROR_FATAL 0x02 +#define AAC_ERROR_INVALID_QUEUE 0x03 +#define AAC_ERROR_NOENTRIES 0x04 +#define AAC_ERROR_SENDFAILED 0x05 +#define AAC_ERROR_INVALID_QUEUE_PRIORITY 0x06 +#define AAC_ERROR_FIB_ALLOCATION_FAILED 0x07 +#define AAC_ERROR_FIB_DEALLOCATION_FAILED 0x08 + +/* + * Adapter Init Structure: this is passed to the adapter with the + * AAC_MONKER_INITSTRUCT command to point it at our control structures. 
+ */ +struct aac_adapter_init { + u_int32_t InitStructRevision; +#define AAC_INIT_STRUCT_REVISION 3 +#define AAC_INIT_STRUCT_REVISION_4 4 +#define AAC_INIT_STRUCT_REVISION_6 6 +#define AAC_INIT_STRUCT_REVISION_7 7 + u_int32_t MiniPortRevision; +#define AAC_INIT_STRUCT_MINIPORT_REVISION 1 + u_int32_t FilesystemRevision; + u_int32_t CommHeaderAddress; + u_int32_t FastIoCommAreaAddress; + u_int32_t AdapterFibsPhysicalAddress; + u_int32_t AdapterFibsVirtualAddress; + u_int32_t AdapterFibsSize; + u_int32_t AdapterFibAlign; + u_int32_t PrintfBufferAddress; + u_int32_t PrintfBufferSize; +#define AAC_PAGE_SIZE 4096 + u_int32_t HostPhysMemPages; + u_int32_t HostElapsedSeconds; + /* ADAPTER_INIT_STRUCT_REVISION_4 begins here */ + u_int32_t InitFlags; /* flags for supported features */ +#define AAC_INITFLAGS_NEW_COMM_SUPPORTED 1 +#define AAC_INITFLAGS_DRIVER_USES_UTC_TIME 0x10 +#define AAC_INITFLAGS_DRIVER_SUPPORTS_PM 0x20 +#define AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x40 +#define AAC_INITFLAGS_FAST_JBOD_SUPPORTED 0x80 +#define AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x100 + u_int32_t MaxIoCommands; /* max outstanding commands */ + u_int32_t MaxIoSize; /* largest I/O command */ + u_int32_t MaxFibSize; /* largest FIB to adapter */ + /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ + u_int32_t MaxNumAif; /* max number of aif */ + /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ + u_int32_t HostRRQ_AddrLow; + u_int32_t HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */ +} __packed; + +/* + * Shared data types + */ +/* + * Container types + */ +typedef enum { + CT_NONE = 0, + CT_VOLUME, + CT_MIRROR, + CT_STRIPE, + CT_RAID5, + CT_SSRW, + CT_SSRO, + CT_MORPH, + CT_PASSTHRU, + CT_RAID4, + CT_RAID10, /* stripe of mirror */ + CT_RAID00, /* stripe of stripe */ + CT_VOLUME_OF_MIRRORS, /* volume of mirror */ + CT_PSEUDO_RAID3, /* really raid4 */ + CT_RAID50, /* stripe of raid5 */ + CT_RAID5D, /* raid5 distributed hot-sparing */ + CT_RAID5D0, + CT_RAID1E, /* extended raid1 mirroring */ + CT_RAID6, + CT_RAID60, +} AAC_FSAVolType; + +/* + * Host-addressable object types + */ +typedef enum { + FT_REG = 1, /* regular file */ + FT_DIR, /* directory */ + FT_BLK, /* "block" device - reserved */ + FT_CHR, /* "character special" device - reserved */ + FT_LNK, /* symbolic link */ + FT_SOCK, /* socket */ + FT_FIFO, /* fifo */ + FT_FILESYS, /* ADAPTEC's "FSA"(tm) filesystem */ + FT_DRIVE, /* physical disk - addressable in scsi by b/t/l */ + FT_SLICE, /* virtual disk - raw volume - slice */ + FT_PARTITION, /* FSA partition - carved out of a slice - building + * block for containers */ + FT_VOLUME, /* Container - Volume Set */ + FT_STRIPE, /* Container - Stripe Set */ + FT_MIRROR, /* Container - Mirror Set */ + FT_RAID5, /* Container - Raid 5 Set */ + FT_DATABASE /* Storage object with "foreign" content manager */ +} AAC_FType; + +/* + * Host-side scatter/gather list for 32-bit commands. + */ +struct aac_sg_entry { + u_int32_t SgAddress; + u_int32_t SgByteCount; +} __packed; + +struct aac_sg_entry64 { + u_int64_t SgAddress; + u_int32_t SgByteCount; +} __packed; + +struct aac_sg_entryraw { + u_int32_t Next; /* reserved for FW use */ + u_int32_t Prev; /* reserved for FW use */ + u_int64_t SgAddress; + u_int32_t SgByteCount; + u_int32_t Flags; /* reserved for FW use */ +} __packed; + +struct aac_sg_table { + u_int32_t SgCount; + struct aac_sg_entry SgEntry[0]; +} __packed; + +/* + * Host-side scatter/gather list for 64-bit commands. 
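aac_sg_table above, and the 64-bit and raw variants that follow, all use the zero-length trailing array idiom to describe a variable number of entries placed immediately after the count. A small illustration of how such a table is sized and allocated, using a C99 flexible array member and hypothetical names (host-side only, not driver code):

#include <stdint.h>
#include <stdlib.h>

struct sg_entry {
    uint32_t addr;
    uint32_t byte_count;
};

struct sg_table {
    uint32_t count;
    struct sg_entry entry[];        /* C99 flexible array member */
};

/* allocate room for the count plus 'nseg' entries in one block */
static struct sg_table *
sg_table_alloc(uint32_t nseg)
{
    struct sg_table *t;

    t = calloc(1, sizeof(*t) + nseg * sizeof(t->entry[0]));
    if (t != NULL)
        t->count = nseg;
    return (t);
}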
+ */ +struct aac_sg_table64 { + u_int32_t SgCount; + struct aac_sg_entry64 SgEntry64[0]; +} __packed; + +/* + * s/g list for raw commands + */ +struct aac_sg_tableraw { + u_int32_t SgCount; + struct aac_sg_entryraw SgEntryRaw[0]; +} __packed; + +/* + * new ieee1212 s/g element + */ +struct aac_sge_ieee1212 { + u_int32_t addrLow; + u_int32_t addrHigh; + u_int32_t length; + u_int32_t flags; /* always 0 from host side */ +} __packed; + +/* + * Container creation data + */ +struct aac_container_creation { + u_int8_t ViaBuildNumber; + u_int8_t MicroSecond; + u_int8_t Via; /* 1 = FSU, 2 = API, etc. */ + u_int8_t YearsSince1900; + u_int32_t Month:4; /* 1-12 */ + u_int32_t Day:6; /* 1-32 */ + u_int32_t Hour:6; /* 0-23 */ + u_int32_t Minute:6; /* 0-59 */ + u_int32_t Second:6; /* 0-59 */ + u_int64_t ViaAdapterSerialNumber; +} __packed; + +/* + * Revision number handling + */ + +typedef enum { + RevApplication = 1, + RevDkiCli, + RevNetService, + RevApi, + RevFileSysDriver, + RevMiniportDriver, + RevAdapterSW, + RevMonitor, + RevRemoteApi +} RevComponent; + +struct FsaRevision { + union { + struct { + u_int8_t dash; + u_int8_t type; + u_int8_t minor; + u_int8_t major; + } comp; + u_int32_t ul; + } external; + u_int32_t buildNumber; +} __packed; + +/* + * Adapter Information + */ + +typedef enum { + CPU_NTSIM = 1, + CPU_I960, + CPU_ARM, + CPU_SPARC, + CPU_POWERPC, + CPU_ALPHA, + CPU_P7, + CPU_I960_RX, + CPU_MIPS, + CPU_XSCALE, + CPU__last +} AAC_CpuType; + +typedef enum { + CPUI960_JX = 1, + CPUI960_CX, + CPUI960_HX, + CPUI960_RX, + CPUARM_SA110, + CPUARM_xxx, + CPUPPC_603e, + CPUPPC_xxx, + CPUI960_80303, + CPU_XSCALE_80321, + CPU_MIPS_4KC, + CPU_MIPS_5KC, + CPUSUBTYPE__last +} AAC_CpuSubType; + +typedef enum { + PLAT_NTSIM = 1, + PLAT_V3ADU, + PLAT_CYCLONE, + PLAT_CYCLONE_HD, + PLAT_BATBOARD, + PLAT_BATBOARD_HD, + PLAT_YOLO, + PLAT_COBRA, + PLAT_ANAHEIM, + PLAT_JALAPENO, + PLAT_QUEENS, + PLAT_JALAPENO_DELL, + PLAT_POBLANO, + PLAT_POBLANO_OPAL, + PLAT_POBLANO_SL0, + PLAT_POBLANO_SL1, + PLAT_POBLANO_SL2, + PLAT_POBLANO_XXX, + PLAT_JALAPENO_P2, + PLAT_HABANERO, + PLAT_VULCAN, + PLAT_CRUSADER, + PLAT_LANCER, + PLAT_HARRIER, + PLAT_TERMINATOR, + PLAT_SKYHAWK, + PLAT_CORSAIR, + PLAT_JAGUAR, + PLAT_SATAHAWK, + PLAT_SATANATOR, + PLAT_PROWLER, + PLAT_BLACKBIRD, + PLAT_SABREEXPRESS, + PLAT_INTRUDER, + PLAT__last +} AAC_Platform; + +typedef enum { + OEM_FLAVOR_ADAPTEC = 1, + OEM_FLAVOR_DELL, + OEM_FLAVOR_HP, + OEM_FLAVOR_IBM, + OEM_FLAVOR_CPQ, + OEM_FLAVOR_FSC, + OEM_FLAVOR_DWS, + OEM_FLAVOR_BRAND_Z, + OEM_FLAVOR_LEGEND, + OEM_FLAVOR_HITACHI, + OEM_FLAVOR_ESG, + OEM_FLAVOR_ICP, + OEM_FLAVOR_SCM, + OEM_FLAVOR__last +} AAC_OemFlavor; + +/* + * XXX the aac-2622 with no battery present reports PLATFORM_BAT_OPT_PRESENT + */ +typedef enum +{ + PLATFORM_BAT_REQ_PRESENT = 1, /* BATTERY REQUIRED AND PRESENT */ + PLATFORM_BAT_REQ_NOTPRESENT, /* BATTERY REQUIRED AND NOT PRESENT */ + PLATFORM_BAT_OPT_PRESENT, /* BATTERY OPTIONAL AND PRESENT */ + PLATFORM_BAT_OPT_NOTPRESENT, /* BATTERY OPTIONAL AND NOT PRESENT */ + PLATFORM_BAT_NOT_SUPPORTED /* BATTERY NOT SUPPORTED */ +} AAC_BatteryPlatform; + +/* + * options supported by this board + * there has to be a one to one mapping of these defines and the ones in + * fsaapi.h, search for FSA_SUPPORT_SNAPSHOT + */ +#define AAC_SUPPORTED_SNAPSHOT 0x01 +#define AAC_SUPPORTED_CLUSTERS 0x02 +#define AAC_SUPPORTED_WRITE_CACHE 0x04 +#define AAC_SUPPORTED_64BIT_DATA 0x08 +#define AAC_SUPPORTED_HOST_TIME_FIB 0x10 +#define AAC_SUPPORTED_RAID50 0x20 +#define AAC_SUPPORTED_4GB_WINDOW 0x40 
+#define AAC_SUPPORTED_SCSI_UPGRADEABLE 0x80 +#define AAC_SUPPORTED_SOFT_ERR_REPORT 0x100 +#define AAC_SUPPORTED_NOT_RECONDITION 0x200 +#define AAC_SUPPORTED_SGMAP_HOST64 0x400 +#define AAC_SUPPORTED_ALARM 0x800 +#define AAC_SUPPORTED_NONDASD 0x1000 +#define AAC_SUPPORTED_SCSI_MANAGED 0x2000 +#define AAC_SUPPORTED_RAID_SCSI_MODE 0x4000 +#define AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO 0x10000 +#define AAC_SUPPORTED_NEW_COMM 0x20000 +#define AAC_SUPPORTED_64BIT_ARRAYSIZE 0x40000 +#define AAC_SUPPORTED_HEAT_SENSOR 0x80000 +#define AAC_SUPPORTED_NEW_COMM_TYPE1 0x10000000 /* Tupelo new comm */ +#define AAC_SUPPORTED_NEW_COMM_TYPE2 0x20000000 /* Denali new comm */ +#define AAC_SUPPORTED_NEW_COMM_TYPE3 0x40000000 /* Series 8 new comm */ +#define AAC_SUPPORTED_NEW_COMM_TYPE4 0x80000000 /* Series 9 new comm */ + +/* + * Structure used to respond to a RequestAdapterInfo fib. + */ +struct aac_adapter_info { + AAC_Platform PlatformBase; /* adapter type */ + AAC_CpuType CpuArchitecture; /* adapter CPU type */ + AAC_CpuSubType CpuVariant; /* adapter CPU subtype */ + u_int32_t ClockSpeed; /* adapter CPU clockspeed */ + u_int32_t ExecutionMem; /* adapter Execution Memory + * size */ + u_int32_t BufferMem; /* adapter Data Memory */ + u_int32_t TotalMem; /* adapter Total Memory */ + struct FsaRevision KernelRevision; /* adapter Kernel Software + * Revision */ + struct FsaRevision MonitorRevision; /* adapter Monitor/Diagnostic + * Software Revision */ + struct FsaRevision HardwareRevision;/* TBD */ + struct FsaRevision BIOSRevision; /* adapter BIOS Revision */ + u_int32_t ClusteringEnabled; + u_int32_t ClusterChannelMask; + u_int64_t SerialNumber; + AAC_BatteryPlatform batteryPlatform; + u_int32_t SupportedOptions; /* supported features of this + * controller */ + AAC_OemFlavor OemVariant; +} __packed; + +/* + * More options from supplement info - SupportedOptions2 + */ +#define AAC_SUPPORTED_MU_RESET 0x01 +#define AAC_SUPPORTED_IGNORE_RESET 0x02 +#define AAC_SUPPORTED_POWER_MANAGEMENT 0x04 +#define AAC_SUPPORTED_ARCIO_PHYDEV 0x08 +#define AAC_SUPPORTED_DOORBELL_RESET 0x4000 +#define AAC_SUPPORTED_VARIABLE_BLOCK_SIZE 0x40000 /* 4KB sector size */ + +/* + * FeatureBits of RequestSupplementAdapterInfo used in the driver + */ +#define AAC_SUPPL_SUPPORTED_JBOD 0x08000000 + +/* + * Structure used to respond to a RequestSupplementAdapterInfo fib. + */ +struct vpd_info { + u_int8_t AssemblyPn[8]; + u_int8_t FruPn[8]; + u_int8_t BatteryFruPn[8]; + u_int8_t EcVersionString[8]; + u_int8_t Tsid[12]; +} __packed; + +#define MFG_PCBA_SERIAL_NUMBER_WIDTH 12 +#define MFG_WWN_WIDTH 8 + +struct aac_supplement_adapter_info { + /* The assigned Adapter Type Text, extra byte for null termination */ + int8_t AdapterTypeText[17+1]; + /* Pad for the text above */ + int8_t Pad[2]; + /* Size in bytes of the memory that is flashed */ + u_int32_t FlashMemoryByteSize; + /* The assigned IMAGEID_xxx for this adapter */ + u_int32_t FlashImageId; + /* + * The maximum number of Phys available on a SATA/SAS + * Controller, 0 otherwise + */ + u_int32_t MaxNumberPorts; + /* Version of expansion area */ + u_int32_t Version; + u_int32_t FeatureBits; + u_int8_t SlotNumber; + u_int8_t ReservedPad0[3]; + u_int8_t BuildDate[12]; + /* The current number of Ports on a SAS controller, 0 otherwise */ + u_int32_t CurrentNumberPorts; + + struct vpd_info VpdInfo; + + /* Firmware Revision (Vmaj.min-dash.) */ + struct FsaRevision FlashFirmwareRevision; + u_int32_t RaidTypeMorphOptions; + /* Firmware's boot code Revision (Vmaj.min-dash.) 
*/ + struct FsaRevision FlashFirmwareBootRevision; + /* PCBA serial no. from th MFG sector */ + u_int8_t MfgPcbaSerialNo[MFG_PCBA_SERIAL_NUMBER_WIDTH]; + /* WWN from the MFG sector */ + u_int8_t MfgWWNName[MFG_WWN_WIDTH]; + u_int32_t SupportedOptions2; /* more supported features */ + u_int32_t ExpansionFlag; /* 1 - following fields are valid */ + u_int32_t FeatureBits3; + u_int32_t SupportedPerformanceMode; + /* Growth Area for future expansion */ + u_int32_t ReservedGrowth[80]; +} __packed; + +/* + * Monitor/Kernel interface. + */ + +/* + * Synchronous commands to the monitor/kernel. + */ +#define AAC_MONKER_BREAKPOINT 0x04 +#define AAC_MONKER_INITSTRUCT 0x05 +#define AAC_MONKER_SYNCFIB 0x0c +#define AAC_MONKER_GETKERNVER 0x11 +#define AAC_MONKER_POSTRESULTS 0x14 +#define AAC_MONKER_GETINFO 0x19 +#define AAC_MONKER_GETDRVPROP 0x23 +#define AAC_MONKER_RCVTEMP 0x25 +#define AAC_MONKER_GETCOMMPREF 0x26 +#define AAC_MONKER_REINIT 0xee +#define AAC_IOP_RESET 0x1000 +#define AAC_IOP_RESET_ALWAYS 0x1001 + +/* + * Adapter Status Register + * + * Phase Staus mailbox is 32bits: + * <31:16> = Phase Status + * <15:0> = Phase + * + * The adapter reports its present state through the phase. Only + * a single phase should be ever be set. Each phase can have multiple + * phase status bits to provide more detailed information about the + * state of the adapter. + */ +#define AAC_SELF_TEST_FAILED 0x00000004 +#define AAC_MONITOR_PANIC 0x00000020 +#define AAC_UP_AND_RUNNING 0x00000080 +#define AAC_KERNEL_PANIC 0x00000100 + +/* + * Data types relating to control and monitoring of the NVRAM/WriteCache + * subsystem. + */ + +#define AAC_NFILESYS 24 /* maximum number of filesystems */ + +/* + * NVRAM/Write Cache subsystem states + */ +typedef enum { + NVSTATUS_DISABLED = 0, /* present, clean, not being used */ + NVSTATUS_ENABLED, /* present, possibly dirty, ready for use */ + NVSTATUS_ERROR, /* present, dirty, contains dirty data */ + NVSTATUS_BATTERY, /* present, bad or low battery, may contain + * dirty data */ + NVSTATUS_UNKNOWN /* for bad/missing device */ +} AAC_NVSTATUS; + +/* + * NVRAM/Write Cache subsystem battery component states + * + */ +typedef enum { + NVBATTSTATUS_NONE = 0, /* battery has no power or is not present */ + NVBATTSTATUS_LOW, /* battery is low on power */ + NVBATTSTATUS_OK, /* battery is okay - normal operation possible + * only in this state */ + NVBATTSTATUS_RECONDITIONING /* no battery present - reconditioning + * in process */ +} AAC_NVBATTSTATUS; + +/* + * Battery transition type + */ +typedef enum { + NVBATT_TRANSITION_NONE = 0, /* battery now has no power or is not + * present */ + NVBATT_TRANSITION_LOW, /* battery is now low on power */ + NVBATT_TRANSITION_OK /* battery is now okay - normal + * operation possible only in this + * state */ +} AAC_NVBATT_TRANSITION; + +/* + * NVRAM Info structure returned for NVRAM_GetInfo call + */ +struct aac_nvramdevinfo { + u_int32_t NV_Enabled; /* write caching enabled */ + u_int32_t NV_Error; /* device in error state */ + u_int32_t NV_NDirty; /* count of dirty NVRAM buffers */ + u_int32_t NV_NActive; /* count of NVRAM buffers being + * written */ +} __packed; + +struct aac_nvraminfo { + AAC_NVSTATUS NV_Status; /* nvram subsystem status */ + AAC_NVBATTSTATUS NV_BattStatus; /* battery status */ + u_int32_t NV_Size; /* size of WriteCache NVRAM in + * bytes */ + u_int32_t NV_BufSize; /* size of NVRAM buffers in + * bytes */ + u_int32_t NV_NBufs; /* number of NVRAM buffers */ + u_int32_t NV_NDirty; /* Num dirty NVRAM buffers */ + u_int32_t 
NV_NClean; /* Num clean NVRAM buffers */ + u_int32_t NV_NActive; /* Num NVRAM buffers being + * written */ + u_int32_t NV_NBrokered; /* Num brokered NVRAM buffers */ + struct aac_nvramdevinfo NV_DevInfo[AAC_NFILESYS]; /* per device + * info */ + u_int32_t NV_BattNeedsReconditioning; /* boolean */ + u_int32_t NV_TotalSize; /* size of all non-volatile + * memories in bytes */ +} __packed; + +/* + * Data types relating to adapter-initiated FIBs + * + * Based on types and structures in + */ + +/* + * Progress Reports + */ +typedef enum { + AifJobStsSuccess = 1, + AifJobStsFinished, + AifJobStsAborted, + AifJobStsFailed, + AifJobStsLastReportMarker = 100, /* All prior mean last report */ + AifJobStsSuspended, + AifJobStsRunning +} AAC_AifJobStatus; + +typedef enum { + AifJobScsiMin = 1, /* Minimum value for Scsi operation */ + AifJobScsiZero, /* SCSI device clear operation */ + AifJobScsiVerify, /* SCSI device Verify operation NO + * REPAIR */ + AifJobScsiExercise, /* SCSI device Exercise operation */ + AifJobScsiVerifyRepair, /* SCSI device Verify operation WITH + * repair */ + AifJobScsiWritePattern, /* write pattern */ + AifJobScsiMax = 99, /* Max Scsi value */ + AifJobCtrMin, /* Min Ctr op value */ + AifJobCtrZero, /* Container clear operation */ + AifJobCtrCopy, /* Container copy operation */ + AifJobCtrCreateMirror, /* Container Create Mirror operation */ + AifJobCtrMergeMirror, /* Container Merge Mirror operation */ + AifJobCtrScrubMirror, /* Container Scrub Mirror operation */ + AifJobCtrRebuildRaid5, /* Container Rebuild Raid5 operation */ + AifJobCtrScrubRaid5, /* Container Scrub Raid5 operation */ + AifJobCtrMorph, /* Container morph operation */ + AifJobCtrPartCopy, /* Container Partition copy operation */ + AifJobCtrRebuildMirror, /* Container Rebuild Mirror operation */ + AifJobCtrCrazyCache, /* crazy cache */ + AifJobCtrCopyback, /* Container Copyback operation */ + AifJobCtrCompactRaid5D, /* Container Compaction operation */ + AifJobCtrExpandRaid5D, /* Container Expansion operation */ + AifJobCtrRebuildRaid6, /* Container Rebuild Raid6 operation */ + AifJobCtrScrubRaid6, /* Container Scrub Raid6 operation */ + AifJobCtrSSBackup, /* Container snapshot backup task */ + AifJobCtrMax = 199, /* Max Ctr type operation */ + AifJobFsMin, /* Min Fs type operation */ + AifJobFsCreate, /* File System Create operation */ + AifJobFsVerify, /* File System Verify operation */ + AifJobFsExtend, /* File System Extend operation */ + AifJobFsMax = 299, /* Max Fs type operation */ + AifJobApiFormatNTFS, /* Format a drive to NTFS */ + AifJobApiFormatFAT, /* Format a drive to FAT */ + AifJobApiUpdateSnapshot, /* update the read/write half of a + * snapshot */ + AifJobApiFormatFAT32, /* Format a drive to FAT32 */ + AifJobApiMax = 399, /* Max API type operation */ + AifJobCtlContinuousCtrVerify, /* Adapter operation */ + AifJobCtlMax = 499 /* Max Adapter type operation */ +} AAC_AifJobType; + +struct aac_AifContainers { + u_int32_t src; /* from/master */ + u_int32_t dst; /* to/slave */ +} __packed; + +union aac_AifJobClient { + struct aac_AifContainers container; /* For Container and + * filesystem progress + * ops; */ + int32_t scsi_dh; /* For SCSI progress + * ops */ +}; + +struct aac_AifJobDesc { + u_int32_t jobID; /* DO NOT FILL IN! 
Will be + * filled in by AIF */ + AAC_AifJobType type; /* Operation that is being + * performed */ + union aac_AifJobClient client; /* Details */ +} __packed; + +struct aac_AifJobProgressReport { + struct aac_AifJobDesc jd; + AAC_AifJobStatus status; + u_int32_t finalTick; + u_int32_t currentTick; + u_int32_t jobSpecificData1; + u_int32_t jobSpecificData2; +} __packed; + +/* + * Event Notification + */ +typedef enum { + /* General application notifies start here */ + AifEnGeneric = 1, /* Generic notification */ + AifEnTaskComplete, /* Task has completed */ + AifEnConfigChange, /* Adapter config change occurred */ + AifEnContainerChange, /* Adapter specific container + * configuration change */ + AifEnDeviceFailure, /* SCSI device failed */ + AifEnMirrorFailover, /* Mirror failover started */ + AifEnContainerEvent, /* Significant container event */ + AifEnFileSystemChange, /* File system changed */ + AifEnConfigPause, /* Container pause event */ + AifEnConfigResume, /* Container resume event */ + AifEnFailoverChange, /* Failover space assignment changed */ + AifEnRAID5RebuildDone, /* RAID5 rebuild finished */ + AifEnEnclosureManagement, /* Enclosure management event */ + AifEnBatteryEvent, /* Significant NV battery event */ + AifEnAddContainer, /* A new container was created. */ + AifEnDeleteContainer, /* A container was deleted. */ + AifEnSMARTEvent, /* SMART Event */ + AifEnBatteryNeedsRecond, /* The battery needs reconditioning */ + AifEnClusterEvent, /* Some cluster event */ + AifEnDiskSetEvent, /* A disk set event occured. */ + AifEnContainerScsiEvent, /* a container event with no. and scsi id */ + AifEnPicBatteryEvent, /* An event gen. by pic_battery.c for an ABM */ + AifEnExpEvent, /* Exp. Event Type to replace CTPopUp messages */ + AifEnRAID6RebuildDone, /* RAID6 rebuild finished */ + AifEnSensorOverHeat, /* Heat Sensor indicate overheat */ + AifEnSensorCoolDown, /* Heat Sensor ind. cooled down after overheat */ + AifFeatureKeysModified, /* notif. of updated feature keys */ + AifApplicationExpirationEvent, /* notif. on app. expiration status */ + AifEnBackgroundConsistencyCheck,/* BCC notif. for NEC - DDTS 94700 */ + AifEnAddJBOD, /* A new JBOD type drive was created (30) */ + AifEnDeleteJBOD, /* A JBOD type drive was deleted (31) */ + AifDriverNotifyStart=199, /* Notifies for host driver go here */ + /* Host driver notifications start here */ + AifDenMorphComplete, /* A morph operation completed */ + AifDenVolumeExtendComplete, /* Volume expand operation completed */ + AifDriverNotifyDelay, + AifRawDeviceRemove /* Raw device Failure event */ +} AAC_AifEventNotifyType; + +struct aac_AifEnsGeneric { + char text[132]; /* Generic text */ +} __packed; + +struct aac_AifEnsDeviceFailure { + u_int32_t deviceHandle; /* SCSI device handle */ +} __packed; + +struct aac_AifEnsMirrorFailover { + u_int32_t container; /* Container with failed element */ + u_int32_t failedSlice; /* Old slice which failed */ + u_int32_t creatingSlice; /* New slice used for auto-create */ +} __packed; + +struct aac_AifEnsContainerChange { + u_int32_t container[2]; /* container that changed, -1 if no + * container */ +} __packed; + +struct aac_AifEnsContainerEvent { + u_int32_t container; /* container number */ + u_int32_t eventType; /* event type */ +} __packed; + +struct aac_AifEnsEnclosureEvent { + u_int32_t empID; /* enclosure management proc number */ + u_int32_t unitID; /* unitId, fan id, power supply id, + * slot id, tempsensor id. 
*/ + u_int32_t eventType; /* event type */ +} __packed; + +typedef enum { + AIF_EM_DRIVE_INSERTION=31, + AIF_EM_DRIVE_REMOVAL +} aac_AifEMEventType; + +struct aac_AifEnsBatteryEvent { + AAC_NVBATT_TRANSITION transition_type; /* eg from low to ok */ + AAC_NVBATTSTATUS current_state; /* current batt state */ + AAC_NVBATTSTATUS prior_state; /* prev batt state */ +} __packed; + +struct aac_AifEnsDiskSetEvent { + u_int32_t eventType; + u_int64_t DsNum; + u_int64_t CreatorId; +} __packed; + +typedef enum { + CLUSTER_NULL_EVENT = 0, + CLUSTER_PARTNER_NAME_EVENT, /* change in partner hostname or + * adaptername from NULL to non-NULL */ + /* (partner's agent may be up) */ + CLUSTER_PARTNER_NULL_NAME_EVENT /* change in partner hostname or + * adaptername from non-null to NULL */ + /* (partner has rebooted) */ +} AAC_ClusterAifEvent; + +struct aac_AifEnsClusterEvent { + AAC_ClusterAifEvent eventType; +} __packed; + +struct aac_AifEventNotify { + AAC_AifEventNotifyType type; + union { + struct aac_AifEnsGeneric EG; + struct aac_AifEnsDeviceFailure EDF; + struct aac_AifEnsMirrorFailover EMF; + struct aac_AifEnsContainerChange ECC; + struct aac_AifEnsContainerEvent ECE; + struct aac_AifEnsEnclosureEvent EEE; + struct aac_AifEnsBatteryEvent EBE; + struct aac_AifEnsDiskSetEvent EDS; +/* struct aac_AifEnsSMARTEvent ES;*/ + struct aac_AifEnsClusterEvent ECLE; + } data; +} __packed; + +/* + * Adapter Initiated FIB command structures. Start with the adapter + * initiated FIBs that really come from the adapter, and get responded + * to by the host. + */ +#define AAC_AIF_REPORT_MAX_SIZE 64 + +typedef enum { + AifCmdEventNotify = 1, /* Notify of event */ + AifCmdJobProgress, /* Progress report */ + AifCmdAPIReport, /* Report from other user of API */ + AifCmdDriverNotify, /* Notify host driver of event */ + AifReqJobList = 100, /* Gets back complete job list */ + AifReqJobsForCtr, /* Gets back jobs for specific container */ + AifReqJobsForScsi, /* Gets back jobs for specific SCSI device */ + AifReqJobReport, /* Gets back a specific job report or list */ + AifReqTerminateJob, /* Terminates job */ + AifReqSuspendJob, /* Suspends a job */ + AifReqResumeJob, /* Resumes a job */ + AifReqSendAPIReport, /* API generic report requests */ + AifReqAPIJobStart, /* Start a job from the API */ + AifReqAPIJobUpdate, /* Update a job report from the API */ + AifReqAPIJobFinish, /* Finish a job from the API */ + AifReqEvent = 200 /* PMC NEW COMM: Request the event data */ +} AAC_AifCommand; + +struct aac_aif_command { + AAC_AifCommand command; /* Tell host what type of + * notify this is */ + u_int32_t seqNumber; /* To allow ordering of + * reports (if necessary) */ + union { + struct aac_AifEventNotify EN; /* Event notify */ + struct aac_AifJobProgressReport PR[1]; /* Progress report */ + u_int8_t AR[AAC_AIF_REPORT_MAX_SIZE]; + u_int8_t data[AAC_FIB_DATASIZE - 8]; + } data; +} __packed; + +/* + * Filesystem commands/data + * + * The adapter has a very complex filesystem interface, most of which we ignore. + * (And which seems not to be implemented, anyway.) + */ + +/* + * FSA commands + * (not used?) 
+ */ +typedef enum { + Null = 0, + GetAttributes, + SetAttributes, + Lookup, + ReadLink, + Read, + Write, + Create, + MakeDirectory, + SymbolicLink, + MakeNode, + Removex, + RemoveDirectory, + Rename, + Link, + ReadDirectory, + ReadDirectoryPlus, + FileSystemStatus, + FileSystemInfo, + PathConfigure, + Commit, + Mount, + UnMount, + Newfs, + FsCheck, + FsSync, + SimReadWrite, + SetFileSystemStatus, + BlockRead, + BlockWrite, + NvramIoctl, + FsSyncWait, + ClearArchiveBit, + SetAcl, + GetAcl, + AssignAcl, + FaultInsertion, + CrazyCache +} AAC_FSACommand; + +/* + * Command status values + */ +typedef enum { + ST_OK = 0, + ST_PERM = 1, + ST_NOENT = 2, + ST_IO = 5, + ST_NXIO = 6, + ST_E2BIG = 7, + ST_ACCES = 13, + ST_EXIST = 17, + ST_XDEV = 18, + ST_NODEV = 19, + ST_NOTDIR = 20, + ST_ISDIR = 21, + ST_INVAL = 22, + ST_FBIG = 27, + ST_NOSPC = 28, + ST_ROFS = 30, + ST_MLINK = 31, + ST_WOULDBLOCK = 35, + ST_NAMETOOLONG = 63, + ST_NOTEMPTY = 66, + ST_DQUOT = 69, + ST_STALE = 70, + ST_REMOTE = 71, + ST_NOT_READY = 72, + ST_BADHANDLE = 10001, + ST_NOT_SYNC = 10002, + ST_BAD_COOKIE = 10003, + ST_NOTSUPP = 10004, + ST_TOOSMALL = 10005, + ST_SERVERFAULT = 10006, + ST_BADTYPE = 10007, + ST_JUKEBOX = 10008, + ST_NOTMOUNTED = 10009, + ST_MAINTMODE = 10010, + ST_STALEACL = 10011, + ST_BUS_RESET = 20001 +} AAC_FSAStatus; + +/* + * Volume manager commands + */ +typedef enum _VM_COMMANDS { + VM_Null = 0, + VM_NameServe, /* query for mountable objects (containers) */ + VM_ContainerConfig, + VM_Ioctl, + VM_FilesystemIoctl, + VM_CloseAll, + VM_CtBlockRead, + VM_CtBlockWrite, + VM_SliceBlockRead, /* raw access to configured "storage objects" */ + VM_SliceBlockWrite, + VM_DriveBlockRead, /* raw access to physical devices */ + VM_DriveBlockWrite, + VM_EnclosureMgt, /* enclosure management */ + VM_Unused, /* used to be diskset management */ + VM_CtBlockVerify, + VM_CtPerf, /* performance test */ + VM_CtBlockRead64, + VM_CtBlockWrite64, + VM_CtBlockVerify64, + VM_CtHostRead64, + VM_CtHostWrite64, + VM_DrvErrTblLog, /* drive error table/log type of command */ + VM_NameServe64, /* query also for containers >2TB */ + VM_SasNvsramAccess, /* for sas nvsram layout function */ + VM_HandleExpiration, /* handles application expiration, internal use! */ + VM_GetDynAdapProps, /* retrieves dynamic adapter properties */ + VM_SetDynAdapProps, /* sets a dynamic adapter property */ + VM_UpdateSSDODM, /* updates the on-disk metadata for SSD caching */ + VM_GetSPMParameters, /* get SPM parameters for one of the perf. modes */ + VM_SetSPMParameters, /* set SPM parameters for user defined perf. mode */ + VM_NameServeAllBlk, /* query also for containers with 4KB sector size */ + MAX_VMCOMMAND_NUM /* used for sizing stats array - leave last */ +} AAC_VMCommand; + +/* Container Configuration Sub-Commands */ +#define CT_GET_SCSI_METHOD 64 +#define CT_PAUSE_IO 65 +#define CT_RELEASE_IO 66 +#define CT_GET_CONFIG_STATUS 147 +#define CT_COMMIT_CONFIG 152 +#define CT_CID_TO_32BITS_UID 165 +#define CT_PM_DRIVER_SUPPORT 245 + +/* CT_PM_DRIVER_SUPPORT parameter */ +typedef enum { + AAC_PM_DRIVERSUP_GET_STATUS = 1, + AAC_PM_DRIVERSUP_START_UNIT, + AAC_PM_DRIVERSUP_STOP_UNIT +} AAC_CT_PM_DRIVER_SUPPORT_SUB_COM; + +/* + * CT_PAUSE_IO is immediate minimal runtime command that is used + * to restart the applications and cache. 
+ */ +struct aac_pause_command { + u_int32_t Command; + u_int32_t Type; + u_int32_t Timeout; + u_int32_t Min; + u_int32_t NoRescan; + u_int32_t Parm3; + u_int32_t Parm4; + u_int32_t Count; +} __packed; + +/* Flag values for ContentState */ +#define AAC_FSCS_NOTCLEAN 0x1 /* fscheck is necessary before mounting */ +#define AAC_FSCS_READONLY 0x2 /* possible result of broken mirror */ +#define AAC_FSCS_HIDDEN 0x4 /* container should be ignored by driver */ +#define AAC_FSCS_NOT_READY 0x8 /* cnt is in spinn. state, not rdy for IO's */ + +/* + * "mountable object" + */ +struct aac_mntobj { + u_int32_t ObjectId; + char FileSystemName[16]; + struct aac_container_creation CreateInfo; + u_int32_t Capacity; + u_int32_t VolType; + u_int32_t ObjType; + u_int32_t ContentState; + union { + u_int32_t pad[8]; + u_int32_t BlockSize; + } ObjExtension; + u_int32_t AlterEgoId; + u_int32_t CapacityHigh; +} __packed; + +struct aac_mntinfo { + u_int32_t Command; + u_int32_t MntType; + u_int32_t MntCount; +} __packed; + +struct aac_mntinforesp { + u_int32_t Status; + u_int32_t MntType; + u_int32_t MntRespCount; + struct aac_mntobj MntTable[1]; +} __packed; + +/* + * Container shutdown command. + */ +struct aac_closecommand { + u_int32_t Command; + u_int32_t ContainerId; +} __packed; + +/* + * Container Config Command + */ +struct aac_ctcfg { + u_int32_t Command; + u_int32_t cmd; + u_int32_t param; +} __packed; + +struct aac_ctcfg_resp { + u_int32_t Status; + u_int32_t resp; + u_int32_t param; +} __packed; + +/* + * 'Ioctl' commads + */ +#define AAC_SCSI_MAX_PORTS 10 +#define AAC_BUS_NO_EXIST 0 +#define AAC_BUS_VALID 1 +#define AAC_BUS_FAULTED 2 +#define AAC_BUS_DISABLED 3 +#define GetBusInfo 0x9 + +struct aac_getbusinf { + u_int32_t ProbeComplete; + u_int32_t BusCount; + u_int32_t TargetsPerBus; + u_int8_t InitiatorBusId[AAC_SCSI_MAX_PORTS]; + u_int8_t BusValid[AAC_SCSI_MAX_PORTS]; +} __packed; + +struct aac_vmioctl { + u_int32_t Command; + u_int32_t ObjType; + u_int32_t MethId; + u_int32_t ObjId; + u_int32_t IoctlCmd; + u_int32_t IoctlBuf[1]; /* Placeholder? */ +} __packed; + +struct aac_vmi_businf_resp { + u_int32_t Status; + u_int32_t ObjType; + u_int32_t MethId; + u_int32_t ObjId; + u_int32_t IoctlCmd; + struct aac_getbusinf BusInf; +} __packed; + +struct aac_vmi_devinfo_resp { + u_int32_t Status; + u_int32_t ObjType; + u_int32_t MethId; + u_int32_t ObjId; + u_int32_t IoctlCmd; + u_int8_t VendorId[8]; + u_int8_t ProductId[16]; + u_int8_t ProductRev[4]; + u_int32_t Inquiry7; + u_int32_t align1; + u_int32_t Inquiry0; + u_int32_t align2; + u_int32_t Inquiry1; + u_int32_t align3; + u_int32_t reserved[2]; + u_int8_t VendorSpecific[20]; + u_int32_t Smart:1; + u_int32_t AAC_Managed:1; + u_int32_t align4; + u_int32_t reserved2:6; + u_int32_t Bus; + u_int32_t Target; + u_int32_t Lun; + u_int32_t ultraEnable:1, + disconnectEnable:1, + fast20EnabledW:1, + scamDevice:1, + scamTolerant:1, + setForSync:1, + setForWide:1, + syncDevice:1, + wideDevice:1, + reserved1:7, + ScsiRate:8, + ScsiOffset:8; +}; /* Do not pack */ + +#define ResetBus 0x16 +struct aac_resetbus { + u_int32_t BusNumber; +}; + +/* + * Write 'stability' options. + */ +typedef enum { + CSTABLE = 1, + CUNSTABLE +} AAC_CacheLevel; + +/* + * Commit level response for a write request. 
+ */ +typedef enum { + CMFILE_SYNC_NVRAM = 1, + CMDATA_SYNC_NVRAM, + CMFILE_SYNC, + CMDATA_SYNC, + CMUNSTABLE +} AAC_CommitLevel; + + +#define CT_FIB_PARAMS 6 +#define MAX_FIB_PARAMS 10 +#define CT_PACKET_SIZE \ + (AAC_FIB_DATASIZE - sizeof (u_int32_t) - \ + ((sizeof (u_int32_t)) * (MAX_FIB_PARAMS + 1))) + +struct aac_fsa_ctm { + u_int32_t command; + u_int32_t param[CT_FIB_PARAMS]; + int8_t data[CT_PACKET_SIZE]; +}; + +struct aac_cnt_config { + u_int32_t Command; + struct aac_fsa_ctm CTCommand; +}; + +/* + * Block read/write operations. + * These structures are packed into the 'data' area in the FIB. + */ + +struct aac_blockread { + u_int32_t Command; /* not FSACommand! */ + u_int32_t ContainerId; + u_int32_t BlockNumber; + u_int32_t ByteCount; + struct aac_sg_table SgMap; /* variable size */ +} __packed; + +struct aac_blockread64 { + u_int32_t Command; + u_int16_t ContainerId; + u_int16_t SectorCount; + u_int32_t BlockNumber; + u_int16_t Pad; + u_int16_t Flags; + struct aac_sg_table64 SgMap64; +} __packed; + +struct aac_blockread_response { + u_int32_t Status; + u_int32_t ByteCount; +} __packed; + +struct aac_blockwrite { + u_int32_t Command; /* not FSACommand! */ + u_int32_t ContainerId; + u_int32_t BlockNumber; + u_int32_t ByteCount; + u_int32_t Stable; + struct aac_sg_table SgMap; /* variable size */ +} __packed; + +struct aac_blockwrite64 { + u_int32_t Command; /* not FSACommand! */ + u_int16_t ContainerId; + u_int16_t SectorCount; + u_int32_t BlockNumber; + u_int16_t Pad; + u_int16_t Flags; + struct aac_sg_table64 SgMap64; /* variable size */ +} __packed; + +struct aac_blockwrite_response { + u_int32_t Status; + u_int32_t ByteCount; + u_int32_t Committed; +} __packed; + +struct aac_raw_io { + u_int64_t BlockNumber; + u_int32_t ByteCount; + u_int16_t ContainerId; + u_int16_t Flags; /* 0: W, 1: R */ + u_int16_t BpTotal; /* reserved for FW use */ + u_int16_t BpComplete; /* reserved for FW use */ + struct aac_sg_tableraw SgMapRaw; /* variable size */ +} __packed; + +#define RIO2_IO_TYPE 0x0003 +#define RIO2_IO_TYPE_WRITE 0x0000 +#define RIO2_IO_TYPE_READ 0x0001 +#define RIO2_IO_TYPE_VERIFY 0x0002 +#define RIO2_IO_ERROR 0x0004 +#define RIO2_IO_SUREWRITE 0x0008 +#define RIO2_SGL_CONFORMANT 0x0010 +#define RIO2_SG_FORMAT 0xF000 +#define RIO2_SG_FORMAT_ARC 0x0000 +#define RIO2_SG_FORMAT_SRL 0x1000 +#define RIO2_SG_FORMAT_IEEE1212 0x2000 +struct aac_raw_io2 { + u_int32_t strtBlkLow; + u_int32_t strtBlkHigh; + u_int32_t byteCnt; + u_int16_t ldNum; + u_int16_t flags; /* RIO2_xxx */ + u_int32_t sgeFirstSize; /* size of first SG element */ + u_int32_t sgeNominalSize; /* size of 2nd SG element */ + u_int8_t sgeCnt; + u_int8_t bpTotal; /* reserved for FW use */ + u_int8_t bpComplete; /* reserved for FW use */ + u_int8_t sgeFirstIndex; /* reserved for FW use */ + u_int8_t unused[4]; + struct aac_sge_ieee1212 sge[0]; /* variable size */ +} __packed; + +/* + * Container shutdown command. 
+ */ +struct aac_close_command { + u_int32_t Command; + u_int32_t ContainerId; +} __packed; + +/* + * SCSI Passthrough structures + */ +struct aac_srb { + u_int32_t function; + u_int32_t bus; + u_int32_t target; + u_int32_t lun; + u_int32_t timeout; + u_int32_t flags; + u_int32_t data_len; + u_int32_t retry_limit; + u_int32_t cdb_len; + u_int8_t cdb[16]; + struct aac_sg_table sg_map; +} __packed; + +enum { + AAC_SRB_FUNC_EXECUTE_SCSI = 0x00, + AAC_SRB_FUNC_CLAIM_DEVICE, + AAC_SRB_FUNC_IO_CONTROL, + AAC_SRB_FUNC_RECEIVE_EVENT, + AAC_SRB_FUNC_RELEASE_QUEUE, + AAC_SRB_FUNC_ATTACH_DEVICE, + AAC_SRB_FUNC_RELEASE_DEVICE, + AAC_SRB_FUNC_SHUTDOWN, + AAC_SRB_FUNC_FLUSH, + AAC_SRB_FUNC_ABORT_COMMAND = 0x10, + AAC_SRB_FUNC_RELEASE_RECOVERY, + AAC_SRB_FUNC_RESET_BUS, + AAC_SRB_FUNC_RESET_DEVICE, + AAC_SRB_FUNC_TERMINATE_IO, + AAC_SRB_FUNC_FLUSH_QUEUE, + AAC_SRB_FUNC_REMOVE_DEVICE, + AAC_SRB_FUNC_DOMAIN_VALIDATION +}; + +#define AAC_SRB_FLAGS_NO_DATA_XFER 0x0000 +#define AAC_SRB_FLAGS_DISABLE_DISCONNECT 0x0004 +#define AAC_SRB_FLAGS_DISABLE_SYNC_TRANSFER 0x0008 +#define AAC_SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x0010 +#define AAC_SRB_FLAGS_DISABLE_AUTOSENSE 0x0020 +#define AAC_SRB_FLAGS_DATA_IN 0x0040 +#define AAC_SRB_FLAGS_DATA_OUT 0x0080 +#define AAC_SRB_FLAGS_UNSPECIFIED_DIRECTION \ + (AAC_SRB_FLAGS_DATA_IN | AAC_SRB_FLAGS_DATA_OUT) + +#define AAC_HOST_SENSE_DATA_MAX 30 + +struct aac_srb_response { + u_int32_t fib_status; + u_int32_t srb_status; + u_int32_t scsi_status; + u_int32_t data_len; + u_int32_t sense_len; + u_int8_t sense[AAC_HOST_SENSE_DATA_MAX]; +} __packed; + +/* + * Status codes for SCSI passthrough commands. Since they are based on ASPI, + * they also exactly match CAM status codes in both enumeration and meaning. + * They seem to also be used as status codes for synchronous FIBs. + */ +enum { + AAC_SRB_STS_PENDING = 0x00, + AAC_SRB_STS_SUCCESS, + AAC_SRB_STS_ABORTED, + AAC_SRB_STS_ABORT_FAILED, + AAC_SRB_STS_ERROR, + AAC_SRB_STS_BUSY, + AAC_SRB_STS_INVALID_REQUEST, + AAC_SRB_STS_INVALID_PATH_ID, + AAC_SRB_STS_NO_DEVICE, + AAC_SRB_STS_TIMEOUT, + AAC_SRB_STS_SELECTION_TIMEOUT, + AAC_SRB_STS_COMMAND_TIMEOUT, + AAC_SRB_STS_MESSAGE_REJECTED = 0x0D, + AAC_SRB_STS_BUS_RESET, + AAC_SRB_STS_PARITY_ERROR, + AAC_SRB_STS_REQUEST_SENSE_FAILED, + AAC_SRB_STS_NO_HBA, + AAC_SRB_STS_DATA_OVERRUN, + AAC_SRB_STS_UNEXPECTED_BUS_FREE, + AAC_SRB_STS_PHASE_SEQUENCE_FAILURE, + AAC_SRB_STS_BAD_SRB_BLOCK_LENGTH, + AAC_SRB_STS_REQUEST_FLUSHED, + AAC_SRB_STS_INVALID_LUN = 0x20, + AAC_SRB_STS_INVALID_TARGET_ID, + AAC_SRB_STS_BAD_FUNCTION, + AAC_SRB_STS_ERROR_RECOVERY +}; + +/* + * Register definitions for the Adaptec PMC SRC/SRCv adapters. 
+ */ +/* accessible via BAR0 */ +#define AAC_SRC_OMR 0xbc /* outbound message register */ +#define AAC_SRC_IDBR 0x20 /* inbound doorbell register */ +#define AAC_SRC_IISR 0x24 /* inbound interrupt status register */ +#define AAC_SRC_ODBR_R 0x9c /* outbound doorbell register read */ +#define AAC_SRC_ODBR_C 0xa0 /* outbound doorbell register clear */ +#define AAC_SRC_OIMR 0x34 /* outbound interrupt mask register */ +#define AAC_SRC_IQUE32 0x40 /* inbound queue address 32-bit */ +#define AAC_SRC_IQUE64_L 0xc0 /* inbound queue address 64-bit (low) */ +#define AAC_SRC_IQUE64_H 0xc4 /* inbound queue address 64-bit (high) */ + +#define AAC_SRC_MAILBOX 0x7fc60 /* mailbox (20 bytes) */ +#define AAC_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */ + +#define AAC_SRC_ODR_SHIFT 12 /* outbound doorbell shift */ +#define AAC_SRC_IDR_SHIFT 9 /* inbound doorbell shift */ + +/* Sunrise Lake dual core reset */ +#define AAC_IRCSR 0x38 /* inbound dual cores reset */ +#define AAC_IRCSR_CORES_RST 3 + + +/* + * Common bit definitions for the doorbell registers. + */ + +/* + * Status bits in the doorbell registers. + */ +#define AAC_DB_SYNC_COMMAND (1<<0) /* send/completed synchronous FIB */ +#define AAC_DB_COMMAND_READY (1<<1) /* posted one or more commands */ +#define AAC_DB_RESPONSE_READY (1<<2) /* one or more commands complete */ +#define AAC_DB_COMMAND_NOT_FULL (1<<3) /* command queue not full */ +#define AAC_DB_RESPONSE_NOT_FULL (1<<4) /* response queue not full */ +#define AAC_DB_AIF_PENDING (1<<6) /* pending AIF (new comm. type1) */ +/* PMC specific outbound doorbell bits */ +#define AAC_DB_RESPONSE_SENT_NS (1<<1) /* response sent (not shifted) */ + +/* + * The adapter can request the host print a message by setting the + * DB_PRINTF flag in DOORBELL0. The driver responds by collecting the + * message from the printf buffer, clearing the DB_PRINTF flag in + * DOORBELL0 and setting it in DOORBELL1. + * (ODBR and IDBR respectively for the i960Rx adapters) + */ +#define AAC_DB_PRINTF (1<<5) /* adapter requests host printf */ +#define AAC_PRINTF_DONE (1<<5) /* Host completed printf processing */ + +/* + * Mask containing the interrupt bits we care about. We don't anticipate (or + * want) interrupts not in this mask. + */ +#define AAC_DB_INTERRUPTS (AAC_DB_COMMAND_READY | \ + AAC_DB_RESPONSE_READY | \ + AAC_DB_PRINTF) +#define AAC_DB_INT_NEW_COMM 0x08 +#define AAC_DB_INT_NEW_COMM_TYPE1 0x04 diff --git a/sys/dev/aacraid/aacraid_var.h b/sys/dev/aacraid/aacraid_var.h new file mode 100644 index 0000000..07546ff --- /dev/null +++ b/sys/dev/aacraid/aacraid_var.h @@ -0,0 +1,663 @@ +/*- + * Copyright (c) 2000 Michael Smith + * Copyright (c) 2001 Scott Long + * Copyright (c) 2000 BSDi + * Copyright (c) 2001-2010 Adaptec, Inc. + * Copyright (c) 2010-2012 PMC-Sierra, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#if __FreeBSD_version >= 800000 +#include +#endif +#include +#include +#include +#include +#include + +#define AAC_TYPE_DEVO 1 +#define AAC_TYPE_ALPHA 2 +#define AAC_TYPE_BETA 3 +#define AAC_TYPE_RELEASE 4 + +#define AAC_DRIVER_MAJOR_VERSION 3 +#define AAC_DRIVER_MINOR_VERSION 1 +#define AAC_DRIVER_BUGFIX_LEVEL 1 +#define AAC_DRIVER_TYPE AAC_TYPE_RELEASE + +#ifndef AAC_DRIVER_BUILD +# define AAC_DRIVER_BUILD 1 +#endif + +#if __FreeBSD_version <= 601000 +#define bus_get_dma_tag(x) NULL +#endif + +/* **************************** NewBUS interrupt Crock ************************/ +#if __FreeBSD_version < 700031 +#define aac_bus_setup_intr(d, i, f, U, if, ifa, hp) \ + bus_setup_intr(d, i, f, if, ifa, hp) +#else +#define aac_bus_setup_intr bus_setup_intr +#endif + +/* **************************** NewBUS CAM Support ****************************/ +#if __FreeBSD_version < 700049 +#define aac_xpt_bus_register(sim, parent, bus) \ + xpt_bus_register(sim, bus) +#else +#define aac_xpt_bus_register xpt_bus_register +#endif + +/**************************** Kernel Thread Support ***************************/ +#if __FreeBSD_version > 800001 +#define aac_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ + kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) +#define aac_kthread_exit(status) \ + kproc_exit(status) +#else +#define aac_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ + kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) +#define aac_kthread_exit(status) \ + kthread_exit(status) +#endif + +/* + * Driver Parameter Definitions + */ + +/* + * We allocate a small set of FIBs for the adapter to use to send us messages. + */ +#define AAC_ADAPTER_FIBS 8 + +/* + * The controller reports status events in AIFs. We hang on to a number of + * these in order to pass them out to user-space management tools. + */ +#define AAC_AIFQ_LENGTH 64 + +/* + * Firmware messages are passed in the printf buffer. + */ +#define AAC_PRINTF_BUFSIZE 256 + +/* + * We wait this many seconds for the adapter to come ready if it is still + * booting + */ +#define AAC_BOOT_TIMEOUT (3 * 60) + +/* + * Timeout for immediate commands. + */ +#define AAC_IMMEDIATE_TIMEOUT 30 /* seconds */ + +/* + * Timeout for normal commands + */ +#define AAC_CMD_TIMEOUT 120 /* seconds */ + +/* + * Rate at which we periodically check for timed out commands and kick the + * controller. 
+ */ +#define AAC_PERIODIC_INTERVAL 20 /* seconds */ + +#define PASSTHROUGH_BUS 0 +#define CONTAINER_BUS 1 +/* + * Per-container data structure + */ +struct aac_container +{ + struct aac_mntobj co_mntobj; + int co_found; + u_int32_t co_uid; + TAILQ_ENTRY(aac_container) co_link; +}; + +/* + * Per-SIM data structure + */ +struct aac_cam; +struct aac_sim +{ + device_t sim_dev; + int TargetsPerBus; + int BusNumber; + int BusType; + int InitiatorBusId; + struct aac_softc *aac_sc; + struct aac_cam *aac_cam; + TAILQ_ENTRY(aac_sim) sim_link; +}; + +/* + * Per-disk structure + */ +struct aac_disk +{ + device_t ad_dev; + struct aac_softc *ad_controller; + struct aac_container *ad_container; + struct disk *ad_disk; + int ad_flags; +#define AAC_DISK_OPEN (1<<0) + int ad_cylinders; + int ad_heads; + int ad_sectors; + u_int64_t ad_size; + int unit; +}; + +/* + * Per-command control structure. + */ +struct aac_command +{ + TAILQ_ENTRY(aac_command) cm_link; /* list linkage */ + + struct aac_softc *cm_sc; /* controller that owns us */ + + struct aac_fib *cm_fib; /* FIB associated with this + * command */ + u_int64_t cm_fibphys; /* bus address of the FIB */ + struct bio *cm_data; /* pointer to data in kernel + * space */ + u_int32_t cm_datalen; /* data length */ + bus_dmamap_t cm_datamap; /* DMA map for bio data */ + struct aac_sg_table *cm_sgtable; /* pointer to s/g table in + * command */ + int cm_flags; +#define AAC_CMD_MAPPED (1<<0) /* command has had its data + * mapped */ +#define AAC_CMD_DATAIN (1<<1) /* command involves data moving + * from controller to host */ +#define AAC_CMD_DATAOUT (1<<2) /* command involves data moving + * from host to controller */ +#define AAC_CMD_COMPLETED (1<<3) /* command has been completed */ +#define AAC_CMD_TIMEDOUT (1<<4) /* command taken too long */ +#define AAC_ON_AACQ_FREE (1<<5) +#define AAC_ON_AACQ_READY (1<<6) +#define AAC_ON_AACQ_BUSY (1<<7) +#define AAC_ON_AACQ_AIF (1<<8) +#define AAC_ON_AACQ_NORM (1<<10) +#define AAC_ON_AACQ_MASK ((1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<10)) +#define AAC_CMD_RESET (1<<9) +#define AAC_CMD_FASTRESP (1<<11) +#define AAC_CMD_WAIT (1<<12) + + void (* cm_complete)(struct aac_command *cm); + union ccb *cm_ccb; + time_t cm_timestamp; /* command creation time */ + int cm_index; + bus_dma_tag_t cm_passthr_dmat; /* passthrough buffer/command + * DMA tag */ +}; + +struct aac_fibmap { + TAILQ_ENTRY(aac_fibmap) fm_link; /* list linkage */ + struct aac_fib *aac_fibs; + bus_dmamap_t aac_fibmap; + struct aac_command *aac_commands; +}; + +/* + * We gather a number of adapter-visible items into a single structure. + * + * The ordering of this strucure may be important; we copy the Linux driver: + * + * Adapter FIBs + * Init struct + * Queue headers (Comm Area) + * Printf buffer + * + * In addition, we add: + * Sync Fib + */ +struct aac_common { + /* fibs for the controller to send us messages */ + struct aac_fib ac_fibs[AAC_ADAPTER_FIBS]; + + /* the init structure */ + struct aac_adapter_init ac_init; + + /* buffer for text messages from the controller */ + char ac_printf[AAC_PRINTF_BUFSIZE]; + + /* fib for synchronous commands */ + struct aac_fib ac_sync_fib; + + /* response buffer for SRC (new comm. 
type1) - must be last element */ + u_int32_t ac_host_rrq[0]; +}; + +/* + * Interface operations + */ +struct aac_interface +{ + int (*aif_get_fwstatus)(struct aac_softc *sc); + void (*aif_qnotify)(struct aac_softc *sc, int qbit); + int (*aif_get_istatus)(struct aac_softc *sc); + void (*aif_clr_istatus)(struct aac_softc *sc, int mask); + void (*aif_set_mailbox)(struct aac_softc *sc, u_int32_t command, + u_int32_t arg0, u_int32_t arg1, + u_int32_t arg2, u_int32_t arg3); + int (*aif_get_mailbox)(struct aac_softc *sc, int mb); + void (*aif_set_interrupts)(struct aac_softc *sc, int enable); + int (*aif_send_command)(struct aac_softc *sc, struct aac_command *cm); + int (*aif_get_outb_queue)(struct aac_softc *sc); + void (*aif_set_outb_queue)(struct aac_softc *sc, int index); +}; +extern struct aac_interface aacraid_src_interface; +extern struct aac_interface aacraid_srcv_interface; + +#define AAC_GET_FWSTATUS(sc) ((sc)->aac_if.aif_get_fwstatus((sc))) +#define AAC_QNOTIFY(sc, qbit) ((sc)->aac_if.aif_qnotify((sc), (qbit))) +#define AAC_GET_ISTATUS(sc) ((sc)->aac_if.aif_get_istatus((sc))) +#define AAC_CLEAR_ISTATUS(sc, mask) ((sc)->aac_if.aif_clr_istatus((sc), \ + (mask))) +#define AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3) \ + ((sc)->aac_if.aif_set_mailbox((sc), (command), (arg0), (arg1), (arg2), \ + (arg3))) +#define AAC_GET_MAILBOX(sc, mb) ((sc)->aac_if.aif_get_mailbox((sc), \ + (mb))) +#define AAC_MASK_INTERRUPTS(sc) ((sc)->aac_if.aif_set_interrupts((sc), \ + 0)) +#define AAC_UNMASK_INTERRUPTS(sc) ((sc)->aac_if.aif_set_interrupts((sc), \ + 1)) +#define AAC_SEND_COMMAND(sc, cm) ((sc)->aac_if.aif_send_command((sc), (cm))) +#define AAC_GET_OUTB_QUEUE(sc) ((sc)->aac_if.aif_get_outb_queue((sc))) +#define AAC_SET_OUTB_QUEUE(sc, idx) ((sc)->aac_if.aif_set_outb_queue((sc), (idx))) + +#define AAC_MEM0_SETREG4(sc, reg, val) bus_space_write_4(sc->aac_btag0, \ + sc->aac_bhandle0, reg, val) +#define AAC_MEM0_GETREG4(sc, reg) bus_space_read_4(sc->aac_btag0, \ + sc->aac_bhandle0, reg) +#define AAC_MEM0_SETREG2(sc, reg, val) bus_space_write_2(sc->aac_btag0, \ + sc->aac_bhandle0, reg, val) +#define AAC_MEM0_GETREG2(sc, reg) bus_space_read_2(sc->aac_btag0, \ + sc->aac_bhandle0, reg) +#define AAC_MEM0_SETREG1(sc, reg, val) bus_space_write_1(sc->aac_btag0, \ + sc->aac_bhandle0, reg, val) +#define AAC_MEM0_GETREG1(sc, reg) bus_space_read_1(sc->aac_btag0, \ + sc->aac_bhandle0, reg) + +#define AAC_MEM1_SETREG4(sc, reg, val) bus_space_write_4(sc->aac_btag1, \ + sc->aac_bhandle1, reg, val) +#define AAC_MEM1_GETREG4(sc, reg) bus_space_read_4(sc->aac_btag1, \ + sc->aac_bhandle1, reg) +#define AAC_MEM1_SETREG2(sc, reg, val) bus_space_write_2(sc->aac_btag1, \ + sc->aac_bhandle1, reg, val) +#define AAC_MEM1_GETREG2(sc, reg) bus_space_read_2(sc->aac_btag1, \ + sc->aac_bhandle1, reg) +#define AAC_MEM1_SETREG1(sc, reg, val) bus_space_write_1(sc->aac_btag1, \ + sc->aac_bhandle1, reg, val) +#define AAC_MEM1_GETREG1(sc, reg) bus_space_read_1(sc->aac_btag1, \ + sc->aac_bhandle1, reg) + +/* fib context (IOCTL) */ +struct aac_fib_context { + u_int32_t unique; + int ctx_idx; + int ctx_wrap; + struct aac_fib_context *next, *prev; +}; + +/* + * Per-controller structure. + */ +struct aac_softc +{ + /* bus connections */ + device_t aac_dev; + struct resource *aac_regs_res0, *aac_regs_res1; /* reg. if. 
window */ + int aac_regs_rid0, aac_regs_rid1; /* resource ID */ + bus_space_handle_t aac_bhandle0, aac_bhandle1; /* bus space handle */ + bus_space_tag_t aac_btag0, aac_btag1; /* bus space tag */ + bus_dma_tag_t aac_parent_dmat; /* parent DMA tag */ + bus_dma_tag_t aac_buffer_dmat; /* data buffer/command + * DMA tag */ + struct resource *aac_irq; /* interrupt */ + int aac_irq_rid; + void *aac_intr; /* interrupt handle */ + eventhandler_tag eh; +#if __FreeBSD_version >= 800000 + struct callout aac_daemontime; /* clock daemon callout */ +#else + struct callout_handle timeout_id; /* timeout handle */ +#endif + + /* controller features, limits and status */ + int aac_state; +#define AAC_STATE_SUSPEND (1<<0) +#define AAC_STATE_UNUSED0 (1<<1) +#define AAC_STATE_INTERRUPTS_ON (1<<2) +#define AAC_STATE_AIF_SLEEPER (1<<3) +#define AAC_STATE_RESET (1<<4) + struct FsaRevision aac_revision; + + /* controller hardware interface */ + int aac_hwif; +#define AAC_HWIF_SRC 5 +#define AAC_HWIF_SRCV 6 +#define AAC_HWIF_UNKNOWN -1 + bus_dma_tag_t aac_common_dmat; /* common structure + * DMA tag */ + bus_dmamap_t aac_common_dmamap; /* common structure + * DMA map */ + struct aac_common *aac_common; + u_int32_t aac_common_busaddr; + u_int32_t aac_host_rrq_idx; + struct aac_interface aac_if; + + /* command/fib resources */ + bus_dma_tag_t aac_fib_dmat; /* DMA tag for allocing FIBs */ + TAILQ_HEAD(,aac_fibmap) aac_fibmap_tqh; + u_int total_fibs; + struct aac_command *aac_commands; + + /* command management */ + TAILQ_HEAD(,aac_command) aac_free; /* command structures + * available for reuse */ + TAILQ_HEAD(,aac_command) aac_ready; /* commands on hold for + * controller resources */ + TAILQ_HEAD(,aac_command) aac_busy; + TAILQ_HEAD(,aac_event) aac_ev_cmfree; + struct bio_queue_head aac_bioq; + + struct aac_qstat aac_qstat[AACQ_COUNT]; /* queue statistics */ + + /* connected containters */ + TAILQ_HEAD(,aac_container) aac_container_tqh; + struct mtx aac_container_lock; + + /* + * The general I/O lock. This protects the sync fib, the lists, the + * queues, and the registers. + */ + struct mtx aac_io_lock; + + struct intr_config_hook aac_ich; + + /* sync. 
transfer mode */ + struct aac_command *aac_sync_cm; + + /* management interface */ + struct cdev *aac_dev_t; + struct mtx aac_aifq_lock; + struct aac_fib aac_aifq[AAC_AIFQ_LENGTH]; + int aifq_idx; + int aifq_filled; + int aif_pending; + struct aac_fib_context *fibctx; + struct selinfo rcv_select; + struct proc *aifthread; + int aifflags; +#define AAC_AIFFLAGS_RUNNING (1 << 0) +#define AAC_AIFFLAGS_AIF (1 << 1) +#define AAC_AIFFLAGS_EXIT (1 << 2) +#define AAC_AIFFLAGS_EXITED (1 << 3) +#define AAC_AIFFLAGS_PRINTF (1 << 4) +#define AAC_AIFFLAGS_ALLOCFIBS (1 << 5) +#define AAC_AIFFLAGS_PENDING (AAC_AIFFLAGS_AIF | AAC_AIFFLAGS_PRINTF | \ + AAC_AIFFLAGS_ALLOCFIBS) + u_int32_t flags; +#define AAC_FLAGS_PERC2QC (1 << 0) +#define AAC_FLAGS_ENABLE_CAM (1 << 1) /* No SCSI passthrough */ +#define AAC_FLAGS_CAM_NORESET (1 << 2) /* Fake SCSI resets */ +#define AAC_FLAGS_CAM_PASSONLY (1 << 3) /* Only create pass devices */ +#define AAC_FLAGS_SG_64BIT (1 << 4) /* Use 64-bit S/G addresses */ +#define AAC_FLAGS_4GB_WINDOW (1 << 5) /* Device can access host mem + * 2GB-4GB range */ +#define AAC_FLAGS_NO4GB (1 << 6) /* Can't access host mem >2GB */ +#define AAC_FLAGS_256FIBS (1 << 7) /* Can only do 256 commands */ +#define AAC_FLAGS_BROKEN_MEMMAP (1 << 8) /* Broken HostPhysMemPages */ +#define AAC_FLAGS_SLAVE (1 << 9) +#define AAC_FLAGS_MASTER (1 << 10) +#define AAC_FLAGS_NEW_COMM (1 << 11) /* New comm. interface supported */ +#define AAC_FLAGS_RAW_IO (1 << 12) /* Raw I/O interface */ +#define AAC_FLAGS_ARRAY_64BIT (1 << 13) /* 64-bit array size */ +#define AAC_FLAGS_LBA_64BIT (1 << 14) /* 64-bit LBA support */ +#define AAC_QUEUE_FRZN (1 << 15) /* Freeze the processing of + * commands on the queue. */ +#define AAC_FLAGS_NEW_COMM_TYPE1 (1 << 16) /* New comm. type1 supported */ +#define AAC_FLAGS_NEW_COMM_TYPE2 (1 << 17) /* New comm. type2 supported */ +#define AAC_FLAGS_NEW_COMM_TYPE34 (1 << 18) /* New comm. type3/4 */ +#define AAC_FLAGS_SYNC_MODE (1 << 18) /* Sync. transfer mode */ + u_int32_t hint_flags; /* driver parameters */ + int sim_freezed; /* flag for sim_freeze/release */ + u_int32_t supported_options; + u_int32_t scsi_method_id; + TAILQ_HEAD(,aac_sim) aac_sim_tqh; + + u_int32_t aac_max_fibs; /* max. FIB count */ + u_int32_t aac_max_fibs_alloc; /* max. alloc. per alloc_commands() */ + u_int32_t aac_max_fib_size; /* max. FIB size */ + u_int32_t aac_sg_tablesize; /* max. sg count from host */ + u_int32_t aac_max_sectors; /* max. I/O size from host (blocks) */ + u_int32_t aac_feature_bits; /* feature bits from suppl. info */ + u_int32_t aac_support_opt2; /* supp. options from suppl. info */ + u_int32_t aac_max_aif; /* max. 
AIF count */ +#define AAC_CAM_TARGET_WILDCARD ~0 + void (*cam_rescan_cb)(struct aac_softc *, uint32_t, + uint32_t); + u_int32_t DebugFlags; /* Debug print flags bitmap */ + u_int32_t DebugOffset; /* Offset from DPMEM start */ + u_int32_t DebugHeaderSize; /* Size of debug header */ + u_int32_t FwDebugFlags; /* FW Debug Flags */ + u_int32_t FwDebugBufferSize; /* FW Debug Buffer size */ +}; + +/* + * Event callback mechanism for the driver + */ +#define AAC_EVENT_NONE 0x00 +#define AAC_EVENT_CMFREE 0x01 +#define AAC_EVENT_MASK 0xff +#define AAC_EVENT_REPEAT 0x100 + +typedef void aac_event_cb_t(struct aac_softc *sc, struct aac_event *event, + void *arg); +struct aac_event { + TAILQ_ENTRY(aac_event) ev_links; + int ev_type; + aac_event_cb_t *ev_callback; + void *ev_arg; +}; + +/* + * Public functions + */ +extern void aacraid_free(struct aac_softc *sc); +extern int aacraid_attach(struct aac_softc *sc); +extern int aacraid_detach(device_t dev); +extern int aacraid_shutdown(device_t dev); +extern int aacraid_suspend(device_t dev); +extern int aacraid_resume(device_t dev); +extern void aacraid_new_intr_type1(void *arg); +extern void aacraid_submit_bio(struct bio *bp); +extern void aacraid_biodone(struct bio *bp); +extern void aacraid_startio(struct aac_softc *sc); +extern int aacraid_alloc_command(struct aac_softc *sc, + struct aac_command **cmp); +extern void aacraid_release_command(struct aac_command *cm); +extern void aacraid_add_event(struct aac_softc *sc, struct aac_event + *event); +extern void aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, + int nseg, int error); +extern int aacraid_wait_command(struct aac_command *cmp); + +/* #define AACRAID_DEBUG */ + +#ifdef AACRAID_DEBUG +# define fwprintf(sc, flags, fmt, args...) \ + aacraid_fw_printf(sc, flags, "%s: " fmt, __func__, ##args); + +extern void aacraid_print_queues(struct aac_softc *sc); +extern void aacraid_print_fib(struct aac_softc *sc, struct aac_fib *fib, + const char *caller); +extern void aacraid_print_aif(struct aac_softc *sc, + struct aac_aif_command *aif); + +#define AAC_PRINT_FIB(sc, fib) aacraid_print_fib(sc, fib, __func__) + +#else +# define fwprintf(sc, flags, fmt, args...) + +# define aacraid_print_queues(sc) + +# define AAC_PRINT_FIB(sc, fib) +# define aacraid_print_aif(sc, aac_aif_command) +#endif + +struct aac_code_lookup { + char *string; + u_int32_t code; +}; + +/* + * Queue primitives for driver queues. 
+ */ +#define AACQ_ADD(sc, qname) \ + do { \ + struct aac_qstat *qs; \ + \ + qs = &(sc)->aac_qstat[qname]; \ + \ + qs->q_length++; \ + if (qs->q_length > qs->q_max) \ + qs->q_max = qs->q_length; \ + } while (0) + +#define AACQ_REMOVE(sc, qname) (sc)->aac_qstat[qname].q_length-- +#define AACQ_INIT(sc, qname) \ + do { \ + sc->aac_qstat[qname].q_length = 0; \ + sc->aac_qstat[qname].q_max = 0; \ + } while (0) + + +#define AACQ_COMMAND_QUEUE(name, index) \ +static __inline void \ +aac_initq_ ## name (struct aac_softc *sc) \ +{ \ + TAILQ_INIT(&sc->aac_ ## name); \ + AACQ_INIT(sc, index); \ +} \ +static __inline void \ +aac_enqueue_ ## name (struct aac_command *cm) \ +{ \ + if ((cm->cm_flags & AAC_ON_AACQ_MASK) != 0) { \ + printf("command %p is on another queue, flags = %#x\n", \ + cm, cm->cm_flags); \ + panic("command is on another queue"); \ + } \ + TAILQ_INSERT_TAIL(&cm->cm_sc->aac_ ## name, cm, cm_link); \ + cm->cm_flags |= AAC_ON_ ## index; \ + AACQ_ADD(cm->cm_sc, index); \ +} \ +static __inline void \ +aac_requeue_ ## name (struct aac_command *cm) \ +{ \ + if ((cm->cm_flags & AAC_ON_AACQ_MASK) != 0) { \ + printf("command %p is on another queue, flags = %#x\n", \ + cm, cm->cm_flags); \ + panic("command is on another queue"); \ + } \ + TAILQ_INSERT_HEAD(&cm->cm_sc->aac_ ## name, cm, cm_link); \ + cm->cm_flags |= AAC_ON_ ## index; \ + AACQ_ADD(cm->cm_sc, index); \ +} \ +static __inline struct aac_command * \ +aac_dequeue_ ## name (struct aac_softc *sc) \ +{ \ + struct aac_command *cm; \ + \ + if ((cm = TAILQ_FIRST(&sc->aac_ ## name)) != NULL) { \ + if ((cm->cm_flags & AAC_ON_ ## index) == 0) { \ + printf("command %p not in queue, flags = %#x, " \ + "bit = %#x\n", cm, cm->cm_flags, \ + AAC_ON_ ## index); \ + panic("command not in queue"); \ + } \ + TAILQ_REMOVE(&sc->aac_ ## name, cm, cm_link); \ + cm->cm_flags &= ~AAC_ON_ ## index; \ + AACQ_REMOVE(sc, index); \ + } \ + return(cm); \ +} \ +static __inline void \ +aac_remove_ ## name (struct aac_command *cm) \ +{ \ + if ((cm->cm_flags & AAC_ON_ ## index) == 0) { \ + printf("command %p not in queue, flags = %#x, " \ + "bit = %#x\n", cm, cm->cm_flags, \ + AAC_ON_ ## index); \ + panic("command not in queue"); \ + } \ + TAILQ_REMOVE(&cm->cm_sc->aac_ ## name, cm, cm_link); \ + cm->cm_flags &= ~AAC_ON_ ## index; \ + AACQ_REMOVE(cm->cm_sc, index); \ +} \ +struct hack + +AACQ_COMMAND_QUEUE(free, AACQ_FREE); +AACQ_COMMAND_QUEUE(ready, AACQ_READY); +AACQ_COMMAND_QUEUE(busy, AACQ_BUSY); + +static __inline void +aac_print_printf(struct aac_softc *sc) +{ + /* + * XXX We have the ability to read the length of the printf string + * from out of the mailboxes. + */ + device_printf(sc->aac_dev, "**Monitor** %.*s", AAC_PRINTF_BUFSIZE, + sc->aac_common->ac_printf); + sc->aac_common->ac_printf[0] = 0; + AAC_QNOTIFY(sc, AAC_DB_PRINTF); +} + +static __inline int +aac_alloc_sync_fib(struct aac_softc *sc, struct aac_fib **fib) +{ + + mtx_assert(&sc->aac_io_lock, MA_OWNED); + *fib = &sc->aac_common->ac_sync_fib; + return (0); +} + +static __inline void +aac_release_sync_fib(struct aac_softc *sc) +{ + + mtx_assert(&sc->aac_io_lock, MA_OWNED); +} -- cgit v1.1
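
[Editor's note - illustrative sketch, not part of the patch above.] The "raw I/O 2" request format defined in aacraid_reg.h (struct aac_raw_io2 plus the IEEE1212 scatter/gather elements and the RIO2_* flag bits) is easiest to follow with one request laid out concretely. The stand-alone, user-space C program below does that using trimmed copies of those structure layouts and flag values as they appear in the patch. Everything else is an assumption for illustration only: the helper name build_read_rio2, the 512-byte block size, the fake bus addresses, the use of __attribute__((__packed__)) in place of the kernel's __packed, and the printed summary. It is a sketch of how such a request could be packed, not a statement of how the driver itself builds its FIBs.

/*
 * rio2demo.c - pack one hypothetical RIO2 read request and print its size.
 * Structure layouts and RIO2_* values are trimmed copies from aacraid_reg.h
 * in the patch above; helper and buffer names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RIO2_IO_TYPE_READ	0x0001
#define RIO2_SGL_CONFORMANT	0x0010
#define RIO2_SG_FORMAT_IEEE1212	0x2000

struct aac_sge_ieee1212 {
	uint32_t addrLow;
	uint32_t addrHigh;
	uint32_t length;
	uint32_t flags;			/* always 0 from the host side */
} __attribute__((__packed__));

struct aac_raw_io2 {
	uint32_t strtBlkLow;
	uint32_t strtBlkHigh;
	uint32_t byteCnt;
	uint16_t ldNum;
	uint16_t flags;			/* RIO2_xxx */
	uint32_t sgeFirstSize;		/* size of first SG element */
	uint32_t sgeNominalSize;	/* size of 2nd SG element */
	uint8_t  sgeCnt;
	uint8_t  bpTotal;		/* reserved for FW use */
	uint8_t  bpComplete;		/* reserved for FW use */
	uint8_t  sgeFirstIndex;		/* reserved for FW use */
	uint8_t  unused[4];
	struct aac_sge_ieee1212 sge[];	/* variable size */
} __attribute__((__packed__));

/*
 * Fill in a read of 'nblk' 512-byte blocks starting at 'lba' on container
 * 'ld', scattered over 'nseg' equal-sized segments at 'seg_addr[]'.
 * (Assumed helper; the block size and segment layout are illustrative.)
 */
static void
build_read_rio2(struct aac_raw_io2 *rio2, uint64_t lba, uint32_t nblk,
    uint16_t ld, const uint64_t *seg_addr, uint32_t seg_len, int nseg)
{
	int i;

	memset(rio2, 0, sizeof(*rio2));
	rio2->strtBlkLow  = (uint32_t)lba;
	rio2->strtBlkHigh = (uint32_t)(lba >> 32);
	rio2->byteCnt     = nblk * 512;
	rio2->ldNum       = ld;
	rio2->flags       = RIO2_IO_TYPE_READ | RIO2_SGL_CONFORMANT |
	    RIO2_SG_FORMAT_IEEE1212;
	rio2->sgeCnt         = (uint8_t)nseg;
	rio2->sgeFirstSize   = seg_len;
	rio2->sgeNominalSize = seg_len;

	for (i = 0; i < nseg; i++) {
		rio2->sge[i].addrLow  = (uint32_t)seg_addr[i];
		rio2->sge[i].addrHigh = (uint32_t)(seg_addr[i] >> 32);
		rio2->sge[i].length   = seg_len;
		rio2->sge[i].flags    = 0;
	}
}

int
main(void)
{
	static uint8_t fib_data[512];		/* stands in for a FIB data area */
	struct aac_raw_io2 *rio2 = (void *)fib_data;
	uint64_t segs[2] = { 0x100000, 0x200000 };	/* fake bus addresses */

	/* 16 blocks (8 KB) from LBA 0x123456789 on container 0, in 2 x 4 KB segments */
	build_read_rio2(rio2, 0x123456789ULL, 16, 0, segs, 4096, 2);
	printf("header %zu bytes + %u SG elements of %zu bytes each\n",
	    sizeof(struct aac_raw_io2), (unsigned)rio2->sgeCnt,
	    sizeof(struct aac_sge_ieee1212));
	return (0);
}

Compiling and running this with an ordinary C compiler (for example "cc -o rio2demo rio2demo.c && ./rio2demo", a hypothetical invocation) prints a 32-byte fixed header followed by 16-byte SG elements, which illustrates why the header carries sgeCnt together with the first/nominal element sizes rather than a single total length.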