author		msmith <msmith@FreeBSD.org>	2002-01-20 08:51:08 +0000
committer	msmith <msmith@FreeBSD.org>	2002-01-20 08:51:08 +0000
commit		4b0b46d216d8238e21d10eaa4345c85d79a350d9 (patch)
tree		49e673a5e715f655e276a2f789608f01a979760a /sys/dev/iir
parent		baca124ccb63309a83f4bea30e0bf7ecdc4b38ce (diff)
Add the 'iir' driver, for the Intel Integrated RAID controllers and
prior ICP Vortex models. This driver was developed by Achim Leubner of
Intel (previously with ICP Vortex) and Boji Kannanthanam of Intel.

Submitted by:	"Kannanthanam, Boji T" <boji.t.kannanthanam@intel.com>
MFC after:	2 weeks
Diffstat (limited to 'sys/dev/iir')
-rw-r--r--	sys/dev/iir/iir.c	2018
-rw-r--r--	sys/dev/iir/iir.h	712
-rw-r--r--	sys/dev/iir/iir_ctrl.c	371
-rw-r--r--	sys/dev/iir/iir_pci.c	476
4 files changed, 3577 insertions, 0 deletions
diff --git a/sys/dev/iir/iir.c b/sys/dev/iir/iir.c
new file mode 100644
index 0000000..4e07233
--- /dev/null
+++ b/sys/dev/iir/iir.c
@@ -0,0 +1,2018 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 2000-01 Intel Corporation
+ * All Rights Reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
+ *
+ * Written by: Achim Leubner <achim.leubner@intel.com>
+ * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
+ *
+ * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers.
+ * Mike Smith; Some driver source code.
+ * FreeBSD.ORG; Great O/S to work on and for.
+ *
+ * TODO:
+ */
+
+#ident "$Id: iir.c 1.2 2001/06/21 20:28:32 achim Exp $"
+
+#define _IIR_C_
+
+/* #include "opt_iir.h" */
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/eventhandler.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+
+#include <stddef.h> /* For offsetof */
+
+#include <machine/bus_memio.h>
+#include <machine/bus_pio.h>
+#include <machine/bus.h>
+#include <machine/clock.h>
+#include <machine/stdarg.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_debug.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <dev/iir/iir.h>
+
+struct gdt_softc *gdt_wait_gdt;
+int gdt_wait_index;
+
+#ifdef GDT_DEBUG
+int gdt_debug = GDT_DEBUG;
+#ifdef __SERIAL__
+#define MAX_SERBUF 160
+static void ser_init(void);
+static void ser_puts(char *str);
+static void ser_putc(int c);
+static char strbuf[MAX_SERBUF+1];
+#ifdef __COM2__
+#define COM_BASE 0x2f8
+#else
+#define COM_BASE 0x3f8
+#endif
+static void ser_init()
+{
+ unsigned port=COM_BASE;
+
+ outb(port+3, 0x80);
+ outb(port+1, 0);
+ /* 19200 baud; for 9600 use: outb(port, 12) */
+ outb(port, 6);
+ outb(port+3, 3);
+ outb(port+1, 0);
+}
+
+static void ser_puts(char *str)
+{
+ char *ptr;
+
+ ser_init();
+ for (ptr=str;*ptr;++ptr)
+ ser_putc((int)(*ptr));
+}
+
+static void ser_putc(int c)
+{
+ unsigned port=COM_BASE;
+
+ while ((inb(port+5) & 0x20)==0);
+ outb(port, c);
+ if (c==0x0a)
+ {
+ while ((inb(port+5) & 0x20)==0);
+ outb(port, 0x0d);
+ }
+}
+
+int ser_printf(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args,fmt);
+ i = vsprintf(strbuf,fmt,args);
+ ser_puts(strbuf);
+ va_end(args);
+ return i;
+}
+#endif
+#endif
+
+/* The linked list of softc structures */
+struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs);
+/* controller cnt. */
+int gdt_cnt = 0;
+/* event buffer */
+static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
+static int elastidx, eoldidx;
+/* statistics */
+gdt_statist_t gdt_stat;
+
+/* Definitions for our use of the SIM private CCB area */
+#define ccb_sim_ptr spriv_ptr0
+#define ccb_priority spriv_field1
+
+static void iir_action(struct cam_sim *sim, union ccb *ccb);
+static void iir_poll(struct cam_sim *sim);
+static void iir_shutdown(void *arg, int howto);
+static void iir_timeout(void *arg);
+static void iir_watchdog(void *arg);
+
+static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
+ int *secs);
+static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
+ u_int8_t service, u_int16_t opcode,
+ u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
+static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
+ int timeout);
+
+static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
+static u_int32_t gdt_ccb_vtop(struct gdt_softc *gdt,
+ struct gdt_ccb *gccb);
+
+static int gdt_sync_event(struct gdt_softc *gdt, int service,
+ u_int8_t index, struct gdt_ccb *gccb);
+static int gdt_async_event(struct gdt_softc *gdt, int service);
+static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
+ union ccb *ccb, int *lock);
+static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
+ union ccb *ccb, int *lock);
+static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
+ gdt_ucmd_t *ucmd, int *lock);
+static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb);
+
+static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
+ int nseg, int error);
+static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
+ int nseg, int error);
+
+int
+iir_init(struct gdt_softc *gdt)
+{
+ u_int16_t cdev_cnt;
+ int i, id, drv_cyls, drv_hds, drv_secs;
+ struct gdt_ccb *gccb;
+
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
+
+ gdt->sc_state = GDT_POLLING;
+ gdt_clear_events();
+ bzero(&gdt_stat, sizeof(gdt_statist_t));
+
+ SLIST_INIT(&gdt->sc_free_gccb);
+ SLIST_INIT(&gdt->sc_pending_gccb);
+ TAILQ_INIT(&gdt->sc_ccb_queue);
+ TAILQ_INIT(&gdt->sc_ucmd_queue);
+ TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links);
+
+ /* DMA tag for mapping buffers into device visible space. */
+ if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
+ /*lowaddr*/BUS_SPACE_MAXADDR,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/BUS_DMA_ALLOCNOW,
+ &gdt->sc_buffer_dmat) != 0) {
+ printf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n",
+ gdt->sc_hanum);
+ return (1);
+ }
+ gdt->sc_init_level++;
+
+ /* DMA tag for our ccb structures */
+ if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
+ /*lowaddr*/BUS_SPACE_MAXADDR,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ GDT_MAXCMDS * sizeof(struct gdt_ccb),
+ /*nsegments*/1,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &gdt->sc_gccb_dmat) != 0) {
+ printf("iir%d: bus_dma_tag_create(...,gdt->sc_gccb_dmat) failed\n",
+ gdt->sc_hanum);
+ return (1);
+ }
+ gdt->sc_init_level++;
+
+ /* Allocation for our ccbs */
+ if (bus_dmamem_alloc(gdt->sc_gccb_dmat, (void **)&gdt->sc_gccbs,
+ BUS_DMA_NOWAIT, &gdt->sc_gccb_dmamap) != 0) {
+ printf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n",
+ gdt->sc_hanum);
+ return (1);
+ }
+ gdt->sc_init_level++;
+
+ /* And permanently map them */
+ bus_dmamap_load(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap,
+ gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb),
+ gdtmapmem, &gdt->sc_gccb_busbase, /*flags*/0);
+ gdt->sc_init_level++;
+
+ /* Clear them out. */
+ bzero(gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb));
+
+ /* Initialize the ccbs */
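+ /* Command indices 0 and 1 are reserved for async/special events
+  * (see iir_intr()), so real command indices start at 2. */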
+ for (i = GDT_MAXCMDS-1; i >= 0; i--) {
+ gdt->sc_gccbs[i].gc_cmd_index = i + 2;
+ gdt->sc_gccbs[i].gc_flags = GDT_GCF_UNUSED;
+ gdt->sc_gccbs[i].gc_map_flag = FALSE;
+ if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
+ &gdt->sc_gccbs[i].gc_dmamap) != 0)
+ return(1);
+ gdt->sc_gccbs[i].gc_map_flag = TRUE;
+ SLIST_INSERT_HEAD(&gdt->sc_free_gccb, &gdt->sc_gccbs[i], sle);
+ }
+ gdt->sc_init_level++;
+
+ /* create the control device */
+ gdt->sc_dev = gdt_make_dev(gdt->sc_hanum);
+
+ /* allocate ccb for gdt_internal_cmd() */
+ gccb = gdt_get_ccb(gdt);
+ if (gccb == NULL) {
+ printf("iir%d: No free command index found\n",
+ gdt->sc_hanum);
+ return (1);
+ }
+
+ if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
+ 0, 0, 0)) {
+ printf("iir%d: Screen service initialization error %d\n",
+ gdt->sc_hanum, gdt->sc_status);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+
+ if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
+ GDT_LINUX_OS, 0, 0)) {
+ printf("iir%d: Cache service initialization error %d\n",
+ gdt->sc_hanum, gdt->sc_status);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+ gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
+ 0, 0, 0);
+
+ if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_MOUNT,
+ 0xffff, 1, 0)) {
+ printf("iir%d: Cache service mount error %d\n",
+ gdt->sc_hanum, gdt->sc_status);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+
+ if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
+ GDT_LINUX_OS, 0, 0)) {
+ printf("iir%d: Cache service post-mount initialization error %d\n",
+ gdt->sc_hanum, gdt->sc_status);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+ cdev_cnt = (u_int16_t)gdt->sc_info;
+ gdt->sc_fw_vers = gdt->sc_service;
+
+ /* Detect number of buses */
+ gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
+ gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
+ gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
+ gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
+ gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
+ if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
+ GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
+ GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
+ gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
+ for (i = 0; i < gdt->sc_bus_cnt; i++) {
+ id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
+ i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
+ gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
+ }
+ } else {
+ /* New method failed, use fallback. */
+ for (i = 0; i < GDT_MAXBUS; i++) {
+ gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
+ if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
+ GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
+ GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
+ GDT_GETCH_SZ)) {
+ if (i == 0) {
+ printf("iir%d: Cannot get channel count, "
+ "error %d\n", gdt->sc_hanum, gdt->sc_status);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+ break;
+ }
+ gdt->sc_bus_id[i] =
+ (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
+ gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
+ }
+ gdt->sc_bus_cnt = i;
+ }
+ /* add one "virtual" channel for the host drives */
+ gdt->sc_virt_bus = gdt->sc_bus_cnt;
+ gdt->sc_bus_cnt++;
+
+ if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
+ 0, 0, 0)) {
+ printf("iir%d: Raw service initialization error %d\n",
+ gdt->sc_hanum, gdt->sc_status);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+
+ /* Set/get features raw service (scatter/gather) */
+ gdt->sc_raw_feat = 0;
+ if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
+ GDT_SCATTER_GATHER, 0, 0)) {
+ if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
+ 0, 0, 0)) {
+ gdt->sc_raw_feat = gdt->sc_info;
+ if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
+ panic("iir%d: Scatter/Gather Raw Service "
+ "required but not supported!\n", gdt->sc_hanum);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+ }
+ }
+
+ /* Set/get features cache service (scatter/gather) */
+ gdt->sc_cache_feat = 0;
+ if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
+ 0, GDT_SCATTER_GATHER, 0)) {
+ if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
+ 0, 0, 0)) {
+ gdt->sc_cache_feat = gdt->sc_info;
+ if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
+ panic("iir%d: Scatter/Gather Cache Service "
+ "required but not supported!\n", gdt->sc_hanum);
+ gdt_free_ccb(gdt, gccb);
+ return (1);
+ }
+ }
+ }
+
+ /* Scan for cache devices */
+ for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
+ if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
+ i, 0, 0)) {
+ gdt->sc_hdr[i].hd_present = 1;
+ gdt->sc_hdr[i].hd_size = gdt->sc_info;
+
+ /*
+ * Evaluate mapping (sectors per head, heads per cyl)
+ */
+ gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
+ if (gdt->sc_info2 == 0)
+ gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
+ &drv_cyls, &drv_hds, &drv_secs);
+ else {
+ drv_hds = gdt->sc_info2 & 0xff;
+ drv_secs = (gdt->sc_info2 >> 8) & 0xff;
+ drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
+ drv_secs;
+ }
+ gdt->sc_hdr[i].hd_heads = drv_hds;
+ gdt->sc_hdr[i].hd_secs = drv_secs;
+ /* Round the size */
+ gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
+
+ if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
+ GDT_DEVTYPE, i, 0, 0))
+ gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
+ }
+ }
+
+ GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
+ gdt->sc_dpmembase,
+ gdt->sc_bus_cnt, cdev_cnt,
+ cdev_cnt == 1 ? "" : "s"));
+ gdt_free_ccb(gdt, gccb);
+
+ gdt_cnt++;
+ return (0);
+}
+
+void
+iir_free(struct gdt_softc *gdt)
+{
+ int i;
+
+ GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
+
+ switch (gdt->sc_init_level) {
+ default:
+ gdt_destroy_dev(gdt->sc_dev);
+ case 5:
+ for (i = GDT_MAXCMDS-1; i >= 0; i--)
+ if (gdt->sc_gccbs[i].gc_map_flag)
+ bus_dmamap_destroy(gdt->sc_buffer_dmat,
+ gdt->sc_gccbs[i].gc_dmamap);
+ bus_dmamap_unload(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap);
+ case 4:
+ bus_dmamem_free(gdt->sc_gccb_dmat, gdt->sc_gccbs, gdt->sc_gccb_dmamap);
+ case 3:
+ bus_dma_tag_destroy(gdt->sc_gccb_dmat);
+ case 2:
+ bus_dma_tag_destroy(gdt->sc_buffer_dmat);
+ case 1:
+ bus_dma_tag_destroy(gdt->sc_parent_dmat);
+ case 0:
+ break;
+ }
+ TAILQ_REMOVE(&gdt_softcs, gdt, links);
+}
+
+void
+iir_attach(struct gdt_softc *gdt)
+{
+ struct cam_devq *devq;
+ int i;
+
+ GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
+
+ /*
+ * Create the device queue for our SIM.
+ */
+ devq = cam_simq_alloc(GDT_MAXCMDS);
+ if (devq == NULL)
+ return;
+
+ for (i = 0; i < gdt->sc_bus_cnt; i++) {
+ /*
+ * Construct our SIM entry
+ */
+ gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
+ gdt, gdt->sc_hanum, /*untagged*/2,
+ /*tagged*/GDT_MAXCMDS, devq);
+ if (xpt_bus_register(gdt->sims[i], i) != CAM_SUCCESS) {
+ cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
+ break;
+ }
+
+ if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
+ cam_sim_path(gdt->sims[i]),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
+ cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
+ break;
+ }
+ }
+ if (i > 0)
+ EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
+ gdt, SHUTDOWN_PRI_DEFAULT);
+ /* iir_watchdog(gdt); */
+ gdt->sc_state = GDT_NORMAL;
+}
+
+static void
+gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
+{
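+ /* Derive a logical CHS geometry from the capacity in sectors, growing
+  * heads/sectors-per-track until the cylinder count fits the limit. */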
+ *cyls = size / GDT_HEADS / GDT_SECS;
+ if (*cyls < GDT_MAXCYLS) {
+ *heads = GDT_HEADS;
+ *secs = GDT_SECS;
+ } else {
+ /* Too high for 64 * 32 */
+ *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
+ if (*cyls < GDT_MAXCYLS) {
+ *heads = GDT_MEDHEADS;
+ *secs = GDT_MEDSECS;
+ } else {
+ /* Too high for 127 * 63 */
+ *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
+ *heads = GDT_BIGHEADS;
+ *secs = GDT_BIGSECS;
+ }
+ }
+}
+
+static int
+gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
+ int timeout)
+{
+ int rv = 0;
+
+ GDT_DPRINTF(GDT_D_INIT,
+ ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
+
+ gdt->sc_state |= GDT_POLL_WAIT;
+ do {
+ iir_intr(gdt);
+ if (gdt == gdt_wait_gdt &&
+ gccb->gc_cmd_index == gdt_wait_index) {
+ rv = 1;
+ break;
+ }
+ DELAY(1);
+ } while (--timeout);
+ gdt->sc_state &= ~GDT_POLL_WAIT;
+
+ while (gdt->sc_test_busy(gdt))
+ DELAY(1); /* XXX correct? */
+
+ return (rv);
+}
+
+static int
+gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
+ u_int8_t service, u_int16_t opcode,
+ u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
+{
+ int retries;
+
+ GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
+ gdt, service, opcode, arg1, arg2, arg3));
+
+ bzero(gdt->sc_cmd, GDT_CMD_SZ);
+
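+ /* Build the command in the shared command buffer and poll for
+  * completion, retrying while the firmware reports busy (GDT_S_BSY). */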
+ for (retries = GDT_RETRIES; ; ) {
+ gccb->gc_service = service;
+ gccb->gc_flags = GDT_GCF_INTERNAL;
+
+ gdt->sc_set_sema0(gdt);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
+ gccb->gc_cmd_index);
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
+
+ switch (service) {
+ case GDT_CACHESERVICE:
+ if (opcode == GDT_IOCTL) {
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
+ GDT_IOCTL_SUBFUNC, arg1);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
+ GDT_IOCTL_CHANNEL, arg2);
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
+ GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
+ gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+ } else {
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
+ GDT_CACHE_DEVICENO, (u_int16_t)arg1);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
+ GDT_CACHE_BLOCKNO, arg2);
+ }
+ break;
+
+ case GDT_SCSIRAWSERVICE:
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
+ GDT_RAW_DIRECTION, arg1);
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
+ (u_int8_t)arg2;
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
+ (u_int8_t)arg3;
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
+ (u_int8_t)(arg3 >> 8);
+ }
+
+ gdt->sc_cmd_len = GDT_CMD_SZ;
+ gdt->sc_cmd_off = 0;
+ gdt->sc_cmd_cnt = 0;
+ gdt->sc_copy_cmd(gdt, gccb);
+ gdt->sc_release_event(gdt);
+ DELAY(20);
+ if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
+ return (0);
+ if (gdt->sc_status != GDT_S_BSY || --retries == 0)
+ break;
+ DELAY(1);
+ }
+ return (gdt->sc_status == GDT_S_OK);
+}
+
+static struct gdt_ccb *
+gdt_get_ccb(struct gdt_softc *gdt)
+{
+ struct gdt_ccb *gccb;
+ int lock;
+
+ GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
+
+ lock = splcam();
+ gccb = SLIST_FIRST(&gdt->sc_free_gccb);
+ if (gccb != NULL) {
+ SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
+ SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
+ ++gdt_stat.cmd_index_act;
+ if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
+ gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
+ }
+ splx(lock);
+ return (gccb);
+}
+
+void
+gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
+{
+ int lock;
+
+ GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
+
+ lock = splcam();
+ gccb->gc_flags = GDT_GCF_UNUSED;
+ SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
+ SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
+ --gdt_stat.cmd_index_act;
+ splx(lock);
+ if (gdt->sc_state & GDT_SHUTDOWN)
+ wakeup(gccb);
+}
+
+static u_int32_t
+gdt_ccb_vtop(struct gdt_softc *gdt, struct gdt_ccb *gccb)
+{
+ return (gdt->sc_gccb_busbase
+ + (u_int32_t)((caddr_t)gccb - (caddr_t)gdt->sc_gccbs));
+}
+
+void
+gdt_next(struct gdt_softc *gdt)
+{
+ int lock;
+ union ccb *ccb;
+ gdt_ucmd_t *ucmd;
+ struct cam_sim *sim;
+ int bus, target, lun;
+ int next_cmd;
+
+ struct ccb_scsiio *csio;
+ struct ccb_hdr *ccbh;
+ struct gdt_ccb *gccb = NULL;
+ u_int8_t cmd;
+
+ GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
+
+ lock = splcam();
+ if (gdt->sc_test_busy(gdt)) {
+ if (!(gdt->sc_state & GDT_POLLING)) {
+ splx(lock);
+ return;
+ }
+ while (gdt->sc_test_busy(gdt))
+ DELAY(1);
+ }
+
+ gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
+ next_cmd = TRUE;
+ for (;;) {
+ /* I/Os in queue? controller ready? */
+ if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
+ !TAILQ_FIRST(&gdt->sc_ccb_queue))
+ break;
+
+ /* 1.: I/Os without ccb (IOCTLs) */
+ ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
+ if (ucmd != NULL) {
+ TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
+ if ((gccb = gdt_ioctl_cmd(gdt, ucmd, &lock)) == NULL) {
+ TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
+ break;
+ }
+ break;
+ /* if multiple commands were allowed: if (!gdt_polling) continue; */
+ }
+
+ /* 2.: I/Os with ccb */
+ ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
+ /* always != NULL here, tested above */
+ sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
+ bus = cam_sim_bus(sim);
+ target = ccb->ccb_h.target_id;
+ lun = ccb->ccb_h.target_lun;
+
+ TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
+ --gdt_stat.req_queue_act;
+ /* ccb->ccb_h.func_code is XPT_SCSI_IO */
+ GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
+ ccb->ccb_h.flags));
+ csio = &ccb->csio;
+ ccbh = &ccb->ccb_h;
+ cmd = csio->cdb_io.cdb_bytes[0];
+ /* Max CDB length is 12 bytes */
+ if (csio->cdb_len > 12) {
+ ccbh->status = CAM_REQ_INVALID;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ } else if (bus != gdt->sc_virt_bus) {
+ /* raw service command */
+ if ((gccb = gdt_raw_cmd(gdt, ccb, &lock)) == NULL) {
+ TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
+ sim_links.tqe);
+ ++gdt_stat.req_queue_act;
+ if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
+ gdt_stat.req_queue_max = gdt_stat.req_queue_act;
+ next_cmd = FALSE;
+ }
+ } else if (target >= GDT_MAX_HDRIVES ||
+ !gdt->sc_hdr[target].hd_present || lun != 0) {
+ ccbh->status = CAM_SEL_TIMEOUT;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ } else {
+ /* cache service command */
+ if (cmd == READ_6 || cmd == WRITE_6 ||
+ cmd == READ_10 || cmd == WRITE_10) {
+ if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) {
+ TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
+ sim_links.tqe);
+ ++gdt_stat.req_queue_act;
+ if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
+ gdt_stat.req_queue_max = gdt_stat.req_queue_act;
+ next_cmd = FALSE;
+ }
+ } else {
+ splx(lock);
+ gdt_internal_cache_cmd(gdt, ccb);
+ lock = splcam();
+ }
+ }
+ if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
+ break;
+ }
+ if (gdt->sc_cmd_cnt > 0)
+ gdt->sc_release_event(gdt);
+
+ splx(lock);
+
+ if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
+ gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
+ }
+}
+
+static struct gdt_ccb *
+gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
+{
+ struct gdt_ccb *gccb;
+ struct cam_sim *sim;
+
+ GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
+
+ if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
+ gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
+ gdt->sc_ic_all_size) {
+ GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
+ gdt->sc_hanum));
+ return (NULL);
+ }
+
+ bzero(gdt->sc_cmd, GDT_CMD_SZ);
+
+ gccb = gdt_get_ccb(gdt);
+ if (gccb == NULL) {
+ GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
+ gdt->sc_hanum));
+ return (gccb);
+ }
+ sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
+ gccb->gc_ccb = ccb;
+ gccb->gc_service = GDT_SCSIRAWSERVICE;
+ gccb->gc_flags = GDT_GCF_SCSI;
+
+ if (gdt->sc_cmd_cnt == 0)
+ gdt->sc_set_sema0(gdt);
+ splx(*lock);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
+ gccb->gc_cmd_index);
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
+
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
+ (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
+ GDT_DATA_IN : GDT_DATA_OUT);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
+ ccb->csio.dxfer_len);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
+ ccb->csio.cdb_len);
+ bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
+ ccb->csio.cdb_len);
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
+ ccb->ccb_h.target_id;
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
+ ccb->ccb_h.target_lun;
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
+ cam_sim_bus(sim);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
+ sizeof(struct scsi_sense_data));
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
+ gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+
+ /*
+ * If we have any data to send with this command,
+ * map it into bus space.
+ */
+ /* Only use S/G if there is a transfer */
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
+ if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
+ if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
+ int s;
+ int error;
+
+ /* unlock splcam() beforehand ??? */
+ s = splsoftvm();
+ error =
+ bus_dmamap_load(gdt->sc_buffer_dmat,
+ gccb->gc_dmamap,
+ ccb->csio.data_ptr,
+ ccb->csio.dxfer_len,
+ gdtexecuteccb,
+ gccb, /*flags*/0);
+ if (error == EINPROGRESS) {
+ xpt_freeze_simq(sim, 1);
+ gccb->gc_state |= CAM_RELEASE_SIMQ;
+ }
+ splx(s);
+ } else {
+ struct bus_dma_segment seg;
+
+ /* Pointer to physical buffer */
+ seg.ds_addr =
+ (bus_addr_t)ccb->csio.data_ptr;
+ seg.ds_len = ccb->csio.dxfer_len;
+ gdtexecuteccb(gccb, &seg, 1, 0);
+ }
+ } else {
+ struct bus_dma_segment *segs;
+
+ if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
+ panic("iir%d: iir_action - Physical "
+ "segment pointers unsupported", gdt->sc_hanum);
+
+ if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
+ panic("iir%d: iir_action - Virtual "
+ "segment addresses unsupported", gdt->sc_hanum);
+
+ /* Just use the segments provided */
+ segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
+ gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
+ }
+ } else {
+ gdtexecuteccb(gccb, NULL, 0, 0);
+ }
+
+ *lock = splcam();
+ return (gccb);
+}
+
+static struct gdt_ccb *
+gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
+{
+ struct gdt_ccb *gccb;
+ struct cam_sim *sim;
+ u_int8_t *cmdp;
+ u_int16_t opcode;
+ u_int32_t blockno, blockcnt;
+
+ GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
+
+ if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
+ gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
+ gdt->sc_ic_all_size) {
+ GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
+ gdt->sc_hanum));
+ return (NULL);
+ }
+
+ bzero(gdt->sc_cmd, GDT_CMD_SZ);
+
+ gccb = gdt_get_ccb(gdt);
+ if (gccb == NULL) {
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
+ gdt->sc_hanum));
+ return (gccb);
+ }
+ sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
+ gccb->gc_ccb = ccb;
+ gccb->gc_service = GDT_CACHESERVICE;
+ gccb->gc_flags = GDT_GCF_SCSI;
+
+ if (gdt->sc_cmd_cnt == 0)
+ gdt->sc_set_sema0(gdt);
+ splx(*lock);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
+ gccb->gc_cmd_index);
+ cmdp = ccb->csio.cdb_io.cdb_bytes;
+ opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
+ if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
+ opcode = GDT_WRITE_THR;
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
+
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
+ ccb->ccb_h.target_id);
+ if (ccb->csio.cdb_len == 6) {
+ struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
+ blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
+ blockcnt = rw->length ? rw->length : 0x100;
+ } else {
+ struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
+ blockno = scsi_4btoul(rw->addr);
+ blockcnt = scsi_2btoul(rw->length);
+ }
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
+ blockno);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
+ blockcnt);
+
+ /*
+ * If we have any data to send with this command,
+ * map it into bus space.
+ */
+ /* Only use S/G if there is a transfer */
+ if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
+ if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
+ int s;
+ int error;
+
+ /* unlock splcam() beforehand ??? */
+ s = splsoftvm();
+ error =
+ bus_dmamap_load(gdt->sc_buffer_dmat,
+ gccb->gc_dmamap,
+ ccb->csio.data_ptr,
+ ccb->csio.dxfer_len,
+ gdtexecuteccb,
+ gccb, /*flags*/0);
+ if (error == EINPROGRESS) {
+ xpt_freeze_simq(sim, 1);
+ gccb->gc_state |= CAM_RELEASE_SIMQ;
+ }
+ splx(s);
+ } else {
+ struct bus_dma_segment seg;
+
+ /* Pointer to physical buffer */
+ seg.ds_addr =
+ (bus_addr_t)ccb->csio.data_ptr;
+ seg.ds_len = ccb->csio.dxfer_len;
+ gdtexecuteccb(gccb, &seg, 1, 0);
+ }
+ } else {
+ struct bus_dma_segment *segs;
+
+ if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
+ panic("iir%d: iir_action - Physical "
+ "segment pointers unsupported", gdt->sc_hanum);
+
+ if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
+ panic("iir%d: iir_action - Virtual "
+ "segment addresses unsupported", gdt->sc_hanum);
+
+ /* Just use the segments provided */
+ segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
+ gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
+ }
+
+ *lock = splcam();
+ return (gccb);
+}
+
+static struct gdt_ccb *
+gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock)
+{
+ struct gdt_ccb *gccb;
+ u_int32_t cnt;
+
+ GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
+
+ bzero(gdt->sc_cmd, GDT_CMD_SZ);
+
+ gccb = gdt_get_ccb(gdt);
+ if (gccb == NULL) {
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
+ gdt->sc_hanum));
+ return (gccb);
+ }
+ gccb->gc_ucmd = ucmd;
+ gccb->gc_service = ucmd->service;
+ gccb->gc_flags = GDT_GCF_IOCTL;
+
+ /* check DPMEM space, copy data buffer from user space */
+ if (ucmd->service == GDT_CACHESERVICE) {
+ if (ucmd->OpCode == GDT_IOCTL) {
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
+ sizeof(u_int32_t));
+ cnt = ucmd->u.ioctl.param_size;
+ if (cnt > GDT_SCRATCH_SZ) {
+ printf("iir%d: Scratch buffer too small (%d/%d)\n",
+ gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
+ gdt_free_ccb(gdt, gccb);
+ return (NULL);
+ }
+ } else {
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
+ GDT_SG_SZ, sizeof(u_int32_t));
+ cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
+ if (cnt > GDT_SCRATCH_SZ) {
+ printf("iir%d: Scratch buffer too small (%d/%d)\n",
+ gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
+ gdt_free_ccb(gdt, gccb);
+ return (NULL);
+ }
+ }
+ } else {
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
+ GDT_SG_SZ, sizeof(u_int32_t));
+ cnt = ucmd->u.raw.sdlen;
+ if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
+ printf("iir%d: Scratch buffer too small (%d/%d)\n",
+ gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
+ gdt_free_ccb(gdt, gccb);
+ return (NULL);
+ }
+ }
+ if (cnt != 0)
+ bcopy(ucmd->data, gccb->gc_scratch, cnt);
+
+ if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
+ gdt->sc_ic_all_size) {
+ GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
+ gdt->sc_hanum));
+ gdt_free_ccb(gdt, gccb);
+ return (NULL);
+ }
+
+ if (gdt->sc_cmd_cnt == 0)
+ gdt->sc_set_sema0(gdt);
+ splx(*lock);
+
+ /* fill cmd structure */
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
+ gccb->gc_cmd_index);
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
+ ucmd->OpCode);
+
+ if (ucmd->service == GDT_CACHESERVICE) {
+ if (ucmd->OpCode == GDT_IOCTL) {
+ /* IOCTL */
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
+ ucmd->u.ioctl.param_size);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
+ ucmd->u.ioctl.subfunc);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
+ ucmd->u.ioctl.channel);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
+ gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+ } else {
+ /* cache service command */
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
+ ucmd->u.cache.DeviceNo);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
+ ucmd->u.cache.BlockNo);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
+ ucmd->u.cache.BlockCnt);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
+ 0xffffffffUL);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
+ 1);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
+ GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
+ GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
+ }
+ } else {
+ /* raw service command */
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
+ ucmd->u.raw.direction);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
+ 0xffffffffUL);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
+ ucmd->u.raw.sdlen);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
+ ucmd->u.raw.clen);
+ bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
+ 12);
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
+ ucmd->u.raw.target;
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
+ ucmd->u.raw.lun;
+ gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
+ ucmd->u.raw.bus;
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
+ ucmd->u.raw.sense_len);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
+ gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
+ 1);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
+ GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
+ GDT_SG_LEN, ucmd->u.raw.sdlen);
+ }
+
+ *lock = splcam();
+ gdt_stat.sg_count_act = 1;
+ gdt->sc_copy_cmd(gdt, gccb);
+ return (gccb);
+}
+
+static void
+gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
+{
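+ /* Emulate common SCSI commands (INQUIRY, MODE SENSE, READ CAPACITY, ...)
+  * for host drives on the virtual bus without involving the firmware. */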
+ int t;
+
+ t = ccb->ccb_h.target_id;
+ GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
+ gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
+
+ switch (ccb->csio.cdb_io.cdb_bytes[0]) {
+ case TEST_UNIT_READY:
+ case START_STOP:
+ break;
+ case REQUEST_SENSE:
+ GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
+ break;
+ case INQUIRY:
+ {
+ struct scsi_inquiry_data *inq;
+
+ inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
+ bzero(inq, sizeof(struct scsi_inquiry_data));
+ inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
+ T_CDROM : T_DIRECT;
+ inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
+ inq->version = SCSI_REV_2;
+ inq->response_format = 2;
+ inq->additional_length = 32;
+ inq->flags = SID_CmdQue | SID_Sync;
+ strcpy(inq->vendor, "IIR ");
+ sprintf(inq->product, "Host Drive #%02d", t);
+ strcpy(inq->revision, " ");
+ break;
+ }
+ case MODE_SENSE_6:
+ {
+ struct mpd_data {
+ struct scsi_mode_hdr_6 hd;
+ struct scsi_mode_block_descr bd;
+ struct scsi_control_page cp;
+ } *mpd;
+ u_int8_t page;
+
+ mpd = (struct mpd_data *)ccb->csio.data_ptr;
+ bzero(mpd, sizeof(struct mpd_data));
+ mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
+ sizeof(struct scsi_mode_block_descr);
+ mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
+ mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
+ mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
+ mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
+ mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
+ page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
+ switch (page) {
+ default:
+ GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
+ break;
+ }
+ break;
+ }
+ case READ_CAPACITY:
+ {
+ struct scsi_read_capacity_data *rcd;
+
+ rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
+ bzero(rcd, sizeof(struct scsi_read_capacity_data));
+ scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
+ scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
+ break;
+ }
+ default:
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
+ ccb->csio.cdb_io.cdb_bytes[0]));
+ break;
+ }
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+}
+
+static void
+gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
+{
+ bus_addr_t *busaddrp;
+
+ busaddrp = (bus_addr_t *)arg;
+ *busaddrp = dm_segs->ds_addr;
+}
+
+static void
+gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
+{
+ struct gdt_ccb *gccb;
+ union ccb *ccb;
+ struct gdt_softc *gdt;
+ int i, lock;
+
+ lock = splcam();
+
+ gccb = (struct gdt_ccb *)arg;
+ ccb = gccb->gc_ccb;
+ gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
+
+ GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
+ gdt, gccb, dm_segs, nseg, error));
+ gdt_stat.sg_count_act = nseg;
+ if (nseg > gdt_stat.sg_count_max)
+ gdt_stat.sg_count_max = nseg;
+
+ /* Copy the segments into our SG list */
+ if (gccb->gc_service == GDT_CACHESERVICE) {
+ for (i = 0; i < nseg; ++i) {
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
+ i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
+ i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
+ dm_segs++;
+ }
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
+ nseg);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
+ 0xffffffffUL);
+
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
+ nseg * GDT_SG_SZ, sizeof(u_int32_t));
+ } else {
+ for (i = 0; i < nseg; ++i) {
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
+ i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
+ i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
+ dm_segs++;
+ }
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
+ nseg);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
+ 0xffffffffUL);
+
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
+ nseg * GDT_SG_SZ, sizeof(u_int32_t));
+ }
+
+ if (nseg != 0) {
+ bus_dmasync_op_t op;
+
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
+ op = BUS_DMASYNC_PREREAD;
+ else
+ op = BUS_DMASYNC_PREWRITE;
+ bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
+ }
+
+ /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
+ * because command semaphore is already set!
+ */
+
+ ccb->ccb_h.status |= CAM_SIM_QUEUED;
+ /* timeout handling */
+ ccb->ccb_h.timeout_ch =
+ timeout(iir_timeout, (caddr_t)gccb,
+ (ccb->ccb_h.timeout * hz) / 1000);
+
+ gdt->sc_copy_cmd(gdt, gccb);
+ splx(lock);
+}
+
+
+static void
+iir_action( struct cam_sim *sim, union ccb *ccb )
+{
+ struct gdt_softc *gdt;
+ int lock, bus, target, lun;
+
+ gdt = (struct gdt_softc *)cam_sim_softc( sim );
+ ccb->ccb_h.ccb_sim_ptr = sim;
+ bus = cam_sim_bus(sim);
+ target = ccb->ccb_h.target_id;
+ lun = ccb->ccb_h.target_lun;
+ GDT_DPRINTF(GDT_D_CMD,
+ ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
+ gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
+ bus, target, lun));
+ ++gdt_stat.io_count_act;
+ if (gdt_stat.io_count_act > gdt_stat.io_count_max)
+ gdt_stat.io_count_max = gdt_stat.io_count_act;
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ lock = splcam();
+ TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
+ ++gdt_stat.req_queue_act;
+ if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
+ gdt_stat.req_queue_max = gdt_stat.req_queue_act;
+ splx(lock);
+ gdt_next(gdt);
+ break;
+ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
+ case XPT_ABORT: /* Abort the specified CCB */
+ /* XXX Implement */
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ case XPT_SET_TRAN_SETTINGS:
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ case XPT_GET_TRAN_SETTINGS:
+ /* Get default/user set transfer settings for the target */
+ {
+ struct ccb_trans_settings *cts;
+ u_int target_mask;
+
+ cts = &ccb->cts;
+ target_mask = 0x01 << target;
+ if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
+ cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
+ cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ cts->sync_period = 25; /* 10MHz */
+ if (cts->sync_period != 0)
+ cts->sync_offset = 15;
+
+ cts->valid = CCB_TRANS_SYNC_RATE_VALID
+ | CCB_TRANS_SYNC_OFFSET_VALID
+ | CCB_TRANS_BUS_WIDTH_VALID
+ | CCB_TRANS_DISC_VALID
+ | CCB_TRANS_TQ_VALID;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ } else {
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ }
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ {
+ struct ccb_calc_geometry *ccg;
+ u_int32_t secs_per_cylinder;
+
+ ccg = &ccb->ccg;
+ ccg->heads = gdt->sc_hdr[target].hd_heads;
+ ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
+ secs_per_cylinder = ccg->heads * ccg->secs_per_track;
+ ccg->cylinders = ccg->volume_size / secs_per_cylinder;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_RESET_BUS: /* Reset the specified SCSI bus */
+ {
+ /* XXX Implement */
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_TERM_IO: /* Terminate the I/O process */
+ /* XXX Implement */
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ case XPT_PATH_INQ: /* Path routing inquiry */
+ {
+ struct ccb_pathinq *cpi = &ccb->cpi;
+
+ cpi->version_num = 1;
+ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
+ cpi->hba_inquiry |= PI_WIDE_16;
+ cpi->target_sprt = 1;
+ cpi->hba_misc = 0;
+ cpi->hba_eng_cnt = 0;
+ if (bus == gdt->sc_virt_bus)
+ cpi->max_target = GDT_MAX_HDRIVES - 1;
+ else if (gdt->sc_class & GDT_FC)
+ cpi->max_target = GDT_MAXID_FC - 1;
+ else
+ cpi->max_target = GDT_MAXID - 1;
+ cpi->max_lun = 7;
+ cpi->unit_number = cam_sim_unit(sim);
+ cpi->bus_id = bus;
+ cpi->initiator_id =
+ (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
+ cpi->base_transfer_speed = 3300;
+ strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
+ strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->ccb_h.status = CAM_REQ_CMP;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ }
+ default:
+ GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
+ gdt, ccb->ccb_h.func_code));
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ break;
+ }
+}
+
+static void
+iir_poll( struct cam_sim *sim )
+{
+ struct gdt_softc *gdt;
+
+ gdt = (struct gdt_softc *)cam_sim_softc( sim );
+ GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
+ iir_intr(gdt);
+}
+
+static void
+iir_timeout(void *arg)
+{
+ GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
+}
+
+static void
+iir_watchdog(void *arg)
+{
+ struct gdt_softc *gdt;
+
+ gdt = (struct gdt_softc *)arg;
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
+
+ {
+ int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
+ struct gdt_ccb *p;
+ struct ccb_hdr *h;
+ struct gdt_ucmd *u;
+
+ for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
+ h = TAILQ_NEXT(h, sim_links.tqe))
+ ccbs++;
+ for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
+ u = TAILQ_NEXT(u, links))
+ ucmds++;
+ for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
+ p = SLIST_NEXT(p, sle))
+ frees++;
+ for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
+ p = SLIST_NEXT(p, sle))
+ pends++;
+
+ GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
+ ccbs, ucmds, frees, pends));
+ }
+
+ timeout(iir_watchdog, (caddr_t)gdt, hz * 15);
+}
+
+static void
+iir_shutdown( void *arg, int howto )
+{
+ struct gdt_softc *gdt;
+ struct gdt_ccb *gccb;
+ gdt_ucmd_t *ucmd;
+ int lock, i;
+
+ gdt = (struct gdt_softc *)arg;
+ GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
+
+ printf("iir%d: Flushing all Host Drives. Please wait ... ",
+ gdt->sc_hanum);
+
+ /* allocate ucmd buffer */
+ ucmd = malloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_NOWAIT);
+ if (ucmd == NULL) {
+ printf("iir%d: iir_shutdown(): Cannot allocate resource\n",
+ gdt->sc_hanum);
+ return;
+ }
+ bzero(ucmd, sizeof(gdt_ucmd_t));
+
+ /* wait for pending IOs */
+ lock = splcam();
+ gdt->sc_state = GDT_SHUTDOWN;
+ splx(lock);
+ if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
+ (void) tsleep((void *)gccb, PCATCH | PRIBIO, "iirshw", 100 * hz);
+
+ /* flush */
+ for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
+ if (gdt->sc_hdr[i].hd_present) {
+ ucmd->service = GDT_CACHESERVICE;
+ ucmd->OpCode = GDT_FLUSH;
+ ucmd->u.cache.DeviceNo = i;
+ lock = splcam();
+ TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
+ ucmd->complete_flag = FALSE;
+ splx(lock);
+ gdt_next(gdt);
+ if (!ucmd->complete_flag)
+ (void) tsleep((void *)ucmd, PCATCH|PRIBIO, "iirshw", 10*hz);
+ }
+ }
+
+ free(ucmd, M_DEVBUF);
+ printf("Done.\n");
+}
+
+void
+iir_intr(void *arg)
+{
+ struct gdt_softc *gdt = arg;
+ struct gdt_intr_ctx ctx;
+ int lock = 0;
+ struct gdt_ccb *gccb;
+ gdt_ucmd_t *ucmd;
+ u_int32_t cnt;
+
+ GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
+
+ /* If polling and we were not called from gdt_wait, just return */
+ if ((gdt->sc_state & GDT_POLLING) &&
+ !(gdt->sc_state & GDT_POLL_WAIT))
+ return;
+
+ if (!(gdt->sc_state & GDT_POLLING))
+ lock = splcam();
+ gdt_wait_index = 0;
+
+ ctx.istatus = gdt->sc_get_status(gdt);
+ if (!ctx.istatus) {
+ if (!(gdt->sc_state & GDT_POLLING))
+ splx(lock);
+ gdt->sc_status = GDT_S_NO_STATUS;
+ return;
+ }
+
+ gdt->sc_intr(gdt, &ctx);
+
+ gdt->sc_status = ctx.cmd_status;
+ gdt->sc_service = ctx.service;
+ gdt->sc_info = ctx.info;
+ gdt->sc_info2 = ctx.info2;
+
+ if (gdt->sc_state & GDT_POLL_WAIT) {
+ gdt_wait_gdt = gdt;
+ gdt_wait_index = ctx.istatus;
+ }
+
+ if (ctx.istatus == GDT_ASYNCINDEX) {
+ gdt_async_event(gdt, ctx.service);
+ if (!(gdt->sc_state & GDT_POLLING))
+ splx(lock);
+ return;
+ }
+ if (ctx.istatus == GDT_SPEZINDEX) {
+ GDT_DPRINTF(GDT_D_INVALID,
+ ("iir%d: Service unknown or not initialized!\n",
+ gdt->sc_hanum));
+ gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
+ gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
+ gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
+ if (!(gdt->sc_state & GDT_POLLING))
+ splx(lock);
+ return;
+ }
+
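+ /* Any other index belongs to a regular command; gc_cmd_index was
+  * assigned as array index + 2 in iir_init(), so map it back here. */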
+ gccb = &gdt->sc_gccbs[ctx.istatus - 2];
+ ctx.service = gccb->gc_service;
+
+ switch (gccb->gc_flags) {
+ case GDT_GCF_UNUSED:
+ GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
+ gdt->sc_hanum, ctx.istatus));
+ gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
+ gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
+ gdt->sc_dvr.eu.driver.index = ctx.istatus;
+ gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
+ gdt_free_ccb(gdt, gccb);
+ /* fallthrough */
+
+ case GDT_GCF_INTERNAL:
+ if (!(gdt->sc_state & GDT_POLLING))
+ splx(lock);
+ break;
+
+ case GDT_GCF_IOCTL:
+ ucmd = gccb->gc_ucmd;
+ if (gdt->sc_status == GDT_S_BSY) {
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
+ gdt, gccb));
+ TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
+ if (!(gdt->sc_state & GDT_POLLING))
+ splx(lock);
+ } else {
+ ucmd->status = gdt->sc_status;
+ ucmd->info = gdt->sc_info;
+ ucmd->complete_flag = TRUE;
+ if (ucmd->service == GDT_CACHESERVICE) {
+ if (ucmd->OpCode == GDT_IOCTL) {
+ cnt = ucmd->u.ioctl.param_size;
+ if (cnt != 0)
+ bcopy(gccb->gc_scratch, ucmd->data, cnt);
+ } else {
+ cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
+ if (cnt != 0)
+ bcopy(gccb->gc_scratch, ucmd->data, cnt);
+ }
+ } else {
+ cnt = ucmd->u.raw.sdlen;
+ if (cnt != 0)
+ bcopy(gccb->gc_scratch, ucmd->data, cnt);
+ if (ucmd->u.raw.sense_len != 0)
+ bcopy(gccb->gc_scratch, ucmd->data, cnt);
+ }
+ gdt_free_ccb(gdt, gccb);
+ if (!(gdt->sc_state & GDT_POLLING))
+ splx(lock);
+ /* wakeup */
+ wakeup(ucmd);
+ }
+ gdt_next(gdt);
+ break;
+
+ default:
+ gdt_free_ccb(gdt, gccb);
+ gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
+ if (!(gdt->sc_state & GDT_POLLING))
+ splx(lock);
+ gdt_next(gdt);
+ break;
+ }
+}
+
+int
+gdt_async_event(struct gdt_softc *gdt, int service)
+{
+ struct gdt_ccb *gccb;
+
+ GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
+
+ if (service == GDT_SCREENSERVICE) {
+ if (gdt->sc_status == GDT_MSG_REQUEST) {
+ while (gdt->sc_test_busy(gdt))
+ DELAY(1);
+ bzero(gdt->sc_cmd, GDT_CMD_SZ);
+ gccb = gdt_get_ccb(gdt);
+ if (gccb == NULL) {
+ printf("iir%d: No free command index found\n",
+ gdt->sc_hanum);
+ return (1);
+ }
+ gccb->gc_service = service;
+ gccb->gc_flags = GDT_GCF_SCREEN;
+ gdt->sc_set_sema0(gdt);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
+ gccb->gc_cmd_index);
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
+ GDT_MSG_INV_HANDLE);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
+ gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+ gdt->sc_cmd_off = 0;
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
+ sizeof(u_int32_t));
+ gdt->sc_cmd_cnt = 0;
+ gdt->sc_copy_cmd(gdt, gccb);
+ printf("iir%d: [PCI %d/%d] ",
+ gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot);
+ gdt->sc_release_event(gdt);
+ }
+
+ } else {
+ if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
+ gdt->sc_dvr.size = 0;
+ gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
+ gdt->sc_dvr.eu.async.status = gdt->sc_status;
+ /* severity and event_string already set! */
+ } else {
+ gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
+ gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
+ gdt->sc_dvr.eu.async.service = service;
+ gdt->sc_dvr.eu.async.status = gdt->sc_status;
+ gdt->sc_dvr.eu.async.info = gdt->sc_info;
+ *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2;
+ }
+ gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
+ printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
+ }
+
+ return (0);
+}
+
+int
+gdt_sync_event(struct gdt_softc *gdt, int service,
+ u_int8_t index, struct gdt_ccb *gccb)
+{
+ union ccb *ccb;
+ bus_dmasync_op_t op;
+
+ GDT_DPRINTF(GDT_D_INTR,
+ ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
+
+ ccb = gccb->gc_ccb;
+
+ if (service == GDT_SCREENSERVICE) {
+ u_int32_t msg_len;
+
+ msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
+ if (msg_len)
+ if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
+ gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
+ gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
+ printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
+ }
+
+ if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
+ !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
+ while (gdt->sc_test_busy(gdt))
+ DELAY(1);
+ bzero(gdt->sc_cmd, GDT_CMD_SZ);
+ gccb = gdt_get_ccb(gdt);
+ if (gccb == NULL) {
+ printf("iir%d: No free command index found\n",
+ gdt->sc_hanum);
+ return (1);
+ }
+ gccb->gc_service = service;
+ gccb->gc_flags = GDT_GCF_SCREEN;
+ gdt->sc_set_sema0(gdt);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
+ gccb->gc_cmd_index);
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
+ gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
+ gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+ gdt->sc_cmd_off = 0;
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
+ sizeof(u_int32_t));
+ gdt->sc_cmd_cnt = 0;
+ gdt->sc_copy_cmd(gdt, gccb);
+ gdt->sc_release_event(gdt);
+ return (0);
+ }
+
+ if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
+ gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
+ /* default answers (getchar() not possible) */
+ if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
+ gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
+ gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
+ gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
+ } else {
+ gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
+ gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
+ gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
+ gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
+ gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
+ }
+ gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
+ gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
+ while (gdt->sc_test_busy(gdt))
+ DELAY(1);
+ bzero(gdt->sc_cmd, GDT_CMD_SZ);
+ gccb = gdt_get_ccb(gdt);
+ if (gccb == NULL) {
+ printf("iir%d: No free command index found\n",
+ gdt->sc_hanum);
+ return (1);
+ }
+ gccb->gc_service = service;
+ gccb->gc_flags = GDT_GCF_SCREEN;
+ gdt->sc_set_sema0(gdt);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
+ gccb->gc_cmd_index);
+ gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
+ gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
+ gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
+ gdt_ccb_vtop(gdt, gccb) +
+ offsetof(struct gdt_ccb, gc_scratch[0]));
+ gdt->sc_cmd_off = 0;
+ gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
+ sizeof(u_int32_t));
+ gdt->sc_cmd_cnt = 0;
+ gdt->sc_copy_cmd(gdt, gccb);
+ gdt->sc_release_event(gdt);
+ return (0);
+ }
+ printf("\n");
+ return (0);
+ } else {
+ untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch);
+ if (gdt->sc_status == GDT_S_BSY) {
+ GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
+ gdt, gccb));
+ TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
+ ++gdt_stat.req_queue_act;
+ if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
+ gdt_stat.req_queue_max = gdt_stat.req_queue_act;
+ return (2);
+ }
+
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
+ op = BUS_DMASYNC_POSTREAD;
+ else
+ op = BUS_DMASYNC_POSTWRITE;
+ bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
+
+ ccb->csio.resid = 0;
+ if (gdt->sc_status == GDT_S_OK) {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ } else {
+ /* error */
+ if (gccb->gc_service == GDT_CACHESERVICE) {
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
+ ccb->csio.sense_data.error_code =
+ SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
+ ccb->csio.sense_data.flags = SSD_KEY_NOT_READY;
+
+ gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
+ gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum;
+ gdt->sc_dvr.eu.sync.service = service;
+ gdt->sc_dvr.eu.sync.status = gdt->sc_status;
+ gdt->sc_dvr.eu.sync.info = gdt->sc_info;
+ gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
+ if (gdt->sc_status >= 0x8000)
+ gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
+ else
+ gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
+ } else {
+ /* raw service */
+ if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
+ ccb->ccb_h.status = CAM_SEL_TIMEOUT;
+ } else {
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
+ ccb->csio.scsi_status = gdt->sc_info;
+ bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
+ ccb->csio.sense_len);
+ }
+ }
+ }
+ --gdt_stat.io_count_act;
+ xpt_done(ccb);
+ }
+ return (0);
+}
+
+/* Controller event handling functions */
+gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx,
+ gdt_evt_data *evt)
+{
+ gdt_evt_str *e;
+ struct timeval tv;
+
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
+ if (source == 0) /* no source -> no event */
+ return 0;
+
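+ /* Coalesce with the newest entry if it repeats the same event;
+  * otherwise advance the ring, overwriting the oldest entry when full. */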
+ if (ebuffer[elastidx].event_source == source &&
+ ebuffer[elastidx].event_idx == idx &&
+ ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
+ !memcmp((char *)&ebuffer[elastidx].event_data.eu,
+ (char *)&evt->eu, evt->size)) ||
+ (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
+ !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
+ (char *)&evt->event_string)))) {
+ e = &ebuffer[elastidx];
+ getmicrotime(&tv);
+ e->last_stamp = tv.tv_sec;
+ ++e->same_count;
+ } else {
+ if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
+ ++elastidx;
+ if (elastidx == GDT_MAX_EVENTS)
+ elastidx = 0;
+ if (elastidx == eoldidx) { /* reached mark ? */
+ ++eoldidx;
+ if (eoldidx == GDT_MAX_EVENTS)
+ eoldidx = 0;
+ }
+ }
+ e = &ebuffer[elastidx];
+ e->event_source = source;
+ e->event_idx = idx;
+ getmicrotime(&tv);
+ e->first_stamp = e->last_stamp = tv.tv_sec;
+ e->same_count = 1;
+ e->event_data = *evt;
+ e->application = 0;
+ }
+ return e;
+}
+
+int gdt_read_event(int handle, gdt_evt_str *estr)
+{
+ gdt_evt_str *e;
+ int eindex, lock;
+
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
+ lock = splcam();
+ if (handle == -1)
+ eindex = eoldidx;
+ else
+ eindex = handle;
+ estr->event_source = 0;
+
+ if (eindex >= GDT_MAX_EVENTS) {
+ splx(lock);
+ return eindex;
+ }
+ e = &ebuffer[eindex];
+ if (e->event_source != 0) {
+ if (eindex != elastidx) {
+ if (++eindex == GDT_MAX_EVENTS)
+ eindex = 0;
+ } else {
+ eindex = -1;
+ }
+ memcpy(estr, e, sizeof(gdt_evt_str));
+ }
+ splx(lock);
+ return eindex;
+}
+
+void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
+{
+ gdt_evt_str *e;
+ int found = FALSE;
+ int eindex, lock;
+
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
+ lock = splcam();
+ eindex = eoldidx;
+ for (;;) {
+ e = &ebuffer[eindex];
+ if (e->event_source == 0)
+ break;
+ if ((e->application & application) == 0) {
+ e->application |= application;
+ found = TRUE;
+ break;
+ }
+ if (eindex == elastidx)
+ break;
+ if (++eindex == GDT_MAX_EVENTS)
+ eindex = 0;
+ }
+ if (found)
+ memcpy(estr, e, sizeof(gdt_evt_str));
+ else
+ estr->event_source = 0;
+ splx(lock);
+}
+
+void gdt_clear_events(void)
+{
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
+
+ eoldidx = elastidx = 0;
+ ebuffer[0].event_source = 0;
+}
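For illustration only (not part of the patch): a minimal sketch of how the event API above is meant to be used from elsewhere in the driver. The helper name and the event contents are invented; gdt_store_event() records or coalesces an entry, and gdt_read_event() walks the ring from the oldest entry until it returns -1 for the newest one.

/* Illustrative sketch only: record one driver event, then drain the ring. */
static void
iir_example_drain_events(void)
{
        gdt_evt_data evt;
        gdt_evt_str estr;
        int handle;

        /* Record a driver-generated event for the cache service, index 42. */
        bzero(&evt, sizeof(evt));
        evt.size = sizeof(evt.eu.driver);
        evt.eu.driver.ionode = 0;
        evt.eu.driver.service = GDT_CACHESERVICE;
        evt.eu.driver.index = 42;
        gdt_store_event(GDT_ES_DRIVER, 0, &evt);

        /* Walk the ring: -1 starts at the oldest entry; -1 back means "was newest". */
        handle = -1;
        for (;;) {
                handle = gdt_read_event(handle, &estr);
                if (estr.event_source == 0)
                        break;
                printf("iir: event source %d idx %d repeated %d time(s)\n",
                    estr.event_source, estr.event_idx, estr.same_count);
                if (handle == -1)
                        break;
        }
}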
diff --git a/sys/dev/iir/iir.h b/sys/dev/iir/iir.h
new file mode 100644
index 0000000..b165c84
--- /dev/null
+++ b/sys/dev/iir/iir.h
@@ -0,0 +1,712 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 2000-01 Intel Corporation
+ * All Rights Reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ *
+ * iir.h: Definitions/Constants used by the Intel Integrated RAID driver
+ *
+ * Written by: Achim Leubner <achim.leubner@intel.com>
+ * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
+ *
+ * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers.
+ * FreeBSD.ORG; Great O/S to work on and for.
+ */
+
+
+#ident "$Id: iir.h 1.3 2001/07/03 11:28:57 achim Exp $"
+
+#ifndef _IIR_H
+#define _IIR_H
+
+#define IIR_DRIVER_VERSION 1
+#define IIR_DRIVER_SUBVERSION 1
+
+#define IIR_CDEV_MAJOR 164
+
+#define GDT_VENDOR_ID 0x1119
+#define GDT_DEVICE_ID_MIN 0x100
+#define GDT_DEVICE_ID_MAX 0x2ff
+#define GDT_DEVICE_ID_NEWRX 0x300
+
+#define INTEL_VENDOR_ID 0x8086
+#define INTEL_DEVICE_ID_IIR 0x600
+
+#define GDT_MAXBUS 6 /* XXX Why not 5? */
+#define GDT_MAX_HDRIVES 100 /* max 100 host drives */
+#define GDT_MAXID_FC 127 /* Fibre-channel IDs */
+#define GDT_MAXID 16 /* SCSI IDs */
+#define GDT_MAXOFFSETS 128
+#define GDT_MAXSG 32 /* Max. s/g elements */
+#define GDT_PROTOCOL_VERSION 1
+#define GDT_LINUX_OS 8 /* Used for cache optimization */
+#define GDT_SCATTER_GATHER 1 /* s/g feature */
+#define GDT_SECS32 0x1f /* round capacity */
+#define GDT_LOCALBOARD 0 /* Board node always 0 */
+#define GDT_MAXCMDS 124
+#define GDT_SECTOR_SIZE 0x200 /* Always 512 bytes for cache devs */
+#define GDT_MAX_EVENTS 0x100 /* event buffer */
+
+/* DPMEM constants */
+#define GDT_MPR_MAGIC 0xc0ffee11
+#define GDT_IC_HEADER_BYTES 48
+#define GDT_IC_QUEUE_BYTES 4
+#define GDT_DPMEM_COMMAND_OFFSET \
+ (GDT_IC_HEADER_BYTES + GDT_IC_QUEUE_BYTES * GDT_MAXOFFSETS)
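(With the values above this works out to 48 + 4 * 128 = 560 bytes, i.e. 0x230: commands start that far into the interface area, after the header and the per-offset queue slots.)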
+
+/* geometry constants */
+#define GDT_MAXCYLS 1024
+#define GDT_HEADS 64
+#define GDT_SECS 32 /* mapping 64*32 */
+#define GDT_MEDHEADS 127
+#define GDT_MEDSECS 63 /* mapping 127*63 */
+#define GDT_BIGHEADS 255
+#define GDT_BIGSECS 63 /* mapping 255*63 */
+
+/* data direction raw service */
+#define GDT_DATA_IN 0x01000000L
+#define GDT_DATA_OUT 0x00000000L
+
+/* Cache/raw service commands */
+#define GDT_INIT 0 /* service initialization */
+#define GDT_READ 1 /* read command */
+#define GDT_WRITE 2 /* write command */
+#define GDT_INFO 3 /* information about devices */
+#define GDT_FLUSH 4 /* flush dirty cache buffers */
+#define GDT_IOCTL 5 /* ioctl command */
+#define GDT_DEVTYPE 9 /* additional information */
+#define GDT_MOUNT 10 /* mount cache device */
+#define GDT_UNMOUNT 11 /* unmount cache device */
+#define GDT_SET_FEAT 12 /* set features (scatter/gather) */
+#define GDT_GET_FEAT 13 /* get features */
+#define GDT_WRITE_THR 16 /* write through */
+#define GDT_READ_THR 17 /* read through */
+#define GDT_EXT_INFO 18 /* extended info */
+#define GDT_RESET 19 /* controller reset */
+#define GDT_FREEZE_IO 25 /* freeze all IOs */
+#define GDT_UNFREEZE_IO 26 /* unfreeze all IOs */
+
+/* Additional raw service commands */
+#define GDT_RESERVE 14 /* reserve device to raw service */
+#define GDT_RELEASE 15 /* release device */
+#define GDT_RESERVE_ALL 16 /* reserve all devices */
+#define GDT_RELEASE_ALL 17 /* release all devices */
+#define GDT_RESET_BUS 18 /* reset bus */
+#define GDT_SCAN_START 19 /* start device scan */
+#define GDT_SCAN_END 20 /* stop device scan */
+
+/* IOCTL command defines */
+#define GDT_SCSI_DR_INFO 0x00 /* SCSI drive info */
+#define GDT_SCSI_CHAN_CNT 0x05 /* SCSI channel count */
+#define GDT_SCSI_DR_LIST 0x06 /* SCSI drive list */
+#define GDT_SCSI_DEF_CNT 0x15 /* grown/primary defects */
+#define GDT_DSK_STATISTICS 0x4b /* SCSI disk statistics */
+#define GDT_IOCHAN_DESC 0x5d /* description of IO channel */
+#define GDT_IOCHAN_RAW_DESC 0x5e /* description of raw IO channel */
+
+#define GDT_L_CTRL_PATTERN 0x20000000 /* SCSI IOCTL mask */
+#define GDT_ARRAY_INFO 0x12 /* array drive info */
+#define GDT_ARRAY_DRV_LIST 0x0f /* array drive list */
+#define GDT_LA_CTRL_PATTERN 0x10000000 /* array IOCTL mask */
+#define GDT_CACHE_DRV_CNT 0x01 /* cache drive count */
+#define GDT_CACHE_DRV_LIST 0x02 /* cache drive list */
+#define GDT_CACHE_INFO 0x04 /* cache info */
+#define GDT_CACHE_CONFIG 0x05 /* cache configuration */
+#define GDT_CACHE_DRV_INFO 0x07 /* cache drive info */
+#define GDT_BOARD_FEATURES 0x15 /* controller features */
+#define GDT_BOARD_INFO 0x28 /* controller info */
+#define GDT_HOST_GET 0x10001 /* get host drive list */
+#define GDT_IO_CHANNEL 0x20000 /* default IO channel */
+#define GDT_INVALID_CHANNEL 0xffff /* invalid channel */
+
+/* IOCTLs */
+#define GDT_IOCTL_GENERAL _IOWR('J', 0, gdt_ucmd_t) /* general IOCTL */
+#define GDT_IOCTL_DRVERS _IOWR('J', 1, int) /* get driver version */
+#define GDT_IOCTL_CTRTYPE _IOR('J', 2, gdt_ctrt_t) /* get ctr. type */
+#define GDT_IOCTL_OSVERS _IOR('J', 3, gdt_osv_t) /* get OS version */
+#define GDT_IOCTL_CTRCNT _IOR('J', 5, int) /* get ctr. count */
+#define GDT_IOCTL_EVENT _IOWR('J', 8, gdt_event_t) /* get event */
+#define GDT_IOCTL_STATIST _IOR('J', 9, gdt_statist_t) /* get statistics */
+
+/* Service errors */
+#define GDT_S_OK 1 /* no error */
+#define GDT_S_BSY 7 /* controller busy */
+#define GDT_S_RAW_SCSI 12 /* raw service: target error */
+#define GDT_S_RAW_ILL 0xff /* raw service: illegal */
+#define GDT_S_NO_STATUS 0x1000 /* got no status (driver-generated) */
+
+/* Controller services */
+#define GDT_SCSIRAWSERVICE 3
+#define GDT_CACHESERVICE 9
+#define GDT_SCREENSERVICE 11
+
+/* Scatter/gather element */
+#define GDT_SG_PTR 0x00 /* u_int32_t, address */
+#define GDT_SG_LEN 0x04 /* u_int32_t, length */
+#define GDT_SG_SZ 0x08
+
+/* Cache service command */
+#define GDT_CACHE_DEVICENO 0x00 /* u_int16_t, number of cache drive */
+#define GDT_CACHE_BLOCKNO 0x02 /* u_int32_t, block number */
+#define GDT_CACHE_BLOCKCNT 0x06 /* u_int32_t, block count */
+#define GDT_CACHE_DESTADDR 0x0a /* u_int32_t, dest. addr. (-1: s/g) */
+#define GDT_CACHE_SG_CANZ 0x0e /* u_int32_t, s/g element count */
+#define GDT_CACHE_SG_LST 0x12 /* [GDT_MAXSG], s/g list */
+#define GDT_CACHE_SZ (0x12 + GDT_MAXSG * GDT_SG_SZ)
+
+/* Ioctl command */
+#define GDT_IOCTL_PARAM_SIZE 0x00 /* u_int16_t, size of buffer */
+#define GDT_IOCTL_SUBFUNC 0x02 /* u_int32_t, ioctl function */
+#define GDT_IOCTL_CHANNEL 0x06 /* u_int32_t, device */
+#define GDT_IOCTL_P_PARAM 0x0a /* u_int32_t, buffer */
+#define GDT_IOCTL_SZ 0x0e
+
+/* Screen service defines */
+#define GDT_MSG_INV_HANDLE -1 /* special message handle */
+#define GDT_MSGLEN 16 /* size of message text */
+#define GDT_MSG_SIZE 34 /* size of message structure */
+#define GDT_MSG_REQUEST 0 /* async. event. message */
+
+/* Screen service command */
+#define GDT_SCREEN_MSG_HANDLE 0x02 /* u_int32_t, message handle */
+#define GDT_SCREEN_MSG_ADDR 0x06 /* u_int32_t, message buffer address */
+#define GDT_SCREEN_SZ 0x0a
+
+/* Screen service message */
+#define GDT_SCR_MSG_HANDLE 0x00 /* u_int32_t, message handle */
+#define GDT_SCR_MSG_LEN 0x04 /* u_int32_t, size of message */
+#define GDT_SCR_MSG_ALEN 0x08 /* u_int32_t, answer length */
+#define GDT_SCR_MSG_ANSWER 0x0c /* u_int8_t, answer flag */
+#define GDT_SCR_MSG_EXT 0x0d /* u_int8_t, more messages? */
+#define GDT_SCR_MSG_RES 0x0e /* u_int16_t, reserved */
+#define GDT_SCR_MSG_TEXT 0x10 /* GDT_MSGLEN+2, message text */
+#define GDT_SCR_MSG_SZ (0x12 + GDT_MSGLEN)
+
+/* Raw service command */
+#define GDT_RAW_DIRECTION 0x02 /* u_int32_t, data direction */
+#define GDT_RAW_MDISC_TIME 0x06 /* u_int32_t, disc. time (0: none) */
+#define GDT_RAW_MCON_TIME 0x0a /* u_int32_t, conn. time (0: none) */
+#define GDT_RAW_SDATA 0x0e /* u_int32_t, dest. addr. (-1: s/g) */
+#define GDT_RAW_SDLEN 0x12 /* u_int32_t, data length */
+#define GDT_RAW_CLEN 0x16 /* u_int32_t, SCSI cmd len (6/10/12) */
+#define GDT_RAW_CMD 0x1a /* u_int8_t [12], SCSI command */
+#define GDT_RAW_TARGET 0x26 /* u_int8_t, target ID */
+#define GDT_RAW_LUN 0x27 /* u_int8_t, LUN */
+#define GDT_RAW_BUS 0x28 /* u_int8_t, SCSI bus number */
+#define GDT_RAW_PRIORITY 0x29 /* u_int8_t, only 0 used */
+#define GDT_RAW_SENSE_LEN 0x2a /* u_int32_t, sense data length */
+#define GDT_RAW_SENSE_DATA 0x2e /* u_int32_t, sense data address */
+#define GDT_RAW_SG_RANZ 0x36 /* u_int32_t, s/g element count */
+#define GDT_RAW_SG_LST 0x3a /* [GDT_MAXSG], s/g list */
+#define GDT_RAW_SZ (0x3a + GDT_MAXSG * GDT_SG_SZ)
+
+/* Command structure */
+#define GDT_CMD_BOARDNODE 0x00 /* u_int32_t, board node (always 0) */
+#define GDT_CMD_COMMANDINDEX 0x04 /* u_int32_t, command number */
+#define GDT_CMD_OPCODE 0x08 /* u_int16_t, opcode (READ, ...) */
+#define GDT_CMD_UNION 0x0a /* cache/screen/raw service command */
+#define GDT_CMD_UNION_SZ GDT_RAW_SZ
+#define GDT_CMD_SZ (0x0a + GDT_CMD_UNION_SZ)
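As an illustration of how these byte offsets are used (a sketch only, not part of the patch; the helper name and the single-element s/g list are invented), a cache-service READ of cnt blocks could be laid down in the per-controller command buffer with the gdt_enc*() helpers declared later in this header. The command index and service are left to the dispatch code.

/* Illustrative sketch only: encode a cache-service READ into gdt->sc_cmd. */
static void
iir_example_build_cache_read(struct gdt_softc *gdt, u_int16_t hdrive,
    u_int32_t blockno, u_int32_t cnt, u_int32_t busaddr)
{
        u_int8_t *cmd = gdt->sc_cmd;

        gdt_enc32(cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
        gdt_enc16(cmd + GDT_CMD_OPCODE, GDT_READ);
        gdt_enc16(cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, hdrive);
        gdt_enc32(cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, blockno);
        gdt_enc32(cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT, cnt);
        /* -1 in DESTADDR tells the firmware to use the s/g list instead. */
        gdt_enc32(cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 0xffffffff);
        gdt_enc32(cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, 1);
        gdt_enc32(cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_PTR, busaddr);
        gdt_enc32(cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_LEN,
            cnt * GDT_SECTOR_SIZE);
        /* Only the bytes actually used need to be copied to the controller. */
        gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + GDT_SG_SZ,
            sizeof(u_int32_t));
}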
+
+/* Command queue entries */
+#define GDT_OFFSET 0x00 /* u_int16_t, command offset in the DP RAM */
+#define GDT_SERV_ID 0x02 /* u_int16_t, service */
+#define GDT_COMM_Q_SZ 0x04
+
+/* Interface area */
+#define GDT_S_CMD_INDX 0x00 /* u_int8_t, special command */
+#define GDT_S_STATUS 0x01 /* volatile u_int8_t, status special command */
+#define GDT_S_INFO 0x04 /* u_int32_t [4], add. info special command */
+#define GDT_SEMA0 0x14 /* volatile u_int8_t, command semaphore */
+#define GDT_CMD_INDEX 0x18 /* u_int8_t, command number */
+#define GDT_STATUS 0x1c /* volatile u_int16_t, command status */
+#define GDT_SERVICE 0x1e /* u_int16_t, service (for asynch. events) */
+#define GDT_DPR_INFO 0x20 /* u_int32_t [2], additional info */
+#define GDT_COMM_QUEUE 0x28 /* command queue */
+#define GDT_DPR_CMD (0x30 + GDT_MAXOFFSETS * GDT_COMM_Q_SZ)
+ /* u_int8_t [], commands */
+
+/* I/O channel header */
+#define GDT_IOC_VERSION 0x00 /* u_int32_t, version (~0: newest) */
+#define GDT_IOC_LIST_ENTRIES 0x04 /* u_int8_t, list entry count */
+#define GDT_IOC_FIRST_CHAN 0x05 /* u_int8_t, first channel number */
+#define GDT_IOC_LAST_CHAN 0x06 /* u_int8_t, last channel number */
+#define GDT_IOC_CHAN_COUNT 0x07 /* u_int8_t, (R) channel count */
+#define GDT_IOC_LIST_OFFSET 0x08 /* u_int32_t, offset of list[0] */
+#define GDT_IOC_HDR_SZ 0x0c
+
+#define GDT_IOC_NEWEST 0xffffffff /* goes into GDT_IOC_VERSION */
+
+/* Get I/O channel description */
+#define GDT_IOC_ADDRESS 0x00 /* u_int32_t, channel address */
+#define GDT_IOC_TYPE 0x04 /* u_int8_t, type (SCSI/FCSL) */
+#define GDT_IOC_LOCAL_NO 0x05 /* u_int8_t, local number */
+#define GDT_IOC_FEATURES 0x06 /* u_int16_t, channel features */
+#define GDT_IOC_SZ 0x08
+
+/* Get raw I/O channel description */
+#define GDT_RAWIOC_PROC_ID 0x00 /* u_int8_t, processor id */
+#define GDT_RAWIOC_PROC_DEFECT 0x01 /* u_int8_t, defect? */
+#define GDT_RAWIOC_SZ 0x04
+
+/* Get SCSI channel count */
+#define GDT_GETCH_CHANNEL_NO 0x00 /* u_int32_t, channel number */
+#define GDT_GETCH_DRIVE_CNT 0x04 /* u_int32_t, drive count */
+#define GDT_GETCH_SIOP_ID 0x08 /* u_int8_t, SCSI processor ID */
+#define GDT_GETCH_SIOP_STATE 0x09 /* u_int8_t, SCSI processor state */
+#define GDT_GETCH_SZ 0x0a
+
+/* Cache info/config IOCTL structures */
+#define GDT_CPAR_VERSION 0x00 /* u_int32_t, firmware version */
+#define GDT_CPAR_STATE 0x04 /* u_int16_t, cache state (on/off) */
+#define GDT_CPAR_STRATEGY 0x06 /* u_int16_t, cache strategy */
+#define GDT_CPAR_WRITE_BACK 0x08 /* u_int16_t, write back (on/off) */
+#define GDT_CPAR_BLOCK_SIZE 0x0a /* u_int16_t, cache block size */
+#define GDT_CPAR_SZ 0x0c
+
+#define GDT_CSTAT_CSIZE 0x00 /* u_int32_t, cache size */
+#define GDT_CSTAT_READ_CNT 0x04 /* u_int32_t, read counter */
+#define GDT_CSTAT_WRITE_CNT 0x08 /* u_int32_t, write counter */
+#define GDT_CSTAT_TR_HITS 0x0c /* u_int32_t, track hits */
+#define GDT_CSTAT_SEC_HITS 0x10 /* u_int32_t, sector hits */
+#define GDT_CSTAT_SEC_MISS 0x14 /* u_int32_t, sector misses */
+#define GDT_CSTAT_SZ 0x18
+
+/* Get cache info */
+#define GDT_CINFO_CPAR 0x00
+#define GDT_CINFO_CSTAT GDT_CPAR_SZ
+#define GDT_CINFO_SZ (GDT_CPAR_SZ + GDT_CSTAT_SZ)
+
+/* Get board info */
+#define GDT_BINFO_SER_NO 0x00 /* u_int32_t, serial number */
+#define GDT_BINFO_OEM_ID 0x04 /* u_int8_t [2], OEM ID */
+#define GDT_BINFO_EP_FLAGS 0x06 /* u_int16_t, eprom flags */
+#define GDT_BINFO_PROC_ID 0x08 /* u_int32_t, processor ID */
+#define GDT_BINFO_MEMSIZE 0x0c /* u_int32_t, memory size (bytes) */
+#define GDT_BINFO_MEM_BANKS 0x10 /* u_int8_t, memory banks */
+#define GDT_BINFO_CHAN_TYPE 0x11 /* u_int8_t, channel type */
+#define GDT_BINFO_CHAN_COUNT 0x12 /* u_int8_t, channel count */
+#define GDT_BINFO_RDONGLE_PRES 0x13 /* u_int8_t, dongle present */
+#define GDT_BINFO_EPR_FW_VER 0x14 /* u_int32_t, (eprom) firmware ver */
+#define GDT_BINFO_UPD_FW_VER 0x18 /* u_int32_t, (update) firmware ver */
+#define GDT_BINFO_UPD_REVISION 0x1c /* u_int32_t, update revision */
+#define GDT_BINFO_TYPE_STRING 0x20 /* char [16], controller name */
+#define GDT_BINFO_RAID_STRING 0x30 /* char [16], RAID firmware name */
+#define GDT_BINFO_UPDATE_PRES 0x40 /* u_int8_t, update present? */
+#define GDT_BINFO_XOR_PRES 0x41 /* u_int8_t, XOR engine present */
+#define GDT_BINFO_PROM_TYPE 0x42 /* u_int8_t, ROM type (eprom/flash) */
+#define GDT_BINFO_PROM_COUNT 0x43 /* u_int8_t, number of ROM devices */
+#define GDT_BINFO_DUP_PRES 0x44 /* u_int32_t, duplexing module pres? */
+#define GDT_BINFO_CHAN_PRES 0x48 /* u_int32_t, # of exp. channels */
+#define GDT_BINFO_MEM_PRES 0x4c /* u_int32_t, memory expansion inst? */
+#define GDT_BINFO_FT_BUS_SYSTEM 0x50 /* u_int8_t, fault bus supported? */
+#define GDT_BINFO_SUBTYPE_VALID 0x51 /* u_int8_t, board_subtype valid */
+#define GDT_BINFO_BOARD_SUBTYPE 0x52 /* u_int8_t, subtype/hardware level */
+#define GDT_BINFO_RAMPAR_PRES 0x53 /* u_int8_t, RAM parity check hw? */
+#define GDT_BINFO_SZ 0x54
+
+/* Get board features */
+#define GDT_BFEAT_CHAINING 0x00 /* u_int8_t, chaining supported */
+#define GDT_BFEAT_STRIPING 0x01 /* u_int8_t, striping (RAID-0) supp. */
+#define GDT_BFEAT_MIRRORING 0x02 /* u_int8_t, mirroring (RAID-1) supp */
+#define GDT_BFEAT_RAID 0x03 /* u_int8_t, RAID-4/5/10 supported */
+#define GDT_BFEAT_SZ 0x04
+
+/* Other defines */
+#define GDT_ASYNCINDEX 0 /* command index asynchronous event */
+#define GDT_SPEZINDEX 1 /* command index unknown service */
+
+/* Debugging */
+#ifdef GDT_DEBUG
+#define GDT_D_INTR 0x01
+#define GDT_D_MISC 0x02
+#define GDT_D_CMD 0x04
+#define GDT_D_QUEUE 0x08
+#define GDT_D_TIMEOUT 0x10
+#define GDT_D_INIT 0x20
+#define GDT_D_INVALID 0x40
+#define GDT_D_DEBUG 0x80
+extern int gdt_debug;
+#ifdef __SERIAL__
+extern int ser_printf(const char *fmt, ...);
+#define GDT_DPRINTF(mask, args) if (gdt_debug & (mask)) ser_printf args
+#else
+#define GDT_DPRINTF(mask, args) if (gdt_debug & (mask)) printf args
+#endif
+#else
+#define GDT_DPRINTF(mask, args)
+#endif
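For example (illustrative only, assuming a gdt softc and gccb in scope), a kernel built with GDT_DEBUG and gdt_debug set to (GDT_D_INTR | GDT_D_CMD) would let a code path trace as below, while the macro compiles away entirely in normal builds:

        GDT_DPRINTF(GDT_D_CMD, ("gdt%d: queueing command index %d\n",
            gdt->sc_hanum, gccb->gc_cmd_index));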
+
+/* Miscellaneous constants */
+#define GDT_RETRIES 100000000 /* 100000000 * 1us = 100s */
+#define GDT_TIMEOUT 100000000 /* 100000000 * 1us = 100s */
+#define GDT_POLL_TIMEOUT 10000000 /* 10000000 * 1us = 10s */
+#define GDT_WATCH_TIMEOUT 10000000 /* 10000000 * 1us = 10s */
+#define GDT_SCRATCH_SZ 3072 /* 3KB scratch buffer */
+
+/* macros */
+#define htole32(v) (v)
+#define htole16(v) (v)
+#define letoh32(v) (v)
+#define letoh16(v) (v)
+
+/* Map minor numbers to device identity */
+#define LUN_MASK 0x0007
+#define TARGET_MASK 0x03f8
+#define BUS_MASK 0x1c00
+#define HBA_MASK 0xe000
+
+#define minor2lun(minor) ( minor & LUN_MASK )
+#define minor2target(minor) ( (minor & TARGET_MASK) >> 3 )
+#define minor2bus(minor) ( (minor & BUS_MASK) >> 10 )
+#define minor2hba(minor) ( (minor & HBA_MASK) >> 13 )
+#define hba2minor(hba) ( (hba << 13) & HBA_MASK )
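A worked example (not part of the patch): with SDEV_PER_HBA, minor number 0x2453 decodes to HBA 1, bus 1, target 10, LUN 3. A hypothetical helper using the macros above:

/* Illustration only: dump the fields packed into an iir minor number. */
static void
iir_example_print_minor(int minor_no)
{
        printf("iir: hba %d bus %d target %d lun %d\n",
            minor2hba(minor_no), minor2bus(minor_no),
            minor2target(minor_no), minor2lun(minor_no));
}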
+
+
+/* struct for GDT_IOCTL_GENERAL */
+#pragma pack(1)
+typedef struct gdt_ucmd {
+ u_int16_t io_node;
+ u_int16_t service;
+ u_int32_t timeout;
+ u_int16_t status;
+ u_int32_t info;
+
+ u_int32_t BoardNode; /* board node (always 0) */
+ u_int32_t CommandIndex; /* command number */
+ u_int16_t OpCode; /* the command (READ,..) */
+ union {
+ struct {
+ u_int16_t DeviceNo; /* number of cache drive */
+ u_int32_t BlockNo; /* block number */
+ u_int32_t BlockCnt; /* block count */
+ void *DestAddr; /* data */
+ } cache; /* cache service cmd. str. */
+ struct {
+ u_int16_t param_size; /* size of p_param buffer */
+ u_int32_t subfunc; /* IOCTL function */
+ u_int32_t channel; /* device */
+ void *p_param; /* data */
+ } ioctl; /* IOCTL command structure */
+ struct {
+ u_int16_t reserved;
+ u_int32_t direction; /* data direction */
+ u_int32_t mdisc_time; /* disc. time (0: no timeout)*/
+ u_int32_t mcon_time; /* connect time(0: no to.) */
+ void *sdata; /* dest. addr. (if s/g: -1) */
+ u_int32_t sdlen; /* data length (bytes) */
+ u_int32_t clen; /* SCSI cmd. length(6,10,12) */
+ u_int8_t cmd[12]; /* SCSI command */
+ u_int8_t target; /* target ID */
+ u_int8_t lun; /* LUN */
+ u_int8_t bus; /* SCSI bus number */
+ u_int8_t priority; /* only 0 used */
+ u_int32_t sense_len; /* sense data length */
+ void *sense_data; /* sense data addr. */
+ u_int32_t link_p; /* linked cmds (not supp.) */
+ } raw; /* raw service cmd. struct. */
+ } u;
+ u_int8_t data[GDT_SCRATCH_SZ];
+ int complete_flag;
+ TAILQ_ENTRY(gdt_ucmd) links;
+} gdt_ucmd_t;
+
+/* struct for GDT_IOCTL_CTRTYPE */
+typedef struct gdt_ctrt {
+ u_int16_t io_node;
+ u_int16_t oem_id;
+ u_int16_t type;
+ u_int32_t info;
+ u_int8_t access;
+ u_int8_t remote;
+ u_int16_t ext_type;
+ u_int16_t device_id;
+ u_int16_t sub_device_id;
+} gdt_ctrt_t;
+
+/* struct for GDT_IOCTL_OSVERS */
+typedef struct gdt_osv {
+ u_int8_t oscode;
+ u_int8_t version;
+ u_int8_t subversion;
+ u_int16_t revision;
+ char name[64];
+} gdt_osv_t;
+
+/* controller event structure */
+#define GDT_ES_ASYNC 1
+#define GDT_ES_DRIVER 2
+#define GDT_ES_TEST 3
+#define GDT_ES_SYNC 4
+typedef struct {
+ u_int16_t size; /* size of structure */
+ union {
+ char stream[16];
+ struct {
+ u_int16_t ionode;
+ u_int16_t service;
+ u_int32_t index;
+ } driver;
+ struct {
+ u_int16_t ionode;
+ u_int16_t service;
+ u_int16_t status;
+ u_int32_t info;
+ u_int8_t scsi_coord[3];
+ } async;
+ struct {
+ u_int16_t ionode;
+ u_int16_t service;
+ u_int16_t status;
+ u_int32_t info;
+ u_int16_t hostdrive;
+ u_int8_t scsi_coord[3];
+ u_int8_t sense_key;
+ } sync;
+ struct {
+ u_int32_t l1, l2, l3, l4;
+ } test;
+ } eu;
+ u_int32_t severity;
+ u_int8_t event_string[256];
+} gdt_evt_data;
+
+/* dvrevt structure */
+typedef struct {
+ u_int32_t first_stamp;
+ u_int32_t last_stamp;
+ u_int16_t same_count;
+ u_int16_t event_source;
+ u_int16_t event_idx;
+ u_int8_t application;
+ u_int8_t reserved;
+ gdt_evt_data event_data;
+} gdt_evt_str;
+
+/* struct for GDT_IOCTL_EVENT */
+typedef struct gdt_event {
+ int erase;
+ int handle;
+ gdt_evt_str dvr;
+} gdt_event_t;
+
+/* struct for GDT_IOCTL_STATIST */
+typedef struct gdt_statist {
+ u_int16_t io_count_act;
+ u_int16_t io_count_max;
+ u_int16_t req_queue_act;
+ u_int16_t req_queue_max;
+ u_int16_t cmd_index_act;
+ u_int16_t cmd_index_max;
+ u_int16_t sg_count_act;
+ u_int16_t sg_count_max;
+} gdt_statist_t;
+
+#pragma pack()
+
+/* Context structure for interrupt services */
+struct gdt_intr_ctx {
+ u_int32_t info, info2;
+ u_int16_t cmd_status, service;
+ u_int8_t istatus;
+};
+
+/* softc structure */
+struct gdt_softc {
+ int sc_hanum;
+ int sc_class; /* Controller class */
+#define GDT_MPR 0x05
+#define GDT_CLASS_MASK 0x07
+#define GDT_FC 0x10
+#define GDT_CLASS(gdt) ((gdt)->sc_class & GDT_CLASS_MASK)
+ int sc_bus, sc_slot;
+ u_int16_t sc_device, sc_subdevice;
+ u_int16_t sc_fw_vers;
+ int sc_init_level;
+ int sc_state;
+#define GDT_NORMAL 0x00
+#define GDT_POLLING 0x01
+#define GDT_SHUTDOWN 0x02
+#define GDT_POLL_WAIT 0x80
+ dev_t sc_dev;
+ bus_space_tag_t sc_dpmemt;
+ bus_space_handle_t sc_dpmemh;
+ bus_addr_t sc_dpmembase;
+ bus_dma_tag_t sc_parent_dmat;
+ bus_dma_tag_t sc_buffer_dmat;
+ bus_dma_tag_t sc_gccb_dmat;
+ bus_dmamap_t sc_gccb_dmamap;
+ bus_addr_t sc_gccb_busbase;
+
+ struct gdt_ccb *sc_gccbs;
+ SLIST_HEAD(, gdt_ccb) sc_free_gccb, sc_pending_gccb;
+ TAILQ_HEAD(, ccb_hdr) sc_ccb_queue;
+ TAILQ_HEAD(, gdt_ucmd) sc_ucmd_queue;
+
+ u_int16_t sc_ic_all_size;
+ u_int16_t sc_cmd_len;
+ u_int16_t sc_cmd_off;
+ u_int16_t sc_cmd_cnt;
+ u_int8_t sc_cmd[GDT_CMD_SZ];
+
+ u_int32_t sc_info;
+ u_int32_t sc_info2;
+ u_int16_t sc_status;
+ u_int16_t sc_service;
+
+ u_int8_t sc_bus_cnt;
+ u_int8_t sc_virt_bus;
+ u_int8_t sc_bus_id[GDT_MAXBUS];
+ u_int8_t sc_more_proc;
+
+ struct {
+ u_int8_t hd_present;
+ u_int8_t hd_is_logdrv;
+ u_int8_t hd_is_arraydrv;
+ u_int8_t hd_is_master;
+ u_int8_t hd_is_parity;
+ u_int8_t hd_is_hotfix;
+ u_int8_t hd_master_no;
+ u_int8_t hd_lock;
+ u_int8_t hd_heads;
+ u_int8_t hd_secs;
+ u_int16_t hd_devtype;
+ u_int32_t hd_size;
+ u_int8_t hd_ldr_no;
+ u_int8_t hd_rw_attribs;
+ u_int32_t hd_start_sec;
+ } sc_hdr[GDT_MAX_HDRIVES];
+
+ u_int16_t sc_raw_feat;
+ u_int16_t sc_cache_feat;
+
+ gdt_evt_data sc_dvr;
+
+ struct cam_sim *sims[GDT_MAXBUS];
+ struct cam_path *paths[GDT_MAXBUS];
+
+ void (*sc_copy_cmd) __P((struct gdt_softc *, struct gdt_ccb *));
+ u_int8_t (*sc_get_status) __P((struct gdt_softc *));
+ void (*sc_intr) __P((struct gdt_softc *, struct gdt_intr_ctx *));
+ void (*sc_release_event) __P((struct gdt_softc *));
+ void (*sc_set_sema0) __P((struct gdt_softc *));
+ int (*sc_test_busy) __P((struct gdt_softc *));
+
+ TAILQ_ENTRY(gdt_softc) links;
+};
+
+/*
+ * A command control block, one for each corresponding command index of the
+ * controller.
+ */
+struct gdt_ccb {
+ u_int8_t gc_scratch[GDT_SCRATCH_SZ];
+ union ccb *gc_ccb;
+ gdt_ucmd_t *gc_ucmd;
+ bus_dmamap_t gc_dmamap;
+ int gc_map_flag;
+ int gc_timeout;
+ int gc_state;
+ u_int8_t gc_service;
+ u_int8_t gc_cmd_index;
+ u_int8_t gc_flags;
+#define GDT_GCF_UNUSED 0
+#define GDT_GCF_INTERNAL 1
+#define GDT_GCF_SCREEN 2
+#define GDT_GCF_SCSI 3
+#define GDT_GCF_IOCTL 4
+ SLIST_ENTRY(gdt_ccb) sle;
+};
+
+
+int iir_init __P((struct gdt_softc *));
+void iir_free __P((struct gdt_softc *));
+void iir_attach __P((struct gdt_softc *));
+void iir_intr __P((void *arg));
+
+#ifdef __GNUC__
+/* These all require correctly aligned buffers */
+static __inline__ void gdt_enc16 __P((u_int8_t *, u_int16_t));
+static __inline__ void gdt_enc32 __P((u_int8_t *, u_int32_t));
+static __inline__ u_int16_t gdt_dec16 __P((u_int8_t *));
+static __inline__ u_int32_t gdt_dec32 __P((u_int8_t *));
+
+static __inline__ void
+gdt_enc16(addr, value)
+ u_int8_t *addr;
+ u_int16_t value;
+{
+ *(u_int16_t *)addr = htole16(value);
+}
+
+static __inline__ void
+gdt_enc32(addr, value)
+ u_int8_t *addr;
+ u_int32_t value;
+{
+ *(u_int32_t *)addr = htole32(value);
+}
+
+static __inline__ u_int16_t
+gdt_dec16(addr)
+ u_int8_t *addr;
+{
+ return letoh16(*(u_int16_t *)addr);
+}
+
+static __inline__ u_int32_t
+gdt_dec32(addr)
+ u_int8_t *addr;
+{
+ return letoh32(*(u_int32_t *)addr);
+}
+#endif
+
+#if defined(__alpha__)
+/* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
+#undef vtophys
+#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va))
+#endif
+
+extern TAILQ_HEAD(gdt_softc_list, gdt_softc) gdt_softcs;
+extern u_int8_t gdt_polling;
+
+dev_t gdt_make_dev(int unit);
+void gdt_destroy_dev(dev_t dev);
+void gdt_next(struct gdt_softc *gdt);
+void gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb);
+
+gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx,
+ gdt_evt_data *evt);
+int gdt_read_event(int handle, gdt_evt_str *estr);
+void gdt_readapp_event(u_int8_t app, gdt_evt_str *estr);
+void gdt_clear_events(void);
+
+#endif
diff --git a/sys/dev/iir/iir_ctrl.c b/sys/dev/iir/iir_ctrl.c
new file mode 100644
index 0000000..ca3fffe
--- /dev/null
+++ b/sys/dev/iir/iir_ctrl.c
@@ -0,0 +1,371 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 2000-01 Intel Corporation
+ * All Rights Reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * iir_ctrl.c: Control functions and /dev entry points for /dev/iir*
+ *
+ * Written by: Achim Leubner <achim.leubner@intel.com>
+ * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
+ *
+ * TODO:
+ */
+
+#ident "$Id: iir_ctrl.c 1.2 2001/07/18 11:17:22 achim Exp $"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/uio.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/disklabel.h>
+#include <machine/bus.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+
+#include <dev/iir/iir.h>
+
+/* Entry points and other prototypes */
+static struct gdt_softc *gdt_minor2softc(int minor_no);
+
+static d_open_t iir_open;
+static d_close_t iir_close;
+static d_write_t iir_write;
+static d_read_t iir_read;
+static d_ioctl_t iir_ioctl;
+
+#define CDEV_MAJOR IIR_CDEV_MAJOR
+
+/* Normally, this is a static structure. But we need it in pci/iir_pci.c */
+static struct cdevsw iir_cdevsw = {
+ /* open */ iir_open,
+ /* close */ iir_close,
+ /* read */ iir_read,
+ /* write */ iir_write,
+ /* ioctl */ iir_ioctl,
+ /* poll */ nopoll,
+ /* mmap */ nommap,
+ /* strategy */ nostrategy,
+ /* name */ "iir",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ 0,
+ /* kq */ nokqfilter
+};
+
+static int iir_devsw_installed = 0;
+#ifndef SDEV_PER_HBA
+static int sdev_made = 0;
+#endif
+extern int gdt_cnt;
+extern char ostype[];
+extern char osrelease[];
+extern gdt_statist_t gdt_stat;
+
+/*
+ * Given a controller number,
+ * make a special device and return the dev_t
+ */
+dev_t
+gdt_make_dev(int unit)
+{
+ dev_t dev;
+
+#ifdef SDEV_PER_HBA
+ dev = make_dev(&iir_cdevsw, hba2minor(unit), UID_ROOT, GID_OPERATOR,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, "iir%d", unit);
+#else
+ if (sdev_made)
+ return (0);
+ dev = make_dev(&iir_cdevsw, 0, UID_ROOT, GID_OPERATOR,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, "iir");
+ sdev_made = 1;
+#endif
+ return (dev);
+}
+
+void
+gdt_destroy_dev(dev_t dev)
+{
+ if (dev != NULL)
+ destroy_dev(dev);
+}
+
+/*
+ * Given a minor device number,
+ * return the pointer to its softc structure
+ */
+static struct gdt_softc *
+gdt_minor2softc(int minor_no)
+{
+ struct gdt_softc *gdt;
+ int hanum;
+
+#ifdef SDEV_PER_HBA
+ hanum = minor2hba(minor_no);
+#else
+ hanum = minor_no;
+#endif
+
+ for (gdt = TAILQ_FIRST(&gdt_softcs);
+ gdt != NULL && gdt->sc_hanum != hanum;
+ gdt = TAILQ_NEXT(gdt, links));
+
+ return (gdt);
+}
+
+static int
+iir_open(dev_t dev, int flags, int fmt, d_thread_t * p)
+{
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_open()\n"));
+
+#ifdef SDEV_PER_HBA
+ int minor_no;
+ struct gdt_softc *gdt;
+
+ minor_no = minor(dev);
+ gdt = gdt_minor2softc(minor_no);
+ if (gdt == NULL)
+ return (ENXIO);
+#endif
+
+ return (0);
+}
+
+static int
+iir_close(dev_t dev, int flags, int fmt, d_thread_t * p)
+{
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_close()\n"));
+
+#ifdef SDEV_PER_HBA
+ int minor_no;
+ struct gdt_softc *gdt;
+
+ minor_no = minor(dev);
+ gdt = gdt_minor2softc(minor_no);
+ if (gdt == NULL)
+ return (ENXIO);
+#endif
+
+ return (0);
+}
+
+static int
+iir_write(dev_t dev, struct uio * uio, int ioflag)
+{
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_write()\n"));
+
+#ifdef SDEV_PER_HBA
+ int minor_no;
+ struct gdt_softc *gdt;
+
+ minor_no = minor(dev);
+ gdt = gdt_minor2softc(minor_no);
+ if (gdt == NULL)
+ return (ENXIO);
+#endif
+
+ return (0);
+}
+
+static int
+iir_read(dev_t dev, struct uio * uio, int ioflag)
+{
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_read()\n"));
+
+#ifdef SDEV_PER_HBA
+ int minor_no;
+ struct gdt_softc *gdt;
+
+ minor_no = minor(dev);
+ gdt = gdt_minor2softc(minor_no);
+ if (gdt == NULL)
+ return (ENXIO);
+#endif
+
+ return (0);
+}
+
+/*
+ * This is the control syscall interface.
+ * It should be binary compatible with UnixWare,
+ * if not totally syntactically so.
+ */
+
+static int
+iir_ioctl(dev_t dev, u_long cmd, caddr_t cmdarg, int flags, d_thread_t * p)
+{
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_ioctl() cmd 0x%lx\n",cmd));
+
+#ifdef SDEV_PER_HBA
+ int minor_no;
+ struct gdt_softc *gdt;
+
+ minor_no = minor(dev);
+ gdt = gdt_minor2softc(minor_no);
+ if (gdt == NULL)
+ return (ENXIO);
+#endif
+ ++gdt_stat.io_count_act;
+ if (gdt_stat.io_count_act > gdt_stat.io_count_max)
+ gdt_stat.io_count_max = gdt_stat.io_count_act;
+
+ switch (cmd) {
+ case GDT_IOCTL_GENERAL:
+ {
+ gdt_ucmd_t *ucmd;
+ struct gdt_softc *gdt;
+ int lock;
+
+ ucmd = (gdt_ucmd_t *)cmdarg;
+ gdt = gdt_minor2softc(ucmd->io_node);
+ if (gdt == NULL)
+ return (ENXIO);
+ lock = splcam();
+ TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
+ ucmd->complete_flag = FALSE;
+ splx(lock);
+ gdt_next(gdt);
+ if (!ucmd->complete_flag)
+ (void) tsleep((void *)ucmd, PCATCH | PRIBIO, "iirucw", 0);
+ break;
+ }
+
+ case GDT_IOCTL_DRVERS:
+ *(int *)cmdarg =
+ (IIR_DRIVER_VERSION << 8) | IIR_DRIVER_SUBVERSION;
+ break;
+
+ case GDT_IOCTL_CTRTYPE:
+ {
+ gdt_ctrt_t *p;
+ struct gdt_softc *gdt;
+
+ p = (gdt_ctrt_t *)cmdarg;
+ gdt = gdt_minor2softc(p->io_node);
+ if (gdt == NULL)
+ return (ENXIO);
+ p->oem_id = 0x8000;
+ p->type = 0xfd;
+ p->info = (gdt->sc_bus << 8) | (gdt->sc_slot << 3);
+ p->ext_type = 0x6000 | gdt->sc_subdevice;
+ p->device_id = gdt->sc_device;
+ p->sub_device_id = gdt->sc_subdevice;
+ break;
+ }
+
+ case GDT_IOCTL_OSVERS:
+ {
+ gdt_osv_t *p;
+
+ p = (gdt_osv_t *)cmdarg;
+ p->oscode = 10;
+ p->version = osrelease[0] - '0';
+ if (osrelease[1] == '.')
+ p->subversion = osrelease[2] - '0';
+ else
+ p->subversion = 0;
+ if (osrelease[3] == '.')
+ p->revision = osrelease[4] - '0';
+ else
+ p->revision = 0;
+ strcpy(p->name, ostype);
+ break;
+ }
+
+ case GDT_IOCTL_CTRCNT:
+ *(int *)cmdarg = gdt_cnt;
+ break;
+
+ case GDT_IOCTL_EVENT:
+ {
+ gdt_event_t *p;
+ int lock;
+
+ p = (gdt_event_t *)cmdarg;
+ if (p->erase == 0xff) {
+ if (p->dvr.event_source == GDT_ES_TEST)
+ p->dvr.event_data.size = sizeof(p->dvr.event_data.eu.test);
+ else if (p->dvr.event_source == GDT_ES_DRIVER)
+ p->dvr.event_data.size= sizeof(p->dvr.event_data.eu.driver);
+ else if (p->dvr.event_source == GDT_ES_SYNC)
+ p->dvr.event_data.size = sizeof(p->dvr.event_data.eu.sync);
+ else
+ p->dvr.event_data.size = sizeof(p->dvr.event_data.eu.async);
+ lock = splcam();
+ gdt_store_event(p->dvr.event_source, p->dvr.event_idx,
+ &p->dvr.event_data);
+ splx(lock);
+ } else if (p->erase == 0xfe) {
+ lock = splcam();
+ gdt_clear_events();
+ splx(lock);
+ } else if (p->erase == 0) {
+ p->handle = gdt_read_event(p->handle, &p->dvr);
+ } else {
+ gdt_readapp_event((u_int8_t)p->erase, &p->dvr);
+ }
+ break;
+ }
+
+ case GDT_IOCTL_STATIST:
+ {
+ gdt_statist_t *p;
+
+ p = (gdt_statist_t *)cmdarg;
+ bcopy(&gdt_stat, p, sizeof(gdt_statist_t));
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ --gdt_stat.io_count_act;
+ return (0);
+}
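For illustration (not part of the patch), a minimal user-space consumer of two of the simpler ioctls might look like the sketch below. It assumes the iir header and its dependencies are reachable from userland and that the node was created as /dev/iir (the non-SDEV_PER_HBA case above).

/* Illustrative sketch only: query driver version and controller count. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#include <dev/iir/iir.h>        /* for GDT_IOCTL_DRVERS / GDT_IOCTL_CTRCNT */

int
main(void)
{
        int fd, vers, cnt;

        fd = open("/dev/iir", O_RDWR);
        if (fd < 0) {
                perror("open /dev/iir");
                return (1);
        }
        if (ioctl(fd, GDT_IOCTL_DRVERS, &vers) == 0)
                printf("iir driver version %d.%d\n", vers >> 8, vers & 0xff);
        if (ioctl(fd, GDT_IOCTL_CTRCNT, &cnt) == 0)
                printf("%d controller(s) found\n", cnt);
        close(fd);
        return (0);
}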
+
+static void
+iir_drvinit(void *unused)
+{
+ GDT_DPRINTF(GDT_D_DEBUG, ("iir_drvinit()\n"));
+
+ if (!iir_devsw_installed) {
+ /* Add the I/O (data) channel */
+ cdevsw_add(&iir_cdevsw);
+ iir_devsw_installed = 1;
+ }
+}
+
+SYSINIT(iir_dev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, iir_drvinit, NULL)
diff --git a/sys/dev/iir/iir_pci.c b/sys/dev/iir/iir_pci.c
new file mode 100644
index 0000000..d751e3d
--- /dev/null
+++ b/sys/dev/iir/iir_pci.c
@@ -0,0 +1,476 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 2000-01 Intel Corporation
+ * All Rights Reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * iir_pci.c: PCI Bus Attachment for Intel Integrated RAID Controller driver
+ *
+ * Written by: Achim Leubner <achim.leubner@intel.com>
+ * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
+ *
+ * TODO:
+ */
+
+#ident "$Id: iir_pci.c 1.1 2001/05/22 20:14:12 achim Exp $"
+
+/* #include "opt_iir.h" */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+
+#include <machine/bus_memio.h>
+#include <machine/bus_pio.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/clock.h>
+#include <sys/rman.h>
+
+#include <pci/pcireg.h>
+#include <pci/pcivar.h>
+
+#include <cam/scsi/scsi_all.h>
+
+#include <dev/iir/iir.h>
+
+/* Mapping registers for various areas */
+#define PCI_DPMEM PCIR_MAPS
+
+/* Product numbers for Fibre-Channel are greater than or equal to 0x200 */
+#define GDT_PCI_PRODUCT_FC 0x200
+
+/* PCI SRAM structure */
+#define GDT_MAGIC 0x00 /* u_int32_t, controller ID from BIOS */
+#define GDT_NEED_DEINIT 0x04 /* u_int16_t, switch between BIOS/driver */
+#define GDT_SWITCH_SUPPORT 0x06 /* u_int8_t, see GDT_NEED_DEINIT */
+#define GDT_OS_USED 0x10 /* u_int8_t [16], OS code per service */
+#define GDT_FW_MAGIC 0x3c /* u_int8_t, controller ID from firmware */
+#define GDT_SRAM_SZ 0x40
+
+/* DPRAM PCI controllers */
+#define GDT_DPR_IF 0x00 /* interface area */
+#define GDT_6SR (0xff0 - GDT_SRAM_SZ)
+#define GDT_SEMA1 0xff1 /* volatile u_int8_t, command semaphore */
+#define GDT_IRQEN 0xff5 /* u_int8_t, board interrupts enable */
+#define GDT_EVENT 0xff8 /* u_int8_t, release event */
+#define GDT_IRQDEL 0xffc /* u_int8_t, acknowledge board interrupt */
+#define GDT_DPRAM_SZ 0x1000
+
+/* PLX register structure (new PCI controllers) */
+#define GDT_CFG_REG 0x00 /* u_int8_t, DPRAM cfg. (2: < 1MB, 0: any) */
+#define GDT_SEMA0_REG 0x40 /* volatile u_int8_t, command semaphore */
+#define GDT_SEMA1_REG 0x41 /* volatile u_int8_t, status semaphore */
+#define GDT_PLX_STATUS 0x44 /* volatile u_int16_t, command status */
+#define GDT_PLX_SERVICE 0x46 /* u_int16_t, service */
+#define GDT_PLX_INFO 0x48 /* u_int32_t [2], additional info */
+#define GDT_LDOOR_REG 0x60 /* u_int8_t, PCI to local doorbell */
+#define GDT_EDOOR_REG 0x64 /* volatile u_int8_t, local to PCI doorbell */
+#define GDT_CONTROL0 0x68 /* u_int8_t, control0 register (unused) */
+#define GDT_CONTROL1 0x69 /* u_int8_t, board interrupts enable */
+#define GDT_PLX_SZ 0x80
+
+/* DPRAM new PCI controllers */
+#define GDT_IC 0x00 /* interface */
+#define GDT_PCINEW_6SR (0x4000 - GDT_SRAM_SZ)
+ /* SRAM structure */
+#define GDT_PCINEW_SZ 0x4000
+
+/* i960 register structure (PCI MPR controllers) */
+#define GDT_MPR_SEMA0 0x10 /* volatile u_int8_t, command semaphore */
+#define GDT_MPR_SEMA1 0x12 /* volatile u_int8_t, status semaphore */
+#define GDT_MPR_STATUS 0x14 /* volatile u_int16_t, command status */
+#define GDT_MPR_SERVICE 0x16 /* u_int16_t, service */
+#define GDT_MPR_INFO 0x18 /* u_int32_t [2], additional info */
+#define GDT_MPR_LDOOR 0x20 /* u_int8_t, PCI to local doorbell */
+#define GDT_MPR_EDOOR 0x2c /* volatile u_int8_t, local to PCI doorbell */
+#define GDT_EDOOR_EN 0x34 /* u_int8_t, board interrupts enable */
+#define GDT_SEVERITY 0xefc /* u_int8_t, event severity */
+#define GDT_EVT_BUF 0xf00 /* u_int8_t [256], event buffer */
+#define GDT_I960_SZ 0x1000
+
+/* DPRAM PCI MPR controllers */
+#define GDT_I960R 0x00 /* 4KB i960 registers */
+#define GDT_MPR_IC GDT_I960_SZ
+ /* interface area (after the i960 registers) */
+#define GDT_MPR_6SR (GDT_I960_SZ + 0x3000 - GDT_SRAM_SZ)
+ /* DPRAM struct. */
+#define GDT_MPR_SZ (0x3000 - GDT_SRAM_SZ)
+
+static int iir_pci_probe __P((device_t dev));
+static int iir_pci_attach __P((device_t dev));
+
+void gdt_pci_enable_intr __P((struct gdt_softc *));
+
+void gdt_mpr_copy_cmd __P((struct gdt_softc *, struct gdt_ccb *));
+u_int8_t gdt_mpr_get_status __P((struct gdt_softc *));
+void gdt_mpr_intr __P((struct gdt_softc *, struct gdt_intr_ctx *));
+void gdt_mpr_release_event __P((struct gdt_softc *));
+void gdt_mpr_set_sema0 __P((struct gdt_softc *));
+int gdt_mpr_test_busy __P((struct gdt_softc *));
+
+static device_method_t iir_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, iir_pci_probe),
+ DEVMETHOD(device_attach, iir_pci_attach),
+ { 0, 0}
+};
+
+
+static driver_t iir_pci_driver =
+{
+ "iir",
+ iir_pci_methods,
+ sizeof(struct gdt_softc)
+};
+
+static devclass_t iir_devclass;
+
+DRIVER_MODULE(iir, pci, iir_pci_driver, iir_devclass, 0, 0);
+
+static int
+iir_pci_probe(device_t dev)
+{
+ if (pci_get_vendor(dev) == INTEL_VENDOR_ID &&
+ pci_get_device(dev) == INTEL_DEVICE_ID_IIR) {
+ device_set_desc(dev, "Intel Integrated RAID Controller");
+ return (0);
+ }
+ if (pci_get_vendor(dev) == GDT_VENDOR_ID &&
+ ((pci_get_device(dev) >= GDT_DEVICE_ID_MIN &&
+ pci_get_device(dev) <= GDT_DEVICE_ID_MAX) ||
+ pci_get_device(dev) == GDT_DEVICE_ID_NEWRX)) {
+ device_set_desc(dev, "ICP Disk Array Controller");
+ return (0);
+ }
+ return (ENXIO);
+}
+
+
+static int
+iir_pci_attach(device_t dev)
+{
+ struct gdt_softc *gdt;
+ struct resource *io = NULL, *irq = NULL;
+ int retries, rid, error = 0;
+ void *ih;
+ u_int8_t protocol;
+
+ /* map DPMEM */
+ rid = PCI_DPMEM;
+ io = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE);
+ if (io == NULL) {
+ device_printf(dev, "can't allocate register resources\n");
+ error = ENOMEM;
+ goto err;
+ }
+
+ /* get IRQ */
+ rid = 0;
+ irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
+ RF_ACTIVE | RF_SHAREABLE);
+ if (irq == NULL) {
+ device_printf(dev, "can't find IRQ value\n");
+ error = ENOMEM;
+ goto err;
+ }
+
+ gdt = device_get_softc(dev);
+ bzero(gdt, sizeof(struct gdt_softc));
+ gdt->sc_init_level = 0;
+ gdt->sc_dpmemt = rman_get_bustag(io);
+ gdt->sc_dpmemh = rman_get_bushandle(io);
+ gdt->sc_dpmembase = rman_get_start(io);
+ gdt->sc_hanum = device_get_unit(dev);
+ gdt->sc_bus = pci_get_bus(dev);
+ gdt->sc_slot = pci_get_slot(dev);
+ gdt->sc_device = pci_get_device(dev);
+ gdt->sc_subdevice = pci_get_subdevice(dev);
+ gdt->sc_class = GDT_MPR;
+/* no FC ctr.
+ if (gdt->sc_device >= GDT_PCI_PRODUCT_FC)
+ gdt->sc_class |= GDT_FC;
+*/
+
+ /* initialize RP controller */
+ /* check and reset interface area */
+ bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC,
+ htole32(GDT_MPR_MAGIC));
+ if (bus_space_read_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC) !=
+ htole32(GDT_MPR_MAGIC)) {
+ printf("cannot access DPMEM at 0x%x (shadowed?)\n",
+ gdt->sc_dpmembase);
+ error = ENXIO;
+ goto err;
+ }
+ bus_space_set_region_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_I960_SZ, htole32(0),
+ GDT_MPR_SZ >> 2);
+
+ /* Disable everything */
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_EDOOR_EN,
+ bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_EDOOR_EN) | 4);
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_EDOOR, 0xff);
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_STATUS,
+ 0);
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_CMD_INDEX,
+ 0);
+
+ bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_INFO,
+ htole32(gdt->sc_dpmembase));
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_CMD_INDX,
+ 0xff);
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_LDOOR, 1);
+
+ DELAY(20);
+ retries = GDT_RETRIES;
+ while (bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_S_STATUS) != 0xff) {
+ if (--retries == 0) {
+ printf("DEINIT failed\n");
+ error = ENXIO;
+ goto err;
+ }
+ DELAY(1);
+ }
+
+ protocol = (u_int8_t)letoh32(bus_space_read_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_S_INFO));
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_STATUS,
+ 0);
+ if (protocol != GDT_PROTOCOL_VERSION) {
+ printf("unsupported protocol %d\n", protocol);
+ error = ENXIO;
+ goto err;
+ }
+
+ /* special command to controller BIOS */
+ bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_INFO,
+ htole32(0));
+ bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_S_INFO + sizeof (u_int32_t), htole32(0));
+ bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_S_INFO + 2 * sizeof (u_int32_t),
+ htole32(1));
+ bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_S_INFO + 3 * sizeof (u_int32_t),
+ htole32(0));
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_CMD_INDX,
+ 0xfe);
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_LDOOR, 1);
+
+ DELAY(20);
+ retries = GDT_RETRIES;
+ while (bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_S_STATUS) != 0xfe) {
+ if (--retries == 0) {
+ printf("initialization error\n");
+ error = ENXIO;
+ goto err;
+ }
+ DELAY(1);
+ }
+
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_STATUS,
+ 0);
+
+ gdt->sc_ic_all_size = GDT_MPR_SZ;
+
+ gdt->sc_copy_cmd = gdt_mpr_copy_cmd;
+ gdt->sc_get_status = gdt_mpr_get_status;
+ gdt->sc_intr = gdt_mpr_intr;
+ gdt->sc_release_event = gdt_mpr_release_event;
+ gdt->sc_set_sema0 = gdt_mpr_set_sema0;
+ gdt->sc_test_busy = gdt_mpr_test_busy;
+
+ /* Allocate a dmatag representing the capabilities of this attachment */
+ /* XXX Should be a child of the PCI bus dma tag */
+ if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, /*boundary*/0,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL, /*filterarg*/NULL,
+ /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
+ /*nsegments*/GDT_MAXSG,
+ /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+ /*flags*/0, &gdt->sc_parent_dmat) != 0) {
+ error = ENXIO;
+ goto err;
+ }
+ gdt->sc_init_level++;
+
+ if (iir_init(gdt) != 0) {
+ iir_free(gdt);
+ error = ENXIO;
+ goto err;
+ }
+
+ /* Register with the XPT */
+ iir_attach(gdt);
+
+ /* associate interrupt handler */
+ if (bus_setup_intr( dev, irq, INTR_TYPE_CAM,
+ iir_intr, gdt, &ih )) {
+ device_printf(dev, "Unable to register interrupt handler\n");
+ error = ENXIO;
+ goto err;
+ }
+
+ gdt_pci_enable_intr(gdt);
+ return (0);
+
+err:
+ if (irq)
+ bus_release_resource( dev, SYS_RES_IRQ, 0, irq );
+/*
+ if (io)
+ bus_release_resource( dev, SYS_RES_MEMORY, rid, io );
+*/
+ return (error);
+}
+
+
+/* Enable interrupts */
+void
+gdt_pci_enable_intr(struct gdt_softc *gdt)
+{
+ GDT_DPRINTF(GDT_D_INTR, ("gdt_pci_enable_intr(%p) ", gdt));
+
+ switch(GDT_CLASS(gdt)) {
+ case GDT_MPR:
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_EDOOR, 0xff);
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_EDOOR_EN,
+ bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_EDOOR_EN) & ~4);
+ break;
+ }
+}
+
+
+/*
+ * MPR PCI controller-specific functions
+ */
+
+void
+gdt_mpr_copy_cmd(struct gdt_softc *gdt, struct gdt_ccb *ccb)
+{
+ u_int16_t cp_count = roundup(gdt->sc_cmd_len, sizeof (u_int32_t));
+ u_int16_t dp_offset = gdt->sc_cmd_off;
+ u_int16_t cmd_no = gdt->sc_cmd_cnt++;
+
+ GDT_DPRINTF(GDT_D_CMD, ("gdt_mpr_copy_cmd(%p) ", gdt));
+
+ gdt->sc_cmd_off += cp_count;
+
+ bus_space_write_2(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_COMM_QUEUE + cmd_no * GDT_COMM_Q_SZ + GDT_OFFSET,
+ htole16(GDT_DPMEM_COMMAND_OFFSET + dp_offset));
+ bus_space_write_2(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_COMM_QUEUE + cmd_no * GDT_COMM_Q_SZ + GDT_SERV_ID,
+ htole16(ccb->gc_service));
+ bus_space_write_region_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_IC + GDT_DPR_CMD + dp_offset,
+ (u_int32_t *)gdt->sc_cmd, cp_count >> 2);
+}
+
+u_int8_t
+gdt_mpr_get_status(struct gdt_softc *gdt)
+{
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_mpr_get_status(%p) ", gdt));
+
+ return bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_EDOOR);
+}
+
+void
+gdt_mpr_intr(struct gdt_softc *gdt, struct gdt_intr_ctx *ctx)
+{
+ int i;
+
+ GDT_DPRINTF(GDT_D_INTR, ("gdt_mpr_intr(%p) ", gdt));
+
+ if (ctx->istatus & 0x80) { /* error flag */
+ ctx->istatus &= ~0x80;
+ ctx->cmd_status = bus_space_read_2(gdt->sc_dpmemt,
+ gdt->sc_dpmemh, GDT_MPR_STATUS);
+ } else /* no error */
+ ctx->cmd_status = GDT_S_OK;
+
+ ctx->info =
+ bus_space_read_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_INFO);
+ ctx->service =
+ bus_space_read_2(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_SERVICE);
+ ctx->info2 =
+ bus_space_read_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_INFO + sizeof (u_int32_t));
+
+ /* event string */
+ if (ctx->istatus == GDT_ASYNCINDEX) {
+ if (ctx->service != GDT_SCREENSERVICE &&
+ (gdt->sc_fw_vers & 0xff) >= 0x1a) {
+ gdt->sc_dvr.severity =
+ bus_space_read_1(gdt->sc_dpmemt,gdt->sc_dpmemh, GDT_SEVERITY);
+ for (i = 0; i < 256; ++i) {
+ gdt->sc_dvr.event_string[i] =
+ bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_EVT_BUF + i);
+ if (gdt->sc_dvr.event_string[i] == 0)
+ break;
+ }
+ }
+ }
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_EDOOR, 0xff);
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_SEMA1, 0);
+}
+
+void
+gdt_mpr_release_event(struct gdt_softc *gdt)
+{
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_mpr_release_event(%p) ", gdt));
+
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_LDOOR, 1);
+}
+
+void
+gdt_mpr_set_sema0(struct gdt_softc *gdt)
+{
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_mpr_set_sema0(%p) ", gdt));
+
+ bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_SEMA0, 1);
+}
+
+int
+gdt_mpr_test_busy(struct gdt_softc *gdt)
+{
+ GDT_DPRINTF(GDT_D_MISC, ("gdt_mpr_test_busy(%p) ", gdt));
+
+ return (bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
+ GDT_MPR_SEMA0) & 1);
+}