Diffstat (limited to 'sys')
-rw-r--r-- | sys/dev/aic7xxx/ahd_pci.c | 302
-rw-r--r-- | sys/dev/aic7xxx/aic79xx.c | 8345
-rw-r--r-- | sys/dev/aic7xxx/aic79xx.h | 1298
-rw-r--r-- | sys/dev/aic7xxx/aic79xx.reg | 3716
-rw-r--r-- | sys/dev/aic7xxx/aic79xx.seq | 1723
-rw-r--r-- | sys/dev/aic7xxx/aic79xx_inline.h | 997
-rw-r--r-- | sys/dev/aic7xxx/aic79xx_osm.c | 1992
-rw-r--r-- | sys/dev/aic7xxx/aic79xx_osm.h | 577
-rw-r--r-- | sys/dev/aic7xxx/aic79xx_pci.c | 792
9 files changed, 19742 insertions, 0 deletions
diff --git a/sys/dev/aic7xxx/ahd_pci.c b/sys/dev/aic7xxx/ahd_pci.c new file mode 100644 index 0000000..834cbe8 --- /dev/null +++ b/sys/dev/aic7xxx/ahd_pci.c @@ -0,0 +1,302 @@ +/* + * FreeBSD, PCI product support functions + * + * Copyright (c) 1995-2001 Justin T. Gibbs + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id$ + * + * $FreeBSD$ + */ + +#include <dev/aic7xxx/aic79xx_osm.h> + +#define AHD_PCI_IOADDR0 PCIR_MAPS /* Primary I/O BAR */ +#define AHD_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */ +#define AHD_PCI_IOADDR1 (PCIR_MAPS + 12)/* Secondary I/O BAR */ + +static int ahd_pci_probe(device_t dev); +static int ahd_pci_attach(device_t dev); + +static device_method_t ahd_pci_device_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ahd_pci_probe), + DEVMETHOD(device_attach, ahd_pci_attach), + DEVMETHOD(device_detach, ahd_detach), + { 0, 0 } +}; + +#if __FreeBSD_version >= 500027 + +DEVINTERFACE(ahd_pci_device, ahd_pci_device_methods, device); + +static device_interface_t *ahd_pci_interfaces[] = { + &ahd_pci_device_interface, + + NULL +}; + +static driver_t ahd_pci_driver = { + "ahd", + ahd_pci_interfaces, + sizeof(struct ahd_softc) +}; + +#else /* FreeBSD 4.X */ + +static driver_t ahd_pci_driver = { + "ahd", + ahd_pci_device_methods, + sizeof(struct ahd_softc) +}; + +#endif /* __FreeBSD_version */ + +static devclass_t ahd_devclass; + +DRIVER_MODULE(ahd, pci, ahd_pci_driver, ahd_devclass, 0, 0); +DRIVER_MODULE(ahd, cardbus, ahd_pci_driver, ahd_devclass, 0, 0); +MODULE_DEPEND(ahd_pci, ahd, 1, 1, 1); +MODULE_VERSION(ahd_pci, 1); + +static int +ahd_pci_probe(device_t dev) +{ + struct ahd_pci_identity *entry; + + entry = ahd_find_pci_device(dev); + if (entry != NULL) { + device_set_desc(dev, entry->name); + return (0); + } + return (ENXIO); +} + +static int +ahd_pci_attach(device_t dev) +{ + struct ahd_pci_identity *entry; + struct ahd_softc *ahd; + char *name; + int error; + + entry = ahd_find_pci_device(dev); + if (entry == NULL) + return (ENXIO); + + /* + * Allocate a softc for this card and + * set it up for attachment by our + * common detect routine. 
+ */ + name = malloc(strlen(device_get_nameunit(dev)) + 1, M_DEVBUF, M_NOWAIT); + if (name == NULL) + return (ENOMEM); + strcpy(name, device_get_nameunit(dev)); + ahd = ahd_alloc(dev, name); + if (ahd == NULL) + return (ENOMEM); + + ahd_set_unit(ahd, device_get_unit(dev)); + + /* + * Should we bother disabling 39Bit addressing + * based on installed memory? + */ + if (sizeof(bus_addr_t) > 4) + ahd->flags |= AHD_39BIT_ADDRESSING; + + /* Allocate a dmatag for our SCB DMA maps */ + /* XXX Should be a child of the PCI bus dma tag */ + error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, + /*boundary*/0, + (ahd->flags & AHD_39BIT_ADDRESSING) + ? 0x7FFFFFFFFF + : BUS_SPACE_MAXADDR_32BIT, + /*highaddr*/BUS_SPACE_MAXADDR, + /*filter*/NULL, /*filterarg*/NULL, + /*maxsize*/MAXBSIZE, /*nsegments*/AHD_NSEG, + /*maxsegsz*/AHD_MAXTRANSFER_SIZE, + /*flags*/BUS_DMA_ALLOCNOW, + &ahd->parent_dmat); + + if (error != 0) { + printf("ahd_pci_attach: Could not allocate DMA tag " + "- error %d\n", error); + ahd_free(ahd); + return (ENOMEM); + } + ahd->dev_softc = dev; + error = ahd_pci_config(ahd, entry); + if (error != 0) { + ahd_free(ahd); + return (error); + } + + ahd_attach(ahd); + return (0); +} + +int +ahd_pci_map_registers(struct ahd_softc *ahd) +{ + struct resource *regs; + struct resource *regs2; + u_int command; + int regs_type; + int regs_id; + int regs_id2; + + command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1); + regs = NULL; + regs2 = NULL; + regs_type = 0; + regs_id = 0; + if ((command & PCIM_CMD_MEMEN) != 0) { + + regs_type = SYS_RES_MEMORY; + regs_id = AHD_PCI_MEMADDR; + regs = bus_alloc_resource(ahd->dev_softc, regs_type, + ®s_id, 0, ~0, 1, RF_ACTIVE); + if (regs != NULL) { + int error; + + ahd->tags[0] = rman_get_bustag(regs); + ahd->bshs[0] = rman_get_bushandle(regs); + ahd->tags[1] = ahd->tags[0]; + error = bus_space_subregion(ahd->tags[0], ahd->bshs[0], + /*offset*/0x100, + /*size*/0x100, + &ahd->bshs[1]); + /* + * Do a quick test to see if memory mapped + * I/O is functioning correctly. + */ + if (error != 0 || ahd_inb(ahd, HCNTRL) == 0xFF) { + device_printf(ahd->dev_softc, + "PCI Device %d:%d:%d failed memory " + "mapped test. 
Using PIO.\n", + ahd_get_pci_bus(ahd->dev_softc), + ahd_get_pci_slot(ahd->dev_softc), + ahd_get_pci_function(ahd->dev_softc)); + bus_release_resource(ahd->dev_softc, regs_type, + regs_id, regs); + regs = NULL; + } else { + command &= ~PCIM_CMD_PORTEN; + ahd_pci_write_config(ahd->dev_softc, + PCIR_COMMAND, + command, /*bytes*/1); + } + } + } + if (regs == NULL && (command & PCIM_CMD_PORTEN) != 0) { + regs_type = SYS_RES_IOPORT; + regs_id = AHD_PCI_IOADDR0; + regs = bus_alloc_resource(ahd->dev_softc, regs_type, + ®s_id, 0, ~0, 1, RF_ACTIVE); + if (regs == NULL) { + device_printf(ahd->dev_softc, + "can't allocate register resources\n"); + return (ENOMEM); + } + ahd->tags[0] = rman_get_bustag(regs); + ahd->bshs[0] = rman_get_bushandle(regs); + + /* And now the second BAR */ + regs_id2 = AHD_PCI_IOADDR1; + regs2 = bus_alloc_resource(ahd->dev_softc, regs_type, + ®s_id2, 0, ~0, 1, RF_ACTIVE); + if (regs2 == NULL) { + device_printf(ahd->dev_softc, + "can't allocate register resources\n"); + return (ENOMEM); + } + ahd->tags[1] = rman_get_bustag(regs2); + ahd->bshs[1] = rman_get_bushandle(regs2); + command &= ~PCIM_CMD_MEMEN; + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + command, /*bytes*/1); + ahd->platform_data->regs_res_type[1] = regs_type; + ahd->platform_data->regs_res_id[1] = regs_id2; + ahd->platform_data->regs[1] = regs2; + } + ahd->platform_data->regs_res_type[0] = regs_type; + ahd->platform_data->regs_res_id[0] = regs_id; + ahd->platform_data->regs[0] = regs; + return (0); +} + +int +ahd_pci_map_int(struct ahd_softc *ahd) +{ + int zero; + + zero = 0; + ahd->platform_data->irq = + bus_alloc_resource(ahd->dev_softc, SYS_RES_IRQ, &zero, + 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + if (ahd->platform_data->irq == NULL) + return (ENOMEM); + ahd->platform_data->irq_res_type = SYS_RES_IRQ; + return (ahd_map_int(ahd)); +} + +void +ahd_power_state_change(struct ahd_softc *ahd, ahd_power_state new_state) +{ + uint32_t cap; + u_int cap_offset; + + /* + * Traverse the capability list looking for + * the power management capability. + */ + cap = 0; + cap_offset = ahd_pci_read_config(ahd->dev_softc, + PCIR_CAP_PTR, /*bytes*/1); + while (cap_offset != 0) { + + cap = ahd_pci_read_config(ahd->dev_softc, + cap_offset, /*bytes*/4); + if ((cap & 0xFF) == 1 + && ((cap >> 16) & 0x3) > 0) { + uint32_t pm_control; + + pm_control = ahd_pci_read_config(ahd->dev_softc, + cap_offset + 4, + /*bytes*/2); + pm_control &= ~0x3; + pm_control |= new_state; + ahd_pci_write_config(ahd->dev_softc, + cap_offset + 4, + pm_control, /*bytes*/2); + break; + } + cap_offset = (cap >> 8) & 0xFF; + } +} diff --git a/sys/dev/aic7xxx/aic79xx.c b/sys/dev/aic7xxx/aic79xx.c new file mode 100644 index 0000000..fe8881e --- /dev/null +++ b/sys/dev/aic7xxx/aic79xx.c @@ -0,0 +1,8345 @@ +/* + * Core routines and tables shareable across OS platforms. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2002 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#79 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include "aicasm/aicasm_insformat.h"
+#else
+#include <dev/aic7xxx/aic79xx_osm.h>
+#include <dev/aic7xxx/aic79xx_inline.h>
+#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
+#endif
+
+/****************************** Softc Data ************************************/
+struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
+
+/***************************** Lookup Tables **********************************/
+char *ahd_chip_names[] =
+{
+	"NONE",
+	"aic7901",
+	"aic7902"
+};
+static const u_int num_chip_names = NUM_ELEMENTS(ahd_chip_names);
+
+/*
+ * Hardware error codes.
+ */
+struct ahd_hard_error_entry {
+	uint8_t errno;
+	char *errmesg;
+};
+
+static struct ahd_hard_error_entry ahd_hard_errors[] = {
+	{ DSCTMOUT,	"Discard Timer has timed out" },
+	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
+	{ SQPARERR,	"Sequencer Parity Error" },
+	{ DPARERR,	"Data-path Parity Error" },
+	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
+	{ CIOPARERR,	"CIOBUS Parity Error" },
+};
+static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors);
+
+static struct ahd_phase_table_entry ahd_phase_table[] =
+{
+	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
+	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
+	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
+	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
+	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
+	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
+	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
+	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
+	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
+	{ 0,		MSG_NOOP,		"in unknown phase"	}
+};
+
+/*
+ * In most cases we only wish to iterate over real phases, so
+ * exclude the last element from the count.
+ */ +static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1; + +/* Our Sequencer Program */ +#include "aic79xx_seq.h" + +/**************************** Function Declarations ***************************/ +static void ahd_handle_transmission_error(struct ahd_softc *ahd); +static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, + u_int lqistat1); +static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, + u_int busfreetime); +static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); +static void ahd_force_renegotiation(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); + +static struct ahd_tmode_tstate* + ahd_alloc_tstate(struct ahd_softc *ahd, + u_int scsi_id, char channel); +#ifdef AHD_TARGET_MODE +static void ahd_free_tstate(struct ahd_softc *ahd, + u_int scsi_id, char channel, int force); +#endif +static void ahd_devlimited_syncrate(struct ahd_softc *ahd, + struct ahd_initiator_tinfo *, + u_int *period, + u_int *ppr_options, + role_t role); +static void ahd_update_neg_table(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct ahd_transinfo *tinfo); +static void ahd_update_pending_scbs(struct ahd_softc *ahd); +static void ahd_fetch_devinfo(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_scb_devinfo(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct scb *scb); +static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct scb *scb); +static void ahd_build_transfer_msg(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_construct_sdtr(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int period, u_int offset); +static void ahd_construct_wdtr(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int bus_width); +static void ahd_construct_ppr(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int period, u_int offset, + u_int bus_width, u_int ppr_options); +static void ahd_clear_msg_state(struct ahd_softc *ahd); +static void ahd_handle_message_phase(struct ahd_softc *ahd); +typedef enum { + AHDMSG_1B, + AHDMSG_2B, + AHDMSG_EXT +} ahd_msgtype; +static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, + u_int msgval, int full); +static int ahd_parse_msg(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static int ahd_handle_msg_reject(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo); +static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); +static void ahd_handle_devreset(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + cam_status status, char *message, + int verbose_level); +#if AHD_TARGET_MODE +static void ahd_setup_target_msgin(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + struct scb *scb); +#endif + +static bus_size_t ahd_sglist_size(struct ahd_softc *ahd); +static bus_size_t ahd_sglist_allocsize(struct ahd_softc *ahd); +static bus_dmamap_callback_t + ahd_dmamap_cb; +static void ahd_initialize_hscbs(struct ahd_softc *ahd); +static int ahd_init_scbdata(struct ahd_softc *ahd); +static void ahd_fini_scbdata(struct ahd_softc *ahd); +static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); +static void ahd_iocell_first_selection(struct ahd_softc *ahd); +static void ahd_chip_init(struct ahd_softc *ahd); +static void ahd_qinfifo_requeue(struct ahd_softc *ahd, + struct scb *prev_scb, + struct scb *scb); +static int ahd_qinfifo_count(struct ahd_softc *ahd); +static int ahd_search_scb_list(struct ahd_softc *ahd, 
int target,
+					   char channel, int lun, u_int tag,
+					   role_t role, uint32_t status,
+					   ahd_search_action action,
+					   u_int *list_head, u_int tid);
+static void		ahd_stitch_tid_list(struct ahd_softc *ahd,
+					    u_int tid_prev, u_int tid_cur,
+					    u_int tid_next);
+static void		ahd_add_scb_to_free_list(struct ahd_softc *ahd,
+						 u_int scbid);
+static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
+				     u_int prev, u_int next, u_int tid);
+static void		ahd_reset_current_bus(struct ahd_softc *ahd);
+static ahd_callback_t	ahd_reset_poll;
+#ifdef AHD_DUMP_SEQ
+static void		ahd_dumpseq(struct ahd_softc *ahd);
+#endif
+static void		ahd_loadseq(struct ahd_softc *ahd);
+static int		ahd_check_patch(struct ahd_softc *ahd,
+					struct patch **start_patch,
+					u_int start_instr, u_int *skip_addr);
+static u_int		ahd_resolve_seqaddr(struct ahd_softc *ahd,
+					    u_int address);
+static void		ahd_download_instr(struct ahd_softc *ahd,
+					   u_int instrptr, uint8_t *dconsts);
+#ifdef AHD_TARGET_MODE
+static void		ahd_queue_lstate_event(struct ahd_softc *ahd,
+					       struct ahd_tmode_lstate *lstate,
+					       u_int initiator_id,
+					       u_int event_type,
+					       u_int event_arg);
+static void		ahd_update_scsiid(struct ahd_softc *ahd,
+					  u_int targid_mask);
+static int		ahd_handle_target_cmd(struct ahd_softc *ahd,
+					      struct target_cmd *cmd);
+#endif
+
+/******************************** Private Inlines *****************************/
+static __inline void	ahd_assert_atn(struct ahd_softc *ahd);
+static __inline int	ahd_currently_packetized(struct ahd_softc *ahd);
+static __inline int	ahd_set_active_fifo(struct ahd_softc *ahd);
+
+static __inline void
+ahd_assert_atn(struct ahd_softc *ahd)
+{
+	ahd_outb(ahd, SCSISIGO, ATNO);
+}
+
+/*
+ * Determine if the current connection has a packetized
+ * agreement. This does not necessarily mean that we
+ * are currently in a packetized transfer. We could
+ * just as easily be sending or receiving a message.
+ */
+static __inline int
+ahd_currently_packetized(struct ahd_softc *ahd)
+{
+	ahd_mode_state saved_modes;
+	int packetized;
+
+	saved_modes = ahd_save_modes(ahd);
+	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
+		/*
+		 * The packetized bit refers to the last
+		 * connection, not the current one. Check
+		 * for non-zero LQISTATE instead.
+		 */
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		packetized = ahd_inb(ahd, LQISTATE) != 0;
+	} else {
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
+	}
+	ahd_restore_modes(ahd, saved_modes);
+	return (packetized);
+}
+
+static __inline int
+ahd_set_active_fifo(struct ahd_softc *ahd)
+{
+	u_int active_fifo;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
+/* XXX This is a three position switch in the B.
*/ + switch (active_fifo) { + case 0: + case 1: + ahd_set_modes(ahd, active_fifo, active_fifo); + return (1); + default: + return (0); + } +} + +/************************* Sequencer Execution Control ************************/ +/* + * Restart the sequencer program from address zero + */ +void +ahd_restart(struct ahd_softc *ahd) +{ + + ahd_pause(ahd); + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + /* No more pending messages */ + ahd_clear_msg_state(ahd); + ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */ + ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */ + ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET); + ahd_outb(ahd, SEQINTCTL, 0); + ahd_outb(ahd, LASTPHASE, P_BUSFREE); + ahd_outb(ahd, SEQ_FLAGS, 0); + ahd_outb(ahd, SAVED_SCSIID, 0xFF); + ahd_outb(ahd, SAVED_LUN, 0xFF); + + /* + * Ensure that the sequencer's idea of TQINPOS + * matches our own. The sequencer increments TQINPOS + * only after it sees a DMA complete and a reset could + * occur before the increment leaving the kernel to believe + * the command arrived but the sequencer to not. + */ + ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); + + /* Always allow reselection */ + ahd_outb(ahd, SCSISEQ1, + ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); + /* Ensure that no DMA operations are in progress */ + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + ahd_outb(ahd, SCBHCNT, 0); + ahd_outb(ahd, CCSCBCTL, CCSCBRESET); + + ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); + ahd_unpause(ahd); +} + +void +ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo) +{ + ahd_mode_state saved_modes; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_FIFOS) != 0) + printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); +#endif + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, fifo, fifo); + ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); + if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) + ahd_outb(ahd, CCSGCTL, CCSGRESET); + ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); + ahd_outb(ahd, SG_STATE, 0); + ahd_restore_modes(ahd, saved_modes); +} + +/************************* Input/Output Queues ********************************/ +void +ahd_run_qoutfifo(struct ahd_softc *ahd) +{ + struct scb *scb; + u_int scb_index; + + ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD); + while (ahd->qoutfifo[ahd->qoutfifonext] != SCB_LIST_NULL_LE) { + + scb_index = ahd_le16toh(ahd->qoutfifo[ahd->qoutfifonext]); + scb = ahd_lookup_scb(ahd, scb_index); + if (scb == NULL) { + printf("%s: WARNING no command for scb %d " + "(cmdcmplt)\nQOUTPOS = %d\n", + ahd_name(ahd), scb_index, + ahd->qoutfifonext); + ahd_dump_card_state(ahd); + ahd->qoutfifonext = AHD_QOUT_WRAP(ahd->qoutfifonext+1); + continue; + } + + if ((ahd->qoutfifonext & 0x01) == 0x01) { + + /* + * Clear 32bits of QOUTFIFO at a time + * so that we don't clobber an incoming + * 16bit DMA to the array on architectures + * that only support 32bit load and store + * operations. 
+			 */
+			ahd->qoutfifo[ahd->qoutfifonext - 1] = SCB_LIST_NULL_LE;
+			ahd->qoutfifo[ahd->qoutfifonext] = SCB_LIST_NULL_LE;
+			ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+					ahd->shared_data_dmamap,
+					/*offset*/(ahd->qoutfifonext - 1)*2,
+					/*len*/4, BUS_DMASYNC_PREREAD);
+		}
+		ahd->qoutfifonext = AHD_QOUT_WRAP(ahd->qoutfifonext+1);
+
+		ahd_complete_scb(ahd, scb);
+	}
+}
+
+void
+ahd_run_untagged_queues(struct ahd_softc *ahd)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		ahd_run_untagged_queue(ahd, &ahd->untagged_queues[i]);
+}
+
+void
+ahd_run_untagged_queue(struct ahd_softc *ahd, struct scb_tailq *queue)
+{
+	struct scb *scb;
+
+	if (ahd->untagged_queue_lock != 0)
+		return;
+
+	if ((scb = TAILQ_FIRST(queue)) != NULL
+	 && (scb->flags & SCB_ACTIVE) == 0) {
+		scb->flags |= SCB_ACTIVE;
+		ahd_queue_scb(ahd, scb);
+	}
+}
+
+/************************* Interrupt Handling *********************************/
+void
+ahd_handle_hwerrint(struct ahd_softc *ahd)
+{
+	/*
+	 * Some catastrophic hardware error has occurred.
+	 * Print it for the user and disable the controller.
+	 */
+	int i;
+	int error;
+
+	error = ahd_inb(ahd, ERROR);
+	for (i = 0; i < num_errors; i++) {
+		if ((error & ahd_hard_errors[i].errno) != 0)
+			printf("%s: hwerrint, %s\n",
+			       ahd_name(ahd), ahd_hard_errors[i].errmesg);
+	}
+
+	ahd_dump_card_state(ahd);
+	panic("BRKADRINT");
+
+	/* Tell everyone that this HBA is no longer available */
+	ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
+		       CAM_NO_HBA);
+
+	/* Tell the system that this controller has gone away. */
+	ahd_free(ahd);
+}
+
+void
+ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
+{
+	u_int seqintcode;
+
+	/*
+	 * Save the sequencer interrupt code and clear the SEQINT
+	 * bit. We will unpause the sequencer, if appropriate,
+	 * after servicing the request.
+	 */
+	seqintcode = ahd_inb(ahd, SEQINTCODE);
+	ahd_outb(ahd, CLRINT, CLRSEQINT);
+	ahd_update_modes(ahd);
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MISC) != 0)
+		printf("%s: Handle Seqint Called for code %d\n",
+		       ahd_name(ahd), seqintcode);
+#endif
+	switch (seqintcode) {
+	case ENTERING_NONPACK:
+	{
+		struct scb *scb;
+		u_int scbid;
+
+		AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+				 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			/*
+			 * Somehow need to know if this
+			 * is from a selection or reselection.
+			 * From that, we can determine target
+			 * ID so we at least have an I_T nexus.
+			 */
+		} else {
+			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
+			ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
+			ahd_outb(ahd, SEQ_FLAGS, 0x0);
+		}
+		if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
+		 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
+			/*
+			 * Phase change after read stream with
+			 * CRC error with P0 asserted on last
+			 * packet.
+ */ + printf("Assuming LQIPHASE_NLQ with P0 assertion\n"); + } + printf("Entering NONPACK\n"); + break; + } + case INVALID_SEQINT: + printf("%s: Invalid Sequencer interrupt occurred.\n", + ahd_name(ahd)); + ahd_dump_card_state(ahd); + printf("invalid seqint"); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + break; + case STATUS_OVERRUN: + { + printf("%s: Status Overrun", ahd_name(ahd)); + ahd_dump_card_state(ahd); + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + break; + } + case CFG4ISTAT_INTR: + { + struct scb *scb; + u_int scbid; + + ahd_update_modes(ahd); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + ahd_dump_card_state(ahd); + printf("CFG4ISTAT: Free SCB %d referenced", scbid); + panic("For safety"); + } + ahd_outq(ahd, HADDR, scb->sense_busaddr); + ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE); + ahd_outb(ahd, HCNT + 2, 0); + ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG); + ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); + break; + } + case ILLEGAL_PHASE: + { + u_int bus_phase; + + bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; + printf("%s: ILLEGAL_PHASE 0x%x\n", + ahd_name(ahd), bus_phase); + + switch (bus_phase) { + case P_DATAOUT: + case P_DATAIN: + case P_DATAOUT_DT: + case P_DATAIN_DT: + case P_MESGOUT: + case P_STATUS: + case P_MESGIN: + ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); + printf("%s: Issued Bus Reset.\n", ahd_name(ahd)); + break; + case P_COMMAND: + { + struct ahd_devinfo devinfo; + struct scb *scb; + struct ahd_initiator_tinfo *targ_info; + struct ahd_tmode_tstate *tstate; + struct ahd_transinfo *tinfo; + u_int scbid; + + /* + * If a target takes us into the command phase + * assume that it has been externally reset and + * has thus lost our previous packetized negotiation + * agreement. Since we have not sent an identify + * message and may not have fully qualified the + * connection, we change our command to TUR, assert + * ATN and ABORT the task when we go to message in + * phase. The OSM will see the REQUEUE_REQUEST + * status and retry the command. + */ + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printf("Invalid phase with no valid SCB. " + "Resetting bus.\n"); + ahd_reset_channel(ahd, 'A', + /*Initiate Reset*/TRUE); + break; + } + ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), + SCB_GET_TARGET(ahd, scb), + SCB_GET_LUN(scb), + SCB_GET_CHANNEL(ahd, scb), + ROLE_INITIATOR); + targ_info = ahd_fetch_transinfo(ahd, + devinfo.channel, + devinfo.our_scsiid, + devinfo.target, + &tstate); + tinfo = &targ_info->curr; + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, + /*offset*/0, /*ppr_options*/0, + AHD_TRANS_ACTIVE, /*paused*/TRUE); + ahd_outb(ahd, SCB_CDB_STORE, 0); + ahd_outb(ahd, SCB_CDB_STORE+1, 0); + ahd_outb(ahd, SCB_CDB_STORE+2, 0); + ahd_outb(ahd, SCB_CDB_STORE+3, 0); + ahd_outb(ahd, SCB_CDB_STORE+4, 0); + ahd_outb(ahd, SCB_CDB_STORE+5, 0); + ahd_outb(ahd, SCB_CDB_LEN, 6); + scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); + scb->hscb->control |= MK_MESSAGE; + ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); + /* + * The lun is 0, regardless of the SCB's lun + * as we have not sent an identify message. 
+			 */
+			ahd_outb(ahd, SAVED_LUN, 0);
+			ahd_outb(ahd, SEQ_FLAGS, 0);
+			ahd_assert_atn(ahd);
+			scb->flags &= ~(SCB_PACKETIZED);
+			scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
+			ahd_freeze_devq(ahd, scb);
+			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
+			ahd_freeze_scb(scb);
+
+			/*
+			 * Allow the sequencer to continue with
+			 * non-pack processing.
+			 */
+			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+			ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
+			if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+				ahd_outb(ahd, CLRLQOINT1, 0);
+			}
+			printf("Continuing non-pack processing...\n");
+			break;
+		}
+		}
+		break;
+	}
+	case CFG4OVERRUN:
+		printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
+		       ahd_inb(ahd, MODE_PTR));
+		break;
+	case DUMP_CARD_STATE:
+	{
+		ahd_dump_card_state(ahd);
+		break;
+	}
+	case PDATA_REINIT:
+	{
+		printf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
+		       "SG_CACHE_SHADOW = 0x%x\n",
+		       ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
+		       ahd_inb(ahd, SG_CACHE_SHADOW));
+		ahd_reinitialize_dataptrs(ahd);
+		break;
+	}
+	case HOST_MSG_LOOP:
+	{
+		struct ahd_devinfo devinfo;
+
+		/*
+		 * The sequencer has encountered a message phase
+		 * that requires host assistance for completion.
+		 * While handling the message phase(s), we will be
+		 * notified by the sequencer after each byte is
+		 * transferred so we can track bus phase changes.
+		 *
+		 * If this is the first time we've seen a HOST_MSG_LOOP
+		 * interrupt, initialize the state of the host message
+		 * loop.
+		 */
+		ahd_fetch_devinfo(ahd, &devinfo);
+		if (ahd->msg_type == MSG_TYPE_NONE) {
+			struct scb *scb;
+			u_int scb_index;
+			u_int bus_phase;
+
+			bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+			if (bus_phase != P_MESGIN
+			 && bus_phase != P_MESGOUT) {
+				printf("ahd_intr: HOST_MSG_LOOP bad "
+				       "phase 0x%x\n",
+				       bus_phase);
+				/*
+				 * Probably transitioned to bus free before
+				 * we got here. Just punt the message.
+ */ + ahd_dump_card_state(ahd); + ahd_clear_intstat(ahd); + ahd_restart(ahd); + return; + } + + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + if (devinfo.role == ROLE_INITIATOR) { + if (bus_phase == P_MESGOUT) + ahd_setup_initiator_msgout(ahd, + &devinfo, + scb); + else { + ahd->msg_type = + MSG_TYPE_INITIATOR_MSGIN; + ahd->msgin_index = 0; + } + } +#if AHD_TARGET_MODE + else { + if (bus_phase == P_MESGOUT) { + ahd->msg_type = + MSG_TYPE_TARGET_MSGOUT; + ahd->msgin_index = 0; + } + else + ahd_setup_target_msgin(ahd, + &devinfo, + scb); + } +#endif + } + + ahd_handle_message_phase(ahd); + break; + } + case NO_MATCH: + { + /* Ensure we don't leave the selection hardware on */ + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); + + printf("%s:%c:%d: no active SCB for reconnecting " + "target - issuing BUS DEVICE RESET\n", + ahd_name(ahd), 'A', ahd_inb(ahd, SELID)); + printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " + "ARG_1 == 0x%x ACCUM = 0x%x\n", + ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), + ahd_inb(ahd, ARG_1), ahd_inb(ahd, ACCUM)); + printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " + "SINDEX == 0x%x\n", + ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), + ahd_find_busy_tcl(ahd, + BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), + ahd_inb(ahd, SAVED_LUN))), + ahd_inb(ahd, SINDEX)); + printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " + "SCB_CONTROL == 0x%x\n", + ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), + ahd_inb_scbram(ahd, SCB_LUN), + ahd_inb_scbram(ahd, SCB_CONTROL)); + printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", + ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); + printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); + printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); + ahd_dump_card_state(ahd); + ahd->msgout_buf[0] = MSG_BUS_DEV_RESET; + ahd->msgout_len = 1; + ahd->msgout_index = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_assert_atn(ahd); + break; + } + case PROTO_VIOLATION: + { + struct scb *scb; + u_int scbid; + u_int scsiid; + u_int target; + u_int seq_flags; + u_int curphase; + int found; + + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + scsiid = ahd_inb(ahd, SAVED_SCSIID); + target = SCSIID_TARGET(ahd, scsiid); + seq_flags = ahd_inb(ahd, SEQ_FLAGS); + curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; + if ((seq_flags & NOT_IDENTIFIED) != 0) { + + /* + * The reconnecting target either did not send an + * identify message, or did, but we didn't find an SCB + * to match. + */ + printf("%s:%c:%d: Target did not send an " + "IDENTIFY message. LASTPHASE = 0x%x, " + "SAVED_SCSIID == 0x%x\n", ahd_name(ahd), + 'A', target, ahd_inb(ahd, LASTPHASE), + scsiid); + } else if (scb == NULL) { + /* + * We don't seem to have an SCB active for this + * transaction. Print an error and reset the bus. + */ + printf("%s:%c:%d: No SCB found during protocol " + "violation\n", ahd_name(ahd), 'A', target); + goto proto_violation_reset; + } else if ((seq_flags & NO_CDB_SENT) != 0) { + ahd_print_path(ahd, scb); + printf("No or incomplete CDB sent to device.\n"); + } else if ((ahd_inb(ahd, SCB_CONTROL) & STATUS_RCVD) == 0) { + /* + * The target never bothered to provide status to + * us prior to completing the command. Since we don't + * know the disposition of this command, we must attempt + * to abort it. Assert ATN and prepare to send an abort + * message. 
+ */ + ahd_print_path(ahd, scb); + printf("Completed command without status.\n"); + } else { + ahd_print_path(ahd, scb); + printf("Unknown protocol violation.\n"); + ahd_dump_card_state(ahd); + } + if ((curphase & ~P_DATAIN_DT) == 0) { +proto_violation_reset: + /* + * Target either went directly to data + * phase or didn't respond to our ATN. + * The only safe thing to do is to blow + * it away with a bus reset. + */ + found = ahd_reset_channel(ahd, 'A', TRUE); + printf("%s: Issued Channel %c Bus Reset. " + "%d SCBs aborted\n", ahd_name(ahd), 'A', found); + } else { + /* + * Leave the selection hardware off in case + * this abort attempt will affect yet to + * be sent commands. + */ + ahd_outb(ahd, SCSISEQ0, + ahd_inb(ahd, SCSISEQ0) & ~ENSELO); + ahd_print_path(ahd, scb); + printf("Protocol violation %s. Attempting to abort.\n", + ahd_lookup_phase_entry(curphase)->phasemsg); + scb->flags |= SCB_ABORT; + ahd_assert_atn(ahd); + ahd_outb(ahd, MSG_OUT, HOST_MSG); + } + return; + } + case IGN_WIDE_RES: + { + struct ahd_devinfo devinfo; + + ahd_fetch_devinfo(ahd, &devinfo); + ahd_handle_ign_wide_residue(ahd, &devinfo); + break; + } + case BAD_PHASE: + { + u_int lastphase; + + lastphase = ahd_inb(ahd, LASTPHASE); + printf("%s:%c:%d: unknown scsi bus phase %x, " + "lastphase = 0x%x. Attempting to continue\n", + ahd_name(ahd), 'A', + SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), + lastphase, ahd_inb(ahd, SCSISIGI)); + break; + } + case MISSED_BUSFREE: + { + u_int lastphase; + + lastphase = ahd_inb(ahd, LASTPHASE); + printf("%s:%c:%d: Missed busfree. " + "Lastphase = 0x%x, Curphase = 0x%x\n", + ahd_name(ahd), 'A', + SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), + lastphase, ahd_inb(ahd, SCSISIGI)); + ahd_restart(ahd); + return; + } + case DATA_OVERRUN: + { + /* + * When the sequencer detects an overrun, it + * places the controller in "BITBUCKET" mode + * and allows the target to complete its transfer. + * Unfortunately, none of the counters get updated + * when the controller is in this mode, so we have + * no way of knowing how large the overrun was. + */ + struct scb *scb; + u_int scbindex = ahd_get_scbptr(ahd); + u_int lastphase = ahd_inb(ahd, LASTPHASE); + + scb = ahd_lookup_scb(ahd, scbindex); + ahd_print_path(ahd, scb); + printf("data overrun detected %s." + " Tag == 0x%x.\n", + ahd_lookup_phase_entry(lastphase)->phasemsg, + SCB_GET_TAG(scb)); + ahd_print_path(ahd, scb); + printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", + ahd_inb(ahd, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", + ahd_get_transfer_length(scb), scb->sg_count); + ahd_dump_sglist(scb); + + /* + * Set this and it will take effect when the + * target does a command complete. + */ + ahd_freeze_devq(ahd, scb); + ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); + ahd_freeze_scb(scb); + break; + } + case MKMSG_FAILED: + { + struct ahd_devinfo devinfo; + struct scb *scb; + u_int scbid; + + ahd_fetch_devinfo(ahd, &devinfo); + printf("%s:%c:%d:%d: Attempt to issue message failed\n", + ahd_name(ahd), devinfo.channel, devinfo.target, + devinfo.lun); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb != NULL + && (scb->flags & SCB_RECOVERY_SCB) != 0) + /* + * Ensure that we didn't put a second instance of this + * SCB into the QINFIFO. 
+			 */
+			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+					   SCB_GET_CHANNEL(ahd, scb),
+					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+					   ROLE_INITIATOR, /*status*/0,
+					   SEARCH_REMOVE);
+		ahd_outb(ahd, SCB_CONTROL,
+			 ahd_inb(ahd, SCB_CONTROL) & ~MK_MESSAGE);
+		break;
+	}
+	}
+	/*
+	 * The sequencer is paused immediately on
+	 * a SEQINT, so we should restart it when
+	 * we're done.
+	 */
+	ahd_unpause(ahd);
+}
+
+void
+ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
+{
+	struct scb *scb;
+	u_int status0;
+	u_int status3;
+	u_int status;
+	u_int lqistat1;
+	u_int lqostat0;
+	u_int scbid;
+
+	ahd_update_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
+	status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
+	status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
+	lqistat1 = ahd_inb(ahd, LQISTAT1);
+	lqostat0 = ahd_inb(ahd, LQOSTAT0);
+	if ((status0 & (SELDI|SELDO)) != 0) {
+		u_int simode0;
+
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		simode0 = ahd_inb(ahd, SIMODE0);
+		status0 &= simode0 & (ENSELDO|ENSELDI|IOERR);
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	}
+	scbid = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scbid);
+	if (scb != NULL
+	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+		scb = NULL;
+
+	/* Make sure the sequencer is in a safe location. */
+	ahd_clear_critical_section(ahd);
+
+	if ((status0 & IOERR) != 0) {
+		int now_lvd;
+
+		now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
+		printf("%s: Transceiver State Has Changed to %s mode\n",
+		       ahd_name(ahd), now_lvd ? "LVD" : "SE");
+		ahd_outb(ahd, CLRSINT0, CLRIOERR);
+/* XXX Still True?? */
+		/*
+		 * When transitioning to SE mode, the reset line
+		 * glitches, triggering an arbitration bug in some
+		 * Ultra2 controllers. This bug is cleared when we
+		 * assert the reset line. Since a reset glitch has
+		 * already occurred with this transition and a
+		 * transceiver state change is handled just like
+		 * a bus reset anyway, asserting the reset line
+		 * ourselves is safe.
+		 */
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/now_lvd == 0);
+
+		ahd_pause(ahd);
+		ahd_setup_iocell_workaround(ahd);
+		ahd_unpause(ahd);
+	} else if ((status0 & OVERRUN) != 0) {
+		printf("%s: SCSI offset overrun detected. Resetting bus.\n",
+		       ahd_name(ahd));
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+	} else if ((status & SCSIRSTI) != 0) {
+		printf("%s: Someone reset channel A\n", ahd_name(ahd));
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
+	} else if ((status & SCSIPERR) != 0) {
+		ahd_handle_transmission_error(ahd);
+	} else if (lqostat0 != 0) {
+		printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
+		ahd_outb(ahd, CLRLQOINT0, lqostat0);
+		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+			ahd_outb(ahd, CLRLQOINT1, 0);
+		}
+	} else if ((status & SELTO) != 0) {
+		u_int scbid;
+
+		/* Stop the selection */
+		ahd_outb(ahd, SCSISEQ0, 0);
+
+		/* No more pending messages */
+		ahd_clear_msg_state(ahd);
+
+		/* Clear interrupt state */
+		ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);
+		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
+
+		/*
+		 * Although the driver does not care about the
+		 * 'Selection in Progress' status bit, the busy
+		 * LED does. SELINGO is only cleared by a successful
+		 * selection, so we must manually clear it to ensure
+		 * the LED turns off just in case no future successful
+		 * selections occur (e.g. no devices on the bus).
+ */ + ahd_outb(ahd, CLRSINT0, CLRSELINGO); + + scbid = ahd_inw(ahd, WAITING_TID_HEAD); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_SELTO) != 0) + printf("%s: Saw Selection Timeout for SCB 0x%x\n", + ahd_name(ahd), scbid); +#endif + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printf("%s: ahd_intr - referenced scb not " + "valid during SELTO scb(0x%x)\n", + ahd_name(ahd), scbid); + ahd_dump_card_state(ahd); + panic("For diagnostics"); + } else { + ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT); + ahd_freeze_devq(ahd, scb); + } + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_iocell_first_selection(ahd); + ahd_restart(ahd); + } else if ((status0 & (SELDI|SELDO)) != 0) { + ahd_iocell_first_selection(ahd); + ahd_unpause(ahd); + } else if (status3 != 0) { + printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", + ahd_name(ahd), status3); + ahd_outb(ahd, CLRSINT3, status3); + } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { + ahd_handle_lqiphase_error(ahd, lqistat1); + } else if ((status & BUSFREE) != 0) { + u_int busfreetime; + u_int lqostat1; + int restart; + int clear_fifo; + int packetized; + u_int mode; + + /* + * Clear our selection hardware as soon as possible. + * We may have an entry in the waiting Q for this target, + * that is affected by this busfree and we don't want to + * go about selecting the target while we handle the event. + */ + ahd_outb(ahd, SCSISEQ0, 0); + + /* + * Determine what we were up to at the time of + * the busfree. + */ + mode = AHD_MODE_SCSI; + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; + lqostat1 = ahd_inb(ahd, LQOSTAT1); + switch (busfreetime) { + case BUSFREE_DFF0: + case BUSFREE_DFF1: + { + u_int scbid; + struct scb *scb; + + mode = busfreetime == BUSFREE_DFF0 + ? AHD_MODE_DFF0 : AHD_MODE_DFF1; + ahd_set_modes(ahd, mode, mode); + scbid = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printf("%s: Invalid SCB in DFF%d " + "during unexpected busfree\n", + ahd_name(ahd), mode); + packetized = 0; + } else + packetized = (scb->flags & SCB_PACKETIZED) != 0; + clear_fifo = 1; + break; + } + case BUSFREE_LQO: + clear_fifo = 0; + packetized = 1; + break; + default: + clear_fifo = 0; + packetized = (lqostat1 & LQOBUSFREE) != 0; + if (!packetized + && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) + packetized = 1; + break; + } + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printf("Saw Busfree. Busfreetime = 0x%x.\n", + busfreetime); +#endif + /* + * Busfrees that occur in non-packetized phases are + * handled by the nonpkt_busfree handler. + */ + if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { + restart = ahd_handle_pkt_busfree(ahd, busfreetime); + } else { + restart = ahd_handle_nonpkt_busfree(ahd); + } + /* + * Clear the busfree interrupt status. The setting of + * the interrupt is a pulse, so we do not need to muck + * with the ENBUSFREE logic. This also ensures that if + * the bus has moved on to another connection, busfree + * protection is still in force. + */ + ahd_outb(ahd, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); + + if (clear_fifo) + ahd_clear_fifo(ahd, mode); + + ahd_clear_msg_state(ahd); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + if (restart) + ahd_restart(ahd); + else { + ahd_unpause(ahd); + } + } else { + printf("%s: Missing case in ahd_handle_scsiint. 
status = %x\n",
+		       ahd_name(ahd), status);
+		printf("%s: lqostat1 == 0x%x, SIMODE1 == 0x%x\n",
+		       ahd_name(ahd), ahd_inb(ahd, LQOSTAT1),
+		       ahd_inb(ahd, SIMODE1));
+		ahd_outb(ahd, CLRINT, CLRSCSIINT);
+		ahd_dump_card_state(ahd);
+		panic("Missing SCSIINT case");
+		ahd_unpause(ahd);
+	}
+}
+
+static void
+ahd_handle_transmission_error(struct ahd_softc *ahd)
+{
+	u_int lqistat1;
+	u_int lqistat2;
+	u_int msg_out;
+	u_int curphase;
+	u_int lastphase;
+	u_int perrdiag;
+
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
+	lqistat2 = ahd_inb(ahd, LQISTAT2);
+	if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
+	 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
+		u_int lqistate;
+
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		lqistate = ahd_inb(ahd, LQISTATE);
+		if ((lqistate >= 0x1E && lqistate <= 0x24)
+		 || (lqistate == 0x29)) {
+			printf("%s: NLQCRC found via LQISTATE\n",
+			       ahd_name(ahd));
+			lqistat1 |= LQICRCI_NLQ;
+		}
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	}
+
+	ahd_outb(ahd, CLRLQIINT1, lqistat1);
+	lastphase = ahd_inb(ahd, LASTPHASE);
+	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+	perrdiag = ahd_inb(ahd, PERRDIAG);
+	msg_out = MSG_INITIATOR_DET_ERR;
+	ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
+	printf("%s: Transmission error detected\n", ahd_name(ahd));
+	printf("%s: lqistat1 == 0x%x, LASTPHASE == 0x0%x, "
+	       "curphase = 0x%x, perrdiag == 0x%x\n",
+	       ahd_name(ahd), lqistat1, lastphase, curphase, perrdiag);
+	ahd_dump_card_state(ahd);
+	if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
+		printf("%s: Gross protocol error during incoming "
+		       "packet. lqistat1 == 0x%x. Resetting bus.\n",
+		       ahd_name(ahd), lqistat1);
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+		return;
+	} else if ((lqistat1 & LQICRCI_LQ) != 0) {
+		/*
+		 * A CRC error has been detected on an incoming LQ.
+		 * The bus is currently hung on the last ACK.
+		 * Hit LQIRETRY to release the last ack, and
+		 * wait for the sequencer to determine that ATNO
+		 * is asserted while in message out to take us
+		 * to our host message loop. No NONPACKREQ or
+		 * LQIPHASE type errors will occur in this
+		 * scenario. After this first LQIRETRY, the LQI
+		 * manager will be in ISELO where it will
+		 * happily sit until another packet phase begins.
+		 * Unexpected bus free detection is enabled
+		 * through any phases that occur after we release
+		 * this last ack until the LQI manager sees a
+		 * packet phase. This implies we may have to
+		 * ignore a perfectly valid "unexpected busfree"
+		 * after our "initiator detected error" message is
+		 * sent. A busfree is the expected response after
+		 * we tell the target that its L_Q was corrupted.
+		 * (SPI4R09 10.7.3.3.3)
+		 */
+		ahd_outb(ahd, LQCTL2, LQIRETRY);
+		printf("LQIRetry for LQICRCI_LQ to release ACK\n");
+	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
+		u_int scbid;
+		struct scb *scb;
+
+		/*
+		 * We detected a CRC error in a NON-LQ packet.
+		 * The hardware has varying behavior in this situation
+		 * depending on whether this packet was part of a
+		 * stream or not.
+		 *
+		 * PKT by PKT mode:
+		 * The hardware has already acked the complete packet.
+		 * If the target honors our outstanding ATN condition,
+		 * we should be (or soon will be) in MSGOUT phase.
+		 * This will trigger the LQIPHASE_LQ status bit as the
+		 * hardware was expecting another LQ. Unexpected
+		 * busfree detection is enabled. Once LQIPHASE_LQ is
+		 * true (first entry into host message loop is much
+		 * the same), we must clear LQIPHASE_LQ and hit
+		 * LQIRETRY so the hardware is ready to handle
+		 * a future LQ. NONPACKREQ will not be asserted again
+		 * once we hit LQIRETRY until another packet is
+		 * processed. The target may either go busfree
+		 * or start another packet in response to our message.
+		 *
+		 * Read Streaming P0 asserted:
+		 * If we raise ATN and the target completes the entire
+		 * stream (P0 asserted during the last packet), the
+		 * hardware will ack all data and return to the ISTART
+		 * state. When the target responds to our ATN condition,
+		 * LQIPHASE_LQ will be asserted. We should respond to
+		 * this with an LQIRETRY to prepare for any future
+		 * packets. NONPACKREQ will not be asserted again
+		 * once we hit LQIRETRY until another packet is
+		 * processed. The target may either go busfree or
+		 * start another packet in response to our message.
+		 * Busfree detection is enabled.
+		 *
+		 * Read Streaming P0 not asserted:
+		 * If we raise ATN and the target transitions to
+		 * MSGOUT in or after a packet where P0 is not
+		 * asserted, the hardware will assert LQIPHASE_NLQ.
+		 * We should respond to the LQIPHASE_NLQ with an
+		 * LQICONTINUE. Should the target stay in a non-pkt
+		 * phase after we send our message, the hardware
+		 * will assert LQIPHASE_LQ. Recovery is then just as
+		 * listed above for the read streaming with P0 asserted.
+		 * Busfree detection is enabled.
+		 */
+		printf("LQICRC_NLQ\n");
+		ahd_set_active_fifo(ahd);
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			printf("%s: No SCB valid for LQICRC_NLQ. "
+			       "Resetting bus\n", ahd_name(ahd));
+			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+			return;
+		}
+		scb->flags |= SCB_TRANSMISSION_ERROR;
+	} else if ((lqistat1 & LQIBADLQI) != 0) {
+		printf("Need to handle BADLQI!\n");
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+		return;
+	} else if ((perrdiag & (PARITYERR|LASTPHASE)) == PARITYERR) {
+		if ((curphase & ~P_DATAIN_DT) != 0) {
+			/* Ack the byte. So we can continue. */
+			printf("Acking %s to clear perror\n",
+			       ahd_lookup_phase_entry(curphase)->phasemsg);
+			ahd_inb(ahd, SCSIDAT);
+		}
+
+		if (curphase == P_MESGIN)
+			msg_out = MSG_PARITY_ERROR;
+	}
+
+	/*
+	 * We've set the hardware to assert ATN if we
+	 * get a parity error on "in" phases, so all we
+	 * need to do is stuff the message buffer with
+	 * the appropriate message. "In" phases have set
+	 * mesg_out to something other than MSG_NOP.
+	 */
+	ahd->send_msg_perror = msg_out;
+	ahd_outb(ahd, MSG_OUT, HOST_MSG);
+	ahd_outb(ahd, CLRINT, CLRSCSIINT);
+	ahd_unpause(ahd);
+}
+
+static void
+ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
+{
+	/*
+	 * Clear the sources of the interrupts.
+	 */
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, CLRLQIINT1, lqistat1);
+
+	/*
+	 * If the "illegal" phase changes were in response
+	 * to our ATN to flag a CRC error, AND we ended up
+	 * on packet boundaries, clear the error, restart the
+	 * LQI manager as appropriate, and go on our merry
+	 * way toward sending the message. Otherwise, reset
+	 * the bus to clear the error.
+	 */
+	ahd_set_active_fifo(ahd);
+	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
+	 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
+		if ((lqistat1 & LQIPHASE_LQ) != 0) {
+			printf("LQIRETRY for LQIPHASE_LQ\n");
+			ahd_outb(ahd, LQCTL2, LQIRETRY);
+		} else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
+			printf("LQICONTINUE for LQIPHASE_NLQ\n");
+			ahd_outb(ahd, LQCTL2, LQIRETRY);
+		} else
+			panic("ahd_handle_lqiphase_error: No phase errors\n");
+		ahd_dump_card_state(ahd);
+		ahd_outb(ahd, CLRINT, CLRSCSIINT);
+		ahd_unpause(ahd);
+	} else {
+		printf("Resetting Channel for LQI Phase error\n");
+		ahd_dump_card_state(ahd);
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+	}
+}
+
+/*
+ * Packetized unexpected or expected busfree.
+ * Entered in MODE_SCSI.
+ */
+static int
+ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
+{
+	u_int lqostat1;
+
+	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+	lqostat1 = ahd_inb(ahd, LQOSTAT1);
+	if ((lqostat1 & LQOBUSFREE) != 0) {
+		struct scb *scb;
+		u_int scbid;
+		u_int waiting_h;
+		u_int waiting_t;
+		u_int next;
+
+		if ((busfreetime & BUSFREE_LQO) == 0)
+			printf("%s: Warning, BUSFREE time is 0x%x. "
+			       "Expected BUSFREE_LQO.\n",
+			       ahd_name(ahd), busfreetime);
+
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL)
+			panic("SCB not valid during LQOBUSFREE");
+		ahd_print_path(ahd, scb);
+		printf("Probable outgoing LQ CRC error. Retrying command\n");
+
+		/*
+		 * Return the LQO manager to its idle loop. It will
+		 * not do this automatically if the busfree occurs
+		 * after the first REQ of either the LQ or command
+		 * packet or between the LQ and command packet.
+		 */
+		ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
+
+		/*
+		 * Clear the status.
+		 */
+		ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
+		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+			ahd_outb(ahd, CLRLQOINT1, 0);
+		}
+		/*
+		 * The LQO manager detected an unexpected busfree
+		 * either:
+		 *
+		 * 1) During an outgoing LQ.
+		 * 2) After an outgoing LQ but before the first
+		 *    REQ of the command packet.
+		 * 3) During an outgoing command packet.
+		 *
+		 * In all cases, CURRSCB is pointing to the
+		 * SCB that encountered the failure. Clean
+		 * up the queue, clear SELDO and LQOBUSFREE,
+		 * and allow the sequencer to restart the select
+		 * out at its leisure.
+		 */
+		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+		ahd_outb(ahd, CLRSINT0, CLRSELDO);
+		waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
+		if (waiting_h != scbid) {
+
+			ahd_outw(ahd, WAITING_TID_HEAD, scbid);
+			waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
+			next = SCB_LIST_NULL;
+			if (waiting_t == waiting_h) {
+				ahd_outw(ahd, WAITING_TID_TAIL, scbid);
+			} else {
+				ahd_set_scbptr(ahd, waiting_h);
+				next = ahd_inw(ahd, SCB_NEXT2);
+			}
+			ahd_set_scbptr(ahd, scbid);
+			ahd_outw(ahd, SCB_NEXT2, next);
+		}
+
+		/* Return unpausing the sequencer. */
+		return (0);
+	}
+	if (ahd->src_mode != AHD_MODE_SCSI) {
+		u_int scbid;
+		struct scb *scb;
+
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		ahd_print_path(ahd, scb);
+		printf("Unexpected PKT busfree condition\n");
+		ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
+			       SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+			       ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
+
+		/* Return restarting the sequencer. */
+		return (1);
+	}
+	printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
+	ahd_dump_card_state(ahd);
+	/* Restart the sequencer. */
+	return (1);
+}
+
+/*
+ * Non-packetized unexpected or expected busfree.
+ */
+static int
+ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
+{
+	struct ahd_devinfo devinfo;
+	struct scb *scb;
+	u_int lastphase;
+	u_int saved_scsiid;
+	u_int saved_lun;
+	u_int target;
+	u_int initiator_role_id;
+	u_int scbid;
+	int printerror;
+
+	/*
+	 * Look at what phase we were last in. If it's message out,
+	 * chances are pretty good that the busfree was in response
+	 * to one of our abort requests.
+	 */
+	lastphase = ahd_inb(ahd, LASTPHASE);
+	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+	saved_lun = ahd_inb(ahd, SAVED_LUN);
+	target = SCSIID_TARGET(ahd, saved_scsiid);
+	initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
+	ahd_compile_devinfo(&devinfo, initiator_role_id,
+			    target, saved_lun, 'A', ROLE_INITIATOR);
+	printerror = 1;
+
+	scbid = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scbid);
+	if (scb != NULL
+	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+		scb = NULL;
+
+	if (lastphase == P_MESGOUT) {
+		u_int tag;
+
+		tag = SCB_LIST_NULL;
+		if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
+		 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
+			int found;
+			int sent_msg;
+
+			sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
+			ahd_print_path(ahd, scb);
+			printf("SCB %d - Abort%s Completed.\n",
+			       SCB_GET_TAG(scb),
+			       sent_msg == MSG_ABORT_TAG ? "" : " Tag");
+
+			if (sent_msg == MSG_ABORT_TAG)
+				tag = SCB_GET_TAG(scb);
+
+			if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
+				/*
+				 * This abort is in response to an
+				 * unexpected switch to command phase
+				 * for a packetized connection. Since
+				 * the identify message was never sent,
+				 * "saved lun" is 0. We really want to
+				 * abort only the SCB that encountered
+				 * this error, which could have a different
+				 * lun. The SCB will be retried so the OS
+				 * will see the UA after renegotiating to
+				 * packetized.
+				 */
+				tag = SCB_GET_TAG(scb);
+				saved_lun = scb->hscb->lun;
+			}
+			found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
+					       tag, ROLE_INITIATOR,
+					       CAM_REQ_ABORTED);
+			printf("found == 0x%x\n", found);
+			printerror = 0;
+		} else if (ahd_sent_msg(ahd, AHDMSG_1B,
+					MSG_BUS_DEV_RESET, TRUE)) {
+#ifdef __FreeBSD__
+			/*
+			 * Don't mark the user's request for this BDR
+			 * as completing with CAM_BDR_SENT. CAM3
+			 * specifies CAM_REQ_CMP.
+			 */
+			if (scb != NULL
+			 && scb->io_ctx->ccb_h.func_code == XPT_RESET_DEV
+			 && ahd_match_scb(ahd, scb, target, 'A',
+					  CAM_LUN_WILDCARD, SCB_LIST_NULL,
+					  ROLE_INITIATOR))
+				ahd_set_transaction_status(scb, CAM_REQ_CMP);
+#endif
+			ahd_handle_devreset(ahd, &devinfo, CAM_BDR_SENT,
+					    "Bus Device Reset",
+					    /*verbose_level*/0);
+			printerror = 0;
+		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)) {
+			struct ahd_initiator_tinfo *tinfo;
+			struct ahd_tmode_tstate *tstate;
+
+			/*
+			 * PPR Rejected. Try non-ppr negotiation
+			 * and retry command.
+			 */
+			tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
+						    devinfo.our_scsiid,
+						    devinfo.target, &tstate);
+			tinfo->curr.transport_version = 2;
+			tinfo->goal.transport_version = 2;
+			tinfo->goal.ppr_options = 0;
+			ahd_qinfifo_requeue_tail(ahd, scb);
+			printerror = 0;
+		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
+			|| ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)) {
+			/*
+			 * Negotiation Rejected. Go-async and
+			 * retry command.
+ */ + ahd_set_width(ahd, &devinfo, + MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, + /*period*/0, /*offset*/0, + /*ppr_options*/0, + AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + ahd_qinfifo_requeue_tail(ahd, scb); + printerror = 0; + } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 + && ahd_sent_msg(ahd, AHDMSG_1B, + MSG_INITIATOR_DET_ERR, TRUE)) { + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf("Expected IDE Busfree\n"); +#endif + printerror = 0; + } + } else if (lastphase == P_MESGIN) { + + if ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0) { + + ahd_freeze_devq(ahd, scb); + ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); + ahd_freeze_scb(scb); + if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { + ahd_print_path(ahd, scb); + printf("Now %spacketized.\n", + (scb->flags & SCB_PACKETIZED) == 0 + ? "" : "non-"); + ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), + SCB_GET_CHANNEL(ahd, scb), + SCB_GET_LUN(scb), SCB_LIST_NULL, + ROLE_INITIATOR, CAM_REQ_ABORTED); + } else { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf("PPR Negotiation Busfree.\n"); +#endif + ahd_done(ahd, scb); + } + printerror = 0; + } + } + if (printerror != 0) { + int aborted; + + aborted = 0; + if (scb != NULL) { + u_int tag; + + if ((scb->hscb->control & TAG_ENB) != 0) + tag = SCB_GET_TAG(scb); + else + tag = SCB_LIST_NULL; + ahd_print_path(ahd, scb); + aborted = ahd_abort_scbs(ahd, target, 'A', + SCB_GET_LUN(scb), tag, + ROLE_INITIATOR, + CAM_UNEXP_BUSFREE); + } else { + /* + * We had not fully identified this connection, + * so we cannot abort anything. + */ + printf("%s: ", ahd_name(ahd)); + } + if (lastphase != P_BUSFREE) + ahd_force_renegotiation(ahd, &devinfo); + printf("Unexpected busfree %s, %d SCBs aborted, " + "PRGMCNT == 0x%x\n", + ahd_lookup_phase_entry(lastphase)->phasemsg, + aborted, + ahd_inb(ahd, PRGMCNT) + | (ahd_inb(ahd, PRGMCNT+1) << 8)); + ahd_dump_card_state(ahd); + } + /* Always restart the sequencer. */ + return (1); +} + +/* + * Force renegotiation to occur the next time we initiate + * a command to the current device. 
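+ *
+ * In effect this sets the target's bit in tstate->auto_negotiate even
+ * though curr and goal may already agree, so the next SCB queued to
+ * the device carries MK_MESSAGE and starts a fresh WDTR/SDTR/PPR
+ * exchange (provided the goal is not simply async/narrow). A typical
+ * call sequence:
+ *
+ *	ahd_compile_devinfo(&devinfo, our_id, target, lun, 'A',
+ *			    ROLE_INITIATOR);
+ *	ahd_force_renegotiation(ahd, &devinfo);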
+ */ +static void +ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + struct ahd_initiator_tinfo *targ_info; + struct ahd_tmode_tstate *tstate; + + printf("Forcing renegotiation (%d:%c:%d)\n", + devinfo->our_scsiid, devinfo->channel, + devinfo->target); + targ_info = ahd_fetch_transinfo(ahd, + devinfo->channel, + devinfo->our_scsiid, + devinfo->target, + &tstate); + ahd_update_neg_request(ahd, devinfo, tstate, + targ_info, /*force*/TRUE); +} + +#define AHD_MAX_STEPS 2000 +void +ahd_clear_critical_section(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + int stepping; + int steps; + u_int simode0; + u_int simode1; + u_int simode3; + u_int lqimode0; + u_int lqimode1; + u_int lqomode0; + u_int lqomode1; + + if (ahd->num_critical_sections == 0) + return; + + stepping = FALSE; + steps = 0; + simode0 = 0; + simode1 = 0; + simode3 = 0; + lqimode0 = 0; + lqimode1 = 0; + lqomode0 = 0; + lqomode1 = 0; + saved_modes = ahd_save_modes(ahd); + for (;;) { + struct cs *cs; + u_int seqaddr; + u_int i; + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + seqaddr = ahd_inb(ahd, CURADDR) + | (ahd_inb(ahd, CURADDR+1) << 8); + + cs = ahd->critical_sections; + for (i = 0; i < ahd->num_critical_sections; i++, cs++) { + + if (cs->begin < seqaddr && cs->end >= seqaddr) + break; + } + + if (i == ahd->num_critical_sections) + break; + + if (steps > AHD_MAX_STEPS) { + printf("%s: Infinite loop in critical section\n", + ahd_name(ahd)); + ahd_dump_card_state(ahd); + panic("critical section loop"); + } + + steps++; + if (stepping == FALSE) { + + /* + * Disable all interrupt sources so that the + * sequencer will not be stuck by a pausing + * interrupt condition while we attempt to + * leave a critical section. + */ + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + simode0 = ahd_inb(ahd, SIMODE0); + simode3 = ahd_inb(ahd, SIMODE3); + lqimode0 = ahd_inb(ahd, LQIMODE0); + lqimode1 = ahd_inb(ahd, LQIMODE1); + lqomode0 = ahd_inb(ahd, LQOMODE0); + lqomode1 = ahd_inb(ahd, LQOMODE1); + ahd_outb(ahd, SIMODE0, 0); + ahd_outb(ahd, SIMODE3, 0); + ahd_outb(ahd, LQIMODE0, 0); + ahd_outb(ahd, LQIMODE1, 0); + ahd_outb(ahd, LQOMODE0, 0); + ahd_outb(ahd, LQOMODE1, 0); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + simode1 = ahd_inb(ahd, SIMODE1); + ahd_outb(ahd, SIMODE1, 0); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); + stepping = TRUE; + } + ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); + ahd_outb(ahd, HCNTRL, ahd->unpause); + do { + ahd_delay(200); + } while (!ahd_is_paused(ahd)); + ahd_update_modes(ahd); + } + if (stepping) { + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + ahd_outb(ahd, SIMODE0, simode0); + ahd_outb(ahd, SIMODE3, simode3); + ahd_outb(ahd, LQIMODE0, lqimode0); + ahd_outb(ahd, LQIMODE1, lqimode1); + ahd_outb(ahd, LQOMODE0, lqomode0); + ahd_outb(ahd, LQOMODE1, lqomode1); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, SIMODE1, simode1); + ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); + } + ahd_restore_modes(ahd, saved_modes); +} + +/* + * Clear any pending interrupt status. 
+ */ +void +ahd_clear_intstat(struct ahd_softc *ahd) +{ + /* Clear any interrupt conditions this may have caused */ + ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI + |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); + ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO|CLRIOERR); + ahd_outb(ahd, CLRINT, CLRSCSIINT); +} + +/**************************** Debugging Routines ******************************/ +#ifdef AHD_DEBUG +uint32_t ahd_debug = AHD_DEBUG_OPTS; +#endif +void +ahd_print_scb(struct scb *scb) +{ + struct hardware_scb *hscb; + int i; + + hscb = scb->hscb; + printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", + (void *)scb, + hscb->control, + hscb->scsiid, + hscb->lun, + hscb->cdb_len); + printf("Shared Data: "); + for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) + printf("%#02x", hscb->shared_data.idata.cdb[i]); + printf(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", + (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), + (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), + ahd_le32toh(hscb->datacnt), + ahd_le32toh(hscb->sgptr), + SCB_GET_TAG(scb)); + ahd_dump_sglist(scb); +} + +void +ahd_dump_sglist(struct scb *scb) +{ + int i; + + if (scb->sg_count > 0) { + if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg_list; + + sg_list = (struct ahd_dma64_seg*)scb->sg_list; + for (i = 0; i < scb->sg_count; i++) { + uint64_t addr; + + addr = ahd_le64toh(sg_list[i].addr); + printf("sg[%d] - Addr 0x%x%x : Length %d\n", + i, + (uint32_t)((addr >> 32) & 0xFFFFFFFF), + (uint32_t)(addr & 0xFFFFFFFF), + ahd_le32toh(sg_list[i].len)); + } + } else { + struct ahd_dma_seg *sg_list; + + sg_list = (struct ahd_dma_seg*)scb->sg_list; + for (i = 0; i < scb->sg_count; i++) { + printf("sg[%d] - Addr 0x%x%x : Length %d\n", + i, + (ahd_le32toh(sg_list[i].len) >> 24 + & SG_HIGH_ADDR_BITS), + ahd_le32toh(sg_list[i].addr), + ahd_le32toh(sg_list[i].len) + & AHD_SG_LEN_MASK); + } + } + } +} + +/************************* Transfer Negotiation *******************************/ +/* + * Allocate per target mode instance (ID we respond to as a target) + * transfer negotiation data structures. + */ +static struct ahd_tmode_tstate * +ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) +{ + struct ahd_tmode_tstate *master_tstate; + struct ahd_tmode_tstate *tstate; + int i; + + master_tstate = ahd->enabled_targets[ahd->our_id]; + if (ahd->enabled_targets[scsi_id] != NULL + && ahd->enabled_targets[scsi_id] != master_tstate) + panic("%s: ahd_alloc_tstate - Target already allocated", + ahd_name(ahd)); + tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); + if (tstate == NULL) + return (NULL); + + /* + * If we have allocated a master tstate, copy user settings from + * the master tstate (taken from SRAM or the EEPROM) for this + * channel, but reset our current and goal settings to async/narrow + * until an initiator talks to us. + */ + if (master_tstate != NULL) { + memcpy(tstate, master_tstate, sizeof(*tstate)); + memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); + for (i = 0; i < 16; i++) { + memset(&tstate->transinfo[i].curr, 0, + sizeof(tstate->transinfo[i].curr)); + memset(&tstate->transinfo[i].goal, 0, + sizeof(tstate->transinfo[i].goal)); + } + } else + memset(tstate, 0, sizeof(*tstate)); + ahd->enabled_targets[scsi_id] = tstate; + return (tstate); +} + +#ifdef AHD_TARGET_MODE +/* + * Free per target mode instance (ID we respond to as a target) + * transfer negotiation data structures. 
+ */ +static void +ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) +{ + struct ahd_tmode_tstate *tstate; + + /* + * Don't clean up our "master" tstate. + * It has our default user settings. + */ + if (scsi_id == ahd->our_id + && force == FALSE) + return; + + tstate = ahd->enabled_targets[scsi_id]; + if (tstate != NULL) + free(tstate, M_DEVBUF); + ahd->enabled_targets[scsi_id] = NULL; +} +#endif + +/* + * Called when we have an active connection to a target on the bus, + * this function finds the nearest period to the input period limited + * by the capabilities of the bus connectivity of and sync settings for + * the target. + */ +void +ahd_devlimited_syncrate(struct ahd_softc *ahd, + struct ahd_initiator_tinfo *tinfo, + u_int *period, u_int *ppr_options, role_t role) +{ + struct ahd_transinfo *transinfo; + u_int maxsync; + + if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 + && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { + maxsync = AHD_SYNCRATE_PACED; + } else { + maxsync = AHD_SYNCRATE_ULTRA; + /* Can't do DT related options on an SE bus */ + *ppr_options &= MSG_EXT_PPR_QAS_REQ; + } + /* + * Never allow a value higher than our current goal + * period otherwise we may allow a target initiated + * negotiation to go above the limit as set by the + * user. In the case of an initiator initiated + * sync negotiation, we limit based on the user + * setting. This allows the system to still accept + * incoming negotiations even if target initiated + * negotiation is not performed. + */ + if (role == ROLE_TARGET) + transinfo = &tinfo->user; + else + transinfo = &tinfo->goal; + *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); + if (transinfo->period == 0) { + *period = 0; + *ppr_options = 0; + } else { + *period = MAX(*period, transinfo->period); + ahd_find_syncrate(ahd, period, ppr_options, maxsync); + } +} + +/* + * Look up the valid period to SCSIRATE conversion in our table. + * Return the period and offset that should be sent to the target + * if this was the beginning of an SDTR. + */ +void +ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, + u_int *ppr_options, u_int maxsync) +{ + /* Skip all PACED only entries if IU is not available */ + if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 + && maxsync < AHD_SYNCRATE_DT) + maxsync = AHD_SYNCRATE_DT; + + /* Skip all DT only entries if DT is not available */ + if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 + && maxsync < AHD_SYNCRATE_ULTRA2) + maxsync = AHD_SYNCRATE_ULTRA2; + + if (*period < maxsync) + *period = maxsync; + + if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 + && *period > AHD_SYNCRATE_MIN_DT) + *ppr_options &= ~MSG_EXT_PPR_DT_REQ; + + if (*period > AHD_SYNCRATE_MIN) + *period = 0; + + /* Honor PPR option conformance rules. */ + if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) + *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); + + if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) + *ppr_options &= MSG_EXT_PPR_QAS_REQ; +} + +/* + * Truncate the given synchronous offset to a value the + * current adapter type and syncrate are capable of. 
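+ *
+ * For example, a target requesting an offset of 0xfe at a paced
+ * period is clamped to MAX_OFFSET_PACED, at any slower rate to
+ * MAX_OFFSET, and at period 0 (async) to 0. The result is then
+ * further bounded by the user (target role) or goal (initiator
+ * role) offset:
+ *
+ *	u_int offset = 0xfe;
+ *
+ *	ahd_validate_offset(ahd, tinfo, period, &offset,
+ *			    MSG_EXT_WDTR_BUS_16_BIT, ROLE_INITIATOR);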
+ */
+void
+ahd_validate_offset(struct ahd_softc *ahd,
+ struct ahd_initiator_tinfo *tinfo,
+ u_int period, u_int *offset, int wide,
+ role_t role)
+{
+ u_int maxoffset;
+
+ /* Limit offset to what we can do */
+ if (period == 0)
+ maxoffset = 0;
+ else if (period <= AHD_SYNCRATE_PACED)
+ maxoffset = MAX_OFFSET_PACED;
+ else
+ maxoffset = MAX_OFFSET;
+ *offset = MIN(*offset, maxoffset);
+ if (tinfo != NULL) {
+ if (role == ROLE_TARGET)
+ *offset = MIN(*offset, tinfo->user.offset);
+ else
+ *offset = MIN(*offset, tinfo->goal.offset);
+ }
+}
+
+/*
+ * Truncate the given transfer width parameter to a value the
+ * current adapter type is capable of.
+ */
+void
+ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
+ u_int *bus_width, role_t role)
+{
+ switch (*bus_width) {
+ default:
+ if (ahd->features & AHD_WIDE) {
+ /* Respond Wide */
+ *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ break;
+ }
+ /* FALLTHROUGH */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ break;
+ }
+ if (tinfo != NULL) {
+ if (role == ROLE_TARGET)
+ *bus_width = MIN(tinfo->user.width, *bus_width);
+ else
+ *bus_width = MIN(tinfo->goal.width, *bus_width);
+ }
+}
+
+/*
+ * Update the bitmask of targets with which the controller should
+ * negotiate at the next convenient opportunity. This currently
+ * means the next time we send the initial identify messages for
+ * a new transaction.
+ */
+int
+ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct ahd_tmode_tstate *tstate,
+ struct ahd_initiator_tinfo *tinfo, int force)
+{
+ u_int auto_negotiate_orig;
+
+ auto_negotiate_orig = tstate->auto_negotiate;
+ if (tinfo->curr.period != tinfo->goal.period
+ || tinfo->curr.width != tinfo->goal.width
+ || tinfo->curr.offset != tinfo->goal.offset
+ || tinfo->curr.ppr_options != tinfo->goal.ppr_options
+ || (force
+ && (tinfo->goal.period != 0
+ || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
+ || tinfo->goal.ppr_options != 0)))
+ tstate->auto_negotiate |= devinfo->target_mask;
+ else
+ tstate->auto_negotiate &= ~devinfo->target_mask;
+
+ return (auto_negotiate_orig != tstate->auto_negotiate);
+}
+
+/*
+ * Update the user/goal/curr tables of synchronous negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller. In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
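+ *
+ * The "type" argument selects which tables are written:
+ * AHD_TRANS_USER (settings taken from the SEEPROM or the user),
+ * AHD_TRANS_GOAL (what we will ask for in the next negotiation),
+ * and AHD_TRANS_CUR (what is active on the bus now); AHD_TRANS_ACTIVE
+ * additionally marks the update as occurring during a connection to
+ * the device. For example, the busfree handlers above force a device
+ * back to async in both the current and goal tables with:
+ *
+ *	ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
+ *			 AHD_TRANS_CUR|AHD_TRANS_GOAL, TRUE);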
+ */ +void +ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, + u_int period, u_int offset, u_int ppr_options, + u_int type, int paused) +{ + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + u_int old_period; + u_int old_offset; + u_int old_ppr; + int active; + int update_needed; + + active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; + update_needed = 0; + + if (period == 0 || offset == 0) { + period = 0; + offset = 0; + } + + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + + if ((type & AHD_TRANS_USER) != 0) { + tinfo->user.period = period; + tinfo->user.offset = offset; + tinfo->user.ppr_options = ppr_options; + } + + if ((type & AHD_TRANS_GOAL) != 0) { + tinfo->goal.period = period; + tinfo->goal.offset = offset; + tinfo->goal.ppr_options = ppr_options; + } + + old_period = tinfo->curr.period; + old_offset = tinfo->curr.offset; + old_ppr = tinfo->curr.ppr_options; + + if ((type & AHD_TRANS_CUR) != 0 + && (old_period != period + || old_offset != offset + || old_ppr != ppr_options)) { + + update_needed++; + + tinfo->curr.period = period; + tinfo->curr.offset = offset; + tinfo->curr.ppr_options = ppr_options; + + ahd_send_async(ahd, devinfo->channel, devinfo->target, + CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); + if (bootverbose) { + if (offset != 0) { + printf("%s: target %d synchronous with " + "period = 0x%x, offset = 0x%x%s\n", + ahd_name(ahd), devinfo->target, + period, offset, + (ppr_options & MSG_EXT_PPR_DT_REQ) + ? " (DT)" : ""); + } else { + printf("%s: target %d using " + "asynchronous transfers\n", + ahd_name(ahd), devinfo->target); + } + } + } + /* + * Always refresh the neg-table to handle the case of the + * sequencer setting the ENATNO bit for a MK_MESSAGE request. + * We will always renegotiate in that case if this is a + * packetized request. + */ + if ((type & AHD_TRANS_CUR) != 0) + ahd_update_neg_table(ahd, devinfo, &tinfo->curr); + + update_needed += ahd_update_neg_request(ahd, devinfo, tstate, + tinfo, /*force*/FALSE); + + if (update_needed) + ahd_update_pending_scbs(ahd); +} + +/* + * Update the user/goal/curr tables of wide negotiation + * parameters as well as, in the case of a current or active update, + * any data structures on the host controller. In the case of an + * active update, the specified target is currently talking to us on + * the bus, so the transfer parameter update must take effect + * immediately. 
+ */
+void
+ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int width, u_int type, int paused)
+{
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ u_int oldwidth;
+ int active;
+ int update_needed;
+
+ active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
+ update_needed = 0;
+ tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+
+ if ((type & AHD_TRANS_USER) != 0)
+ tinfo->user.width = width;
+
+ if ((type & AHD_TRANS_GOAL) != 0)
+ tinfo->goal.width = width;
+
+ oldwidth = tinfo->curr.width;
+ if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
+
+ update_needed++;
+
+ tinfo->curr.width = width;
+ ahd_send_async(ahd, devinfo->channel, devinfo->target,
+ CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+ if (bootverbose) {
+ printf("%s: target %d using %dbit transfers\n",
+ ahd_name(ahd), devinfo->target,
+ 8 * (0x01 << width));
+ }
+ }
+ if ((type & AHD_TRANS_CUR) != 0)
+ ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
+
+ update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
+ tinfo, /*force*/FALSE);
+ if (update_needed)
+ ahd_update_pending_scbs(ahd);
+}
+
+/*
+ * Update the current state of tagged queuing for a given target.
+ */
+void
+ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ ahd_queue_alg alg)
+{
+ ahd_platform_set_tags(ahd, devinfo, alg);
+ ahd_send_async(ahd, devinfo->channel, devinfo->target,
+ devinfo->lun, AC_TRANSFER_NEG, &alg);
+}
+
+static void
+ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct ahd_transinfo *tinfo)
+{
+ ahd_mode_state saved_modes;
+ u_int period;
+ u_int ppr_opts;
+ u_int con_opts;
+ u_int offset;
+ u_int precomp;
+
+ saved_modes = ahd_save_modes(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ ahd_outb(ahd, NEGOADDR, devinfo->target);
+ period = tinfo->period;
+ offset = tinfo->offset;
+ precomp = 0;
+ if (period == 0)
+ period = AHD_SYNCRATE_ASYNC;
+ if (period == AHD_SYNCRATE_160) {
+ period = AHD_SYNCRATE_REVA_160;
+ precomp = 0;
+ if ((ahd->flags & AHD_CPQ_BOARD) == 0)
+ precomp |= AHD_PRECOMP_FASTSLEW;
+ if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) != 0)
+ precomp |= AHD_PRECOMP_CUTBACK_29;
+ }
+ ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP);
+ ahd_outb(ahd, ANNEXDAT, precomp);
+
+ ahd_outb(ahd, NEGPERIOD, period);
+ ppr_opts = tinfo->ppr_options
+ & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_IU_REQ);
+ /*
+ * When the SPI4 spec was finalized, PACE transfers
+ * were not made a configurable option in the PPR message.
+ * Instead they are assumed to be enabled for any
+ * syncrate faster than 80MHz. Nevertheless, Harpoon
+ * allows this to be configurable.
+ *
+ * Harpoon also assumes at most 2 data bytes per negotiated
+ * REQ/ACK offset. Paced transfers take 4, so we must
+ * adjust our offset.
+ */
+ if (period <= AHD_SYNCRATE_PACED) {
+ ppr_opts |= PPROPT_PACE;
+ offset *= 2;
+ }
+ ahd_outb(ahd, NEGPPROPTS, ppr_opts);
+ ahd_outb(ahd, NEGOFFSET, offset);
+
+ con_opts = 0;
+ if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
+ con_opts |= WIDEXFER;
+
+ /*
+ * During packetized transfers, the target will
+ * give us the opportunity to send command packets
+ * without us asserting attention.
+ */ + if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) + con_opts |= ENAUTOATNO; + ahd_outb(ahd, NEGCONOPTS, con_opts); + ahd_restore_modes(ahd, saved_modes); +} + +/* + * When the transfer settings for a connection change, update any + * in-transit SCBs to contain the new data so the hardware will + * be set correctly during future (re)selections. + */ +static void +ahd_update_pending_scbs(struct ahd_softc *ahd) +{ + struct scb *pending_scb; + int pending_scb_count; + int i; + int paused; + u_int saved_scbptr; + ahd_mode_state saved_modes; + + /* + * Traverse the pending SCB list and ensure that all of the + * SCBs there have the proper settings. + */ + pending_scb_count = 0; + LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { + struct ahd_devinfo devinfo; + struct hardware_scb *pending_hscb; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + + ahd_scb_devinfo(ahd, &devinfo, pending_scb); + tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, + devinfo.our_scsiid, + devinfo.target, &tstate); + pending_hscb = pending_scb->hscb; + if ((tstate->auto_negotiate & devinfo.target_mask) == 0 + && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { + pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; + pending_hscb->control &= ~MK_MESSAGE; + } + ahd_sync_scb(ahd, pending_scb, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + pending_scb_count++; + } + + if (pending_scb_count == 0) + return; + + if (ahd_is_paused(ahd)) { + paused = 1; + } else { + paused = 0; + ahd_pause(ahd); + } + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + saved_scbptr = ahd_get_scbptr(ahd); + /* Ensure that the hscbs down on the card match the new information */ + for (i = 0; i < ahd->scb_data.maxhscbs; i++) { + struct hardware_scb *pending_hscb; + u_int control; + u_int scb_tag; + + ahd_set_scbptr(ahd, i); + scb_tag = i; + pending_scb = ahd_lookup_scb(ahd, scb_tag); + if (pending_scb == NULL) + continue; + + pending_hscb = pending_scb->hscb; + control = ahd_inb_scbram(ahd, SCB_CONTROL); + control &= ~MK_MESSAGE; + control |= pending_hscb->control & MK_MESSAGE; + ahd_outb(ahd, SCB_CONTROL, control); + } + ahd_set_scbptr(ahd,saved_scbptr); + ahd_restore_modes(ahd, saved_modes); + + if (paused == 0) + ahd_unpause(ahd); +} + +/**************************** Pathing Information *****************************/ +static void +ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + ahd_mode_state saved_modes; + u_int saved_scsiid; + role_t role; + int our_id; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + + if (ahd_inb(ahd, SSTAT0) & TARGET) + role = ROLE_TARGET; + else + role = ROLE_INITIATOR; + + if (role == ROLE_TARGET + && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { + /* We were selected, so pull our id from TARGIDIN */ + our_id = ahd_inb(ahd, TARGIDIN) & OID; + } else if (role == ROLE_TARGET) + our_id = ahd_inb(ahd, TOWNID); + else + our_id = ahd_inb(ahd, IOWNID); + + saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); + ahd_compile_devinfo(devinfo, + our_id, + SCSIID_TARGET(ahd, saved_scsiid), + ahd_inb(ahd, SAVED_LUN), + SCSIID_CHANNEL(ahd, saved_scsiid), + role); + ahd_restore_modes(ahd, saved_modes); +} + +struct ahd_phase_table_entry* +ahd_lookup_phase_entry(int phase) +{ + struct ahd_phase_table_entry *entry; + struct ahd_phase_table_entry *last_entry; + + /* + * num_phases doesn't include the default entry which + * will be returned if the phase doesn't match. 
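+ *
+ * The table is sized with one extra catch-all entry at index
+ * num_phases, so the scan below always terminates on a valid
+ * entry and callers never see a NULL result. The same sentinel
+ * pattern in miniature (entries illustrative only):
+ *
+ *	struct entry { int phase; const char *msg; };
+ *	static const struct entry table[] = {
+ *		{ P_DATAOUT, "in Data-out phase" },
+ *		{ P_DATAIN, "in Data-in phase" },
+ *		{ 0, "in unknown phase" }	(catch-all sentinel)
+ *	};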
+ */
+ last_entry = &ahd_phase_table[num_phases];
+ for (entry = ahd_phase_table; entry < last_entry; entry++) {
+ if (phase == entry->phase)
+ break;
+ }
+ return (entry);
+}
+
+void
+ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
+ u_int lun, char channel, role_t role)
+{
+ devinfo->our_scsiid = our_id;
+ devinfo->target = target;
+ devinfo->lun = lun;
+ devinfo->target_offset = target;
+ devinfo->channel = channel;
+ devinfo->role = role;
+ if (channel == 'B')
+ devinfo->target_offset += 8;
+ devinfo->target_mask = (0x01 << devinfo->target_offset);
+}
+
+static void
+ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct scb *scb)
+{
+ role_t role;
+ int our_id;
+
+ our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
+ role = ROLE_INITIATOR;
+ if ((scb->hscb->control & TARGET_SCB) != 0)
+ role = ROLE_TARGET;
+ ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
+}
+
+
+/************************ Message Phase Processing ****************************/
+/*
+ * When an initiator transaction with the MK_MESSAGE flag either reconnects
+ * or enters the initial message out phase, we are interrupted. Fill our
+ * outgoing message buffer with the appropriate message and begin handling
+ * the message phase(s) manually.
+ */
+static void
+ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ struct scb *scb)
+{
+ /*
+ * To facilitate adding multiple messages together,
+ * each routine should increment the index and len
+ * variables instead of setting them explicitly.
+ */
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+
+ if (ahd_currently_packetized(ahd))
+ ahd->msg_flags |= MSG_FLAG_PACKETIZED;
+
+ if (ahd->send_msg_perror
+ && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
+ ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
+ ahd->msgout_len++;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ printf("Setting up for Parity Error delivery\n");
+ return;
+ } else if (scb == NULL) {
+ printf("%s: WARNING. No pending message for "
+ "I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
+ ahd->msgout_len++;
+ ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ return;
+ }
+
+ if ((scb->flags & SCB_DEVICE_RESET) == 0
+ && (scb->flags & SCB_PACKETIZED) == 0
+ && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
+ u_int identify_msg;
+
+ identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
+ if ((scb->hscb->control & DISCENB) != 0)
+ identify_msg |= MSG_IDENTIFY_DISCFLAG;
+ ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
+ ahd->msgout_len++;
+
+ if ((scb->hscb->control & TAG_ENB) != 0) {
+ ahd->msgout_buf[ahd->msgout_index++] =
+ scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
+ ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
+ ahd->msgout_len += 2;
+ }
+ }
+
+ if (scb->flags & SCB_DEVICE_RESET) {
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
+ ahd->msgout_len++;
+ ahd_print_path(ahd, scb);
+ printf("Bus Device Reset Message Sent\n");
+ /*
+ * Clear our selection hardware in advance of
+ * the busfree. We may have an entry in the waiting
+ * Q for this target, and we don't want to go about
+ * selecting while we handle the busfree and blow it
+ * away.
+ */ + ahd_outb(ahd, SCSISEQ0, 0); + } else if ((scb->flags & SCB_ABORT) != 0) { + + if ((scb->hscb->control & TAG_ENB) != 0) { + ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG; + } else { + ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT; + } + ahd->msgout_len++; + ahd_print_path(ahd, scb); + printf("Abort%s Message Sent\n", + (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); + /* + * Clear our selection hardware in advance of + * the busfree. We may have an entry in the waiting + * Q for this target, and we don't want to go about + * selecting while we handle the busfree and blow it + * away. + */ + ahd_outb(ahd, SCSISEQ0, 0); + } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { + ahd_build_transfer_msg(ahd, devinfo); + /* + * Clear our selection hardware in advance of potential + * PPR IU status change busfree. We may have an entry in + * the waiting Q for this target, and we don't want to go + * about selecting while we handle the busfree and blow + * it away. + */ + ahd_outb(ahd, SCSISEQ0, 0); + } else { + printf("ahd_intr: AWAITING_MSG for an SCB that " + "does not have a waiting message\n"); + printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, + devinfo->target_mask); + panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " + "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, + ahd_inb(ahd, MSG_OUT), scb->flags); + } + + /* + * Clear the MK_MESSAGE flag from the SCB so we aren't + * asked to send this message again. + */ + ahd_outb(ahd, SCB_CONTROL, + ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); + scb->hscb->control &= ~MK_MESSAGE; + ahd->msgout_index = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; +} + +/* + * Build an appropriate transfer negotiation message for the + * currently active target. + */ +static void +ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + /* + * We need to initiate transfer negotiations. + * If our current and goal settings are identical, + * we want to renegotiate due to a check condition. + */ + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + int dowide; + int dosync; + int doppr; + int use_ppr; + u_int period; + u_int ppr_options; + u_int offset; + + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, + devinfo->target, &tstate); + /* + * Filter our period based on the current connection. + * If we can't perform DT transfers on this segment (not in LVD + * mode for instance), then our decision to issue a PPR message + * may change. 
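+ *
+ * Concretely, if the connection runs through an LVD->SE expander
+ * segment, ahd_devlimited_syncrate() strips the DT related
+ * ppr_options and clamps the period, so doppr can become FALSE
+ * and, unless the transport version already calls for PPR, a
+ * plain WDTR/SDTR exchange is built below instead.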
+ */
+ period = tinfo->goal.period;
+ ppr_options = tinfo->goal.ppr_options;
+ /* Target initiated PPR is not allowed in the SCSI spec */
+ if (devinfo->role == ROLE_TARGET)
+ ppr_options = 0;
+ ahd_devlimited_syncrate(ahd, tinfo, &period,
+ &ppr_options, devinfo->role);
+ dowide = tinfo->curr.width != tinfo->goal.width;
+ dosync = tinfo->curr.period != period;
+ doppr = tinfo->curr.ppr_options != ppr_options;
+
+ if (!dowide && !dosync && !doppr) {
+ dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
+ dosync = tinfo->goal.period != 0;
+ doppr = tinfo->goal.ppr_options != 0;
+ }
+
+ if (!dowide && !dosync && !doppr) {
+ panic("ahd_intr: AWAITING_MSG for negotiation, "
+ "but no negotiation needed\n");
+ }
+
+ use_ppr = (tinfo->curr.transport_version >= 3) || doppr;
+ /* Target initiated PPR is not allowed in the SCSI spec */
+ if (devinfo->role == ROLE_TARGET)
+ use_ppr = 0;
+
+ /*
+ * Both the PPR message and SDTR message require the
+ * goal syncrate to be limited to what the target device
+ * is capable of handling (based on whether an LVD->SE
+ * expander is on the bus), so combine these two cases.
+ * Regardless, guarantee that if we are using WDTR and SDTR
+ * messages that WDTR comes first.
+ */
+ if (use_ppr || (dosync && !dowide)) {
+
+ offset = tinfo->goal.offset;
+ ahd_validate_offset(ahd, tinfo, period, &offset,
+ use_ppr ? tinfo->goal.width
+ : tinfo->curr.width,
+ devinfo->role);
+ if (use_ppr) {
+ ahd_construct_ppr(ahd, devinfo, period, offset,
+ tinfo->goal.width, ppr_options);
+ } else {
+ ahd_construct_sdtr(ahd, devinfo, period, offset);
+ }
+ } else {
+ ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
+ }
+}
+
+/*
+ * Build a synchronous negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int period, u_int offset)
+{
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN;
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR;
+ ahd->msgout_buf[ahd->msgout_index++] = period;
+ ahd->msgout_buf[ahd->msgout_index++] = offset;
+ ahd->msgout_len += 5;
+ if (bootverbose) {
+ printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ devinfo->lun, period, offset);
+ }
+}
+
+/*
+ * Build a wide negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int bus_width)
+{
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN;
+ ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR;
+ ahd->msgout_buf[ahd->msgout_index++] = bus_width;
+ ahd->msgout_len += 4;
+ if (bootverbose) {
+ printf("(%s:%c:%d:%d): Sending WDTR %x\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ devinfo->lun, bus_width);
+ }
+}
+
+/*
+ * Build a parallel protocol request message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+ u_int period, u_int offset, u_int bus_width,
+ u_int ppr_options)
+{
+ /*
+ * Always request precompensation from
+ * the other target if we are running
+ * at paced syncrates.
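+ *
+ * The eight bytes queued below form a standard SPI extended
+ * message:
+ *
+ *	msgout_buf[0]	MSG_EXTENDED	(0x01)
+ *	msgout_buf[1]	MSG_EXT_PPR_LEN	(0x06)
+ *	msgout_buf[2]	MSG_EXT_PPR	(0x04)
+ *	msgout_buf[3]	transfer period factor
+ *	msgout_buf[4]	reserved (always 0)
+ *	msgout_buf[5]	REQ/ACK offset
+ *	msgout_buf[6]	transfer width exponent
+ *	msgout_buf[7]	protocol options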
+ */ + if (period <= AHD_SYNCRATE_PACED) + ppr_options |= MSG_EXT_PPR_PCOMP_EN; + ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED; + ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN; + ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR; + ahd->msgout_buf[ahd->msgout_index++] = period; + ahd->msgout_buf[ahd->msgout_index++] = 0; + ahd->msgout_buf[ahd->msgout_index++] = offset; + ahd->msgout_buf[ahd->msgout_index++] = bus_width; + ahd->msgout_buf[ahd->msgout_index++] = ppr_options; + ahd->msgout_len += 8; + if (bootverbose) { + printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " + "offset %x, ppr_options %x\n", ahd_name(ahd), + devinfo->channel, devinfo->target, devinfo->lun, + bus_width, period, offset, ppr_options); + } +} + +/* + * Clear any active message state. + */ +static void +ahd_clear_msg_state(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd->send_msg_perror = 0; + ahd->msg_flags = MSG_FLAG_NONE; + ahd->msgout_len = 0; + ahd->msgin_index = 0; + ahd->msg_type = MSG_TYPE_NONE; + if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { + /* + * The target didn't care to respond to our + * message request, so clear ATN. + */ + ahd_outb(ahd, CLRSINT1, CLRATNO); + } + ahd_outb(ahd, MSG_OUT, MSG_NOOP); + ahd_outb(ahd, SEQ_FLAGS2, + ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); + ahd_restore_modes(ahd, saved_modes); +} + +/* + * Manual message loop handler. + */ +static void +ahd_handle_message_phase(struct ahd_softc *ahd) +{ + struct ahd_devinfo devinfo; + u_int bus_phase; + int end_session; + + ahd_fetch_devinfo(ahd, &devinfo); + end_session = FALSE; + bus_phase = ahd_inb(ahd, LASTPHASE); + + if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { + printf("LQIRETRY for LQIPHASE_OUTPKT\n"); + ahd_outb(ahd, LQCTL2, LQIRETRY); + } +reswitch: + switch (ahd->msg_type) { + case MSG_TYPE_INITIATOR_MSGOUT: + { + int lastbyte; + int phasemis; + int msgdone; + + if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) + panic("HOST_MSG_LOOP interrupt with no active message"); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf("%s: INITIATOR_MSG_OUT", ahd_name(ahd)); +#endif + phasemis = bus_phase != P_MESGOUT; + if (phasemis) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + printf(" PHASEMIS %s - %s\n", + ahd_lookup_phase_entry(bus_phase) + ->phasemsg, + ahd_lookup_phase_entry(ahd_inb(ahd, SCSISIGI) & PHASE_MASK) + ->phasemsg); + } +#endif + if (bus_phase == P_MESGIN) { + /* + * Change gears and see if + * this messages is of interest to + * us or should be passed back to + * the sequencer. + */ + ahd_outb(ahd, CLRSINT1, CLRATNO); + ahd->send_msg_perror = 0; + ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; + ahd->msgin_index = 0; + goto reswitch; + } + end_session = TRUE; + break; + } + + if (ahd->send_msg_perror) { + ahd_outb(ahd, CLRSINT1, CLRATNO); + ahd_outb(ahd, CLRSINT1, CLRREQINIT); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf(" byte 0x%x\n", ahd->send_msg_perror); +#endif + /* + * If we are notifying the target of a CRC error + * during packetized operations, the target is + * within its rights to acknowledge our message + * with a busfree. 
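+ *
+ * This is why MSG_FLAG_EXPECT_IDE_BUSFREE is set below: when the
+ * busfree interrupt arrives, ahd_handle_nonpkt_busfree() sees the
+ * flag paired with the sent MSG_INITIATOR_DET_ERR and treats the
+ * disconnect as expected instead of reporting an unexpected
+ * busfree and aborting SCBs.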
+ */ + if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 + && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR) + ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; + + ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); + break; + } + + msgdone = ahd->msgout_index == ahd->msgout_len; + if (msgdone) { + /* + * The target has requested a retry. + * Re-assert ATN, reset our message index to + * 0, and try again. + */ + ahd->msgout_index = 0; + ahd_assert_atn(ahd); + } + + lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); + if (lastbyte) { + /* Last byte is signified by dropping ATN */ + ahd_outb(ahd, CLRSINT1, CLRATNO); + } + + /* + * Clear our interrupt status and present + * the next byte on the bus. + */ + ahd_outb(ahd, CLRSINT1, CLRREQINIT); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf(" byte 0x%x\n", + ahd->msgout_buf[ahd->msgout_index]); +#endif + ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); + break; + } + case MSG_TYPE_INITIATOR_MSGIN: + { + int phasemis; + int message_done; + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf("%s: INITIATOR_MSG_IN", ahd_name(ahd)); +#endif + phasemis = bus_phase != P_MESGIN; + if (phasemis) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { + printf(" PHASEMIS %s\n", + ahd_lookup_phase_entry(bus_phase) + ->phasemsg); + } +#endif + ahd->msgin_index = 0; + if (bus_phase == P_MESGOUT + && (ahd->send_msg_perror != 0 + || (ahd->msgout_len != 0 + && ahd->msgout_index == 0))) { + ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; + goto reswitch; + } + end_session = TRUE; + break; + } + + /* Pull the byte in without acking it */ + ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf(" byte 0x%x\n", + ahd->msgin_buf[ahd->msgin_index]); +#endif + + message_done = ahd_parse_msg(ahd, &devinfo); + + if (message_done) { + /* + * Clear our incoming message buffer in case there + * is another message following this one. + */ + ahd->msgin_index = 0; + + /* + * If this message illicited a response, + * assert ATN so the target takes us to the + * message out phase. + */ + if (ahd->msgout_len != 0) + ahd_assert_atn(ahd); + } else + ahd->msgin_index++; + + if (message_done == MSGLOOP_TERMINATED) { + end_session = TRUE; + } else { + /* Ack the byte */ + ahd_outb(ahd, CLRSINT1, CLRREQINIT); + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); + } + break; + } + case MSG_TYPE_TARGET_MSGIN: + { + int msgdone; + int msgout_request; + + /* + * By default, the message loop will continue. + */ + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); + + if (ahd->msgout_len == 0) + panic("Target MSGIN with no active message"); + + /* + * If we interrupted a mesgout session, the initiator + * will not know this until our first REQ. So, we + * only honor mesgout requests after we've sent our + * first byte. + */ + if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 + && ahd->msgout_index > 0) + msgout_request = TRUE; + else + msgout_request = FALSE; + + if (msgout_request) { + + /* + * Change gears and see if + * this messages is of interest to + * us or should be passed back to + * the sequencer. 
+ */ + ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; + ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); + ahd->msgin_index = 0; + /* Dummy read to REQ for first byte */ + ahd_inb(ahd, SCSIDAT); + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) | SPIOEN); + break; + } + + msgdone = ahd->msgout_index == ahd->msgout_len; + if (msgdone) { + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); + end_session = TRUE; + break; + } + + /* + * Present the next byte on the bus. + */ + ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); + ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); + break; + } + case MSG_TYPE_TARGET_MSGOUT: + { + int lastbyte; + int msgdone; + + /* + * By default, the message loop will continue. + */ + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); + + /* + * The initiator signals that this is + * the last byte by dropping ATN. + */ + lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; + + /* + * Read the latched byte, but turn off SPIOEN first + * so that we don't inadvertently cause a REQ for the + * next byte. + */ + ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); + ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); + msgdone = ahd_parse_msg(ahd, &devinfo); + if (msgdone == MSGLOOP_TERMINATED) { + /* + * The message is *really* done in that it caused + * us to go to bus free. The sequencer has already + * been reset at this point, so pull the ejection + * handle. + */ + return; + } + + ahd->msgin_index++; + + /* + * XXX Read spec about initiator dropping ATN too soon + * and use msgdone to detect it. + */ + if (msgdone == MSGLOOP_MSGCOMPLETE) { + ahd->msgin_index = 0; + + /* + * If this message illicited a response, transition + * to the Message in phase and send it. + */ + if (ahd->msgout_len != 0) { + ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) | SPIOEN); + ahd->msg_type = MSG_TYPE_TARGET_MSGIN; + ahd->msgin_index = 0; + break; + } + } + + if (lastbyte) + end_session = TRUE; + else { + /* Ask for the next byte. */ + ahd_outb(ahd, SXFRCTL0, + ahd_inb(ahd, SXFRCTL0) | SPIOEN); + } + + break; + } + default: + panic("Unknown REQINIT message type"); + } + + if (end_session) { + if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { + printf("%s: Returning to Idle Loop\n", + ahd_name(ahd)); + ahd_outb(ahd, LASTPHASE, P_BUSFREE); + ahd_clear_msg_state(ahd); + ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); + } else { + ahd_clear_msg_state(ahd); + ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); + } + } +} + +/* + * See if we sent a particular extended message to the target. + * If "full" is true, return true only if the target saw the full + * message. If "full" is false, return true if the target saw at + * least the first byte of the message. 
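+ *
+ * A worked example of the index math used below: for an outgoing
+ * PPR, msgout_buf[0..7] holds the extended message, so
+ * end_index = 0 + 1 + MSG_EXT_PPR_LEN = 7. With "full" true, the
+ * target must have ACKed past byte 7 (msgout_index > 7) for the
+ * message to count as seen; with "full" false, any progress past
+ * index 0 is enough.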
+ */
+static int
+ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
+{
+ int found;
+ u_int index;
+
+ found = FALSE;
+ index = 0;
+
+ while (index < ahd->msgout_len) {
+ if (ahd->msgout_buf[index] == MSG_EXTENDED) {
+ u_int end_index;
+
+ end_index = index + 1 + ahd->msgout_buf[index + 1];
+ if (ahd->msgout_buf[index+2] == msgval
+ && type == AHDMSG_EXT) {
+
+ if (full) {
+ if (ahd->msgout_index > end_index)
+ found = TRUE;
+ } else if (ahd->msgout_index > index)
+ found = TRUE;
+ }
+ index = end_index;
+ } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
+ && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+
+ /* Skip tag type and tag id or residue param */
+ index += 2;
+ } else {
+ /* Single byte message */
+ if (type == AHDMSG_1B
+ && ahd->msgout_buf[index] == msgval
+ && ahd->msgout_index > index)
+ found = TRUE;
+ index++;
+ }
+
+ if (found)
+ break;
+ }
+ return (found);
+}
+
+/*
+ * Wait for a complete incoming message, parse it, and respond accordingly.
+ */
+static int
+ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ struct ahd_initiator_tinfo *tinfo;
+ struct ahd_tmode_tstate *tstate;
+ int reject;
+ int done;
+ int response;
+
+ done = MSGLOOP_IN_PROG;
+ response = FALSE;
+ reject = FALSE;
+ tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+ devinfo->target, &tstate);
+
+ /*
+ * Parse as much of the message as is available,
+ * rejecting it if we don't support it. When
+ * the entire message is available and has been
+ * handled, return MSGLOOP_MSGCOMPLETE, indicating
+ * that we have parsed an entire message.
+ *
+ * In the case of extended messages, we accept the length
+ * byte outright and perform more checking once we know the
+ * extended message type.
+ */
+ switch (ahd->msgin_buf[0]) {
+ case MSG_DISCONNECT:
+ case MSG_SAVEDATAPOINTER:
+ case MSG_CMDCOMPLETE:
+ case MSG_RESTOREPOINTERS:
+ case MSG_IGN_WIDE_RESIDUE:
+ /*
+ * End our message loop as these are messages
+ * the sequencer handles on its own.
+ */
+ done = MSGLOOP_TERMINATED;
+ break;
+ case MSG_MESSAGE_REJECT:
+ response = ahd_handle_msg_reject(ahd, devinfo);
+ /* FALLTHROUGH */
+ case MSG_NOOP:
+ done = MSGLOOP_MSGCOMPLETE;
+ break;
+ case MSG_EXTENDED:
+ {
+ /* Wait for enough of the message to begin validation */
+ if (ahd->msgin_index < 2)
+ break;
+ switch (ahd->msgin_buf[2]) {
+ case MSG_EXT_SDTR:
+ {
+ u_int period;
+ u_int ppr_options;
+ u_int offset;
+ u_int saved_offset;
+
+ if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
+ reject = TRUE;
+ break;
+ }
+
+ /*
+ * Wait until we have both args before validating
+ * and acting on this message.
+ *
+ * Add one to MSG_EXT_SDTR_LEN to account for
+ * the extended message preamble.
+ */
+ if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
+ break;
+
+ period = ahd->msgin_buf[3];
+ ppr_options = 0;
+ saved_offset = offset = ahd->msgin_buf[4];
+ ahd_devlimited_syncrate(ahd, tinfo, &period,
+ &ppr_options, devinfo->role);
+ ahd_validate_offset(ahd, tinfo, period, &offset,
+ tinfo->curr.width, devinfo->role);
+ if (bootverbose) {
+ printf("(%s:%c:%d:%d): Received "
+ "SDTR period %x, offset %x\n\t"
+ "Filtered to period %x, offset %x\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun,
+ ahd->msgin_buf[3], saved_offset,
+ period, offset);
+ }
+ ahd_set_syncrate(ahd, devinfo, period,
+ offset, ppr_options,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+
+ /*
+ * See if we initiated Sync Negotiation
+ * and didn't have to fall down to async
+ * transfers.
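+ *
+ * "Fall down" means our filtering reduced the offset below what
+ * the target answered with. As the original requester we cannot
+ * silently accept less than the responder offered, so that case
+ * is rejected below, driving the exchange to async.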
+ */ + if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) { + /* We started it */ + if (saved_offset != offset) { + /* Went too low - force async */ + reject = TRUE; + } + } else { + /* + * Send our own SDTR in reply + */ + if (bootverbose + && devinfo->role == ROLE_INITIATOR) { + printf("(%s:%c:%d:%d): Target " + "Initiated SDTR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_construct_sdtr(ahd, devinfo, + period, offset); + ahd->msgout_index = 0; + response = TRUE; + } + done = MSGLOOP_MSGCOMPLETE; + break; + } + case MSG_EXT_WDTR: + { + u_int bus_width; + u_int saved_width; + u_int sending_reply; + + sending_reply = FALSE; + if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have our arg before validating + * and acting on this message. + * + * Add one to MSG_EXT_WDTR_LEN to account for + * the extended message preamble. + */ + if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) + break; + + bus_width = ahd->msgin_buf[3]; + saved_width = bus_width; + ahd_validate_width(ahd, tinfo, &bus_width, + devinfo->role); + if (bootverbose) { + printf("(%s:%c:%d:%d): Received WDTR " + "%x filtered to %x\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun, + saved_width, bus_width); + } + + if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) { + /* + * Don't send a WDTR back to the + * target, since we asked first. + * If the width went higher than our + * request, reject it. + */ + if (saved_width > bus_width) { + reject = TRUE; + printf("(%s:%c:%d:%d): requested %dBit " + "transfers. Rejecting...\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun, + 8 * (0x01 << bus_width)); + bus_width = 0; + } + } else { + /* + * Send our own WDTR in reply + */ + if (bootverbose + && devinfo->role == ROLE_INITIATOR) { + printf("(%s:%c:%d:%d): Target " + "Initiated WDTR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_construct_wdtr(ahd, devinfo, bus_width); + ahd->msgout_index = 0; + response = TRUE; + sending_reply = TRUE; + } + ahd_set_width(ahd, devinfo, bus_width, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + /* After a wide message, we are async */ + ahd_set_syncrate(ahd, devinfo, /*period*/0, + /*offset*/0, /*ppr_options*/0, + AHD_TRANS_ACTIVE, /*paused*/TRUE); + if (sending_reply == FALSE && reject == FALSE) { + + if (tinfo->goal.period) { + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_build_transfer_msg(ahd, devinfo); + ahd->msgout_index = 0; + response = TRUE; + } + } + done = MSGLOOP_MSGCOMPLETE; + break; + } + case MSG_EXT_PPR: + { + u_int period; + u_int offset; + u_int bus_width; + u_int ppr_options; + u_int saved_width; + u_int saved_offset; + u_int saved_ppr_options; + + if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { + reject = TRUE; + break; + } + + /* + * Wait until we have all args before validating + * and acting on this message. + * + * Add one to MSG_EXT_PPR_LEN to account for + * the extended message preamble. + */ + if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) + break; + + period = ahd->msgin_buf[3]; + offset = ahd->msgin_buf[5]; + bus_width = ahd->msgin_buf[6]; + saved_width = bus_width; + ppr_options = ahd->msgin_buf[7]; + /* + * According to the spec, a DT only + * period factor with no DT option + * set implies async. 
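+ *
+ * Period factors of 9 (12.5ns) and below are defined only for DT
+ * and faster modes in the SPI specs, so a request with such a
+ * factor but without MSG_EXT_PPR_DT_REQ is treated as a request
+ * for async by zeroing the offset below.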
+ */ + if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 + && period <= 9) + offset = 0; + saved_ppr_options = ppr_options; + saved_offset = offset; + + /* + * Transfer options are only available if we + * are negotiating wide. + */ + if (bus_width == 0) + ppr_options &= MSG_EXT_PPR_QAS_REQ; + + ahd_validate_width(ahd, tinfo, &bus_width, + devinfo->role); + ahd_devlimited_syncrate(ahd, tinfo, &period, + &ppr_options, devinfo->role); + ahd_validate_offset(ahd, tinfo, period, &offset, + bus_width, devinfo->role); + + if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) { + /* + * If we are unable to do any of the + * requested options (we went too low), + * then we'll have to reject the message. + */ + if (saved_width > bus_width + || saved_offset != offset + || saved_ppr_options != ppr_options) { + reject = TRUE; + period = 0; + offset = 0; + bus_width = 0; + ppr_options = 0; + } + } else { + if (devinfo->role != ROLE_TARGET) + printf("(%s:%c:%d:%d): Target " + "Initiated PPR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + else + printf("(%s:%c:%d:%d): Initiator " + "Initiated PPR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_construct_ppr(ahd, devinfo, period, offset, + bus_width, ppr_options); + ahd->msgout_index = 0; + response = TRUE; + } + if (bootverbose) { + printf("(%s:%c:%d:%d): Received PPR width %x, " + "period %x, offset %x,options %x\n" + "\tFiltered to width %x, period %x, " + "offset %x, options %x\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun, + saved_width, ahd->msgin_buf[3], + saved_offset, saved_ppr_options, + bus_width, period, offset, ppr_options); + } + ahd_set_width(ahd, devinfo, bus_width, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + saved_ppr_options = tinfo->curr.ppr_options; + if ((saved_ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf("PPR with IU_REQ outstanding\n"); +#endif + ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; + } + if ((saved_ppr_options & MSG_EXT_PPR_IU_REQ) + != (ppr_options & MSG_EXT_PPR_IU_REQ)) { +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) + printf("Expecting IU Change busfree\n"); +#endif + ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE + | MSG_FLAG_IU_REQ_CHANGED; + } + + ahd_set_syncrate(ahd, devinfo, period, + offset, ppr_options, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + + done = MSGLOOP_MSGCOMPLETE; + break; + } + default: + /* Unknown extended message. Reject it. 
*/ + reject = TRUE; + break; + } + break; + } +#ifdef AHD_TARGET_MODE + case MSG_BUS_DEV_RESET: + ahd_handle_devreset(ahd, devinfo, + CAM_BDR_SENT, + "Bus Device Reset Received", + /*verbose_level*/0); + ahd_restart(ahd); + done = MSGLOOP_TERMINATED; + break; + case MSG_ABORT_TAG: + case MSG_ABORT: + case MSG_CLEAR_QUEUE: + { + int tag; + + /* Target mode messages */ + if (devinfo->role != ROLE_TARGET) { + reject = TRUE; + break; + } + tag = SCB_LIST_NULL; + if (ahd->msgin_buf[0] == MSG_ABORT_TAG) + tag = ahd_inb(ahd, INITIATOR_TAG); + ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, + devinfo->lun, tag, ROLE_TARGET, + CAM_REQ_ABORTED); + + tstate = ahd->enabled_targets[devinfo->our_scsiid]; + if (tstate != NULL) { + struct ahd_tmode_lstate* lstate; + + lstate = tstate->enabled_luns[devinfo->lun]; + if (lstate != NULL) { + ahd_queue_lstate_event(ahd, lstate, + devinfo->our_scsiid, + ahd->msgin_buf[0], + /*arg*/tag); + ahd_send_lstate_events(ahd, lstate); + } + } + ahd_restart(ahd); + done = MSGLOOP_TERMINATED; + break; + } +#endif + case MSG_QAS_REQUEST: + printf("%s: QAS request. SCSISIGI == 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); + /* FALLTHROUGH */ + case MSG_TERM_IO_PROC: + default: + reject = TRUE; + break; + } + + if (reject) { + /* + * Setup to reject the message. + */ + ahd->msgout_index = 0; + ahd->msgout_len = 1; + ahd->msgout_buf[0] = MSG_MESSAGE_REJECT; + done = MSGLOOP_MSGCOMPLETE; + response = TRUE; + } + + if (done != MSGLOOP_IN_PROG && !response) + /* Clear the outgoing message buffer */ + ahd->msgout_len = 0; + + return (done); +} + +/* + * Process a message reject message. + */ +static int +ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) +{ + /* + * What we care about here is if we had an + * outstanding SDTR or WDTR message for this + * target. If we did, this is a signal that + * the target is refusing negotiation. + */ + struct scb *scb; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + u_int scb_index; + u_int last_msg; + int response = 0; + + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, + devinfo->our_scsiid, + devinfo->target, &tstate); + /* Might be necessary */ + last_msg = ahd_inb(ahd, LAST_MSG); + + if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { + /* + * Target does not support the PPR message. + * Attempt to negotiate SPI-2 style. + */ + if (bootverbose) { + printf("(%s:%c:%d:%d): PPR Rejected. " + "Trying WDTR/SDTR\n", + ahd_name(ahd), devinfo->channel, + devinfo->target, devinfo->lun); + } + tinfo->goal.ppr_options = 0; + tinfo->curr.transport_version = 2; + tinfo->goal.transport_version = 2; + ahd->msgout_index = 0; + ahd->msgout_len = 0; + ahd_build_transfer_msg(ahd, devinfo); + ahd->msgout_index = 0; + response = 1; + } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { + + /* note 8bit xfers */ + printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " + "8bit transfers\n", ahd_name(ahd), + devinfo->channel, devinfo->target, devinfo->lun); + ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, + /*paused*/TRUE); + /* + * No need to clear the sync rate. If the target + * did not accept the command, our syncrate is + * unaffected. If the target started the negotiation, + * but rejected our response, we already cleared the + * sync rate before sending our WDTR. 
+ */
+ if (tinfo->goal.period) {
+
+ /* Start the sync negotiation */
+ ahd->msgout_index = 0;
+ ahd->msgout_len = 0;
+ ahd_build_transfer_msg(ahd, devinfo);
+ ahd->msgout_index = 0;
+ response = 1;
+ }
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+ /* note asynch xfers and clear flag */
+ ahd_set_syncrate(ahd, devinfo, /*period*/0,
+ /*offset*/0, /*ppr_options*/0,
+ AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+ /*paused*/TRUE);
+ printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
+ "Using asynchronous transfers\n",
+ ahd_name(ahd), devinfo->channel,
+ devinfo->target, devinfo->lun);
+ } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+ int tag_type;
+ int mask;
+
+ tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+
+ if (tag_type == MSG_SIMPLE_TASK) {
+ printf("(%s:%c:%d:%d): refuses tagged commands. "
+ "Performing non-tagged I/O\n", ahd_name(ahd),
+ devinfo->channel, devinfo->target, devinfo->lun);
+ ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
+ mask = ~0x23;
+ } else {
+ printf("(%s:%c:%d:%d): refuses %s tagged commands. "
+ "Performing simple queue tagged I/O only\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ devinfo->lun, tag_type == MSG_ORDERED_TASK
+ ? "ordered" : "head of queue");
+ ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
+ mask = ~0x03;
+ }
+
+ /*
+ * Resend the identify for this CCB as the target
+ * may believe that the selection is invalid otherwise.
+ */
+ ahd_outb(ahd, SCB_CONTROL,
+ ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
+ scb->hscb->control &= mask;
+ ahd_set_transaction_tag(scb, /*enabled*/FALSE,
+ /*type*/MSG_SIMPLE_TASK);
+ ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
+ ahd_assert_atn(ahd);
+ ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
+ SCB_GET_TAG(scb));
+
+ /*
+ * Requeue all tagged commands for this target
+ * currently in our possession so they can be
+ * converted to untagged commands.
+ */
+ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
+ ROLE_INITIATOR, CAM_REQUEUE_REQ,
+ SEARCH_COMPLETE);
+ } else {
+ /*
+ * Otherwise, we ignore it.
+ */
+ printf("%s:%c:%d: Message reject for %x -- ignored\n",
+ ahd_name(ahd), devinfo->channel, devinfo->target,
+ last_msg);
+ }
+ return (response);
+}
+
+/*
+ * Process an ignore wide residue message.
+ */
+static void
+ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+ u_int scb_index;
+ struct scb *scb;
+
+ scb_index = ahd_get_scbptr(ahd);
+ scb = ahd_lookup_scb(ahd, scb_index);
+ /*
+ * XXX Actually check data direction in the sequencer?
+ * Perhaps add datadir to some spare bits in the hscb?
+ */
+ if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
+ || ahd_get_transfer_dir(scb) != CAM_DIR_IN) {
+ /*
+ * Ignore the message if we haven't
+ * seen an appropriate data phase yet.
+ */
+ } else {
+ /*
+ * If the residual occurred on the last
+ * transfer and the transfer request was
+ * expected to end on an odd count, do
+ * nothing. Otherwise, subtract a byte
+ * and update the residual count accordingly.
+ */
+ uint32_t sgptr;
+
+ sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
+ if ((sgptr & SG_LIST_NULL) != 0
+ && ahd_inb(ahd, DATA_COUNT_ODD) == 1) {
+ /*
+ * If the residual occurred on the last
+ * transfer and the transfer request was
+ * expected to end on an odd count, do
+ * nothing.
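+ *
+ * Otherwise, a worked example of the fix-up that follows: on
+ * a 16-bit bus an odd-length read, say 513 bytes, ends with a
+ * final 16-bit transfer whose high byte is padding. The DMA
+ * shadow has already counted that pad byte, so the code adds
+ * one back to the residual data count and steps the data
+ * address back one byte; if the pad byte was the first byte
+ * of the current S/G segment, it also backs up one segment
+ * and rebuilds the residual S/G pointer.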
+ */ + } else { + uint32_t data_cnt; + uint64_t data_addr; + uint32_t sglen; + + /* Pull in the rest of the sgptr */ + sgptr |= + (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 3) << 24) + | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 2) << 16) + | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 1) << 8); + sgptr &= SG_PTR_MASK; + data_cnt = + (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24) + | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+2) << 16) + | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+1) << 8) + | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT)); + + data_addr = (((uint64_t)ahd_inb(ahd, SHADDR + 7)) << 56) + | (((uint64_t)ahd_inb(ahd, SHADDR + 6)) << 48) + | (((uint64_t)ahd_inb(ahd, SHADDR + 5)) << 40) + | (((uint64_t)ahd_inb(ahd, SHADDR + 4)) << 32) + | (ahd_inb(ahd, SHADDR + 3) << 24) + | (ahd_inb(ahd, SHADDR + 2) << 16) + | (ahd_inb(ahd, SHADDR + 1) << 8) + | (ahd_inb(ahd, SHADDR)); + + data_cnt += 1; + data_addr -= 1; + + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { + struct ahd_dma64_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + + /* + * The residual sg ptr points to the next S/G + * to load so we must go back one. + */ + sg--; + sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + if (sg != scb->sg_list + && sglen < (data_cnt & AHD_SG_LEN_MASK)) { + + sg--; + sglen = ahd_le32toh(sg->len); + /* + * Preserve High Address and SG_LIST + * bits while setting the count to 1. + */ + data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); + data_addr = ahd_le64toh(sg->addr) + + (sglen & AHD_SG_LEN_MASK) + - 1; + + /* + * Increment sg so it points to the + * "next" sg. + */ + sg++; + sgptr = ahd_sg_virt_to_bus(ahd, scb, + sg); + } + } else { + struct ahd_dma_seg *sg; + + sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); + + /* + * The residual sg ptr points to the next S/G + * to load so we must go back one. + */ + sg--; + sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + if (sg != scb->sg_list + && sglen < (data_cnt & AHD_SG_LEN_MASK)) { + + sg--; + sglen = ahd_le32toh(sg->len); + /* + * Preserve High Address and SG_LIST + * bits while setting the count to 1. + */ + data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); + data_addr = ahd_le32toh(sg->addr) + + (sglen & AHD_SG_LEN_MASK) + - 1; + + /* + * Increment sg so it points to the + * "next" sg. + */ + sg++; + sgptr = ahd_sg_virt_to_bus(ahd, scb, + sg); + } + } + ahd_outb(ahd, SCB_RESIDUAL_SGPTR + 3, sgptr >> 24); + ahd_outb(ahd, SCB_RESIDUAL_SGPTR + 2, sgptr >> 16); + ahd_outb(ahd, SCB_RESIDUAL_SGPTR + 1, sgptr >> 8); + ahd_outb(ahd, SCB_RESIDUAL_SGPTR, sgptr); + + ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); + ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); + ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); + ahd_outb(ahd, SCB_RESIDUAL_DATACNT, data_cnt); + + /* + * The FIFO's pointers will be updated if/when the + * sequencer re-enters a data phase. + */ + } + } +} + + +/* + * Reinitialize the data pointers for the active transfer + * based on its current residual. + */ +static void +ahd_reinitialize_dataptrs(struct ahd_softc *ahd) +{ + struct scb *scb; + ahd_mode_state saved_modes; + u_int scb_index; + u_int wait; + uint32_t sgptr; + uint32_t resid; + uint64_t dataptr; + + AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, + AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); + + scb_index = ahd_get_scbptr(ahd); + scb = ahd_lookup_scb(ahd, scb_index); + + /* + * Release and reacquire the FIFO so we + * have a clean slate. 
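
The FIFO drain that follows uses a bounded-poll idiom (spin a fixed number of times with a delay, then force the state and warn). The pattern in isolation, with delay_us() as an assumed stand-in for ahd_delay():

    #include <stdint.h>

    extern void delay_us(unsigned us);          /* assumed platform delay */

    static int
    poll_bit_set(volatile const uint8_t *reg, uint8_t mask, int tries)
    {
            while (tries-- > 0) {
                    if ((*reg & mask) != 0)
                            return (0);         /* condition met */
                    delay_us(100);
            }
            return (-1);                        /* timed out; caller forces state */
    }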
+	 */
+	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
+	wait = 1000;
+	do {
+		ahd_delay(100);
+	} while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE));
+	if (wait == 0) {
+		ahd_print_path(ahd, scb);
+		printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
+		ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
+	}
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, DFFSTAT,
+		 ahd_inb(ahd, DFFSTAT) | (saved_modes == 0x11 ? CURRFIFO : 0));
+
+	/*
+	 * Determine initial values for data_addr and data_cnt
+	 * for resuming the data phase.
+	 */
+	sgptr = (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 3) << 24)
+	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 2) << 16)
+	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 1) << 8)
+	      | ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
+	sgptr &= SG_PTR_MASK;
+
+	resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16)
+	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8)
+	      | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT);
+
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+		struct ahd_dma64_seg *sg;
+
+		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+		/* The residual sg_ptr always points to the next sg */
+		sg--;
+
+		dataptr = ahd_le64toh(sg->addr)
+			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+			- resid;
+		ahd_outb(ahd, HADDR + 7, dataptr >> 56);
+		ahd_outb(ahd, HADDR + 6, dataptr >> 48);
+		ahd_outb(ahd, HADDR + 5, dataptr >> 40);
+		ahd_outb(ahd, HADDR + 4, dataptr >> 32);
+	} else {
+		struct ahd_dma_seg *sg;
+
+		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+		/* The residual sg_ptr always points to the next sg */
+		sg--;
+
+		dataptr = ahd_le32toh(sg->addr)
+			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+			- resid;
+		ahd_outb(ahd, HADDR + 4,
+			 (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
+	}
+	ahd_outb(ahd, HADDR + 3, dataptr >> 24);
+	ahd_outb(ahd, HADDR + 2, dataptr >> 16);
+	ahd_outb(ahd, HADDR + 1, dataptr >> 8);
+	ahd_outb(ahd, HADDR, dataptr);
+	ahd_outb(ahd, HCNT + 2, resid >> 16);
+	ahd_outb(ahd, HCNT + 1, resid >> 8);
+	ahd_outb(ahd, HCNT, resid);
+}
+
+/*
+ * Handle the effects of issuing a bus device reset message.
+ */
+static void
+ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		    cam_status status, char *message, int verbose_level)
+{
+#ifdef AHD_TARGET_MODE
+	struct ahd_tmode_tstate* tstate;
+	u_int lun;
+#endif
+	int found;
+
+	found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
+			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
+			       status);
+
+#ifdef AHD_TARGET_MODE
+	/*
+	 * Send an immediate notify ccb to all target mode peripheral
+	 * drivers affected by this action.
+	 */
+	tstate = ahd->enabled_targets[devinfo->our_scsiid];
+	if (tstate != NULL) {
+		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
+			struct ahd_tmode_lstate* lstate;
+
+			lstate = tstate->enabled_luns[lun];
+			if (lstate == NULL)
+				continue;
+
+			ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
+					       MSG_BUS_DEV_RESET, /*arg*/0);
+			ahd_send_lstate_events(ahd, lstate);
+		}
+	}
+#endif
+
+	/*
+	 * Go back to async/narrow transfers and renegotiate.
+	 */
+	ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+		      AHD_TRANS_CUR, /*paused*/TRUE);
+	ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
+			 /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE);
+
+	ahd_send_async(ahd, devinfo->channel, devinfo->target,
+		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
+
+	if (message != NULL
+	 && (verbose_level <= bootverbose))
+		printf("%s: %s on %c:%d. 
%d SCBs aborted\n", ahd_name(ahd),
+		       message, devinfo->channel, devinfo->target, found);
+}
+
+#ifdef AHD_TARGET_MODE
+static void
+ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		       struct scb *scb)
+{
+
+	/*
+	 * To facilitate adding multiple messages together,
+	 * each routine should increment the index and len
+	 * variables instead of setting them explicitly.
+	 */
+	ahd->msgout_index = 0;
+	ahd->msgout_len = 0;
+
+	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
+		ahd_build_transfer_msg(ahd, devinfo);
+	else
+		panic("ahd_intr: AWAITING target message with no message");
+
+	ahd->msgout_index = 0;
+	ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
+}
+#endif
+/**************************** Initialization **********************************/
+static bus_size_t
+ahd_sglist_size(struct ahd_softc *ahd)
+{
+	bus_size_t list_size;
+
+	list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG;
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+		list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG;
+	return (list_size);
+}
+
+/*
+ * Calculate the optimum S/G List allocation size.  S/G elements used
+ * for a given transaction must be physically contiguous.  Assume the
+ * OS will allocate full pages to us, so it doesn't make sense to request
+ * less than a page.
+ */
+static bus_size_t
+ahd_sglist_allocsize(struct ahd_softc *ahd)
+{
+	bus_size_t sg_list_increment;
+	bus_size_t sg_list_size;
+	bus_size_t max_list_size;
+	bus_size_t best_list_size;
+
+	/* Start out with the minimum required for AHD_NSEG. */
+	sg_list_increment = ahd_sglist_size(ahd);
+	sg_list_size = sg_list_increment;
+
+	/* Get us as close as possible to a page in size. */
+	while ((sg_list_size + sg_list_increment) <= PAGE_SIZE)
+		sg_list_size += sg_list_increment;
+
+	/*
+	 * Try to reduce the amount of wastage by allocating
+	 * multiple pages.
+	 */
+	best_list_size = sg_list_size;
+	max_list_size = roundup(sg_list_increment, PAGE_SIZE);
+	if (max_list_size < 4 * PAGE_SIZE)
+		max_list_size = 4 * PAGE_SIZE;
+	if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment))
+		max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment);
+	while ((sg_list_size + sg_list_increment) <= max_list_size
+	    && (sg_list_size % PAGE_SIZE) != 0) {
+		bus_size_t new_mod;
+		bus_size_t best_mod;
+
+		sg_list_size += sg_list_increment;
+		new_mod = sg_list_size % PAGE_SIZE;
+		best_mod = best_list_size % PAGE_SIZE;
+		if (new_mod > best_mod || new_mod == 0) {
+			best_list_size = sg_list_size;
+		}
+	}
+	return (best_list_size);
+}
+
+/*
+ * Allocate a controller structure for a new device
+ * and perform initial initialization.
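
The size search in ahd_sglist_allocsize() above reduces to the following standalone sketch, where page and max play the roles of PAGE_SIZE and the clamped max_list_size. A larger remainder modulo the page size means less waste in the final page, and a zero remainder is a perfect fit:

    #include <stddef.h>

    static size_t
    best_alloc_size(size_t increment, size_t page, size_t max)
    {
            size_t size, best;

            size = increment;
            while (size + increment <= page)        /* fill one page first */
                    size += increment;
            best = size;
            while (size + increment <= max && (size % page) != 0) {
                    size += increment;
                    if (size % page > best % page || size % page == 0)
                            best = size;
            }
            return (best);
    }

For example, with a 256-byte increment and 4 KB pages the first loop already lands on 4096, a zero-waste fit, and the second loop never runs.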
+ */ +struct ahd_softc * +ahd_alloc(void *platform_arg, char *name) +{ + struct ahd_softc *ahd; + +#ifndef __FreeBSD__ + ahd = malloc(sizeof(*ahd), M_DEVBUF, M_NOWAIT); + if (!ahd) { + printf("aic7xxx: cannot malloc softc!\n"); + free(name, M_DEVBUF); + return NULL; + } +#else + ahd = device_get_softc((device_t)platform_arg); +#endif + memset(ahd, 0, sizeof(*ahd)); + ahd->seep_config = malloc(sizeof(*ahd->seep_config), + M_DEVBUF, M_NOWAIT); + if (ahd->seep_config == NULL) { +#ifndef __FreeBSD__ + free(ahd, M_DEVBUF); +#endif + free(name, M_DEVBUF); + return (NULL); + } + LIST_INIT(&ahd->pending_scbs); + /* We don't know our unit number until the OSM sets it */ + ahd->name = name; + ahd->unit = -1; + ahd->description = NULL; + ahd->bus_description = NULL; + ahd->channel = 'A'; + ahd->chip = AHD_NONE; + ahd->features = AHD_FENONE; + ahd->bugs = AHD_BUGNONE; + ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A + | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; + ahd_timer_init(&ahd->reset_timer); + + if (ahd_platform_alloc(ahd, platform_arg) != 0) { + ahd_free(ahd); + ahd = NULL; + } + return (ahd); +} + +int +ahd_softc_init(struct ahd_softc *ahd) +{ + + ahd->unpause = 0; + ahd->pause = PAUSE; + return (0); +} + +void +ahd_softc_insert(struct ahd_softc *ahd) +{ + struct ahd_softc *list_ahd; + +#if AHD_PCI_CONFIG > 0 + /* + * Second Function PCI devices need to inherit some + * settings from function 0. + */ + if ((ahd->features & AHD_MULTI_FUNC) != 0) { + TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { + ahd_dev_softc_t list_pci; + ahd_dev_softc_t pci; + + list_pci = list_ahd->dev_softc; + pci = ahd->dev_softc; + if (ahd_get_pci_slot(list_pci) == ahd_get_pci_slot(pci) + && ahd_get_pci_bus(list_pci) == ahd_get_pci_bus(pci)) { + struct ahd_softc *master; + struct ahd_softc *slave; + + if (ahd_get_pci_function(list_pci) == 0) { + master = list_ahd; + slave = ahd; + } else { + master = ahd; + slave = list_ahd; + } + slave->flags &= ~AHD_BIOS_ENABLED; + slave->flags |= + master->flags & AHD_BIOS_ENABLED; + slave->flags &= ~AHD_PRIMARY_CHANNEL; + slave->flags |= + master->flags & AHD_PRIMARY_CHANNEL; + break; + } + } + } +#endif + + /* + * Insertion sort into our list of softcs. + */ + list_ahd = TAILQ_FIRST(&ahd_tailq); + while (list_ahd != NULL + && ahd_softc_comp(list_ahd, ahd) <= 0) + list_ahd = TAILQ_NEXT(list_ahd, links); + if (list_ahd != NULL) + TAILQ_INSERT_BEFORE(list_ahd, ahd, links); + else + TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links); + ahd->init_level++; +} + +/* + * Verify that the passed in softc pointer is for a + * controller that is still configured. 
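
The softc list maintenance in ahd_softc_insert() above is the standard queue(3) insertion-sort idiom; in isolation, with a hypothetical item type and compare key:

    #include <sys/queue.h>

    struct item {
            int key;
            TAILQ_ENTRY(item) links;
    };
    TAILQ_HEAD(item_list, item);

    /* Insert new_item keeping the list sorted ascending by key. */
    static void
    sorted_insert(struct item_list *head, struct item *new_item)
    {
            struct item *cur;

            TAILQ_FOREACH(cur, head, links) {
                    if (cur->key > new_item->key)
                            break;
            }
            if (cur != NULL)
                    TAILQ_INSERT_BEFORE(cur, new_item, links);
            else
                    TAILQ_INSERT_TAIL(head, new_item, links);
    }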
+ */
+struct ahd_softc *
+ahd_find_softc(struct ahd_softc *ahd)
+{
+	struct ahd_softc *list_ahd;
+
+	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
+		if (list_ahd == ahd)
+			return (ahd);
+	}
+	return (NULL);
+}
+
+void
+ahd_set_unit(struct ahd_softc *ahd, int unit)
+{
+	ahd->unit = unit;
+}
+
+void
+ahd_set_name(struct ahd_softc *ahd, char *name)
+{
+	if (ahd->name != NULL)
+		free(ahd->name, M_DEVBUF);
+	ahd->name = name;
+}
+
+void
+ahd_free(struct ahd_softc *ahd)
+{
+	int i;
+
+	ahd_fini_scbdata(ahd);
+	switch (ahd->init_level) {
+	default:
+	case 5:
+		ahd_shutdown(ahd);
+		TAILQ_REMOVE(&ahd_tailq, ahd, links);
+		/* FALLTHROUGH */
+	case 4:
+		ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
+				  ahd->shared_data_dmamap);
+		/* FALLTHROUGH */
+	case 3:
+		ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
+				ahd->shared_data_dmamap);
+		ahd_dmamap_destroy(ahd, ahd->shared_data_dmat,
+				   ahd->shared_data_dmamap);
+		/* FALLTHROUGH */
+	case 2:
+		ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
+	case 1:
+#ifndef __linux__
+		ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
+#endif
+		break;
+	case 0:
+		break;
+	}
+
+#ifndef __linux__
+	ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
+#endif
+	ahd_platform_free(ahd);
+	for (i = 0; i < AHD_NUM_TARGETS; i++) {
+		struct ahd_tmode_tstate *tstate;
+
+		tstate = ahd->enabled_targets[i];
+		if (tstate != NULL) {
+#if AHD_TARGET_MODE
+			int j;
+
+			for (j = 0; j < AHD_NUM_LUNS; j++) {
+				struct ahd_tmode_lstate *lstate;
+
+				lstate = tstate->enabled_luns[j];
+				if (lstate != NULL) {
+					xpt_free_path(lstate->path);
+					free(lstate, M_DEVBUF);
+				}
+			}
+#endif
+			free(tstate, M_DEVBUF);
+		}
+	}
+#if AHD_TARGET_MODE
+	if (ahd->black_hole != NULL) {
+		xpt_free_path(ahd->black_hole->path);
+		free(ahd->black_hole, M_DEVBUF);
+	}
+#endif
+	if (ahd->name != NULL)
+		free(ahd->name, M_DEVBUF);
+	if (ahd->seep_config != NULL)
+		free(ahd->seep_config, M_DEVBUF);
+#ifndef __FreeBSD__
+	free(ahd, M_DEVBUF);
+#endif
+	return;
+}
+
+void
+ahd_shutdown(void *arg)
+{
+	struct ahd_softc *ahd;
+
+	ahd = (struct ahd_softc *)arg;
+
+	/* This will reset most registers to 0, but not all */
+	ahd_reset(ahd);
+}
+
+/*
+ * Reset the controller and record some information about it
+ * that is only available just after a reset.
+ */
+int
+ahd_reset(struct ahd_softc *ahd)
+{
+	u_int sxfrctl1;
+	int wait;
+	uint32_t cmd;
+
+	/*
+	 * Preserve the value of the SXFRCTL1 register for all channels.
+	 * It contains settings that affect termination and we don't want
+	 * to disturb the integrity of the bus.
+	 */
+	ahd_pause(ahd);
+	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);
+
+	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
+	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
+		uint32_t mod_cmd;
+
+		/*
+		 * A4 Razor #632
+		 * During the assertion of CHIPRST, the chip
+		 * does not disable its parity logic prior to
+		 * the start of the reset.  This may cause a
+		 * parity error to be detected and thus a
+		 * spurious SERR or PERR assertion.  Disable
+		 * PERR and SERR responses during the CHIPRST.
+		 */
+		mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
+		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+				     mod_cmd, /*bytes*/2);
+	}
+	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);
+
+	/*
+	 * Ensure that the reset has finished.  We delay 1000us
+	 * prior to reading the register to make sure the chip
+	 * has sufficiently completed its reset to handle register
+	 * accesses.
+	 */
+	wait = 1000;
+	do {
+		ahd_delay(1000);
+	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));
+
+	if (wait == 0) {
+		printf("%s: WARNING - Failed chip reset!  "
+		       "Trying to initialize anyway.\n", ahd_name(ahd));
+	}
+	ahd_outb(ahd, HCNTRL, ahd->pause);
+
+	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
+		/*
+		 * Clear any latched PCI error status and restore
+		 * previous SERR and PERR response enables.
+		 */
+		ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+				     0xFF, /*bytes*/1);
+		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+				     cmd, /*bytes*/2);
+	}
+	/* After a reset, we know the state of the mode register. */
+	ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	/* Determine chip configuration */
+	ahd->features &= ~AHD_WIDE;
+	if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
+		ahd->features |= AHD_WIDE;
+
+	/*
+	 * Restore SXFRCTL1.
+	 *
+	 * We must always initialize STPWEN to 1 before we
+	 * restore the saved values.  STPWEN is initialized
+	 * to a tri-state condition which can only be cleared
+	 * by turning it on.
+	 */
+	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
+	ahd_outb(ahd, SXFRCTL1, sxfrctl1);
+
+	/*
+	 * If a recovery action has forced a chip reset,
+	 * re-initialize the chip to our liking.
+	 */
+	if (ahd->init_level > 0)
+		ahd_chip_init(ahd);
+
+	return (0);
+}
+
+/*
+ * Determine the number of SCBs available on the controller
+ */
+int
+ahd_probe_scbs(struct ahd_softc *ahd) {
+	int i;
+
+	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+	for (i = 0; i < AHD_SCB_MAX; i++) {
+		int j;
+
+		ahd_set_scbptr(ahd, i);
+		ahd_outw(ahd, SCB_BASE, i);
+		for (j = 2; j < 64; j++)
+			ahd_outb(ahd, SCB_BASE+j, 0);
+		/* Start out life as unallocated (needing an abort) */
+		ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
+		if (ahd_inw_scbram(ahd, SCB_BASE) != i)
+			break;
+		ahd_set_scbptr(ahd, 0);
+		if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
+			break;
+	}
+	return (i);
+}
+
+static void
+ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	bus_addr_t *baddr;
+
+	baddr = (bus_addr_t *)arg;
+	*baddr = segs->ds_addr;
+}
+
+static void
+ahd_initialize_hscbs(struct ahd_softc *ahd)
+{
+	int i;
+
+	for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
+		ahd_set_scbptr(ahd, i);
+
+		/* Clear the control byte. */
+		ahd_outb(ahd, SCB_CONTROL, 0);
+
+		/* Set the next pointer */
+		ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
+	}
+}
+
+static int
+ahd_init_scbdata(struct ahd_softc *ahd)
+{
+	struct scb_data *scb_data;
+
+	scb_data = &ahd->scb_data;
+	SLIST_INIT(&scb_data->free_scbs);
+	SLIST_INIT(&scb_data->hscb_maps);
+	SLIST_INIT(&scb_data->sg_maps);
+	SLIST_INIT(&scb_data->sense_maps);
+
+	/* Determine the number of hardware SCBs and initialize them */
+	scb_data->maxhscbs = ahd_probe_scbs(ahd);
+	if (scb_data->maxhscbs == 0) {
+		printf("%s: No SCB space found\n", ahd_name(ahd));
+		return (ENXIO);
+	}
+
+	ahd_initialize_hscbs(ahd);
+
+	/*
+	 * Create our DMA tags.  These tags define the kinds of device
+	 * accessible memory allocations and memory mappings we will
+	 * need to perform during normal operation.
+	 *
+	 * Unless we need to further restrict the allocation, we rely
+	 * on the restrictions of the parent dmat, hence the common
+	 * use of MAXADDR and MAXSIZE.
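
Earlier in this hunk, ahd_probe_scbs() sizes the on-chip SCB RAM by a write/read-back probe: write each slot's index to the slot, then stop when either the value does not stick (no RAM behind the slot) or slot 0 gets clobbered (the address space wrapped). A simplified sketch of the technique, with rd/wr abstracting the SCBPTR-indexed accesses:

    #include <stdint.h>

    static int
    probe_slots(uint16_t (*rd)(int slot), void (*wr)(int slot, uint16_t v),
                int max)
    {
            int i;

            for (i = 0; i < max; i++) {
                    wr(i, (uint16_t)i);
                    if (rd(i) != (uint16_t)i)       /* no RAM behind this slot */
                            break;
                    if (rd(0) != 0)                 /* wrapped back onto slot 0 */
                            break;
            }
            return (i);
    }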
+	 */
+
+	/* DMA tag for our hardware scb structures */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       PAGE_SIZE, /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* DMA tag for our S/G structures. */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       ahd_sglist_allocsize(ahd), /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->sg_dmat) != 0) {
+		goto error_exit;
+	}
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
+		printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
+		       ahd_sglist_allocsize(ahd));
+#endif
+
+	scb_data->init_level++;
+
+	/* DMA tag for our sense buffers.  We allocate in page sized chunks */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       PAGE_SIZE, /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->sense_dmat) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* Perform initial CCB allocation */
+	ahd_alloc_scbs(ahd);
+
+	if (scb_data->numscbs == 0) {
+		printf("%s: ahd_init_scbdata - "
+		       "Unable to allocate initial scbs\n",
+		       ahd_name(ahd));
+		goto error_exit;
+	}
+
+	/*
+	 * Reserve an SCB as the initial "next SCB" to be
+	 * queued to the controller.
+	 */
+	ahd->next_queued_scb = ahd_get_scb(ahd);
+
+	/*
+	 * Note that we were successful
+	 */
+	return (0);
+
+error_exit:
+
+	return (ENOMEM);
+}
+
+static void
+ahd_fini_scbdata(struct ahd_softc *ahd)
+{
+	struct scb_data *scb_data;
+
+	scb_data = &ahd->scb_data;
+	if (scb_data == NULL)
+		return;
+
+	switch (scb_data->init_level) {
+	default:
+	case 7:
+	{
+		struct map_node *sns_map;
+
+		while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
+			SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
+			ahd_dmamap_unload(ahd, scb_data->sense_dmat,
+					  sns_map->dmamap);
+			ahd_dmamem_free(ahd, scb_data->sense_dmat,
+					sns_map->vaddr, sns_map->dmamap);
+			free(sns_map, M_DEVBUF);
+		}
+		ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
+		/* FALLTHROUGH */
+	}
+	case 6:
+	{
+		struct map_node *sg_map;
+
+		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
+			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
+			ahd_dmamap_unload(ahd, scb_data->sg_dmat,
+					  sg_map->dmamap);
+			ahd_dmamem_free(ahd, scb_data->sg_dmat,
+					sg_map->vaddr, sg_map->dmamap);
+			free(sg_map, M_DEVBUF);
+		}
+		ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
+		/* FALLTHROUGH */
+	}
+	case 5:
+	{
+		struct map_node *hscb_map;
+
+		while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
+			SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
+			ahd_dmamap_unload(ahd, scb_data->hscb_dmat,
+					  hscb_map->dmamap);
+			ahd_dmamem_free(ahd, scb_data->hscb_dmat,
+					hscb_map->vaddr, hscb_map->dmamap);
+			free(hscb_map, M_DEVBUF);
+		}
+		ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
+		/* FALLTHROUGH */
+	}
+	case 4:
+	case 3:
+	case 2:
+	case 1:
+	case 0:
+		break;
+	}
+}
+
+/*
+ * DSP filter Bypass must be enabled until the first selection
+ * after a change in bus mode (Razor #491 and #493).
+ */ +static void +ahd_setup_iocell_workaround(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) + | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); + ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); + ahd_restore_modes(ahd, saved_modes); +} + +static void +ahd_iocell_first_selection(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + u_int sblkctl; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + sblkctl = ahd_inb(ahd, SBLKCTL); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + if ((sblkctl & ENAB40) != 0) { + ahd_outb(ahd, DSPDATACTL, + ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); + } + ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); + ahd_outb(ahd, CLRINT, CLRSCSIINT); + ahd_restore_modes(ahd, saved_modes); +} + +void +ahd_alloc_scbs(struct ahd_softc *ahd) +{ + struct scb_data *scb_data; + struct scb *next_scb; + struct hardware_scb *hscb; + struct map_node *hscb_map; + struct map_node *sg_map; + struct map_node *sense_map; + uint8_t *segs; + uint8_t *sense_data; + bus_addr_t hscb_busaddr; + bus_addr_t sg_busaddr; + bus_addr_t sense_busaddr; + int newcount; + int i; + + scb_data = &ahd->scb_data; + if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) + /* Can't allocate any more */ + return; + + if (scb_data->scbs_left != 0) { + int offset; + + offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; + hscb_map = SLIST_FIRST(&scb_data->hscb_maps); + hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; + hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); + } else { + hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT); + + if (hscb_map == NULL) + return; + + /* Allocate the next batch of hardware SCBs */ + if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, + (void **)&hscb_map->vaddr, + BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { + free(hscb_map, M_DEVBUF); + return; + } + + SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); + + ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, + hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, + &hscb_map->physaddr, /*flags*/0); + + hscb = (struct hardware_scb *)hscb_map->vaddr; + hscb_busaddr = hscb_map->physaddr; + scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); + } + + if (scb_data->sgs_left != 0) { + int offset; + + offset = ahd_sglist_allocsize(ahd) + - (scb_data->sgs_left * ahd_sglist_size(ahd)); + sg_map = SLIST_FIRST(&scb_data->sg_maps); + segs = sg_map->vaddr + offset; + sg_busaddr = sg_map->physaddr + offset; + } else { + sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); + + if (sg_map == NULL) + return; + + /* Allocate the next batch of S/G lists */ + if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, + (void **)&sg_map->vaddr, + BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { + free(sg_map, M_DEVBUF); + return; + } + + SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); + + ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, + sg_map->vaddr, ahd_sglist_allocsize(ahd), + ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); + + segs = sg_map->vaddr; + sg_busaddr = sg_map->physaddr; + scb_data->sgs_left = + ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); +#ifdef AHD_DEBUG + if (ahd_debug & AHD_SHOW_MEMORY) + printf("Mapped SG data\n"); +#endif + } + + if (scb_data->sense_left != 0) { + int offset; + + offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); + sense_map = 
SLIST_FIRST(&scb_data->sense_maps); + sense_data = sense_map->vaddr + offset; + sense_busaddr = sense_map->physaddr + offset; + } else { + sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT); + + if (sense_map == NULL) + return; + + /* Allocate the next batch of sense buffers */ + if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, + (void **)&sense_map->vaddr, + BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { + free(sense_map, M_DEVBUF); + return; + } + + SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); + + ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, + sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, + &sense_map->physaddr, /*flags*/0); + + sense_data = sense_map->vaddr; + sense_busaddr = sense_map->physaddr; + scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; +#ifdef AHD_DEBUG + if (ahd_debug & AHD_SHOW_MEMORY) + printf("Mapped sense data\n"); +#endif + } + + newcount = MIN(scb_data->sense_left, scb_data->scbs_left); + newcount = MIN(newcount, scb_data->sgs_left); + newcount = MIN(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); + scb_data->sense_left -= newcount; + scb_data->scbs_left -= newcount; + scb_data->sgs_left -= newcount; + for (i = 0; i < newcount; i++) { + struct scb_platform_data *pdata; +#ifndef __linux__ + int error; +#endif + next_scb = (struct scb *)malloc(sizeof(*next_scb), + M_DEVBUF, M_NOWAIT); + if (next_scb == NULL) + break; + + pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), + M_DEVBUF, M_NOWAIT); + if (pdata == NULL) { + free(next_scb, M_DEVBUF); + break; + } + next_scb->platform_data = pdata; + next_scb->hscb_map = hscb_map; + next_scb->sg_map = sg_map; + next_scb->sense_map = sense_map; + next_scb->sg_list = segs; + next_scb->sense_data = sense_data; + next_scb->sense_busaddr = sense_busaddr; + next_scb->hscb = hscb; + hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); + + /* + * The sequencer always starts with the second entry. + * The first entry is embedded in the scb. 
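
The three carve-out branches above (hscb, S/G, and sense pools) share one pattern: resume slicing a partially used DMA page, or map a fresh one when the pool runs dry. A minimal sketch of that page-carving allocator, with hypothetical names and POOL_PAGE standing in for PAGE_SIZE:

    #include <stdint.h>
    #include <stddef.h>

    #define POOL_PAGE 4096

    struct pool {
            uint8_t  *vaddr;        /* KVA of the current page */
            uint64_t  physaddr;     /* bus address of the same page */
            size_t    objsize;
            int       left;         /* objects still uncarved */
    };

    /* Hand out the next object and its bus address, page-relative. */
    static void *
    pool_take(struct pool *p, uint64_t *busaddr)
    {
            size_t offset;

            if (p->left == 0)
                    return (NULL);  /* caller must map a fresh page */
            offset = POOL_PAGE - p->left * p->objsize;
            *busaddr = p->physaddr + offset;
            p->left--;
            return (p->vaddr + offset);
    }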
+		 */
+		next_scb->sg_list_busaddr = sg_busaddr;
+		if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+			next_scb->sg_list_busaddr
+			    += sizeof(struct ahd_dma64_seg);
+		else
+			next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
+		next_scb->ahd_softc = ahd;
+		next_scb->flags = SCB_FREE;
+#ifndef __linux__
+		error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
+					  &next_scb->dmamap);
+		if (error != 0) {
+			free(next_scb, M_DEVBUF);
+			free(pdata, M_DEVBUF);
+			break;
+		}
+#endif
+		next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
+
+		SLIST_INSERT_HEAD(&scb_data->free_scbs,
+				  next_scb, links.sle);
+		hscb++;
+		hscb_busaddr += sizeof(*hscb);
+		segs += ahd_sglist_size(ahd);
+		sg_busaddr += ahd_sglist_size(ahd);
+		sense_data += AHD_SENSE_BUFSIZE;
+		sense_busaddr += AHD_SENSE_BUFSIZE;
+		scb_data->numscbs++;
+	}
+}
+
+void
+ahd_controller_info(struct ahd_softc *ahd, char *buf)
+{
+	const char *speed;
+	const char *type;
+	int len;
+
+	len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
+	buf += len;
+
+	speed = "Ultra320 ";
+	if ((ahd->features & AHD_WIDE) != 0) {
+		type = "Wide";
+	} else {
+		type = "Single";
+	}
+	len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ",
+		      speed, type, ahd->channel, ahd->our_id);
+	buf += len;
+
+	sprintf(buf, "%s, %d SCBs", ahd->bus_description,
+		ahd->scb_data.maxhscbs);
+}
+
+static const char *channel_strings[] = {
+	"Primary Low",
+	"Primary High",
+	"Secondary Low",
+	"Secondary High"
+};
+
+static const char *termstat_strings[] = {
+	"Terminated Correctly",
+	"Over Terminated",
+	"Under Terminated",
+	"Not Configured"
+};
+
+/*
+ * Start the board, ready for normal operation
+ */
+int
+ahd_init(struct ahd_softc *ahd)
+{
+	size_t driver_data_size;
+	int i;
+	int error;
+	int wait;
+	u_int warn_user;
+	uint8_t current_sensing;
+	uint8_t fstat;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+	/*
+	 * Verify that the compiler hasn't over-aggressively
+	 * padded important structures.
+	 */
+	if (sizeof(struct hardware_scb) != 64)
+		panic("Hardware SCB size is incorrect");
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
+		ahd->flags |= AHD_SEQUENCER_DEBUG;
+#endif
+
+	/*
+	 * Default to allowing initiator operations.
+	 */
+	ahd->flags |= AHD_INITIATORROLE;
+
+	/*
+	 * Only allow target mode features if this unit has them enabled.
+	 */
+	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
+		ahd->features &= ~AHD_TARGETMODE;
+
+#ifndef __linux__
+	/* DMA tag for mapping buffers into device visible space. */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       /*maxsize*/MAXBSIZE, /*nsegments*/AHD_NSEG,
+			       /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
+			       /*flags*/BUS_DMA_ALLOCNOW,
+			       &ahd->buffer_dmat) != 0) {
+		return (ENOMEM);
+	}
+#endif
+
+	ahd->init_level++;
+
+	/*
+	 * DMA tag for our command fifos and other data in system memory
+	 * the card's sequencer must be able to access.  For initiator
+	 * roles, we need to allocate space for the qoutfifo.  When providing
+	 * for the target mode role, we must additionally provide space for
+	 * the incoming target command fifo.
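
For reference, the region sized and carved out below ends up with the following layout (an illustration; exact offsets depend on the AHD_* constants, and the ordering follows the pointer fix-ups after the mapping):

    /*
     *  base:  struct target_cmd targetcmds[AHD_TMODE_CMDS]  (target mode only)
     *  then:  uint16_t qoutfifo[AHD_SCB_MAX]
     *  then:  uint8_t overrun_buf[PKT_OVERRUN_BUFSIZE]      (bitbucket bug only)
     */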
+	 */
+	driver_data_size = AHD_SCB_MAX * sizeof(uint16_t);
+	if ((ahd->features & AHD_TARGETMODE) != 0)
+		driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
+	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
+		driver_data_size += PKT_OVERRUN_BUFSIZE;
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       driver_data_size,
+			       /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &ahd->shared_data_dmat) != 0) {
+		return (ENOMEM);
+	}
+
+	ahd->init_level++;
+
+	/* Allocation of driver data */
+	if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat,
+			     (void **)&ahd->qoutfifo,
+			     BUS_DMA_NOWAIT, &ahd->shared_data_dmamap) != 0) {
+		return (ENOMEM);
+	}
+
+	ahd->init_level++;
+
+	/* And permanently map it in */
+	ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap,
+			ahd->qoutfifo, driver_data_size, ahd_dmamap_cb,
+			&ahd->shared_data_busaddr, /*flags*/0);
+
+	if ((ahd->features & AHD_TARGETMODE) != 0) {
+		/* XXX sequencer assumes qoutfifo is first. */
+		ahd->targetcmds = (struct target_cmd *)ahd->qoutfifo;
+		ahd->qoutfifo = (uint16_t *)&ahd->targetcmds[AHD_TMODE_CMDS];
+	}
+
+	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
+		ahd->overrun_buf = (uint8_t *)&ahd->qoutfifo[AHD_SCB_MAX];
+
+	ahd->init_level++;
+
+	/* Allocate SCB data now that buffer_dmat is initialized */
+	if (ahd_init_scbdata(ahd) != 0)
+		return (ENOMEM);
+
+	if ((ahd->flags & AHD_INITIATORROLE) == 0)
+		ahd->flags &= ~AHD_RESET_BUS_A;
+
+	ahd_chip_init(ahd);
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+	/*
+	 * Wait for up to 500ms for our transceivers
+	 * to settle.  If the adapter does not have
+	 * a cable attached, the transceivers may
+	 * never settle, so don't complain if we
+	 * fail here.
+	 */
+	for (wait = 10000;
+	     (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
+	     wait--)
+		ahd_delay(100);
+	if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
+		goto init_done;
+
+	/*
+	 * Verify termination based on current draw and
+	 * warn user if the bus is over/under terminated.
+	 */
+	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
+				   CURSENSE_ENB);
+	if (error != 0) {
+		printf("%s: current sensing timeout 1\n", ahd_name(ahd));
+		goto init_done;
+	}
+	for (i = 20, fstat = FLX_FSTAT_BUSY;
+	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
+		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
+		if (error != 0) {
+			printf("%s: current sensing timeout 2\n",
+			       ahd_name(ahd));
+			goto init_done;
+		}
+	}
+	if (i == 0) {
+		printf("%s: Timed out during current-sensing test\n",
+		       ahd_name(ahd));
+		goto init_done;
+	}
+
+	/* Latch Current Sensing status. */
+	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
+	if (error != 0) {
+		printf("%s: current sensing timeout 3\n", ahd_name(ahd));
+		goto init_done;
+	}
+
+	/* Disable current sensing.
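
The decode loop that follows pulls one per-channel termination status out of the latched byte on each pass. Assuming the fields are 2 bits wide (i.e., FLX_CSTAT_SHIFT is 2 and FLX_CSTAT_MASK is 3, which should be verified against the register definitions), the decode in isolation looks like:

    #include <stdint.h>

    static void
    decode_termination(uint8_t latched, uint8_t stat[4])
    {
            int chan;

            for (chan = 0; chan < 4; chan++) {
                    stat[chan] = latched & 0x3;     /* 2-bit field assumed */
                    latched >>= 2;
            }
    }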
*/ + ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { + printf("%s: current_sensing == 0x%x\n", + ahd_name(ahd), current_sensing); + } +#endif + warn_user = 0; + for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) { + u_int term_stat; + + term_stat = (current_sensing & FLX_CSTAT_MASK); + switch (term_stat) { + case FLX_CSTAT_OVER: + case FLX_CSTAT_UNDER: + warn_user++; + case FLX_CSTAT_INVALID: + case FLX_CSTAT_OKAY: + if (warn_user == 0 && bootverbose == 0) + break; + printf("%s: %s Channel %s\n", ahd_name(ahd), + channel_strings[i], termstat_strings[term_stat]); + break; + } + } + if (warn_user) { + printf("%s: WARNING. Termination is not configured correctly.\n" + "%s: WARNING. SCSI bus operations may FAIL.\n", + ahd_name(ahd), ahd_name(ahd)); + } +init_done: + ahd_restart(ahd); + return (0); +} + +/* + * (Re)initialize chip state after a chip reset. + */ +static void +ahd_chip_init(struct ahd_softc *ahd) +{ + uint32_t busaddr; + u_int sxfrctl1; + u_int scsiseq_template; + u_int i; + u_int target; + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + /* + * Take the LED out of diagnostic mode + */ + ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); + + /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ + ahd_outb(ahd, IOWNID, ahd->our_id); + ahd_outb(ahd, TOWNID, ahd->our_id); + sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; + sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; + if ((ahd->bugs & AHD_LONG_SETIMO_BUG) + && (ahd->seltime != STIMESEL_MIN)) { + /* + * The selection timer duration is twice as long + * as it should be. Halve it by adding "1" to + * the user specified setting. + */ + sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; + } else { + sxfrctl1 |= ahd->seltime; + } + + ahd_outb(ahd, SXFRCTL0, DFON); + ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); + ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); + + /* Initialize mode specific S/G state. */ + for (i = 0; i < 2; i++) { + ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); + ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); + ahd_outw(ahd, LONGJMP_SCB, SCB_LIST_NULL); + ahd_outb(ahd, SG_STATE, 0); + ahd_outb(ahd, CLRSEQINTSRC, 0xFF); + ahd_outb(ahd, SEQIMODE, + ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT + |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); + } + + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); + ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); + ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); + ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); + ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); + if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) + /* + * Do not issue a target abort when a split completion + * error occurs. Let our PCIX interrupt handler deal + * with it instead. H2A4 Razor #625 + */ + ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); + + /* + * Tweak IOCELL settings. + */ + if ((ahd->flags & AHD_CPQ_BOARD) != 0) { + for (i = 0; i < NUMDSPS; i++) { + ahd_outb(ahd, DSPSELECT, i); + ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_CPQ_DEFAULT); + } + } + ahd_setup_iocell_workaround(ahd); + + /* + * Enable LQI Manager interrupts. 
+ */ + ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT + | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI + | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); + ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); + /* + * An interrupt from LQOBUSFREE is made redundant by the + * BUSFREE interrupt. We choose to have the sequencer catch + * LQOPHCHGINPKT errors manually for the command phase at the + * start of a packetized selection case. + ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE|ENLQOPHACHGINPKT); + */ + ahd_outb(ahd, LQOMODE1, 0); + + /* + * Setup sequencer interrupt handler. + */ + ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); + + /* + * Setup SCB Offset registers. + */ + ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); + ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); + ahd_outb(ahd, ATTRPTR, + offsetof(struct hardware_scb, task_attribute_nonpkt_tag)); + ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); + ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, + shared_data.idata.cdb)); + ahd_outb(ahd, QNEXTPTR, + offsetof(struct hardware_scb, next_hscb_busaddr)); + ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); + ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); + ahd_outb(ahd, LUNLEN, sizeof(ahd->next_queued_scb->hscb->lun) - 1); + ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); + ahd_outb(ahd, MAXCMD, 0xFF); + ahd_outb(ahd, SCBAUTOPTR, + AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); + + /* We haven't been enabled for target mode yet. */ + ahd_outb(ahd, MULTARGID, 0); + ahd_outb(ahd, MULTARGID + 1, 0); + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + /* + * Clear the spare bytes in the neg table to avoid + * spurious parity errors. + */ + for (target = 0; target < AHD_NUM_TARGETS; target++) { + + ahd_outb(ahd, NEGOADDR, target); + ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP); + for (i = 0; i < AHD_NUM_ANNEXCOLS; i++) + ahd_outb(ahd, ANNEXDAT, 0); + } + ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); + + /* + * Always enable abort on incoming L_Qs if this feature is + * supported. We use this to catch invalid SCB references. + */ + if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) + ahd_outb(ahd, LQCTL1, ABORTPENDING); + else + ahd_outb(ahd, LQCTL1, 0); + + /* All of our queues are empty */ + ahd->qoutfifonext = 0; + for (i = 0; i < AHD_QOUT_SIZE; i++) + ahd->qoutfifo[i] = SCB_LIST_NULL_LE; + ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); + + ahd->qinfifonext = 0; + for (i = 0; i < AHD_QIN_SIZE; i++) + ahd->qinfifo[i] = SCB_LIST_NULL; + + if ((ahd->features & AHD_TARGETMODE) != 0) { + /* All target command blocks start out invalid. */ + for (i = 0; i < AHD_TMODE_CMDS; i++) + ahd->targetcmds[i].cmd_valid = 0; + ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); + ahd->tqinfifonext = 1; + ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); + ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); + } + + /* Initialize Scratch Ram. */ + ahd_outb(ahd, SEQ_FLAGS, 0); + ahd_outb(ahd, SEQ_FLAGS2, 0); + + /* We don't have any waiting selections */ + ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); + for (i = 0; i < AHD_NUM_TARGETS; i++) + ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); + + /* + * Nobody is waiting to be DMAed into the QOUTFIFO. + */ + ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); + + /* + * The Freeze Count is 0. 
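
The scratch-RAM stores that follow (SHARED_DATA_ADDR, QOUTFIFO_NEXT_ADDR, NEXT_QUEUED_SCB_ADDR) all use the same byte-wise little-endian idiom. A hypothetical helper capturing it (the driver deliberately spells the four ahd_outb() calls out inline):

    static void
    ahd_out32(struct ahd_softc *ahd, u_int port, uint32_t value)
    {
            ahd_outb(ahd, port + 0, value & 0xFF);
            ahd_outb(ahd, port + 1, (value >> 8) & 0xFF);
            ahd_outb(ahd, port + 2, (value >> 16) & 0xFF);
            ahd_outb(ahd, port + 3, (value >> 24) & 0xFF);
    }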
+ */ + ahd_outw(ahd, QFREEZE_COUNT, 0); + + /* + * Tell the sequencer where it can find our arrays in memory. + */ + busaddr = ahd->shared_data_busaddr; + ahd_outb(ahd, SHARED_DATA_ADDR, busaddr & 0xFF); + ahd_outb(ahd, SHARED_DATA_ADDR + 1, (busaddr >> 8) & 0xFF); + ahd_outb(ahd, SHARED_DATA_ADDR + 2, (busaddr >> 16) & 0xFF); + ahd_outb(ahd, SHARED_DATA_ADDR + 3, (busaddr >> 24) & 0xFF); + ahd_outb(ahd, QOUTFIFO_NEXT_ADDR, busaddr & 0xFF); + ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 1, (busaddr >> 8) & 0xFF); + ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 2, (busaddr >> 16) & 0xFF); + ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 3, (busaddr >> 24) & 0xFF); + + /* + * Setup the allowed SCSI Sequences based on operational mode. + * If we are a target, we'll enable select in operations once + * we've had a lun enabled. + */ + scsiseq_template = ENAUTOATNP; + if ((ahd->flags & AHD_INITIATORROLE) != 0) + scsiseq_template |= ENRSELI; + ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); + + /* There are no busy SCBs yet. */ + for (target = 0; target < AHD_NUM_TARGETS; target++) { + int lun; + + for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) + ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); + } + + /* + * Always enable abort on incoming L_Qs if this feature is + * supported. We use this to catch invalid SCB references. + */ + if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) + ahd_outb(ahd, LQCTL1, ABORTPENDING); + else + ahd_outb(ahd, LQCTL1, 0); + + /* + * Initialize the group code to command length table. + * Vendor Unique codes are set to 0 so we only capture + * the first byte of the cdb. These can be overridden + * when target mode is enabled. + */ + ahd_outb(ahd, CMDSIZE_TABLE, 5); + ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); + ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); + ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); + ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); + ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); + ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); + ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); + + /* Tell the sequencer of our initial queue positions */ + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); + ahd->qinfifonext = 0; + ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); + ahd_set_hescb_qoff(ahd, 0); + ahd_set_snscb_qoff(ahd, 0); + ahd_set_sescb_qoff(ahd, 0); + ahd_set_sdscb_qoff(ahd, 0); + + /* + * Tell the sequencer which SCB will be the next one it receives. + */ + busaddr = ahd_le32toh(ahd->next_queued_scb->hscb->hscb_busaddr); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF); + ahd_loadseq(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); +} + +/* + * Setup default device and controller settings. + * This should only be called if our probe has + * determined that no configuration data is available. + */ +int +ahd_default_config(struct ahd_softc *ahd) +{ + int targ; + + ahd->our_id = 7; + + /* + * Allocate a tstate to house information for our + * initiator presence on the bus as well as the user + * data for any target mode initiator. + */ + if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { + printf("%s: unable to allocate ahd_tmode_tstate. 
" + "Failing attach\n", ahd_name(ahd)); + return (ENOMEM); + } + + for (targ = 0; targ < AHD_NUM_TARGETS; targ++) { + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + uint16_t target_mask; + + TAILQ_INIT(&ahd->untagged_queues[targ]); + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + targ, &tstate); + /* + * We support SPC2 and SPI4. + */ + tinfo->user.protocol_version = 4; + tinfo->user.transport_version = 4; + + target_mask = 0x01 << targ; + ahd->user_discenable |= target_mask; + tstate->discenable |= target_mask; + ahd->user_tagenable |= target_mask; +#ifdef AHD_FORCE_160 + tinfo->user.period = AHD_SYNCRATE_DT; +#else + tinfo->user.period = AHD_SYNCRATE_160; +#endif + tinfo->user.offset= ~0; + tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM + | MSG_EXT_PPR_WR_FLOW + | MSG_EXT_PPR_IU_REQ + | MSG_EXT_PPR_QAS_REQ + | MSG_EXT_PPR_DT_REQ; + + tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; + + /* + * Start out Async/Narrow/Untagged and with + * conservative protocol support. + */ + tinfo->goal.protocol_version = 2; + tinfo->goal.transport_version = 2; + tinfo->curr.protocol_version = 2; + tinfo->curr.transport_version = 2; + ahd_compile_devinfo(&devinfo, ahd->our_id, + targ, CAM_LUN_WILDCARD, + 'A', ROLE_INITIATOR); + tstate->tagenable &= ~target_mask; + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, + /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + /* + * The neg table must be initialized even if the + * new settings above are the same as those from + * when our xfer info data structures were allocated + * and initialized. + */ + ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); + } + return (0); +} + +/* + * Parse device configuration information. + */ +int +ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc) +{ + int targ; + int max_targ; + + max_targ = sc->max_targets & CFMAXTARG; + ahd->our_id = sc->brtime_id & CFSCSIID; + + /* + * Allocate a tstate to house information for our + * initiator presence on the bus as well as the user + * data for any target mode initiator. + */ + if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { + printf("%s: unable to allocate ahd_tmode_tstate. " + "Failing attach\n", ahd_name(ahd)); + return (ENOMEM); + } + + for (targ = 0; targ < max_targ; targ++) { + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_transinfo *user_tinfo; + struct ahd_tmode_tstate *tstate; + uint16_t target_mask; + + tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, + targ, &tstate); + user_tinfo = &tinfo->user; + + /* + * We support SPC2 and SPI4. + */ + tinfo->user.protocol_version = 4; + tinfo->user.transport_version = 4; + + target_mask = 0x01 << targ; + ahd->user_discenable &= ~target_mask; + tstate->discenable &= ~target_mask; + ahd->user_tagenable &= ~target_mask; + if (sc->device_flags[targ] & CFDISC) { + tstate->discenable |= target_mask; + ahd->user_discenable |= target_mask; + ahd->user_tagenable |= target_mask; + } else { + /* + * Cannot be packetized without disconnection. 
+ */ + sc->device_flags[targ] &= ~CFPACKETIZED; + } + + user_tinfo->ppr_options = 0; + user_tinfo->period = (sc->device_flags[targ] & CFXFER); + if (user_tinfo->period < CFXFER_ASYNC) { + if (user_tinfo->period <= AHD_PERIOD_10MHz) + user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ; + user_tinfo->offset = MAX_OFFSET; + } else { + user_tinfo->offset = 0; + user_tinfo->period = AHD_PERIOD_ASYNC; + } +#ifdef AHD_FORCE_160 + if (user_tinfo->period <= AHD_SYNCRATE_160) + user_tinfo->period = AHD_SYNCRATE_DT; +#endif + + if ((sc->device_flags[targ] & CFPACKETIZED) != 0) + user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM + | MSG_EXT_PPR_WR_FLOW + | MSG_EXT_PPR_IU_REQ; + + if ((sc->device_flags[targ] & CFQAS) != 0) + user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ; + + if ((sc->device_flags[targ] & CFWIDEB) != 0) + user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT; + else + user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) + printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, + user_tinfo->period, user_tinfo->offset, + user_tinfo->ppr_options); +#endif + /* + * Start out Async/Narrow/Untagged and with + * conservative protocol support. + */ + tstate->tagenable &= ~target_mask; + tinfo->goal.protocol_version = 2; + tinfo->goal.transport_version = 2; + tinfo->curr.protocol_version = 2; + tinfo->curr.transport_version = 2; + ahd_compile_devinfo(&devinfo, ahd->our_id, + targ, CAM_LUN_WILDCARD, + 'A', ROLE_INITIATOR); + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, + /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, + /*paused*/TRUE); + /* + * The neg table must be initialized even if the + * new settings above are the same as those from + * when our xfer info data structures were allocated + * and initialized. + */ + ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); + } + + ahd->flags &= ~AHD_SPCHK_ENB_A; + if (sc->bios_control & CFSPARITY) + ahd->flags |= AHD_SPCHK_ENB_A; + + ahd->flags &= ~AHD_RESET_BUS_A; + if (sc->bios_control & CFRESETB) + ahd->flags |= AHD_RESET_BUS_A; + + ahd->flags &= ~AHD_EXTENDED_TRANS_A; + if (sc->bios_control & CFEXTEND) + ahd->flags |= AHD_EXTENDED_TRANS_A; + + ahd->flags &= ~AHD_BIOS_ENABLED; + if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED) + ahd->flags |= AHD_BIOS_ENABLED; + + ahd->flags &= ~AHD_STPWLEVEL_A; + if ((sc->adapter_control & CFSTPWLEVEL) != 0) + ahd->flags |= AHD_STPWLEVEL_A; + + return (0); +} + +void +ahd_intr_enable(struct ahd_softc *ahd, int enable) +{ + u_int hcntrl; + + hcntrl = ahd_inb(ahd, HCNTRL); + hcntrl &= ~INTEN; + ahd->pause &= ~INTEN; + ahd->unpause &= ~INTEN; + if (enable) { + hcntrl |= INTEN; + ahd->pause |= INTEN; + ahd->unpause |= INTEN; + } + ahd_outb(ahd, HCNTRL, hcntrl); +} + +/* + * Ensure that the card is paused in a location + * outside of all critical sections and that all + * pending work is completed prior to returning. + * This routine should only be called from outside + * an interrupt context. 
+ */ +void +ahd_pause_and_flushwork(struct ahd_softc *ahd) +{ + int intstat; + int maxloops; + + maxloops = 1000; + ahd->flags |= AHD_ALL_INTERRUPTS; + intstat = 0; + do { + ahd_intr(ahd); + ahd_pause(ahd); + ahd_clear_critical_section(ahd); + if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) + break; + maxloops--; + } while (((intstat = ahd_inb(ahd, INTSTAT)) & INT_PEND) && --maxloops); + if (maxloops == 0) { + printf("Infinite interrupt loop, INTSTAT = %x", + ahd_inb(ahd, INTSTAT)); + } + ahd_platform_flushwork(ahd); + ahd->flags &= ~AHD_ALL_INTERRUPTS; +} + +int +ahd_suspend(struct ahd_softc *ahd) +{ +#if 0 + uint8_t *ptr; + int i; + + ahd_pause_and_flushwork(ahd); + + if (LIST_FIRST(&ahd->pending_scbs) != NULL) + return (EBUSY); + +#if AHD_TARGET_MODE + /* + * XXX What about ATIOs that have not yet been serviced? + * Perhaps we should just refuse to be suspended if we + * are acting in a target role. + */ + if (ahd->pending_device != NULL) + return (EBUSY); +#endif + + /* Save volatile registers */ + ahd->suspend_state.channel[0].scsiseq = ahd_inb(ahd, SCSISEQ0); + ahd->suspend_state.channel[0].sxfrctl0 = ahd_inb(ahd, SXFRCTL0); + ahd->suspend_state.channel[0].sxfrctl1 = ahd_inb(ahd, SXFRCTL1); + ahd->suspend_state.channel[0].simode0 = ahd_inb(ahd, SIMODE0); + ahd->suspend_state.channel[0].simode1 = ahd_inb(ahd, SIMODE1); + ahd->suspend_state.channel[0].seltimer = ahd_inb(ahd, SELTIMER); + ahd->suspend_state.channel[0].seqctl = ahd_inb(ahd, SEQCTL0); + ahd->suspend_state.dscommand0 = ahd_inb(ahd, DSCOMMAND0); + ahd->suspend_state.dspcistatus = ahd_inb(ahd, DSPCISTATUS); + + if ((ahd->features & AHD_DT) != 0) { + u_int sfunct; + + sfunct = ahd_inb(ahd, SFUNCT) & ~ALT_MODE; + ahd_outb(ahd, SFUNCT, sfunct | ALT_MODE); + ahd->suspend_state.optionmode = ahd_inb(ahd, OPTIONMODE); + ahd_outb(ahd, SFUNCT, sfunct); + ahd->suspend_state.crccontrol1 = ahd_inb(ahd, CRCCONTROL1); + } + + if ((ahd->features & AHD_MULTI_FUNC) != 0) + ahd->suspend_state.scbbaddr = ahd_inb(ahd, SCBBADDR); + + if ((ahd->features & AHD_ULTRA2) != 0) + ahd->suspend_state.dff_thrsh = ahd_inb(ahd, DFF_THRSH); + + ptr = ahd->suspend_state.scratch_ram; + for (i = 0; i < 64; i++) + *ptr++ = ahd_inb(ahd, SRAM_BASE + i); + + if ((ahd->features & AHD_MORE_SRAM) != 0) { + for (i = 0; i < 16; i++) + *ptr++ = ahd_inb(ahd, TARG_OFFSET + i); + } + + ptr = ahd->suspend_state.btt; + for (i = 0;i < AHD_NUM_TARGETS; i++) { + int j; + + for (j = 0;j < AHD_NUM_LUNS; j++) { + u_int tcl; + + tcl = BUILD_TCL(i << 4, j); + *ptr = ahd_find_busy_tcl(ahd, tcl); + } + } + ahd_shutdown(ahd); +#endif + return (0); +} + +int +ahd_resume(struct ahd_softc *ahd) +{ +#if 0 + uint8_t *ptr; + int i; + + ahd_reset(ahd); + + ahd_build_free_scb_list(ahd); + + /* Restore volatile registers */ + ahd_outb(ahd, SCSISEQ0, ahd->suspend_state.channel[0].scsiseq); + ahd_outb(ahd, SXFRCTL0, ahd->suspend_state.channel[0].sxfrctl0); + ahd_outb(ahd, SXFRCTL1, ahd->suspend_state.channel[0].sxfrctl1); + ahd_outb(ahd, SIMODE0, ahd->suspend_state.channel[0].simode0); + ahd_outb(ahd, SIMODE1, ahd->suspend_state.channel[0].simode1); + ahd_outb(ahd, SELTIMER, ahd->suspend_state.channel[0].seltimer); + ahd_outb(ahd, SEQCTL0, ahd->suspend_state.channel[0].seqctl); + if ((ahd->features & AHD_ULTRA2) != 0) + ahd_outb(ahd, SCSIID_ULTRA2, ahd->our_id); + else + ahd_outb(ahd, SCSIID, ahd->our_id); + + ahd_outb(ahd, DSCOMMAND0, ahd->suspend_state.dscommand0); + ahd_outb(ahd, DSPCISTATUS, ahd->suspend_state.dspcistatus); + + if ((ahd->features & AHD_DT) != 0) { + u_int sfunct; 
+
+		sfunct = ahd_inb(ahd, SFUNCT) & ~ALT_MODE;
+		ahd_outb(ahd, SFUNCT, sfunct | ALT_MODE);
+		ahd_outb(ahd, OPTIONMODE, ahd->suspend_state.optionmode);
+		ahd_outb(ahd, SFUNCT, sfunct);
+		ahd_outb(ahd, CRCCONTROL1, ahd->suspend_state.crccontrol1);
+	}
+
+	if ((ahd->features & AHD_MULTI_FUNC) != 0)
+		ahd_outb(ahd, SCBBADDR, ahd->suspend_state.scbbaddr);
+
+	if ((ahd->features & AHD_ULTRA2) != 0)
+		ahd_outb(ahd, DFF_THRSH, ahd->suspend_state.dff_thrsh);
+
+	ptr = ahd->suspend_state.scratch_ram;
+	for (i = 0; i < 64; i++)
+		ahd_outb(ahd, SRAM_BASE + i, *ptr++);
+
+	if ((ahd->features & AHD_MORE_SRAM) != 0) {
+		for (i = 0; i < 16; i++)
+			ahd_outb(ahd, TARG_OFFSET + i, *ptr++);
+	}
+
+	ptr = ahd->suspend_state.btt;
+	for (i = 0; i < AHD_NUM_TARGETS; i++) {
+		int j;
+
+		for (j = 0; j < AHD_NUM_LUNS; j++) {
+			u_int tcl;
+
+			tcl = BUILD_TCL(i << 4, j);
+			ahd_busy_tcl(ahd, tcl, *ptr);
+		}
+	}
+#endif
+	return (0);
+}
+
+/************************** Busy Target Table *********************************/
+/*
+ * Set SCBPTR to the SCB that contains the busy
+ * table entry for TCL.  Return the offset into
+ * the SCB that contains the entry for TCL.
+ * saved_scbid is dereferenced and set to the
+ * scbid that should be restored once manipulation
+ * of the TCL entry is complete.
+ */
+static __inline u_int
+ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
+{
+
+	*saved_scbid = ahd_get_scbptr(ahd);
+
+	/*
+	 * Index to the SCB that contains the busy entry.
+	 */
+	ahd_set_scbptr(ahd, TCL_LUN(tcl)
+		       | ((TCL_TARGET_OFFSET(tcl) & ~0x7) << 5));
+
+	/*
+	 * And now calculate the SCB offset to the entry.
+	 * Each entry is 2 bytes wide, hence the
+	 * multiplication by 2.
+	 */
+	return (((TCL_TARGET_OFFSET(tcl) & 0x7) << 1) + SCB_DISCONNECTED_LISTS);
+}
+
+/*
+ * Return the untagged transaction id for a given target/channel lun.
+ * Optionally, clear the entry.
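
As a worked example of the indexing in ahd_index_busy_tcl() above (assuming TCL_LUN() and TCL_TARGET_OFFSET() simply extract the lun and target fields): for target 10, lun 3, the busy entry lives in the SCB selected by 3 | ((10 & ~7) << 5) = 0x103, at byte offset SCB_DISCONNECTED_LISTS + ((10 & 7) << 1) = SCB_DISCONNECTED_LISTS + 4, since 8 targets share one SCB at two bytes per entry.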
+ */ +u_int +ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl) +{ + u_int scbid; + u_int scb_offset; + u_int saved_scbptr; + + scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); + scbid = ahd_inw_scbram(ahd, scb_offset); + ahd_set_scbptr(ahd, saved_scbptr); + return (scbid); +} + +void +ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid) +{ + u_int scb_offset; + u_int saved_scbptr; + + scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); + ahd_outw(ahd, scb_offset, scbid); + ahd_set_scbptr(ahd, saved_scbptr); +} + +void +ahd_set_disconnected_list(struct ahd_softc *ahd, u_int target, + u_int lun, u_int scbid) +{ + u_int saved_scbptr; + + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + saved_scbptr = ahd_get_scbptr(ahd); + ahd_set_scbptr(ahd, lun | ((target & 0x8) << 5)); + ahd_outw(ahd, SCB_DISCONNECTED_LISTS + ((target & 0x7) << 1), scbid); + ahd_set_scbptr(ahd, saved_scbptr); +} + +/************************** SCB and SCB queue management **********************/ +int +ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, + char channel, int lun, u_int tag, role_t role) +{ + int targ = SCB_GET_TARGET(ahd, scb); + char chan = SCB_GET_CHANNEL(ahd, scb); + int slun = SCB_GET_LUN(scb); + int match; + + match = ((chan == channel) || (channel == ALL_CHANNELS)); + if (match != 0) + match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); + if (match != 0) + match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); + if (match != 0) { +#if AHD_TARGET_MODE + int group; + + group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); + if (role == ROLE_INITIATOR) { + match = (group != XPT_FC_GROUP_TMODE) + && ((tag == SCB_GET_TAG(scb)) + || (tag == SCB_LIST_NULL)); + } else if (role == ROLE_TARGET) { + match = (group == XPT_FC_GROUP_TMODE) + && ((tag == scb->io_ctx->csio.tag_id) + || (tag == SCB_LIST_NULL)); + } +#else /* !AHD_TARGET_MODE */ + match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); +#endif /* AHD_TARGET_MODE */ + } + + return match; +} + +void +ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb) +{ + int target; + char channel; + int lun; + + target = SCB_GET_TARGET(ahd, scb); + lun = SCB_GET_LUN(scb); + channel = SCB_GET_CHANNEL(ahd, scb); + + ahd_search_qinfifo(ahd, target, channel, lun, + /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, + CAM_REQUEUE_REQ, SEARCH_COMPLETE); + + ahd_platform_freeze_devq(ahd, scb); +} + +void +ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb) +{ + struct scb *prev_scb; + + prev_scb = NULL; + if (ahd_qinfifo_count(ahd) != 0) { + u_int prev_tag; + u_int prev_pos; + + prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1); + prev_tag = ahd->qinfifo[prev_pos]; + prev_scb = ahd_lookup_scb(ahd, prev_tag); + } + ahd_qinfifo_requeue(ahd, prev_scb, scb); + ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); +} + +static void +ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, + struct scb *scb) +{ + if (prev_scb == NULL) { + uint32_t busaddr; + + busaddr = ahd_le32toh(scb->hscb->hscb_busaddr); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF); + } else { + prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; + ahd_sync_scb(ahd, prev_scb, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + } + ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = 
SCB_GET_TAG(scb); + ahd->qinfifonext++; + scb->hscb->next_hscb_busaddr = ahd->next_queued_scb->hscb->hscb_busaddr; + ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); +} + +static int +ahd_qinfifo_count(struct ahd_softc *ahd) +{ + u_int qinpos; + u_int wrap_qinpos; + u_int wrap_qinfifonext; + + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + qinpos = ahd_get_snscb_qoff(ahd); + wrap_qinpos = AHD_QIN_WRAP(qinpos); + wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext); + if (wrap_qinfifonext > wrap_qinpos) + return (wrap_qinfifonext - wrap_qinpos); + else + return (wrap_qinfifonext + + NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos); +} + +int +ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status, + ahd_search_action action) +{ + struct scb *scb; + struct scb *prev_scb; + ahd_mode_state saved_modes; + u_int qinstart; + u_int qinpos; + u_int qintail; + u_int tid_next; + u_int tid_prev; + u_int scbid; + u_int savedscbptr; + uint32_t busaddr; + int found; + int targets; + + /* Must be in CCHAN mode */ + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); + + /* + * Halt any pending SCB DMA. The sequencer will reinitiate + * this dma if the qinfifo is not empty once we unpause. + */ + if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR)) + == (CCARREN|CCSCBEN|CCSCBDIR)) { + ahd_outb(ahd, CCSCBCTL, + ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN)); + while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) + ; + } + /* Determine sequencer's position in the qinfifo. */ + qintail = AHD_QIN_WRAP(ahd->qinfifonext); + qinstart = ahd_get_snscb_qoff(ahd); + qinpos = AHD_QIN_WRAP(qinstart); + found = 0; + prev_scb = NULL; + + if (action == SEARCH_COMPLETE) { + /* + * Don't attempt to run any queued untagged transactions + * until we are done with the abort process. + */ + ahd_freeze_untagged_queues(ahd); + } + + if (action == SEARCH_PRINT) { + printf("qinstart = %d qinfifonext = %d\nQINFIFO:", + qinstart, ahd->qinfifonext); + } + + /* + * Start with an empty queue. Entries that are not chosen + * for removal will be re-added to the queue as we go. + */ + ahd->qinfifonext = qinstart; + busaddr = ahd_le32toh(ahd->next_queued_scb->hscb->hscb_busaddr); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF); + ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF); + + while (qinpos != qintail) { + scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); + if (scb == NULL) { + printf("qinpos = %d, SCB index = %d\n", + qinpos, ahd->qinfifo[qinpos]); + panic("Loop 1\n"); + } + + if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) { + /* + * We found an scb that needs to be acted on. 
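+ *
+ * What happens to it depends on the action: SEARCH_COMPLETE
+ * finishes the command with the caller's status and drops it
+ * from the rebuilt queue, SEARCH_REMOVE drops it silently,
+ * while SEARCH_PRINT and SEARCH_COUNT re-queue it untouched.
+ * A caller-side sketch (hypothetical arguments, shown only for
+ * illustration):
+ *
+ *    found = ahd_search_qinfifo(ahd, 4, 'A', 0, SCB_LIST_NULL,
+ *        ROLE_INITIATOR, CAM_REQ_ABORTED, SEARCH_COMPLETE);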
+ */ + found++; + switch (action) { + case SEARCH_COMPLETE: + { + cam_status ostat; + cam_status cstat; + + ostat = ahd_get_transaction_status(scb); + if (ostat == CAM_REQ_INPROG) + ahd_set_transaction_status(scb, + status); + cstat = ahd_get_transaction_status(scb); + if (cstat != CAM_REQ_CMP) + ahd_freeze_scb(scb); + if ((scb->flags & SCB_ACTIVE) == 0) + printf("Inactive SCB in qinfifo\n"); + ahd_done(ahd, scb); + + /* FALLTHROUGH */ + } + case SEARCH_REMOVE: + break; + case SEARCH_PRINT: + printf(" 0x%x", ahd->qinfifo[qinpos]); + /* FALLTHROUGH */ + case SEARCH_COUNT: + ahd_qinfifo_requeue(ahd, prev_scb, scb); + prev_scb = scb; + break; + } + } else { + ahd_qinfifo_requeue(ahd, prev_scb, scb); + prev_scb = scb; + } + qinpos = AHD_QIN_WRAP(qinpos+1); + } + + ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); + + if (action == SEARCH_PRINT) + printf("\nWAITING_TID_QUEUES:\n"); + + /* + * Search waiting for selection lists. We traverse the + * list of "their ids" waiting for selection and, if + * appropriate, traverse the SCBs of each "their id" + * looking for matches. + */ + savedscbptr = ahd_get_scbptr(ahd); + tid_next = ahd_inw(ahd, WAITING_TID_HEAD); + tid_prev = SCB_LIST_NULL; + targets = 0; + for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) { + u_int tid_head; + + /* + * We limit based on the number of SCBs since + * MK_MESSAGE SCBs are not in the per-tid lists. + */ + targets++; + if (targets > AHD_SCB_MAX) { + panic("TID LIST LOOP"); + } + if (scbid >= ahd->scb_data.numscbs) { + printf("%s: Waiting TID List inconsistency. " + "SCB index == 0x%x, yet numscbs == 0x%x.", + ahd_name(ahd), scbid, ahd->scb_data.numscbs); + ahd_dump_card_state(ahd); + panic("for safety"); + } + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printf("%s: SCB = 0x%x Not Active!\n", + ahd_name(ahd), scbid); + panic("Waiting TID List traversal\n"); + } + ahd_set_scbptr(ahd, scbid); + tid_next = ahd_inw_scbram(ahd, SCB_NEXT2); + if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, + SCB_LIST_NULL, ROLE_UNKNOWN) == 0) { + tid_prev = scbid; + continue; + } + + /* + * We found a list of scbs that needs to be searched. + */ + if (action == SEARCH_PRINT) + printf(" %d ( ", SCB_GET_TARGET(ahd, scb)); + tid_head = scbid; + found += ahd_search_scb_list(ahd, target, channel, + lun, tag, role, status, + action, &tid_head, + SCB_GET_TARGET(ahd, scb)); + if (tid_head != scbid) + ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next); + if (!SCBID_IS_NULL(tid_head)) + tid_prev = tid_head; + if (action == SEARCH_PRINT) + printf(")\n"); + } + ahd_set_scbptr(ahd, savedscbptr); + + if (action == SEARCH_COMPLETE) + ahd_release_untagged_queues(ahd); + ahd_restore_modes(ahd, saved_modes); + return (found); +} + +static int +ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status, + ahd_search_action action, u_int *list_head, u_int tid) +{ + struct scb *scb; + u_int scbid; + u_int next; + u_int prev; + int found; + + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + found = 0; + prev = SCB_LIST_NULL; + next = *list_head; + for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { + if (scbid >= ahd->scb_data.numscbs) { + printf("%s:SCB List inconsistency. 
" + "SCB == 0x%x, yet numscbs == 0x%x.", + ahd_name(ahd), scbid, ahd->scb_data.numscbs); + ahd_dump_card_state(ahd); + panic("for safety"); + } + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printf("%s: SCB = %d Not Active!\n", + ahd_name(ahd), scbid); + panic("Waiting List traversal\n"); + } + ahd_set_scbptr(ahd, scbid); + next = ahd_inw_scbram(ahd, SCB_NEXT); + if (ahd_match_scb(ahd, scb, target, channel, + lun, SCB_LIST_NULL, role) == 0) { + prev = scbid; + continue; + } + found++; + switch (action) { + case SEARCH_COMPLETE: + { + cam_status ostat; + cam_status cstat; + + ostat = ahd_get_transaction_status(scb); + if (ostat == CAM_REQ_INPROG) + ahd_set_transaction_status(scb, status); + cstat = ahd_get_transaction_status(scb); + if (cstat != CAM_REQ_CMP) + ahd_freeze_scb(scb); + if ((scb->flags & SCB_ACTIVE) == 0) + printf("Inactive SCB in Waiting List\n"); + ahd_done(ahd, scb); + /* FALLTHROUGH */ + } + case SEARCH_REMOVE: + ahd_rem_wscb(ahd, scbid, prev, next, tid); + if (prev == SCB_LIST_NULL) + *list_head = next; + break; + case SEARCH_PRINT: + printf("0x%x ", scbid); + case SEARCH_COUNT: + prev = scbid; + break; + } + if (found > AHD_SCB_MAX) + panic("SCB LIST LOOP"); + } + return (found); +} + +static void +ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, + u_int tid_cur, u_int tid_next) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + + if (SCBID_IS_NULL(tid_cur)) { + + /* Bypass current TID list */ + if (SCBID_IS_NULL(tid_prev)) { + ahd_outw(ahd, WAITING_TID_HEAD, tid_next); + } else { + ahd_set_scbptr(ahd, tid_prev); + ahd_outw(ahd, SCB_NEXT2, tid_next); + } + if (SCBID_IS_NULL(tid_next)) + ahd_outw(ahd, WAITING_TID_TAIL, tid_prev); + } else { + + /* Stitch through tid_cur */ + if (SCBID_IS_NULL(tid_prev)) { + ahd_outw(ahd, WAITING_TID_HEAD, tid_cur); + } else { + ahd_set_scbptr(ahd, tid_prev); + ahd_outw(ahd, SCB_NEXT2, tid_cur); + } + ahd_set_scbptr(ahd, tid_cur); + ahd_outw(ahd, SCB_NEXT2, tid_next); + + if (SCBID_IS_NULL(tid_next)) + ahd_outw(ahd, WAITING_TID_TAIL, tid_cur); + } +} + +/* + * Manipulate the waiting for selection list and return the + * scb that follows the one that we remove. + */ +static u_int +ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, + u_int prev, u_int next, u_int tid) +{ + u_int tail_offset; + + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + if (!SCBID_IS_NULL(prev)) { + ahd_set_scbptr(ahd, prev); + ahd_outw(ahd, SCB_NEXT, next); + } + + /* + * SCBs that had MK_MESSAGE set in them will not + * be queued to the per-target lists, so don't + * blindly clear the tail pointer. + */ + tail_offset = WAITING_SCB_TAILS + (2 * tid); + if (SCBID_IS_NULL(next) + && ahd_inw(ahd, tail_offset) == scbid) + ahd_outw(ahd, tail_offset, prev); + ahd_add_scb_to_free_list(ahd, scbid); + return (next); +} + +/* + * Add the SCB as selected by SCBPTR onto the on chip list of + * free hardware SCBs. This list is empty/unused if we are not + * performing SCB paging. + */ +static void +ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid) +{ +/* XXX Need some other mechanism to designate "free". */ + /* + * Invalidate the tag so that our abort + * routines don't think it's active. + ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL); + */ +} + +/******************************** Error Handling ******************************/ +/* + * Abort all SCBs that match the given description (target/channel/lun/tag), + * setting their status to the passed in status if the status has not already + * been modified from CAM_REQ_INPROG. 
This routine assumes that the sequencer + * is paused before it is called. + */ +int +ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, + int lun, u_int tag, role_t role, uint32_t status) +{ + struct scb *scbp; + struct scb *scbp_next; + u_int active_scb; + u_int i, j; + u_int maxtarget; + u_int minlun; + u_int maxlun; + + int found; + + /* + * Don't attempt to run any queued untagged transactions + * until we are done with the abort process. + */ + ahd_freeze_untagged_queues(ahd); + + /* restore this when we're done */ + active_scb = ahd_get_scbptr(ahd); + + found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, + role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); + + /* + * Clean out the busy target table for any untagged commands. + */ + i = 0; + maxtarget = 16; + if (target != CAM_TARGET_WILDCARD) { + i = target; + if (channel == 'B') + i += 8; + maxtarget = i + 1; + } + + if (lun == CAM_LUN_WILDCARD) { + minlun = 0; + maxlun = AHD_NUM_LUNS_NONPKT; + } else if (lun >= AHD_NUM_LUNS_NONPKT) { + minlun = maxlun = 0; + } else { + minlun = lun; + maxlun = lun + 1; + } + + if (role != ROLE_TARGET) { + for (;i < maxtarget; i++) { + for (j = minlun;j < maxlun; j++) { + u_int scbid; + u_int tcl; + + tcl = BUILD_TCL(i << 4, j); + scbid = ahd_find_busy_tcl(ahd, tcl); + scbp = ahd_lookup_scb(ahd, scbid); + if (scbp == NULL + || ahd_match_scb(ahd, scbp, target, channel, + lun, tag, role) == 0) + continue; + ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j)); + } + } + } + + /* + * Go through the pending CCB list and look for + * commands for this target that are still active. + * These are other tagged commands that were + * disconnected when the reset occurred. + */ + scbp_next = LIST_FIRST(&ahd->pending_scbs); + while (scbp_next != NULL) { + scbp = scbp_next; + scbp_next = LIST_NEXT(scbp, pending_links); + if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) { + cam_status ostat; + + ostat = ahd_get_transaction_status(scbp); + if (ostat == CAM_REQ_INPROG) + ahd_set_transaction_status(scbp, status); + if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP) + ahd_freeze_scb(scbp); + if ((scbp->flags & SCB_ACTIVE) == 0) + printf("Inactive SCB on pending list\n"); + ahd_done(ahd, scbp); + found++; + } + } + ahd_set_scbptr(ahd, active_scb); + ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status); + ahd_release_untagged_queues(ahd); + return found; +} + +static void +ahd_reset_current_bus(struct ahd_softc *ahd) +{ + uint8_t scsiseq; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST); + scsiseq = ahd_inb(ahd, SCSISEQ0); + ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO); + ahd_delay(AHD_BUSRESET_DELAY); + /* Turn off the bus reset */ + ahd_outb(ahd, SCSISEQ0, scsiseq & ~SCSIRSTO); + if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) { + /* + * 2A Razor #474 + * Certain chip state is not cleared for + * SCSI bus resets that we initiate, so + * we must reset the chip. 
+ */
+ ahd_reset(ahd);
+ ahd_intr_enable(ahd, /*enable*/TRUE);
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ }
+
+ ahd_clear_intstat(ahd);
+
+ /* Re-enable reset interrupts */
+ ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
+}
+
+int
+ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
+{
+ struct ahd_devinfo devinfo;
+#ifdef AHD_TARGET_MODE
+ u_int target;
+ u_int max_scsiid;
+#endif
+ int found;
+
+ ahd->pending_device = NULL;
+
+ ahd_compile_devinfo(&devinfo,
+ CAM_TARGET_WILDCARD,
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD,
+ channel, ROLE_UNKNOWN);
+ ahd_pause(ahd);
+
+ /* Make sure the sequencer is in a safe location. */
+ ahd_clear_critical_section(ahd);
+
+ /*
+ * Run our command complete fifos to ensure that we perform
+ * completion processing on any commands that 'completed'
+ * before the reset occurred.
+ */
+ ahd_run_qoutfifo(ahd);
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ ahd_run_tqinfifo(ahd, /*paused*/TRUE);
+ }
+#endif
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+ /*
+ * Reset the bus if we are initiating this reset
+ */
+ ahd_clear_msg_state(ahd);
+ ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));
+ if (initiate_reset)
+ ahd_reset_current_bus(ahd);
+ ahd_clear_intstat(ahd);
+
+ /*
+ * Clean up all the state information for the
+ * pending transactions on this bus.
+ */
+ found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
+ CAM_LUN_WILDCARD, SCB_LIST_NULL,
+ ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
+
+#ifdef AHD_TARGET_MODE
+ max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
+
+ /*
+ * Send an immediate notify ccb to all target mode peripheral
+ * drivers affected by this action.
+ */
+ for (target = 0; target <= max_scsiid; target++) {
+ struct ahd_tmode_tstate* tstate;
+ u_int lun;
+
+ tstate = ahd->enabled_targets[target];
+ if (tstate == NULL)
+ continue;
+ for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
+ struct ahd_tmode_lstate* lstate;
+
+ lstate = tstate->enabled_luns[lun];
+ if (lstate == NULL)
+ continue;
+
+ ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
+ EVENT_TYPE_BUS_RESET, /*arg*/0);
+ ahd_send_lstate_events(ahd, lstate);
+ }
+ }
+#endif
+ /* Notify the XPT that a bus reset occurred */
+ ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
+
+ /*
+ * Freeze the SIMQ until our poller can determine that
+ * the bus reset has really gone away. We set the initial
+ * timer to 0 to have the check performed as soon as possible
+ * from the timer context.
+ */
+ ahd_freeze_simq(ahd);
+ ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
+ return (found);
+}
+
+
+#define AHD_RESET_POLL_US 1000
+static void
+ahd_reset_poll(void *arg)
+{
+ struct ahd_softc *ahd;
+ u_int scsiseq1;
+ u_int initiator, target, max_scsiid;
+ u_long l;
+ u_long s;
+
+ ahd_list_lock(&l);
+ ahd = ahd_find_softc((struct ahd_softc *)arg);
+ if (ahd == NULL) {
+ printf("ahd_reset_poll: Instance %p no longer exists\n", arg);
+ ahd_list_unlock(&l);
+ return;
+ }
+ ahd_lock(ahd, &s);
+ if (ahd_is_paused(ahd) == 0)
+ panic("Someone unpaused the sequencer!\n");
+
+ ahd_clear_intstat(ahd);
+ if ((ahd_inb(ahd, SSTAT0) & SCSIRSTI) != 0) {
+ /* Reset is still asserted.  Drop our locks and poll again. */
+ ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US,
+ ahd_reset_poll, ahd);
+ ahd_unlock(ahd, &s);
+ ahd_list_unlock(&l);
+ return;
+ }
+
+ /* Reset is now low. Complete chip reinitialization.
*/ + ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST); + scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); + ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP)); + + /* + * Revert to async/narrow transfers until we renegotiate. + */ + max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; + for (target = 0; target <= max_scsiid; target++) { + + if (ahd->enabled_targets[target] == NULL) + continue; + for (initiator = 0; initiator <= max_scsiid; initiator++) { + struct ahd_devinfo devinfo; + + ahd_compile_devinfo(&devinfo, target, initiator, + CAM_LUN_WILDCARD, + 'A', ROLE_UNKNOWN); + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_CUR, /*paused*/TRUE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, + /*offset*/0, /*ppr_options*/0, + AHD_TRANS_CUR, /*paused*/TRUE); + } + } + + ahd_clear_fifo(ahd, 0); + ahd_clear_fifo(ahd, 1); + ahd_restart(ahd); + ahd_unlock(ahd, &s); + ahd_release_simq(ahd); + ahd_list_unlock(&l); +} + + +/****************************** Status Processing *****************************/ +void +ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) +{ + if (scb->hscb->shared_data.istatus.scsi_status != 0) { + ahd_handle_scsi_status(ahd, scb); + } else { + ahd_calc_residual(ahd, scb); + ahd_done(ahd, scb); + } +} + +void +ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) +{ + struct hardware_scb *hscb; + u_int qfreeze_cnt; + ahd_mode_state saved_modes; + + /* + * The sequencer freezes its select-out queue + * anytime a SCSI status error occurs. We must + * handle the error and decrement the QFREEZE count + * to allow the sequencer to continue. + */ + hscb = scb->hscb; + + /* Don't want to clobber the original sense code */ + if ((scb->flags & SCB_SENSE) != 0) { + /* + * Clear the SCB_SENSE Flag and perform + * a normal command completion. + */ + scb->flags &= ~SCB_SENSE; + ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); + ahd_done(ahd, scb); + return; + } + ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); + /* Freeze the queue until the client sees the error. 
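+ *
+ * (E.g. if two commands returned bad status back to back, the
+ * sequencer will have bumped QFREEZE_COUNT twice; the decrement
+ * below takes it 2 -> 1, SELECTOUT_QFROZEN stays set, and only
+ * the second pass through this handler drops the count to 0 and
+ * clears the flag.)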
*/
+ ahd_pause(ahd);
+ saved_modes = ahd_save_modes(ahd);
+ ahd_clear_critical_section(ahd);
+ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+ ahd_freeze_devq(ahd, scb);
+ ahd_freeze_scb(scb);
+ qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT);
+ if (qfreeze_cnt == 0) {
+ printf("%s: Bad status with 0 qfreeze count!\n", ahd_name(ahd));
+ } else {
+ qfreeze_cnt--;
+ ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt);
+ }
+ if (qfreeze_cnt == 0)
+ ahd_outb(ahd, SEQ_FLAGS2,
+ ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN);
+ ahd_unpause(ahd);
+ ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
+ switch (hscb->shared_data.istatus.scsi_status) {
+ case STATUS_PKT_SENSE:
+ {
+ struct scsi_status_iu_header *siu;
+
+ ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
+ siu = (struct scsi_status_iu_header *)scb->sense_data;
+ ahd_set_scsi_status(scb, siu->status);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
+ ahd_print_path(ahd, scb);
+ printf("SCB 0x%x Received PKT Status of 0x%x\n",
+ SCB_GET_TAG(scb), siu->status);
+ printf("\tflags = 0x%x, sense len = 0x%x, "
+ "pktfail = 0x%x\n",
+ siu->flags, scsi_4btoul(siu->sense_length),
+ scsi_4btoul(siu->pkt_failures_length));
+ }
+#endif
+ if ((siu->flags & SIU_RSPVALID) != 0) {
+ ahd_print_path(ahd, scb);
+ if (scsi_4btoul(siu->pkt_failures_length) < 4) {
+ printf("Unable to parse pkt_failures\n");
+ } else {
+ switch (SIU_PKTFAIL_CODE(siu)) {
+ case SIU_PFC_NONE:
+ printf("No packet failure found\n");
+ break;
+ case SIU_PFC_CIU_FIELDS_INVALID:
+ printf("Invalid Command IU Field\n");
+ break;
+ case SIU_PFC_TMF_NOT_SUPPORTED:
+ printf("TMF not supported\n");
+ break;
+ case SIU_PFC_TMF_FAILED:
+ printf("TMF failed\n");
+ break;
+ case SIU_PFC_INVALID_TYPE_CODE:
+ printf("Invalid L_Q Type code\n");
+ break;
+ case SIU_PFC_ILLEGAL_REQUEST:
+ printf("Illegal request\n");
+ break;
+ default:
+ break;
+ }
+ }
+ if (siu->status == SCSI_STATUS_OK)
+ ahd_set_transaction_status(scb,
+ CAM_REQ_CMP_ERR);
+ }
+ if ((siu->flags & SIU_SNSVALID) != 0) {
+ scb->flags |= SCB_PKT_SENSE;
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_SENSE) != 0)
+ printf("Sense data available\n");
+#endif
+ }
+ ahd_done(ahd, scb);
+ break;
+ }
+ case SCSI_STATUS_CMD_TERMINATED:
+ case SCSI_STATUS_CHECK_COND:
+ {
+ struct ahd_devinfo devinfo;
+ struct ahd_dma_seg *sg;
+ struct scsi_sense *sc;
+ struct ahd_initiator_tinfo *targ_info;
+ struct ahd_tmode_tstate *tstate;
+ struct ahd_transinfo *tinfo;
+#ifdef AHD_DEBUG
+ if (ahd_debug & AHD_SHOW_SENSE) {
+ ahd_print_path(ahd, scb);
+ printf("SCB %d: requests Check Status\n",
+ SCB_GET_TAG(scb));
+ }
+#endif
+
+ if (ahd_perform_autosense(scb) == 0)
+ break;
+
+ ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
+ SCB_GET_TARGET(ahd, scb),
+ SCB_GET_LUN(scb),
+ SCB_GET_CHANNEL(ahd, scb),
+ ROLE_INITIATOR);
+ targ_info = ahd_fetch_transinfo(ahd,
+ devinfo.channel,
+ devinfo.our_scsiid,
+ devinfo.target,
+ &tstate);
+ tinfo = &targ_info->curr;
+ sg = scb->sg_list;
+ sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
+ /*
+ * Save off the residual if there is one.
+ */
+ ahd_update_residual(ahd, scb);
+#ifdef AHD_DEBUG
+ if (ahd_debug & AHD_SHOW_SENSE) {
+ ahd_print_path(ahd, scb);
+ printf("Sending Sense\n");
+ }
+#endif
+ scb->sg_count = 0;
+ sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
+ ahd_get_sense_bufsize(ahd, scb),
+ /*last*/TRUE);
+ sc->opcode = REQUEST_SENSE;
+ sc->byte2 = 0;
+ if (tinfo->protocol_version <= SCSI_REV_2
+ && SCB_GET_LUN(scb) < 8)
+ sc->byte2 = SCB_GET_LUN(scb) << 5;
+ sc->unused[0] = 0;
+ sc->unused[1] = 0;
+ sc->length = ahd_get_sense_bufsize(ahd, scb);
+ sc->control = 0;
+
+ /*
+ * We can't allow the target to disconnect.
+ * This will be an untagged transaction and
+ * having the target disconnect will make this
+ * transaction indistinguishable from outstanding
+ * tagged transactions.
+ */
+ hscb->control = 0;
+
+ /*
+ * This request sense could be because the
+ * device lost power or in some other
+ * way has lost our transfer negotiations.
+ * Renegotiate if appropriate. Unit attention
+ * errors will be reported before any data
+ * phases occur.
+ */
+ if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
+ ahd_update_neg_request(ahd, &devinfo,
+ tstate, targ_info,
+ /*force*/TRUE);
+ }
+ if (tstate->auto_negotiate & devinfo.target_mask) {
+ hscb->control |= MK_MESSAGE;
+ scb->flags &=
+ ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
+ scb->flags |= SCB_AUTO_NEGOTIATE;
+ }
+ hscb->cdb_len = sizeof(*sc);
+ ahd_setup_data_scb(ahd, scb);
+ scb->flags |= SCB_SENSE;
+ ahd_queue_scb(ahd, scb);
+#ifdef __FreeBSD__
+ /*
+ * Ensure we have enough time to actually
+ * retrieve the sense.
+ */
+ untimeout(ahd_timeout, (caddr_t)scb,
+ scb->io_ctx->ccb_h.timeout_ch);
+ scb->io_ctx->ccb_h.timeout_ch =
+ timeout(ahd_timeout, (caddr_t)scb, 5 * hz);
+#endif
+ break;
+ }
+ case SCSI_STATUS_OK:
+ printf("%s: Interrupted for status of 0???\n",
+ ahd_name(ahd));
+ /* FALLTHROUGH */
+ default:
+ ahd_done(ahd, scb);
+ break;
+ }
+}
+
+/*
+ * Calculate the residual for a just completed SCB.
+ */
+void
+ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct hardware_scb *hscb;
+ struct initiator_status *spkt;
+ uint32_t sgptr;
+ uint32_t resid_sgptr;
+ uint32_t resid;
+
+ /*
+ * 5 cases.
+ * 1) No residual.
+ * SG_STATUS_VALID clear in sgptr.
+ * 2) Transferless command
+ * 3) Never performed any transfers.
+ * sgptr has SG_FULL_RESID set.
+ * 4) No residual but target did not
+ * save data pointers after the
+ * last transfer, so sgptr was
+ * never updated.
+ * 5) We have a partial residual.
+ * Use residual_sgptr to determine
+ * where we are.
+ */
+
+ hscb = scb->hscb;
+ sgptr = ahd_le32toh(hscb->sgptr);
+ if ((sgptr & SG_STATUS_VALID) == 0)
+ /* Case 1 */
+ return;
+ sgptr &= ~SG_STATUS_VALID;
+
+ if ((sgptr & SG_LIST_NULL) != 0)
+ /* Case 2 */
+ return;
+
+ /*
+ * Residual fields are the same in both
+ * target and initiator status packets,
+ * so we can always use the initiator fields
+ * regardless of the role for this SCB.
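+ *
+ * Worked example for case 5 (hypothetical numbers): a 64KB
+ * transfer built from four 16KB segments that stopped with
+ * 0x1000 bytes of segment 3 unsent yields
+ *
+ *    resid = 0x1000 (from residual_datacnt)
+ *          + 0x4000 (segment 4, added by the loop below)
+ *          = 0x5000 bytes.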
+ */ + spkt = &hscb->shared_data.istatus; + resid_sgptr = ahd_le32toh(spkt->residual_sgptr); + if ((sgptr & SG_FULL_RESID) != 0) { + /* Case 3 */ + resid = ahd_get_transfer_length(scb); + } else if ((resid_sgptr & SG_LIST_NULL) != 0) { + /* Case 4 */ + return; + } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { + ahd_print_path(ahd, scb); + printf("data overrun detected Tag == 0x%x.\n", + SCB_GET_TAG(scb)); + ahd_freeze_devq(ahd, scb); + ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); + ahd_freeze_scb(scb); + return; + } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { + panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); + /* NOTREACHED */ + } else { + struct ahd_dma_seg *sg; + + /* + * Remainder of the SG where the transfer + * stopped. + */ + resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; + sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); + + /* The residual sg_ptr always points to the next sg */ + sg--; + + /* + * Add up the contents of all residual + * SG segments that are after the SG where + * the transfer stopped. + */ + while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { + sg++; + resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + } + } + if ((scb->flags & SCB_SENSE) == 0) + ahd_set_residual(scb, resid); + else + ahd_set_sense_residual(scb, resid); + +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MISC) != 0) { + ahd_print_path(ahd, scb); + printf("Handled Residual of %d bytes\n", resid); + } +#endif +} + +/******************************* Target Mode **********************************/ +#ifdef AHD_TARGET_MODE +/* + * Add a target mode event to this lun's queue + */ +static void +ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, + u_int initiator_id, u_int event_type, u_int event_arg) +{ + struct ahd_tmode_event *event; + int pending; + + xpt_freeze_devq(lstate->path, /*count*/1); + if (lstate->event_w_idx >= lstate->event_r_idx) + pending = lstate->event_w_idx - lstate->event_r_idx; + else + pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 + - (lstate->event_r_idx - lstate->event_w_idx); + + if (event_type == EVENT_TYPE_BUS_RESET + || event_type == MSG_BUS_DEV_RESET) { + /* + * Any earlier events are irrelevant, so reset our buffer. + * This has the effect of allowing us to deal with reset + * floods (an external device holding down the reset line) + * without losing the event that is really interesting. + */ + lstate->event_r_idx = 0; + lstate->event_w_idx = 0; + xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); + } + + if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { + xpt_print_path(lstate->path); + printf("immediate event %x:%x lost\n", + lstate->event_buffer[lstate->event_r_idx].event_type, + lstate->event_buffer[lstate->event_r_idx].event_arg); + lstate->event_r_idx++; + if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) + lstate->event_r_idx = 0; + xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); + } + + event = &lstate->event_buffer[lstate->event_w_idx]; + event->initiator_id = initiator_id; + event->event_type = event_type; + event->event_arg = event_arg; + lstate->event_w_idx++; + if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) + lstate->event_w_idx = 0; +} + +/* + * Send any target mode events queued up waiting + * for immediate notify resources. 
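+ *
+ * The ring drained here is the one ahd_queue_lstate_event()
+ * above fills; with read index r and write index w its
+ * occupancy is computed by the producer as
+ *
+ *    pending = (w >= r) ? w - r
+ *            : AHD_TMODE_EVENT_BUFFER_SIZE + 1 - (r - w);
+ *
+ * and one event is retired per available immediate notify CCB.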
+ */ +void +ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) +{ + struct ccb_hdr *ccbh; + struct ccb_immed_notify *inot; + + while (lstate->event_r_idx != lstate->event_w_idx + && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { + struct ahd_tmode_event *event; + + event = &lstate->event_buffer[lstate->event_r_idx]; + SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); + inot = (struct ccb_immed_notify *)ccbh; + switch (event->event_type) { + case EVENT_TYPE_BUS_RESET: + ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; + break; + default: + ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; + inot->message_args[0] = event->event_type; + inot->message_args[1] = event->event_arg; + break; + } + inot->initiator_id = event->initiator_id; + inot->sense_len = 0; + xpt_done((union ccb *)inot); + lstate->event_r_idx++; + if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) + lstate->event_r_idx = 0; + } +} +#endif + +/******************** Sequencer Program Patching/Download *********************/ + +#ifdef AHD_DUMP_SEQ +void +ahd_dumpseq(struct ahd_softc* ahd) +{ + int i; + int max_prog; + + max_prog = 2048; + + ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); + ahd_outb(ahd, PRGMCNT, 0); + ahd_outb(ahd, PRGMCNT+1, 0); + for (i = 0; i < max_prog; i++) { + uint8_t ins_bytes[4]; + + ahd_insb(ahd, SEQRAM, ins_bytes, 4); + printf("0x%08x\n", ins_bytes[0] << 24 + | ins_bytes[1] << 16 + | ins_bytes[2] << 8 + | ins_bytes[3]); + } +} +#endif + +static void +ahd_loadseq(struct ahd_softc *ahd) +{ + struct cs cs_table[num_critical_sections]; + u_int begin_set[num_critical_sections]; + u_int end_set[num_critical_sections]; + struct patch *cur_patch; + u_int cs_count; + u_int cur_cs; + u_int i; + int downloaded; + u_int skip_addr; + u_int sg_prefetch_cnt; + u_int sg_prefetch_cnt_limit; + u_int sg_prefetch_align; + u_int sg_size; + uint8_t download_consts[DOWNLOAD_CONST_COUNT]; + + if (bootverbose) + printf("%s: Downloading Sequencer Program...", + ahd_name(ahd)); + +#if DOWNLOAD_CONST_COUNT != 6 +#error "Download Const Mismatch" +#endif + /* + * Start out with 0 critical sections + * that apply to this firmware load. + */ + cs_count = 0; + cur_cs = 0; + memset(begin_set, 0, sizeof(begin_set)); + memset(end_set, 0, sizeof(end_set)); + + /* + * Setup downloadable constant table. + * + * The computation for the S/G prefetch variables is + * a bit complicated. We would like to always fetch + * in terms of cachelined sized increments. However, + * if the cacheline is not an even multiple of the + * SG element size or is larger than our SG RAM, using + * just the cache size might leave us with only a portion + * of an SG element at the tail of a prefetch. If the + * cacheline is larger than our S/G prefetch buffer less + * the size of an SG element, we may round down to a cacheline + * that doesn't contain any or all of the S/G of interest + * within the bounds of our S/G ram. Provide variables to + * the sequencer that will allow it to handle these edge + * cases. + */ + /* Start by aligning to the nearest cacheline. */ + sg_prefetch_align = ahd->pci_cachesize; + if (sg_prefetch_align == 0) + sg_prefetch_cnt = 8; + /* Round down to the nearest power of 2. */ + while (powerof2(sg_prefetch_align) == 0) + sg_prefetch_align--; + /* + * If the cacheline boundary is greater than half our prefetch RAM + * we risk not being able to fetch even a single complete S/G + * segment if we align to that boundary. 
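+ *
+ * A worked example (hypothetical hardware values): with a 64
+ * byte cacheline and 8 byte ahd_dma_seg entries, the code below
+ * ends up with
+ *
+ *    sg_prefetch_align = 64
+ *    sg_prefetch_cnt = 64 (one cacheline already fits a segment)
+ *    sg_prefetch_cnt_limit = -(64 - 8 + 1) = 0xC7 as a uint8_t
+ *    SG_PREFETCH_ALIGN_MASK = ~(64 - 1) = 0xC0 as a uint8_t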
+ */ + if (sg_prefetch_align > CCSGADDR_MAX/2) + sg_prefetch_align = CCSGADDR_MAX/2; + /* Start by fetching a single cacheline. */ + sg_prefetch_cnt = sg_prefetch_align; + /* + * Increment the prefetch count by cachelines until + * at least one S/G element will fit. + */ + sg_size = sizeof(struct ahd_dma_seg); + if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) + sg_size = sizeof(struct ahd_dma64_seg); + while (sg_prefetch_cnt < sg_size) + sg_prefetch_cnt += sg_prefetch_align; + /* + * If the cacheline is not an even multiple of + * the S/G size, we may only get a partial S/G when + * we align. Add a cacheline if this is the case. + */ + if ((sg_prefetch_align % sg_size) != 0 + && (sg_prefetch_cnt < CCSGADDR_MAX)) + sg_prefetch_cnt += sg_prefetch_align; + /* + * Lastly, compute a value that the sequencer can use + * to determine if the remainder of the CCSGRAM buffer + * has a full S/G element in it. + */ + sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); + download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; + download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; + download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); + download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); + download_consts[SG_SIZEOF] = sg_size; + download_consts[PKT_OVERRUN_BUFOFFSET] = + (AHD_SCB_MAX * sizeof(uint16_t)) / 256; + if ((ahd->features & AHD_TARGETMODE) != 0) + download_consts[PKT_OVERRUN_BUFOFFSET] += + (AHD_TMODE_CMDS * sizeof(struct target_cmd)) / 256; + + cur_patch = patches; + downloaded = 0; + skip_addr = 0; + ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); + ahd_outb(ahd, PRGMCNT, 0); + ahd_outb(ahd, PRGMCNT+1, 0); + + for (i = 0; i < sizeof(seqprog)/4; i++) { + if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { + /* + * Don't download this instruction as it + * is in a patch that was removed. + */ + continue; + } + /* + * Move through the CS table until we find a CS + * that might apply to this instruction. + */ + for (; cur_cs < num_critical_sections; cur_cs++) { + if (critical_sections[cur_cs].end <= i) { + if (begin_set[cs_count] == TRUE + && end_set[cs_count] == FALSE) { + cs_table[cs_count].end = downloaded; + end_set[cs_count] = TRUE; + cs_count++; + } + continue; + } + if (critical_sections[cur_cs].begin <= i + && begin_set[cs_count] == FALSE) { + cs_table[cs_count].begin = downloaded; + begin_set[cs_count] = TRUE; + } + break; + } + ahd_download_instr(ahd, i, download_consts); + downloaded++; + } + + ahd->num_critical_sections = cs_count; + if (cs_count != 0) { + + cs_count *= sizeof(struct cs); + ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); + if (ahd->critical_sections == NULL) + panic("ahd_loadseq: Could not malloc"); + memcpy(ahd->critical_sections, cs_table, cs_count); + } + ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); + + if (bootverbose) + printf(" %d instructions downloaded\n", downloaded); +} + +static int +ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, + u_int start_instr, u_int *skip_addr) +{ + struct patch *cur_patch; + struct patch *last_patch; + u_int num_patches; + + num_patches = sizeof(patches)/sizeof(struct patch); + last_patch = &patches[num_patches]; + cur_patch = *start_patch; + + while (cur_patch < last_patch && start_instr == cur_patch->begin) { + + if (cur_patch->patch_func(ahd) == 0) { + + /* Start rejecting code */ + *skip_addr = start_instr + cur_patch->skip_instr; + cur_patch += cur_patch->skip_patch; + } else { + /* Accepted this patch. 
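+ *
+ * (For contrast with the rejecting branch above: a hypothetical
+ * entry whose patch_func() fails and that carries skip_instr ==
+ * 3 and skip_patch == 1 would drop the next three instructions
+ * and resume matching one table entry later.)
+ *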
Advance to the next + * one and wait for our intruction pointer to + * hit this point. + */ + cur_patch++; + } + } + + *start_patch = cur_patch; + if (start_instr < *skip_addr) + /* Still skipping */ + return (0); + + return (1); +} + +static u_int +ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) +{ + struct patch *cur_patch; + int address_offset; + u_int skip_addr; + u_int i; + + address_offset = 0; + cur_patch = patches; + skip_addr = 0; + + for (i = 0; i < address;) { + + ahd_check_patch(ahd, &cur_patch, i, &skip_addr); + + if (skip_addr > i) { + int end_addr; + + end_addr = MIN(address, skip_addr); + address_offset += end_addr - i; + i = skip_addr; + } else { + i++; + } + } + return (address - address_offset); +} + +static void +ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) +{ + union ins_formats instr; + struct ins_format1 *fmt1_ins; + struct ins_format3 *fmt3_ins; + u_int opcode; + + /* + * The firmware is always compiled into a little endian format. + */ + instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); + + fmt1_ins = &instr.format1; + fmt3_ins = NULL; + + /* Pull the opcode */ + opcode = instr.format1.opcode; + switch (opcode) { + case AIC_OP_JMP: + case AIC_OP_JC: + case AIC_OP_JNC: + case AIC_OP_CALL: + case AIC_OP_JNE: + case AIC_OP_JNZ: + case AIC_OP_JE: + case AIC_OP_JZ: + { + fmt3_ins = &instr.format3; + fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); + /* FALLTHROUGH */ + } + case AIC_OP_OR: + case AIC_OP_AND: + case AIC_OP_XOR: + case AIC_OP_ADD: + case AIC_OP_ADC: + case AIC_OP_BMOV: + if (fmt1_ins->parity != 0) { + fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; + } + fmt1_ins->parity = 0; + /* FALLTHROUGH */ + case AIC_OP_ROL: + { + int i, count; + + /* Calculate odd parity for the instruction */ + for (i = 0, count = 0; i < 31; i++) { + uint32_t mask; + + mask = 0x01 << i; + if ((instr.integer & mask) != 0) + count++; + } + if ((count & 0x01) == 0) + instr.format1.parity = 1; + + /* The sequencer is a little endian cpu */ + instr.integer = ahd_htole32(instr.integer); + ahd_outsb(ahd, SEQRAM, instr.bytes, 4); + break; + } + default: + panic("Unknown opcode encountered in seq program"); + break; + } +} + +void +ahd_dump_all_cards_state() +{ + struct ahd_softc *list_ahd; + + TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { + ahd_dump_card_state(list_ahd); + } +} + +void +ahd_dump_card_state(struct ahd_softc *ahd) +{ + struct scb *scb; + ahd_mode_state saved_modes; + u_int dffstat; + int paused; + u_int scb_index; + u_int i; + + if (ahd_is_paused(ahd)) { + paused = 1; + } else { + paused = 0; + ahd_pause(ahd); + } + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + printf("%s: Dumping Card State at program address 0x%x Mode 0x%x\n", + ahd_name(ahd), + ahd_inb(ahd, CURADDR) | (ahd_inb(ahd, CURADDR+1) << 8), + ahd_build_mode_state(ahd, ahd->saved_src_mode, + ahd->saved_dst_mode)); + printf("Softc pointer is %p\n", ahd); + printf("IOWNID == 0x%x, TOWNID == 0x%x, SCSISEQ1 == 0x%x\n", + ahd_inb(ahd, IOWNID), ahd_inb(ahd, TOWNID), + ahd_inb(ahd, SCSISEQ1)); + printf("SCSISIGI == 0x%x\n", ahd_inb(ahd, SCSISIGI)); + printf("QFREEZE_COUNT == %d, SEQ_FLAGS2 == 0x%x\n", + ahd_inw(ahd, QFREEZE_COUNT), ahd_inb(ahd, SEQ_FLAGS2)); + if (paused) + printf("Card was paused\n"); + printf("%s: LASTSCB 0x%x CURRSCB 0x%x NEXTSCB 0x%x SEQINTCTL 0x%x\n", + ahd_name(ahd), ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), + ahd_inw(ahd, NEXTSCB), ahd_inb(ahd, SEQINTCTL)); + printf("SCSISEQ = 
0x%x\n", ahd_inb(ahd, SCSISEQ0)); + printf("SCB count = %d\n", ahd->scb_data.numscbs); + printf("Kernel NEXTQSCB = %d\n", SCB_GET_TAG(ahd->next_queued_scb)); + printf("%s: LQCTL1 = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, LQCTL1)); + printf("%s: WAITING_TID_LIST == 0x%x:0x%x\n", + ahd_name(ahd), ahd_inw(ahd, WAITING_TID_HEAD), + ahd_inw(ahd, WAITING_TID_TAIL)); + printf("%s: WAITING_SCB_TAILS: ", ahd_name(ahd)); + for (i = 0; i < AHD_NUM_TARGETS; i++) { + printf("%d(0x%x) ", i, + ahd_inw(ahd, WAITING_SCB_TAILS + (2 * i))); + } + printf("\n"); + /* QINFIFO */ + ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, + CAM_LUN_WILDCARD, SCB_LIST_NULL, + ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); + printf("Pending list:\n"); + i = 0; + LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { + if (i++ > AHD_SCB_MAX) + break; + if (scb != LIST_FIRST(&ahd->pending_scbs)) + printf(", "); + printf("%3d", SCB_GET_TAG(scb)); + ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); + printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x," + " RSG 0x%x, KSG 0x%x)\n", + ahd_inb(ahd, SCB_CONTROL), + ahd_inb(ahd, SCB_SCSIID), ahd_inw(ahd, SCB_NEXT), + ahd_inw(ahd, SCB_NEXT2), ahd_inl(ahd, SCB_SGPTR), + ahd_inl(ahd, SCB_RESIDUAL_SGPTR), + ahd_le32toh(scb->hscb->sgptr)); + } + printf("\n"); + + printf("Kernel Free SCB list: "); + i = 0; + SLIST_FOREACH(scb, &ahd->scb_data.free_scbs, links.sle) { + if (i++ > AHD_SCB_MAX) + break; + printf("%d ", SCB_GET_TAG(scb)); + } + printf("\n"); + + printf("Sequencer Complete DMA-inprog list: "); + scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printf("%d ", scb_index); + scb_index = ahd_inw(ahd, SCB_NEXT_COMPLETE); + } + printf("\n"); + + printf("Sequencer Complete list: "); + scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printf("%d ", scb_index); + scb_index = ahd_inw(ahd, SCB_NEXT_COMPLETE); + } + printf("\n"); + + + printf("Sequencer DMA-Up and Complete list: "); + scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printf("%d ", scb_index); + scb_index = ahd_inw(ahd, SCB_NEXT_COMPLETE); + } + printf("\n"); + printf("%s: SIMODE1 = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, SIMODE1)); + printf("%s: LQISTAT0 = 0x%x, LQISTAT1 = 0x%x, LQISTAT2 = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, LQISTAT0), ahd_inb(ahd, LQISTAT1), + ahd_inb(ahd, LQISTAT2)); + printf("%s: LQOSTAT0 = 0x%x, LQOSTAT1 = 0x%x, LQOSTAT2 = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, LQOSTAT0), ahd_inb(ahd, LQOSTAT1), + ahd_inb(ahd, LQOSTAT2)); + dffstat = ahd_inb(ahd, DFFSTAT); + for (i = 0; i < 2; i++) { + struct scb *fifo_scb; + u_int fifo_scbptr; + + ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); + fifo_scbptr = ahd_inb(ahd, SCBPTR); + printf("%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x, LJSCB 0x%x\n", + ahd_name(ahd), i, + (dffstat & (FIFO0FREE << i)) ? 
"Free" : "Active", + ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr, + ahd_inw(ahd, LONGJMP_SCB)); + printf("%s: SEQIMODE == 0x%x, SEQINTSRC == 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, SEQIMODE), + ahd_inb(ahd, SEQINTSRC)); + printf("%s: DFCNTRL == 0x%x, DFSTATUS == 0x%x, " + "SG_CACHE_SHADOW = 0x%x, SG_STATE = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, DFCNTRL), + ahd_inb(ahd, DFSTATUS), ahd_inb(ahd, SG_CACHE_SHADOW), + ahd_inb(ahd, SG_STATE)); + printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x, SSTAT2 = 0x%x\n", + ahd_inb(ahd, SSTAT0), ahd_inb(ahd, SSTAT1), + ahd_inb(ahd, SSTAT2)); + printf("DFFSXFRCTL = 0x%x, SOFFCNT = 0x%x\n", + ahd_inb(ahd, DFFSXFRCTL), ahd_inb(ahd, SOFFCNT)); + printf("MDFFSTAT = 0x%x, SHADDR = 0x%x, SHCNT = 0x%x\n", + ahd_inb(ahd, MDFFSTAT), ahd_inl(ahd, SHADDR), + (ahd_inb(ahd, SHCNT) + | (ahd_inb(ahd, SHCNT + 1) << 8) + | (ahd_inb(ahd, SHCNT + 2) << 16))); + printf("HADDR = 0x%x, HCNT = 0x%x\n", + ahd_inl(ahd, HADDR), + (ahd_inb(ahd, HCNT) + | (ahd_inb(ahd, HCNT + 1) << 8) + | (ahd_inb(ahd, HCNT + 2) << 16))); + printf("CCSGCTL = 0x%x\n", ahd_inb(ahd, CCSGCTL)); + fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr); + if (fifo_scb != NULL) + ahd_dump_sglist(fifo_scb); + } + printf("LQIN: "); + for (i = 0; i < 20; i++) + printf("0x%x ", ahd_inb(ahd, LQIN + i)); + printf("\n"); + printf("%s: SSTAT3 == 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SSTAT3)); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), + ahd_inb(ahd, OPTIONMODE)); + printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", + ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), + ahd_inb(ahd, MAXCMDCNT)); + ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); + printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", + ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), + ahd_inw(ahd, DINDEX)); + printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", + ahd_name(ahd), ahd_get_scbptr(ahd), ahd_inw(ahd, SCB_NEXT), + ahd_inw(ahd, SCB_NEXT2)); + printf("CDB %x %x %x %x %x %x\n", + ahd_inb(ahd, SCB_CDB_STORE), + ahd_inb(ahd, SCB_CDB_STORE+1), + ahd_inb(ahd, SCB_CDB_STORE+2), + ahd_inb(ahd, SCB_CDB_STORE+3), + ahd_inb(ahd, SCB_CDB_STORE+4), + ahd_inb(ahd, SCB_CDB_STORE+5)); + printf("STACK:"); + for(i = 0; i < SEQ_STACK_SIZE; i++) + printf(" 0x%x", ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8)); + printf("\n"); + ahd_platform_dump_card_state(ahd); + ahd_restore_modes(ahd, saved_modes); + if (paused == 0) + ahd_unpause(ahd); +} + +void +ahd_dump_scbs(struct ahd_softc *ahd) +{ + ahd_mode_state saved_modes; + int i; + + saved_modes = ahd_save_modes(ahd); + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + for (i = 0; i < AHD_SCB_MAX; i++) { + ahd_set_scbptr(ahd, i); + printf("%3d", i); + printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", + ahd_inb(ahd, SCB_CONTROL), + ahd_inb(ahd, SCB_SCSIID), ahd_inw(ahd, SCB_NEXT), + ahd_inw(ahd, SCB_NEXT2), ahd_inl(ahd, SCB_SGPTR), + ahd_inl(ahd, SCB_RESIDUAL_SGPTR)); + } + printf("\n"); + ahd_restore_modes(ahd, saved_modes); +} + +/**************************** Flexport Logic **********************************/ +/* + * Read count 16bit words from 16bit word address start_addr from the + * SEEPROM attached to the controller, into buf, using the controller's + * SEEPROM reading state machine. 
+ */
+int
+ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+ u_int start_addr, u_int count)
+{
+ u_int cur_addr;
+ u_int end_addr;
+ int error;
+
+ /*
+ * If we never make it through the loop even once,
+ * we were passed invalid arguments.
+ */
+ error = EINVAL;
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+ end_addr = start_addr + count;
+ for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
+ ahd_outb(ahd, SEEADR, cur_addr);
+ ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
+
+ error = ahd_wait_seeprom(ahd);
+ if (error)
+ break;
+ *buf++ = ahd_inw(ahd, SEEDAT);
+ }
+ return (error);
+}
+
+/*
+ * Write count 16bit words from buf into the SEEPROM attached to the
+ * controller, starting at 16bit word address start_addr, using the
+ * controller's SEEPROM writing state machine.
+ */
+int
+ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+ u_int start_addr, u_int count)
+{
+ u_int cur_addr;
+ u_int end_addr;
+ int error;
+ int retval;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+ /* Place the chip into write-enable mode */
+ ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
+ ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
+ error = ahd_wait_seeprom(ahd);
+ if (error)
+ return (error);
+
+ /*
+ * Write the data. If we don't get through the loop at
+ * least once, the arguments were invalid.
+ */
+ retval = EINVAL;
+ end_addr = start_addr + count;
+ for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
+ ahd_outw(ahd, SEEDAT, *buf++);
+ ahd_outb(ahd, SEEADR, cur_addr);
+ ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
+
+ retval = ahd_wait_seeprom(ahd);
+ if (retval)
+ break;
+ }
+
+ /*
+ * Disable writes.
+ */
+ ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
+ ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
+ error = ahd_wait_seeprom(ahd);
+ if (error)
+ return (error);
+ return (retval);
+}
+
+/*
+ * Wait ~100us for the serial eeprom to satisfy our request.
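+ *
+ * For reference, ahd_write_seeprom() above brackets its word
+ * writes with this helper and the enable/disable opcodes, e.g.
+ *
+ *    ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
+ *    ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
+ *    error = ahd_wait_seeprom(ahd);
+ *
+ * The 20 polls of 5us each below bound the wait at ~100us.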
+ */ +int +ahd_wait_seeprom(struct ahd_softc *ahd) +{ + int cnt; + + cnt = 20; + while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) + ahd_delay(5); + + if (cnt == 0) + return (ETIMEDOUT); + return (0); +} + +int +ahd_verify_cksum(struct seeprom_config *sc) +{ + int i; + int maxaddr; + uint32_t checksum; + uint16_t *scarray; + + maxaddr = (sizeof(*sc)/2) - 1; + checksum = 0; + scarray = (uint16_t *)sc; + + for (i = 0; i < maxaddr; i++) + checksum = checksum + scarray[i]; + if (checksum == 0 + || (checksum & 0xFFFF) != sc->checksum) { + return (0); + } else { + return (1); + } +} + +int +ahd_acquire_seeprom(struct ahd_softc *ahd) +{ + uint8_t seetype; + int error; + + error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); + if (error != 0 + || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) + return (0); + return (1); +} + +void +ahd_release_seeprom(struct ahd_softc *ahd) +{ + /* Currently a no-op */ +} + +int +ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) +{ + int error; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + if (addr > 7) + panic("ahd_write_flexport: address out of range"); + ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); + error = ahd_wait_flexport(ahd); + if (error != 0) + return (error); + ahd_outb(ahd, BRDDAT, value); + ahd_flush_device_writes(ahd); + ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); + ahd_flush_device_writes(ahd); + ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); + ahd_flush_device_writes(ahd); + ahd_outb(ahd, BRDCTL, 0); + ahd_flush_device_writes(ahd); + return (0); +} + +int +ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) +{ + int error; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + if (addr > 7) + panic("ahd_read_flexport: address out of range"); + ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); + error = ahd_wait_flexport(ahd); + if (error != 0) + return (error); + *value = ahd_inb(ahd, BRDDAT); + ahd_outb(ahd, BRDCTL, 0); + ahd_flush_device_writes(ahd); + return (0); +} + +/* + * Wait at most 2 seconds for flexport arbitration to succeed. + */ +int +ahd_wait_flexport(struct ahd_softc *ahd) +{ + int cnt; + + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); + cnt = 1000000 * 2 / 5; + while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt) + ahd_delay(5); + + if (cnt == 0) + return (ETIMEDOUT); + return (0); +} + +/************************* Target Mode ****************************************/ +#ifdef AHD_TARGET_MODE +cam_status +ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, + struct ahd_tmode_tstate **tstate, + struct ahd_tmode_lstate **lstate, + int notfound_failure) +{ + + if ((ahd->features & AHD_TARGETMODE) == 0) + return (CAM_REQ_INVALID); + + /* + * Handle the 'black hole' device that sucks up + * requests to unattached luns on enabled targets. + */ + if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD + && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { + *tstate = NULL; + *lstate = ahd->black_hole; + } else { + u_int max_id; + + max_id = (ahd->features & AHD_WIDE) ? 
15 : 7; + if (ccb->ccb_h.target_id > max_id) + return (CAM_TID_INVALID); + + if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS) + return (CAM_LUN_INVALID); + + *tstate = ahd->enabled_targets[ccb->ccb_h.target_id]; + *lstate = NULL; + if (*tstate != NULL) + *lstate = + (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; + } + + if (notfound_failure != 0 && *lstate == NULL) + return (CAM_PATH_INVALID); + + return (CAM_REQ_CMP); +} + +void +ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) +{ +#if NOT_YET + struct ahd_tmode_tstate *tstate; + struct ahd_tmode_lstate *lstate; + struct ccb_en_lun *cel; + cam_status status; + u_int target; + u_int lun; + u_int target_mask; + u_long s; + char channel; + + status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, + /*notfound_failure*/FALSE); + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + return; + } + + if ((ahd->features & AHD_MULTIROLE) != 0) { + u_int our_id; + + our_id = ahd->our_id; + if (ccb->ccb_h.target_id != our_id) { + if ((ahd->features & AHD_MULTI_TID) != 0 + && (ahd->flags & AHD_INITIATORROLE) != 0) { + /* + * Only allow additional targets if + * the initiator role is disabled. + * The hardware cannot handle a re-select-in + * on the initiator id during a re-select-out + * on a different target id. + */ + status = CAM_TID_INVALID; + } else if ((ahd->flags & AHD_INITIATORROLE) != 0 + || ahd->enabled_luns > 0) { + /* + * Only allow our target id to change + * if the initiator role is not configured + * and there are no enabled luns which + * are attached to the currently registered + * scsi id. + */ + status = CAM_TID_INVALID; + } + } + } + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + return; + } + + /* + * We now have an id that is valid. + * If we aren't in target mode, switch modes. + */ + if ((ahd->flags & AHD_TARGETROLE) == 0 + && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { + u_long s; + + printf("Configuring Target Mode\n"); + ahd_lock(ahd, &s); + if (LIST_FIRST(&ahd->pending_scbs) != NULL) { + ccb->ccb_h.status = CAM_BUSY; + ahd_unlock(ahd, &s); + return; + } + ahd->flags |= AHD_TARGETROLE; + if ((ahd->features & AHD_MULTIROLE) == 0) + ahd->flags &= ~AHD_INITIATORROLE; + ahd_pause(ahd); + ahd_loadseq(ahd); + ahd_unlock(ahd, &s); + } + cel = &ccb->cel; + target = ccb->ccb_h.target_id; + lun = ccb->ccb_h.target_lun; + channel = SIM_CHANNEL(ahd, sim); + target_mask = 0x01 << target; + if (channel == 'B') + target_mask <<= 8; + + if (cel->enable != 0) { + u_int scsiseq1; + + /* Are we already enabled?? */ + if (lstate != NULL) { + xpt_print_path(ccb->ccb_h.path); + printf("Lun already enabled\n"); + ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; + return; + } + + if (cel->grp6_len != 0 + || cel->grp7_len != 0) { + /* + * Don't (yet?) support vendor + * specific commands. + */ + ccb->ccb_h.status = CAM_REQ_INVALID; + printf("Non-zero Group Codes\n"); + return; + } + + /* + * Seems to be okay. + * Setup our data structures. 
+ */ + if (target != CAM_TARGET_WILDCARD && tstate == NULL) { + tstate = ahd_alloc_tstate(ahd, target, channel); + if (tstate == NULL) { + xpt_print_path(ccb->ccb_h.path); + printf("Couldn't allocate tstate\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + } + lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); + if (lstate == NULL) { + xpt_print_path(ccb->ccb_h.path); + printf("Couldn't allocate lstate\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + memset(lstate, 0, sizeof(*lstate)); + status = xpt_create_path(&lstate->path, /*periph*/NULL, + xpt_path_path_id(ccb->ccb_h.path), + xpt_path_target_id(ccb->ccb_h.path), + xpt_path_lun_id(ccb->ccb_h.path)); + if (status != CAM_REQ_CMP) { + free(lstate, M_DEVBUF); + xpt_print_path(ccb->ccb_h.path); + printf("Couldn't allocate path\n"); + ccb->ccb_h.status = CAM_RESRC_UNAVAIL; + return; + } + SLIST_INIT(&lstate->accept_tios); + SLIST_INIT(&lstate->immed_notifies); + ahd_lock(ahd, &s); + ahd_pause(ahd); + if (target != CAM_TARGET_WILDCARD) { + tstate->enabled_luns[lun] = lstate; + ahd->enabled_luns++; + + if ((ahd->features & AHD_MULTI_TID) != 0) { + u_int targid_mask; + + targid_mask = ahd_inb(ahd, TARGID) + | (ahd_inb(ahd, TARGID + 1) << 8); + + targid_mask |= target_mask; + ahd_outb(ahd, TARGID, targid_mask); + ahd_outb(ahd, TARGID+1, (targid_mask >> 8)); + + ahd_update_scsiid(ahd, targid_mask); + } else { + u_int our_id; + char channel; + + channel = SIM_CHANNEL(ahd, sim); + our_id = SIM_SCSI_ID(ahd, sim); + + /* + * This can only happen if selections + * are not enabled + */ + if (target != our_id) { + u_int sblkctl; + char cur_channel; + int swap; + + sblkctl = ahd_inb(ahd, SBLKCTL); + cur_channel = (sblkctl & SELBUSB) + ? 'B' : 'A'; + if ((ahd->features & AHD_TWIN) == 0) + cur_channel = 'A'; + swap = cur_channel != channel; + ahd->our_id = target; + + if (swap) + ahd_outb(ahd, SBLKCTL, + sblkctl ^ SELBUSB); + + ahd_outb(ahd, SCSIID, target); + + if (swap) + ahd_outb(ahd, SBLKCTL, sblkctl); + } + } + } else + ahd->black_hole = lstate; + /* Allow select-in operations */ + if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { + scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); + scsiseq1 |= ENSELI; + ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); + scsiseq1 = ahd_inb(ahd, SCSISEQ1); + scsiseq1 |= ENSELI; + ahd_outb(ahd, SCSISEQ1, scsiseq1); + } + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_print_path(ccb->ccb_h.path); + printf("Lun now enabled for target mode\n"); + } else { + struct scb *scb; + int i, empty; + + if (lstate == NULL) { + ccb->ccb_h.status = CAM_LUN_INVALID; + return; + } + + ahd_lock(ahd, &s); + + ccb->ccb_h.status = CAM_REQ_CMP; + LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { + struct ccb_hdr *ccbh; + + ccbh = &scb->io_ctx->ccb_h; + if (ccbh->func_code == XPT_CONT_TARGET_IO + && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ + printf("CTIO pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + ahd_unlock(ahd, &s); + return; + } + } + + if (SLIST_FIRST(&lstate->accept_tios) != NULL) { + printf("ATIOs pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + } + + if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { + printf("INOTs pending\n"); + ccb->ccb_h.status = CAM_REQ_INVALID; + } + + if (ccb->ccb_h.status != CAM_REQ_CMP) { + ahd_unlock(ahd, &s); + return; + } + + xpt_print_path(ccb->ccb_h.path); + printf("Target mode disabled\n"); + xpt_free_path(lstate->path); + free(lstate, M_DEVBUF); + + ahd_pause(ahd); + /* Can we clean up the target too? 
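+ *
+ * (E.g. if lun 0 of target 5 was the last one enabled, the code
+ * below frees the tstate and clears bit 5 of the 16-bit TARGID
+ * mask: targid_mask &= ~(0x01 << 5).)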
*/ + if (target != CAM_TARGET_WILDCARD) { + tstate->enabled_luns[lun] = NULL; + ahd->enabled_luns--; + for (empty = 1, i = 0; i < 8; i++) + if (tstate->enabled_luns[i] != NULL) { + empty = 0; + break; + } + + if (empty) { + ahd_free_tstate(ahd, target, channel, + /*force*/FALSE); + if (ahd->features & AHD_MULTI_TID) { + u_int targid_mask; + + targid_mask = ahd_inb(ahd, TARGID) + | (ahd_inb(ahd, TARGID + 1) + << 8); + + targid_mask &= ~target_mask; + ahd_outb(ahd, TARGID, targid_mask); + ahd_outb(ahd, TARGID+1, + (targid_mask >> 8)); + ahd_update_scsiid(ahd, targid_mask); + } + } + } else { + + ahd->black_hole = NULL; + + /* + * We can't allow selections without + * our black hole device. + */ + empty = TRUE; + } + if (ahd->enabled_luns == 0) { + /* Disallow select-in */ + u_int scsiseq1; + + scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); + scsiseq1 &= ~ENSELI; + ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); + scsiseq1 = ahd_inb(ahd, SCSISEQ1); + scsiseq1 &= ~ENSELI; + ahd_outb(ahd, SCSISEQ1, scsiseq1); + + if ((ahd->features & AHD_MULTIROLE) == 0) { + printf("Configuring Initiator Mode\n"); + ahd->flags &= ~AHD_TARGETROLE; + ahd->flags |= AHD_INITIATORROLE; + ahd_pause(ahd); + ahd_loadseq(ahd); + } + } + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + } +#endif +} + +static void +ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask) +{ +#if NOT_YET + u_int scsiid_mask; + u_int scsiid; + + if ((ahd->features & AHD_MULTI_TID) == 0) + panic("ahd_update_scsiid called on non-multitid unit\n"); + + /* + * Since we will rely on the the TARGID mask + * for selection enables, ensure that OID + * in SCSIID is not set to some other ID + * that we don't want to allow selections on. + */ + if ((ahd->features & AHD_ULTRA2) != 0) + scsiid = ahd_inb(ahd, SCSIID_ULTRA2); + else + scsiid = ahd_inb(ahd, SCSIID); + scsiid_mask = 0x1 << (scsiid & OID); + if ((targid_mask & scsiid_mask) == 0) { + u_int our_id; + + /* ffs counts from 1 */ + our_id = ffs(targid_mask); + if (our_id == 0) + our_id = ahd->our_id; + else + our_id--; + scsiid &= TID; + scsiid |= our_id; + } + if ((ahd->features & AHD_ULTRA2) != 0) + ahd_outb(ahd, SCSIID_ULTRA2, scsiid); + else + ahd_outb(ahd, SCSIID, scsiid); +#endif +} + +void +ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) +{ + struct target_cmd *cmd; + + ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); + while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { + + /* + * Only advance through the queue if we + * have the resources to process the command. + */ + if (ahd_handle_target_cmd(ahd, cmd) != 0) + break; + + cmd->cmd_valid = 0; + ahd_dmamap_sync(ahd, ahd->shared_data_dmat, + ahd->shared_data_dmamap, + ahd_targetcmd_offset(ahd, ahd->tqinfifonext), + sizeof(struct target_cmd), + BUS_DMASYNC_PREREAD); + ahd->tqinfifonext++; + + /* + * Lazily update our position in the target mode incoming + * command queue as seen by the sequencer. 
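+ *
+ * (Assuming HOST_TQINPOS is a power-of-two mask bit, the test
+ * below fires only once every HOST_TQINPOS commands; for
+ * HOST_TQINPOS == 0x80, say, after commands 1, 129, 257, and so
+ * on, rather than on every incoming command.)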
+ */ + if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { + u_int hs_mailbox; + + hs_mailbox = ahd_inb(ahd, HS_MAILBOX); + hs_mailbox &= ~HOST_TQINPOS; + hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; + ahd_outb(ahd, HS_MAILBOX, hs_mailbox); + } + } +} + +static int +ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) +{ + struct ahd_tmode_tstate *tstate; + struct ahd_tmode_lstate *lstate; + struct ccb_accept_tio *atio; + uint8_t *byte; + int initiator; + int target; + int lun; + + initiator = SCSIID_TARGET(ahd, cmd->scsiid); + target = SCSIID_OUR_ID(cmd->scsiid); + lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); + + byte = cmd->bytes; + tstate = ahd->enabled_targets[target]; + lstate = NULL; + if (tstate != NULL) + lstate = tstate->enabled_luns[lun]; + + /* + * Commands for disabled luns go to the black hole driver. + */ + if (lstate == NULL) + lstate = ahd->black_hole; + + atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); + if (atio == NULL) { + ahd->flags |= AHD_TQINFIFO_BLOCKED; + /* + * Wait for more ATIOs from the peripheral driver for this lun. + */ + return (1); + } else + ahd->flags &= ~AHD_TQINFIFO_BLOCKED; +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_TQIN) != 0) + printf("Incoming command from %d for %d:%d%s\n", + initiator, target, lun, + lstate == ahd->black_hole ? "(Black Holed)" : ""); +#endif + SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); + + if (lstate == ahd->black_hole) { + /* Fill in the wildcards */ + atio->ccb_h.target_id = target; + atio->ccb_h.target_lun = lun; + } + + /* + * Package it up and send it off to + * whomever has this lun enabled. + */ + atio->sense_len = 0; + atio->init_id = initiator; + if (byte[0] != 0xFF) { + /* Tag was included */ + atio->tag_action = *byte++; + atio->tag_id = *byte++; + atio->ccb_h.flags = CAM_TAG_ACTION_VALID; + } else { + atio->ccb_h.flags = 0; + } + byte++; + + /* Okay. Now determine the cdb size based on the command code */ + switch (*byte >> CMD_GROUP_CODE_SHIFT) { + case 0: + atio->cdb_len = 6; + break; + case 1: + case 2: + atio->cdb_len = 10; + break; + case 4: + atio->cdb_len = 16; + break; + case 5: + atio->cdb_len = 12; + break; + case 3: + default: + /* Only copy the opcode. */ + atio->cdb_len = 1; + printf("Reserved or VU command code type encountered\n"); + break; + } + + memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); + + atio->ccb_h.status |= CAM_CDB_RECVD; + + if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { + /* + * We weren't allowed to disconnect. + * We're hanging on the bus until a + * continue target I/O comes in response + * to this accept tio. + */ +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_TQIN) != 0) + printf("Received Immediate Command %d:%d:%d - %p\n", + initiator, target, lun, ahd->pending_device); +#endif + ahd->pending_device = lstate; + ahd_freeze_ccb((union ccb *)atio); + atio->ccb_h.flags |= CAM_DIS_DISCONNECT; + } + xpt_done((union ccb*)atio); + return (0); +} + +#endif diff --git a/sys/dev/aic7xxx/aic79xx.h b/sys/dev/aic7xxx/aic79xx.h new file mode 100644 index 0000000..06cb11b --- /dev/null +++ b/sys/dev/aic7xxx/aic79xx.h @@ -0,0 +1,1298 @@ +/* + * Core definitions and data structures shareable across OS platforms. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#46 $ + * + * $FreeBSD$ + */ + +#ifndef _AIC79XX_H_ +#define _AIC79XX_H_ + +/* Register Definitions */ +#include "aic79xx_reg.h" + +/************************* Forward Declarations *******************************/ +struct ahd_platform_data; +struct scb_platform_data; + +/****************************** Useful Macros *********************************/ +#ifndef MAX +#define MAX(a,b) (((a) > (b)) ? (a) : (b)) +#endif + +#ifndef MIN +#define MIN(a,b) (((a) < (b)) ? 
(a) : (b))
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))
+
+#define ALL_CHANNELS '\0'
+#define ALL_TARGETS_MASK 0xFFFF
+#define INITIATOR_WILDCARD (~0)
+#define SCB_LIST_NULL 0xFF00
+#define SCB_LIST_NULL_LE (ahd_htole16(SCB_LIST_NULL))
+#define SCBID_IS_NULL(scbid) (((scbid) & 0xFF00) == SCB_LIST_NULL)
+
+#define SCSIID_TARGET(ahd, scsiid) \
+	(((scsiid) & TID) >> TID_SHIFT)
+#define SCSIID_OUR_ID(scsiid) \
+	((scsiid) & OID)
+#define SCSIID_CHANNEL(ahd, scsiid) ('A')
+#define SCB_IS_SCSIBUS_B(ahd, scb) (0)
+#define SCB_GET_OUR_ID(scb) \
+	SCSIID_OUR_ID((scb)->hscb->scsiid)
+#define SCB_GET_TARGET(ahd, scb) \
+	SCSIID_TARGET((ahd), (scb)->hscb->scsiid)
+#define SCB_GET_CHANNEL(ahd, scb) \
+	SCSIID_CHANNEL(ahd, (scb)->hscb->scsiid)
+#define SCB_GET_LUN(scb) \
+	((scb)->hscb->lun)
+#define SCB_GET_TARGET_OFFSET(ahd, scb) \
+	SCB_GET_TARGET(ahd, scb)
+#define SCB_GET_TARGET_MASK(ahd, scb) \
+	(0x01 << (SCB_GET_TARGET_OFFSET(ahd, scb)))
+/*
+ * TCLs have the following format: TTTTLLLLLLLL
+ */
+#define TCL_TARGET_OFFSET(tcl) \
+	((((tcl) >> 4) & TID) >> 4)
+#define TCL_LUN(tcl) \
+	(tcl & (AHD_NUM_LUNS_NONPKT - 1))
+#define BUILD_TCL(scsiid, lun) \
+	((lun) | (((scsiid) & TID) << 4))
+#define BUILD_TCL_RAW(target, channel, lun) \
+	((lun) | ((target) << 8))
+
+#define SCB_GET_TAG(scb) \
+	ahd_le16toh(scb->hscb->tag)
+
+#ifndef AHD_TARGET_MODE
+#undef AHD_TMODE_ENABLE
+#define AHD_TMODE_ENABLE 0
+#endif
+
+/**************************** Driver Constants ********************************/
+/*
+ * The maximum number of supported targets.
+ */
+#define AHD_NUM_TARGETS 16
+
+/*
+ * The maximum number of supported luns.
+ * The identify message only supports 64 luns in non-packetized transfers.
+ * You can have 2^64 luns when information unit transfers are enabled,
+ * but until we see a need to support that many, we support 256.
+ */
+#define AHD_NUM_LUNS_NONPKT 64
+#define AHD_NUM_LUNS 256
+
+/*
+ * The maximum transfer per S/G segment.
+ */
+#define AHD_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */
+
+/*
+ * The maximum amount of SCB storage in hardware on a controller.
+ * This value represents an upper bound.  Due to software design,
+ * we may not be able to use this number.
+ */
+#define AHD_SCB_MAX 512
+
+/*
+ * The maximum number of concurrent transactions supported per driver instance.
+ * Sequencer Control Blocks (SCBs) store per-transaction information.
+ * We are limited to 510 because:
+ * 1) SCB storage space holds us to at most 512.
+ * 2) Our input queue scheme requires one SCB to always be reserved
+ *    in advance of queuing any SCBs.  This takes us down to 511.
+ * 3) To handle our output queue correctly on machines that only
+ *    support 32bit stores, we must clear the array 4 bytes at a
+ *    time.  To avoid colliding with a DMA write from the sequencer,
+ *    we must be sure that 2, 16bit slots are empty when we write to
+ *    clear the queue.  This restricts us to only 510 SCBs: 1 that
+ *    just completed and the known additional empty slot in the queue
+ *    that precedes it.
+#define AHD_MAX_QUEUE 510
+ */
+#define AHD_MAX_QUEUE 255
+
+/*
+ * Define the size of our QIN and QOUT FIFOs.  They must be a power of 2
+ * in size and accommodate as many transactions as can be queued concurrently.
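+ * Keeping the sizes a power of two lets the wrap macros below reduce
+ * the wrap to a cheap bitwise AND instead of a modulo; for example,
+ * AHD_QIN_WRAP(513) yields 1.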
+ */ +#define AHD_QIN_SIZE 512 +#define AHD_QOUT_SIZE 512 + +#define AHD_QIN_WRAP(x) ((x) & (AHD_QIN_SIZE-1)) +#define AHD_QOUT_WRAP(x) ((x) & (AHD_QOUT_SIZE-1)) + +/* + * The maximum amount of SCB storage we allocate in host memory. This + * number should reflect the 1 additional SCB we require to handle our + * qinfifo mechanism. + */ +#define AHD_SCB_MAX_ALLOC (AHD_MAX_QUEUE+1) + +/* + * Ring Buffer of incoming target commands. + * We allocate 256 to simplify the logic in the sequencer + * by using the natural wrap point of an 8bit counter. + */ +#define AHD_TMODE_CMDS 256 + +/* Reset line assertion time in us */ +#define AHD_BUSRESET_DELAY 250 + +/******************* Chip Characteristics/Operating Settings *****************/ +/* + * Chip Type + * The chip order is from least sophisticated to most sophisticated. + */ +typedef enum { + AHD_NONE = 0x0000, + AHD_CHIPID_MASK = 0x00FF, + AHD_AIC7901 = 0x0001, + AHD_AIC7902 = 0x0002, + AHD_PCI = 0x0100, /* Bus type PCI */ + AHD_PCIX = 0x0200, /* Bus type PCIX */ + AHD_BUS_MASK = 0x0F00 +} ahd_chip; + +/* + * Features available in each chip type. + */ +typedef enum { + AHD_FENONE = 0x00000, + AHD_WIDE = 0x00001, /* Wide Channel */ + AHD_MULTI_FUNC = 0x00100, /* Multi-Function Twin Channel Device */ + AHD_TARGETMODE = 0x01000, /* Has tested target mode support */ + AHD_MULTIROLE = 0x02000, /* Space for two roles at a time */ + AHD_REMOVABLE = 0x00000, /* Hot-Swap supported - None so far*/ + AHD_AIC7901_FE = AHD_FENONE, + AHD_AIC7902_FE = AHD_MULTI_FUNC +} ahd_feature; + +/* + * Bugs in the silicon that we work around in software. + */ +typedef enum { + AHD_BUGNONE = 0x0000, + AHD_SENT_SCB_UPDATE_BUG = 0x0001, + AHD_ABORT_LQI_BUG = 0x0002, + AHD_PKT_BITBUCKET_BUG = 0x0004, + AHD_LONG_SETIMO_BUG = 0x0008, + AHD_NLQICRC_DELAYED_BUG = 0x0010, + AHD_SCSIRST_BUG = 0x0020, + AHD_PCIX_ARBITER_BUG = 0x0040, + AHD_PCIX_SPLIT_BUG = 0x0080, + AHD_PCIX_CHIPRST_BUG = 0x0100, + AHD_PCIX_MMAPIO_BUG = 0x0200, + /* Bug workarounds that can be disabled on non-PCIX busses. */ + AHD_PCIX_BUG_MASK = AHD_PCIX_ARBITER_BUG + | AHD_PCIX_SPLIT_BUG + | AHD_PCIX_CHIPRST_BUG + | AHD_PCIX_MMAPIO_BUG, + AHD_LQO_ATNO_BUG = 0x0400, + AHD_AUTOFLUSH_BUG = 0x0800, + AHD_CLRLQO_AUTOCLR_BUG = 0x1000, + AHD_PKTIZED_STATUS_BUG = 0x2000 +} ahd_bug; + +/* + * Configuration specific settings. + * The driver determines these settings by probing the + * chip/controller's configuration. + */ +typedef enum { + AHD_FNONE = 0x00000, + AHD_PRIMARY_CHANNEL = 0x00003,/* + * The channel that should + * be probed first. + */ + AHD_USEDEFAULTS = 0x00004,/* + * For cards without an seeprom + * or a BIOS to initialize the chip's + * SRAM, we use the default target + * settings. + */ + AHD_SEQUENCER_DEBUG = 0x00008, + AHD_RESET_BUS_A = 0x00010, + AHD_EXTENDED_TRANS_A = 0x00020, + AHD_TERM_ENB_A = 0x00040, + AHD_SPCHK_ENB_A = 0x00080, + AHD_STPWLEVEL_A = 0x00100, + AHD_INITIATORROLE = 0x00200,/* + * Allow initiator operations on + * this controller. + */ + AHD_TARGETROLE = 0x00400,/* + * Allow target operations on this + * controller. + */ + AHD_RESOURCE_SHORTAGE = 0x00800, + AHD_TQINFIFO_BLOCKED = 0x01000,/* Blocked waiting for ATIOs */ + AHD_INT50_SPEEDFLEX = 0x02000,/* + * Internal 50pin connector + * sits behind an aic3860 + */ + AHD_BIOS_ENABLED = 0x04000, + AHD_ALL_INTERRUPTS = 0x08000, + AHD_39BIT_ADDRESSING = 0x10000,/* Use 39 bit addressing scheme. */ + AHD_64BIT_ADDRESSING = 0x20000,/* Use 64 bit addressing scheme. 
 */
+	AHD_CURRENT_SENSING = 0x40000,
+	AHD_SCB_CONFIG_USED = 0x80000,/* No SEEPROM but SCB had info. */
+	AHD_CPQ_BOARD = 0x100000
+} ahd_flag;
+
+/************************* Hardware SCB Definition ***************************/
+
+/*
+ * The driver keeps up to MAX_SCB scb structures per card in memory.  The SCB
+ * consists of a "hardware SCB" mirroring the fields available on the card
+ * and additional information the kernel stores for each transaction.
+ *
+ * To minimize space utilization, a portion of the hardware scb stores
+ * different data during different portions of a SCSI transaction.
+ * As initialized by the host driver for the initiator role, this area
+ * contains the SCSI cdb (or a pointer to the cdb) to be executed.  After
+ * the cdb has been presented to the target, this area serves to store
+ * residual transfer information and the SCSI status byte.
+ * For the target role, the contents of this area do not change, but
+ * still serve a different purpose than for the initiator role.  See
+ * struct target_data for details.
+ */
+
+/*
+ * Status information embedded in the shared portion of
+ * an SCB after passing the cdb to the target.  The kernel
+ * driver will only read this data for transactions that
+ * complete abnormally.
+ */
+struct initiator_status {
+	uint32_t residual_datacnt;	/* Residual in the current S/G seg */
+	uint32_t residual_sgptr;	/* The next S/G for this transfer */
+	uint8_t scsi_status;		/* Standard SCSI status byte */
+};
+
+struct target_status {
+	uint32_t residual_datacnt;	/* Residual in the current S/G seg */
+	uint32_t residual_sgptr;	/* The next S/G for this transfer */
+	uint8_t scsi_status;		/* SCSI status to give to initiator */
+	uint8_t target_phases;		/* Bitmap of phases to execute */
+	uint8_t data_phase;		/* Data-In or Data-Out */
+	uint8_t initiator_tag;		/* Initiator's transaction tag */
+};
+
+/*
+ * Initiator mode SCB shared data area.
+ * If the embedded CDB is 12 bytes or less, we embed
+ * the sense buffer address in the SCB.  This allows
+ * us to retrieve sense information without interrupting
+ * the host in packetized mode.
+ */
+typedef uint32_t sense_addr_t;
+#define MAX_CDB_LEN 16
+#define MAX_CDB_LEN_WITH_SENSE_ADDR (MAX_CDB_LEN - sizeof(sense_addr_t))
+union initiator_data {
+	uint64_t cdbptr;
+	uint8_t cdb[MAX_CDB_LEN];
+	struct {
+		uint8_t cdb[MAX_CDB_LEN_WITH_SENSE_ADDR];
+		sense_addr_t sense_addr;
+	} cdb_plus_saddr;
+};
+
+/*
+ * Target mode version of the shared data SCB segment.
+ */
+struct target_data {
+	uint32_t spare[2];
+	uint8_t scsi_status;		/* SCSI status to give to initiator */
+	uint8_t target_phases;		/* Bitmap of phases to execute */
+	uint8_t data_phase;		/* Data-In or Data-Out */
+	uint8_t initiator_tag;		/* Initiator's transaction tag */
+};
+
+struct hardware_scb {
+/*0*/	union {
+		union initiator_data idata;
+		struct target_data tdata;
+		struct initiator_status istatus;
+		struct target_status tstatus;
+	} shared_data;
+/*
+ * A word about residuals.
+ * The scb is presented to the sequencer with the dataptr and datacnt
+ * fields initialized to the contents of the first S/G element to
+ * transfer.  The sgptr field is initialized to the bus address for
+ * the S/G element that follows the first in the in core S/G array
+ * or'ed with the SG_FULL_RESID flag.  Sgptr may point to an invalid
+ * S/G entry for this transfer (single S/G element transfer with the
+ * first element's address and length preloaded in the dataptr/datacnt
+ * fields).  If no transfer is to occur, sgptr is set to SG_LIST_NULL.
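+ * (For example, a two segment transfer is handed to the sequencer with
+ * dataptr/datacnt describing the first segment and sgptr holding the
+ * bus address of the second entry in the S/G array.)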
+ * The SG_FULL_RESID flag ensures that the residual will be correctly
+ * noted even if no data transfers occur.  Once the data phase is entered,
+ * the residual sgptr and datacnt are loaded from the sgptr and the
+ * datacnt fields.  After each S/G element's dataptr and length are
+ * loaded into the hardware, the residual sgptr is advanced.  After
+ * each S/G element is expired, its datacnt field is checked to see
+ * if the LAST_SEG flag is set.  If so, SG_LIST_NULL is set in the
+ * residual sg ptr and the transfer is considered complete.  If the
+ * sequencer determines that there is a residual in the transfer, or
+ * there is non-zero status, it will set the SG_STATUS_VALID flag in
+ * sgptr and dma the scb back into host memory.  To summarize:
+ *
+ * Sequencer:
+ *	o A residual has occurred if SG_FULL_RESID is set in sgptr,
+ *	  or residual_sgptr does not have SG_LIST_NULL set.
+ *
+ *	o We are transferring the last segment if residual_datacnt has
+ *	  the SG_LAST_SEG flag set.
+ *
+ * Host:
+ *	o A residual can only have occurred if a completed scb has the
+ *	  SG_STATUS_VALID flag set.  Inspection of the SCSI status field,
+ *	  the residual_datacnt, and the residual_sgptr field will tell
+ *	  for sure.
+ *
+ *	o residual_sgptr and sgptr refer to the "next" sg entry
+ *	  and so may point beyond the last valid sg entry for the
+ *	  transfer.
+ */
+#define SG_PTR_MASK 0xFFFFFFF8
+/*16*/	uint8_t cdb_len;
+/*17*/	uint8_t task_management;
+/*18*/	uint16_t tag;
+/*20*/	uint32_t next_hscb_busaddr;
+/*24*/	uint64_t dataptr;
+/*32*/	uint32_t datacnt;	/* Byte 3 is spare. */
+/*36*/	uint32_t sgptr;
+/*40*/	uint8_t control;	/* See SCB_CONTROL in aic79xx.reg for details */
+/*41*/	uint8_t scsiid;		/*
+				 * Selection out Id
+				 * Our Id (bits 0-3) Their ID (bits 4-7)
+				 */
+/*42*/	uint8_t lun;
+/*43*/	uint8_t task_attribute_nonpkt_tag;
+/*44*/	uint32_t hscb_busaddr;
+/******* Fields below are not Downloaded (Sequencer may use for scratch) ******/
+/*48*/	uint8_t spare[16];
+};
+
+/************************ Kernel SCB Definitions ******************************/
+/*
+ * Some fields of the SCB are OS dependent.  Here we collect the
+ * definitions for elements that all OS platforms need to include
+ * in their SCB definition.
+ */
+
+/*
+ * Definition of a scatter/gather element as transferred to the controller.
+ * The aic7xxx chips only support a 24bit length.  We use the top byte of
+ * the length to store additional address bits and a flag to indicate
+ * that a given segment terminates the transfer.  This gives us an
+ * addressable range of 512GB on machines with 64bit PCI or with chips
+ * that can support dual address cycles on 32bit PCI busses.
+ */
+struct ahd_dma_seg {
+	uint32_t addr;
+	uint32_t len;
+#define AHD_DMA_LAST_SEG 0x80000000
+#define AHD_SG_HIGH_ADDR_MASK 0x7F000000
+#define AHD_SG_LEN_MASK 0x00FFFFFF
+};
+
+struct ahd_dma64_seg {
+	uint64_t addr;
+	uint32_t len;
+	uint32_t pad;
+};
+
+struct map_node {
+	bus_dmamap_t dmamap;
+	bus_addr_t physaddr;
+	uint8_t *vaddr;
+	SLIST_ENTRY(map_node) links;
+};
+
+/*
+ * The current state of this SCB.
+ */
+typedef enum {
+	SCB_FREE = 0x0000,
+	SCB_TRANSMISSION_ERROR = 0x0001,/*
+					 * We detected a parity or CRC
+					 * error that has affected the
+					 * payload of the command.  This
+					 * flag is checked when normal
+					 * status is returned to catch
+					 * the case of a target not
+					 * responding to our attempt
+					 * to report the error.
+ */ + SCB_OTHERTCL_TIMEOUT = 0x0002,/* + * Another device was active + * during the first timeout for + * this SCB so we gave ourselves + * an additional timeout period + * in case it was hogging the + * bus. + */ + SCB_DEVICE_RESET = 0x0004, + SCB_SENSE = 0x0008, + SCB_CDB32_PTR = 0x0010, + SCB_RECOVERY_SCB = 0x0020, + SCB_AUTO_NEGOTIATE = 0x0040,/* Negotiate to achieve goal. */ + SCB_NEGOTIATE = 0x0080,/* Negotiation forced for command. */ + SCB_ABORT = 0x0100, + SCB_UNTAGGEDQ = 0x0200, + SCB_ACTIVE = 0x0400, + SCB_TARGET_IMMEDIATE = 0x0800, + SCB_PACKETIZED = 0x1000, + SCB_EXPECT_PPR_BUSFREE = 0x2000, + SCB_PKT_SENSE = 0x4000, + SCB_CMDPHASE_ABORT = 0x8000 +} scb_flag; + +struct scb { + struct hardware_scb *hscb; + union { + SLIST_ENTRY(scb) sle; + TAILQ_ENTRY(scb) tqe; + } links; + LIST_ENTRY(scb) pending_links; + ahd_io_ctx_t io_ctx; + struct ahd_softc *ahd_softc; + scb_flag flags; +#ifndef __linux__ + bus_dmamap_t dmamap; +#endif + struct scb_platform_data *platform_data; + struct map_node *hscb_map; + struct map_node *sg_map; + struct map_node *sense_map; + void *sg_list; + uint8_t *sense_data; + bus_addr_t sg_list_busaddr; + bus_addr_t sense_busaddr; + u_int sg_count;/* How full ahd_dma_seg is */ +}; + +struct scb_data { + SLIST_HEAD(, scb) free_scbs; /* + * Pool of SCBs ready to be assigned + * commands to execute. + */ + struct scb *scbindex[AHD_SCB_MAX]; + /* + * Mapping from tag to SCB. + */ + /* + * "Bus" addresses of our data structures. + */ + bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */ + bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ + bus_dma_tag_t sense_dmat; /* dmat for our sense buffers */ + SLIST_HEAD(, map_node) hscb_maps; + SLIST_HEAD(, map_node) sg_maps; + SLIST_HEAD(, map_node) sense_maps; + int scbs_left; /* unallocated scbs in head map_node */ + int sgs_left; /* unallocated sgs in head map_node */ + int sense_left; /* unallocated sense in head map_node */ + uint16_t numscbs; + uint16_t maxhscbs; /* Number of SCBs on the card */ + uint8_t init_level; /* + * How far we've initialized + * this structure. + */ +}; + +/************************ Target Mode Definitions *****************************/ + +/* + * Connection desciptor for select-in requests in target mode. + */ +struct target_cmd { + uint8_t scsiid; /* Our ID and the initiator's ID */ + uint8_t identify; /* Identify message */ + uint8_t bytes[22]; /* + * Bytes contains any additional message + * bytes terminated by 0xFF. The remainder + * is the cdb to execute. + */ + uint8_t cmd_valid; /* + * When a command is complete, the firmware + * will set cmd_valid to all bits set. + * After the host has seen the command, + * the bits are cleared. This allows us + * to just peek at host memory to determine + * if more work is complete. cmd_valid is on + * an 8 byte boundary to simplify setting + * it on aic7880 hardware which only has + * limited direct access to the DMA FIFO. + */ + uint8_t pad[7]; +}; + +/* + * Number of events we can buffer up if we run out + * of immediate notify ccbs. + */ +#define AHD_TMODE_EVENT_BUFFER_SIZE 8 +struct ahd_tmode_event { + uint8_t initiator_id; + uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */ +#define EVENT_TYPE_BUS_RESET 0xFF + uint8_t event_arg; +}; + +/* + * Per enabled lun target mode state. + * As this state is directly influenced by the host OS'es target mode + * environment, we let the OS module define it. Forward declare the + * structure here so we can store arrays of them, etc. in OS neutral + * data structures. 
+ */ +#ifdef AHD_TARGET_MODE +struct ahd_tmode_lstate { + struct cam_path *path; + struct ccb_hdr_slist accept_tios; + struct ccb_hdr_slist immed_notifies; + struct ahd_tmode_event event_buffer[AHD_TMODE_EVENT_BUFFER_SIZE]; + uint8_t event_r_idx; + uint8_t event_w_idx; +}; +#else +struct ahd_tmode_lstate; +#endif + +/******************** Transfer Negotiation Datastructures *********************/ +#define AHD_TRANS_CUR 0x01 /* Modify current neogtiation status */ +#define AHD_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */ +#define AHD_TRANS_GOAL 0x04 /* Modify negotiation goal */ +#define AHD_TRANS_USER 0x08 /* Modify user negotiation settings */ +#define AHD_PERIOD_ASYNC 0xFF +#define AHD_PERIOD_10MHz 0x19 + +/* + * Transfer Negotiation Information. + */ +struct ahd_transinfo { + uint8_t protocol_version; /* SCSI Revision level */ + uint8_t transport_version; /* SPI Revision level */ + uint8_t width; /* Bus width */ + uint8_t period; /* Sync rate factor */ + uint8_t offset; /* Sync offset */ + uint8_t ppr_options; /* Parallel Protocol Request options */ +}; + +/* + * Per-initiator current, goal and user transfer negotiation information. */ +struct ahd_initiator_tinfo { + struct ahd_transinfo curr; + struct ahd_transinfo goal; + struct ahd_transinfo user; +}; + +/* + * Per enabled target ID state. + * Pointers to lun target state as well as sync/wide negotiation information + * for each initiator<->target mapping. For the initiator role we pretend + * that we are the target and the targets are the initiators since the + * negotiation is the same regardless of role. + */ +struct ahd_tmode_tstate { + struct ahd_tmode_lstate* enabled_luns[AHD_NUM_LUNS]; + struct ahd_initiator_tinfo transinfo[AHD_NUM_TARGETS]; + + /* + * Per initiator state bitmasks. + */ + uint16_t auto_negotiate;/* Auto Negotiation Required */ + uint16_t discenable; /* Disconnection allowed */ + uint16_t tagenable; /* Tagged Queuing allowed */ +}; + +/* + * Points of interest along the negotiated transfer scale. + */ +#define AHD_SYNCRATE_MAX 0x8 +#define AHD_SYNCRATE_160 0x8 +#define AHD_SYNCRATE_PACED 0x8 +#define AHD_SYNCRATE_DT 0x9 +#define AHD_SYNCRATE_ULTRA2 0xa +#define AHD_SYNCRATE_ULTRA 0xc +#define AHD_SYNCRATE_FAST 0x19 +#define AHD_SYNCRATE_MIN_DT AHD_SYNCRATE_FAST +#define AHD_SYNCRATE_SYNC 0x32 +#define AHD_SYNCRATE_MIN 0x60 +#define AHD_SYNCRATE_ASYNC 0xFF + +/* + * In RevA, the synctable uses a 120MHz rate for the period + * factor 8 and 160MHz for the period factor 7. The 120MHz + * rate never made it into the official SCSI spec, so we must + * compensate when setting the negotiation table for Rev A + * parts. + */ +#define AHD_SYNCRATE_REVA_120 0x8 +#define AHD_SYNCRATE_REVA_160 0x7 + +/***************************** Lookup Tables **********************************/ +/* + * Phase -> name and message out response + * to parity errors in each phase table. 
+ */ +struct ahd_phase_table_entry { + uint8_t phase; + uint8_t mesg_out; /* Message response to parity errors */ + char *phasemsg; +}; + +/************************** Serial EEPROM Format ******************************/ + +struct seeprom_config { +/* + * Per SCSI ID Configuration Flags + */ + uint16_t device_flags[16]; /* words 0-15 */ +#define CFXFER 0x003F /* synchronous transfer rate */ +#define CFXFER_ASYNC 0x3F +#define CFQAS 0x0040 /* Negotiate QAS */ +#define CFPACKETIZED 0x0080 /* Negotiate Packetized Transfers */ +#define CFSTART 0x0100 /* send start unit SCSI command */ +#define CFINCBIOS 0x0200 /* include in BIOS scan */ +#define CFDISC 0x0400 /* enable disconnection */ +#define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */ +#define CFWIDEB 0x1000 /* wide bus device */ +#define CFHOSTMANAGED 0x8000 /* Managed by a RAID controller */ + +/* + * BIOS Control Bits + */ + uint16_t bios_control; /* word 16 */ +#define CFSUPREM 0x0001 /* support all removeable drives */ +#define CFSUPREMB 0x0002 /* support removeable boot drives */ +#define CFBIOSSTATE 0x000C /* BIOS Action State */ +#define CFBS_DISABLED 0x00 +#define CFBS_ENABLED 0x04 +#define CFBS_DISABLED_SCAN 0x08 +#define CFENABLEDV 0x0010 /* Perform Domain Validation */ +#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */ +#define CFSPARITY 0x0040 /* SCSI parity */ +#define CFEXTEND 0x0080 /* extended translation enabled */ +#define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */ +#define CFMSG_LEVEL 0x0600 /* BIOS Message Level */ +#define CFMSG_VERBOSE 0x0000 +#define CFMSG_SILENT 0x0200 +#define CFMSG_DIAG 0x0400 +#define CFRESETB 0x0800 /* reset SCSI bus at boot */ +/* UNUSED 0xf000 */ + +/* + * Host Adapter Control Bits + */ + uint16_t adapter_control; /* word 17 */ +#define CFAUTOTERM 0x0001 /* Perform Auto termination */ +#define CFSTERM 0x0002 /* SCSI low byte termination */ +#define CFWSTERM 0x0004 /* SCSI high byte termination */ +#define CFSEAUTOTERM 0x0008 /* Ultra2 Perform secondary Auto Term*/ +#define CFSELOWTERM 0x0010 /* Ultra2 secondary low term */ +#define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */ +#define CFSTPWLEVEL 0x0040 /* Termination level control */ +#define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */ +#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */ +#define CFCLUSTERENB 0x8000 /* Cluster Enable */ + +/* + * Bus Release Time, Host Adapter ID + */ + uint16_t brtime_id; /* word 18 */ +#define CFSCSIID 0x000f /* host adapter SCSI ID */ +/* UNUSED 0x00f0 */ +#define CFBRTIME 0xff00 /* bus release time/PCI Latency Time */ + +/* + * Maximum targets + */ + uint16_t max_targets; /* word 19 */ +#define CFMAXTARG 0x00ff /* maximum targets */ +#define CFBOOTLUN 0x0f00 /* Lun to boot from */ +#define CFBOOTID 0xf000 /* Target to boot from */ + uint16_t res_1[10]; /* words 20-29 */ + uint16_t signature; /* BIOS Signature */ +#define CFSIGNATURE 0x400 + uint16_t checksum; /* word 31 */ +}; + +/****************************** Flexport Logic ********************************/ +#define FLXADDR_TERMCTL 0x0 +#define FLX_TERMCTL_ENSECHIGH 0x8 +#define FLX_TERMCTL_ENSECLOW 0x4 +#define FLX_TERMCTL_ENPRIHIGH 0x2 +#define FLX_TERMCTL_ENPRILOW 0x1 +#define FLXADDR_ROMSTAT_CURSENSECTL 0x1 +#define FLX_ROMSTAT_SEECFG 0xF0 +#define FLX_ROMSTAT_EECFG 0x0F +#define FLX_ROMSTAT_SEE_93C66 0x00 +#define FLX_ROMSTAT_SEE_NONE 0xF0 +#define FLX_ROMSTAT_EE_512x8 0x0 +#define FLX_ROMSTAT_EE_1MBx8 0x1 +#define FLX_ROMSTAT_EE_2MBx8 0x2 +#define FLX_ROMSTAT_EE_4MBx8 0x3 +#define 
FLX_ROMSTAT_EE_16MBx8 0x4 +#define CURSENSE_ENB 0x1 +#define FLXADDR_FLEXSTAT 0x2 +#define FLX_FSTAT_BUSY 0x1 +#define FLXADDR_CURRENT_STAT 0x4 +#define FLX_CSTAT_SEC_HIGH 0xC0 +#define FLX_CSTAT_SEC_LOW 0x30 +#define FLX_CSTAT_PRI_HIGH 0x0C +#define FLX_CSTAT_PRI_LOW 0x03 +#define FLX_CSTAT_MASK 0x03 +#define FLX_CSTAT_SHIFT 2 +#define FLX_CSTAT_OKAY 0x0 +#define FLX_CSTAT_OVER 0x1 +#define FLX_CSTAT_UNDER 0x2 +#define FLX_CSTAT_INVALID 0x3 + +int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, + u_int start_addr, u_int count); + +int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, + u_int start_addr, u_int count); +int ahd_wait_seeprom(struct ahd_softc *ahd); +int ahd_verify_cksum(struct seeprom_config *sc); +int ahd_acquire_seeprom(struct ahd_softc *ahd); +void ahd_release_seeprom(struct ahd_softc *ahd); + +/**************************** Message Buffer *********************************/ +typedef enum { + MSG_FLAG_NONE = 0x00, + MSG_FLAG_EXPECT_PPR_BUSFREE = 0x01, + MSG_FLAG_IU_REQ_CHANGED = 0x02, + MSG_FLAG_EXPECT_IDE_BUSFREE = 0x04, + MSG_FLAG_PACKETIZED = 0x08 +} ahd_msg_flags; + +typedef enum { + MSG_TYPE_NONE = 0x00, + MSG_TYPE_INITIATOR_MSGOUT = 0x01, + MSG_TYPE_INITIATOR_MSGIN = 0x02, + MSG_TYPE_TARGET_MSGOUT = 0x03, + MSG_TYPE_TARGET_MSGIN = 0x04 +} ahd_msg_type; + +typedef enum { + MSGLOOP_IN_PROG, + MSGLOOP_MSGCOMPLETE, + MSGLOOP_TERMINATED +} msg_loop_stat; + +/*********************** Software Configuration Structure *********************/ +TAILQ_HEAD(scb_tailq, scb); + +struct ahd_suspend_channel_state { + uint8_t scsiseq; + uint8_t sxfrctl0; + uint8_t sxfrctl1; + uint8_t simode0; + uint8_t simode1; + uint8_t seltimer; + uint8_t seqctl; +}; + +struct ahd_suspend_state { + struct ahd_suspend_channel_state channel[2]; + uint8_t optionmode; + uint8_t dscommand0; + uint8_t dspcistatus; + /* hsmailbox */ + uint8_t crccontrol1; + uint8_t scbbaddr; + /* Host and sequencer SCB counts */ + uint8_t dff_thrsh; + uint8_t *scratch_ram; + uint8_t *btt; +}; + +typedef void (*ahd_bus_intr_t)(struct ahd_softc *); + +typedef enum { + AHD_MODE_DFF0, + AHD_MODE_DFF1, + AHD_MODE_CCHAN, + AHD_MODE_SCSI, + AHD_MODE_CFG, + AHD_MODE_UNKNOWN +} ahd_mode; + +#define AHD_MK_MSK(x) (0x01 << (x)) +#define AHD_MODE_DFF0_MSK AHD_MK_MSK(AHD_MODE_DFF0) +#define AHD_MODE_DFF1_MSK AHD_MK_MSK(AHD_MODE_DFF1) +#define AHD_MODE_CCHAN_MSK AHD_MK_MSK(AHD_MODE_CCHAN) +#define AHD_MODE_SCSI_MSK AHD_MK_MSK(AHD_MODE_SCSI) +#define AHD_MODE_CFG_MSK AHD_MK_MSK(AHD_MODE_CFG) +#define AHD_MODE_UNKNOWN_MSK AHD_MK_MSK(AHD_MODE_UNKNOWN) +#define AHD_MODE_ANY_MSK (~0) + +typedef uint8_t ahd_mode_state; + +typedef void ahd_callback_t (void *); + +struct ahd_softc { + bus_space_tag_t tags[2]; + bus_space_handle_t bshs[2]; +#ifndef __linux__ + bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */ +#endif + struct scb_data scb_data; + + struct scb *next_queued_scb; + + /* + * SCBs that have been sent to the controller + */ + LIST_HEAD(, scb) pending_scbs; + + /* + * Current register window mode information. + */ + ahd_mode dst_mode; + ahd_mode src_mode; + + /* + * Saved register window mode information + * used for restore on next unpause. + */ + ahd_mode saved_dst_mode; + ahd_mode saved_src_mode; + + /* + * Counting lock for deferring the release of additional + * untagged transactions from the untagged_queues. When + * the lock is decremented to 0, all queues in the + * untagged_queues array are run. + */ + u_int untagged_queue_lock; + + /* + * Per-target queue of untagged-transactions. 
The
+	 * transaction at the head of the queue is the
+	 * currently pending untagged transaction for the
+	 * target.  The driver only allows a single untagged
+	 * transaction per target.
+	 */
+	struct scb_tailq untagged_queues[AHD_NUM_TARGETS];
+
+	/*
+	 * Platform specific data.
+	 */
+	struct ahd_platform_data *platform_data;
+
+	/*
+	 * Platform specific device information.
+	 */
+	ahd_dev_softc_t dev_softc;
+
+	/*
+	 * Bus specific device information.
+	 */
+	ahd_bus_intr_t bus_intr;
+
+	/*
+	 * Target mode related state kept on a per enabled lun basis.
+	 * Targets that are not enabled will have null entries.
+	 * As an initiator, we keep one target entry for our initiator
+	 * ID to store our sync/wide transfer settings.
+	 */
+	struct ahd_tmode_tstate *enabled_targets[AHD_NUM_TARGETS];
+
+	/*
+	 * The black hole device responsible for handling requests for
+	 * disabled luns on enabled targets.
+	 */
+	struct ahd_tmode_lstate *black_hole;
+
+	/*
+	 * Device instance currently on the bus awaiting a continue TIO
+	 * for a command that was not given the disconnect privilege.
+	 */
+	struct ahd_tmode_lstate *pending_device;
+
+	/*
+	 * Timer handles for timer driven callbacks.
+	 */
+	ahd_timer_t reset_timer;
+
+	/*
+	 * Card characteristics
+	 */
+	ahd_chip chip;
+	ahd_feature features;
+	ahd_bug bugs;
+	ahd_flag flags;
+	struct seeprom_config *seep_config;
+
+	/* Values to store in the SEQCTL register for pause and unpause */
+	uint8_t unpause;
+	uint8_t pause;
+
+	/* Command Queues */
+	uint16_t qoutfifonext;
+	uint16_t qinfifonext;
+	uint16_t qinfifo[AHD_SCB_MAX];
+	uint16_t *qoutfifo;
+
+	/* Critical Section Data */
+	struct cs *critical_sections;
+	u_int num_critical_sections;
+
+	/* Buffer for handling packetized bitbucket. */
+	uint8_t *overrun_buf;
+
+	/* Links for chaining softcs */
+	TAILQ_ENTRY(ahd_softc) links;
+
+	/* Channel Names ('A', 'B', etc.) */
+	char channel;
+
+	/* Initiator Bus ID */
+	uint8_t our_id;
+
+	/*
+	 * PCI error detection.
+	 */
+	int unsolicited_ints;
+
+	/*
+	 * Target incoming command FIFO.
+	 */
+	struct target_cmd *targetcmds;
+	uint8_t tqinfifonext;
+
+	/*
+	 * Incoming and outgoing message handling.
+	 */
+	uint8_t send_msg_perror;
+	ahd_msg_flags msg_flags;
+	ahd_msg_type msg_type;
+	uint8_t msgout_buf[12];/* Message we are sending */
+	uint8_t msgin_buf[12];/* Message we are receiving */
+	u_int msgout_len;	/* Length of message to send */
+	u_int msgout_index;	/* Current index in msgout */
+	u_int msgin_index;	/* Current index in msgin */
+
+	/*
+	 * Mapping information for data structures shared
+	 * between the sequencer and kernel.
+	 */
+	bus_dma_tag_t parent_dmat;
+	bus_dma_tag_t shared_data_dmat;
+	bus_dmamap_t shared_data_dmamap;
+	bus_addr_t shared_data_busaddr;
+
+	/* Information saved through suspend/resume cycles */
+	struct ahd_suspend_state suspend_state;
+
+	/* Number of enabled target mode devices on this card */
+	u_int enabled_luns;
+
+	/* Initialization level of this data structure */
+	u_int init_level;
+
+	/* PCI cacheline size.
*/ + u_int pci_cachesize; + + /* Per-Unit descriptive information */ + const char *description; + const char *bus_description; + char *name; + int unit; + + /* Selection Timer settings */ + int seltime; + + uint16_t user_discenable;/* Disconnection allowed */ + uint16_t user_tagenable;/* Tagged Queuing allowed */ +}; + +TAILQ_HEAD(ahd_softc_tailq, ahd_softc); +extern struct ahd_softc_tailq ahd_tailq; + +/************************ Active Device Information ***************************/ +typedef enum { + ROLE_UNKNOWN, + ROLE_INITIATOR, + ROLE_TARGET +} role_t; + +struct ahd_devinfo { + int our_scsiid; + int target_offset; + uint16_t target_mask; + u_int target; + u_int lun; + char channel; + role_t role; /* + * Only guaranteed to be correct if not + * in the busfree state. + */ +}; + +/****************************** PCI Structures ********************************/ +#define AHD_PCI_IOADDR0 PCIR_MAPS /* I/O BAR*/ +#define AHD_PCI_MEMADDR (PCIR_MAPS + 4) /* Memory BAR */ +#define AHD_PCI_IOADDR1 (PCIR_MAPS + 12)/* Second I/O BAR */ + +typedef int (ahd_device_setup_t)(struct ahd_softc *); + +struct ahd_pci_identity { + uint64_t full_id; + uint64_t id_mask; + char *name; + ahd_device_setup_t *setup; +}; +extern struct ahd_pci_identity ahd_pci_ident_table []; +extern const u_int ahd_num_pci_devs; + +/***************************** VL/EISA Declarations ***************************/ +struct aic7770_identity { + uint32_t full_id; + uint32_t id_mask; + char *name; + ahd_device_setup_t *setup; +}; +extern struct aic7770_identity aic7770_ident_table []; +extern const int ahd_num_aic7770_devs; + +#define AHD_EISA_SLOT_OFFSET 0xc00 +#define AHD_EISA_IOSIZE 0x100 + +/*************************** Function Declarations ****************************/ +/******************************************************************************/ +u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); +void ahd_set_disconnected_list(struct ahd_softc *ahd, + u_int target, u_int lun, + u_int scbid); +void ahd_busy_tcl(struct ahd_softc *ahd, + u_int tcl, u_int busyid); +static __inline void ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl); +static __inline void +ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl) +{ + ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL); +} + +/***************************** PCI Front End *********************************/ +struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t); +int ahd_pci_config(struct ahd_softc *, + struct ahd_pci_identity *); + +/*************************** EISA/VL Front End ********************************/ +struct aic7770_identity *aic7770_find_device(uint32_t); +int aic7770_config(struct ahd_softc *ahd, + struct aic7770_identity *); + +/************************** SCB and SCB queue management **********************/ +int ahd_probe_scbs(struct ahd_softc *); +void ahd_run_untagged_queues(struct ahd_softc *ahd); +void ahd_run_untagged_queue(struct ahd_softc *ahd, + struct scb_tailq *queue); +void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, + struct scb *scb); +int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, + int target, char channel, int lun, + u_int tag, role_t role); + +/****************************** Initialization ********************************/ +struct ahd_softc *ahd_alloc(void *platform_arg, char *name); +int ahd_softc_init(struct ahd_softc *); +void ahd_controller_info(struct ahd_softc *ahd, char *buf); +int ahd_init(struct ahd_softc *ahd); +int ahd_default_config(struct ahd_softc *ahd); +int ahd_parse_cfgdata(struct ahd_softc *ahd, + struct seeprom_config *sc); 
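+/*
+ * Illustrative bring-up order (a sketch only; the bus front ends and
+ * the OSM interleave their own platform setup between these steps):
+ *
+ *	ahd = ahd_alloc(dev, name);
+ *	ahd_pci_config(ahd, ahd_find_pci_device(dev));
+ *	ahd_init(ahd);
+ *	ahd_intr_enable(ahd, TRUE);
+ */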
+void ahd_intr_enable(struct ahd_softc *ahd, int enable); +void ahd_pause_and_flushwork(struct ahd_softc *ahd); +int ahd_suspend(struct ahd_softc *ahd); +int ahd_resume(struct ahd_softc *ahd); +void ahd_softc_insert(struct ahd_softc *); +struct ahd_softc *ahd_find_softc(struct ahd_softc *ahd); +void ahd_set_unit(struct ahd_softc *, int); +void ahd_set_name(struct ahd_softc *, char *); +void ahd_alloc_scbs(struct ahd_softc *ahd); +void ahd_free(struct ahd_softc *ahd); +int ahd_reset(struct ahd_softc *ahd); +void ahd_shutdown(void *arg); +int ahd_write_flexport(struct ahd_softc *ahd, + u_int addr, u_int value); +int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, + uint8_t *value); +int ahd_wait_flexport(struct ahd_softc *ahd); + +/*************************** Interrupt Services *******************************/ +void ahd_pci_intr(struct ahd_softc *ahd); +void ahd_clear_intstat(struct ahd_softc *ahd); +void ahd_run_qoutfifo(struct ahd_softc *ahd); +#ifdef AHD_TARGET_MODE +void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused); +#endif +void ahd_handle_hwerrint(struct ahd_softc *ahd); +void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat); +void ahd_handle_scsiint(struct ahd_softc *ahd, + u_int intstat); +void ahd_clear_critical_section(struct ahd_softc *ahd); + +/***************************** Error Recovery *********************************/ +typedef enum { + SEARCH_COMPLETE, + SEARCH_COUNT, + SEARCH_REMOVE, + SEARCH_PRINT +} ahd_search_action; +int ahd_search_qinfifo(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status, + ahd_search_action action); +int ahd_search_disc_list(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + int stop_on_first, int remove, + int save_state); +void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb); +int ahd_reset_channel(struct ahd_softc *ahd, char channel, + int initiate_reset); +int ahd_abort_scbs(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status); +void ahd_restart(struct ahd_softc *ahd); +void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo); +void ahd_handle_scb_status(struct ahd_softc *ahd, + struct scb *scb); +void ahd_handle_scsi_status(struct ahd_softc *ahd, + struct scb *scb); +void ahd_calc_residual(struct ahd_softc *ahd, + struct scb *scb); +/*************************** Utility Functions ********************************/ +struct ahd_phase_table_entry* + ahd_lookup_phase_entry(int phase); +void ahd_compile_devinfo(struct ahd_devinfo *devinfo, + u_int our_id, u_int target, + u_int lun, char channel, + role_t role); +/************************** Transfer Negotiation ******************************/ +void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, + u_int *ppr_options, u_int maxsync); +void ahd_validate_offset(struct ahd_softc *ahd, + struct ahd_initiator_tinfo *tinfo, + u_int period, u_int *offset, + int wide, role_t role); +void ahd_validate_width(struct ahd_softc *ahd, + struct ahd_initiator_tinfo *tinfo, + u_int *bus_width, + role_t role); +int ahd_update_neg_request(struct ahd_softc*, + struct ahd_devinfo*, + struct ahd_tmode_tstate*, + struct ahd_initiator_tinfo*, + int /*force*/); +void ahd_set_width(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int width, u_int type, int paused); +void ahd_set_syncrate(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + u_int period, u_int offset, + u_int ppr_options, + u_int type, int paused); +typedef enum { + AHD_QUEUE_NONE, + 
AHD_QUEUE_BASIC, + AHD_QUEUE_TAGGED +} ahd_queue_alg; + +void ahd_set_tags(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, + ahd_queue_alg alg); + +/**************************** Target Mode *************************************/ +#ifdef AHD_TARGET_MODE +void ahd_send_lstate_events(struct ahd_softc *, + struct ahd_tmode_lstate *); +void ahd_handle_en_lun(struct ahd_softc *ahd, + struct cam_sim *sim, union ccb *ccb); +cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, + struct cam_sim *sim, union ccb *ccb, + struct ahd_tmode_tstate **tstate, + struct ahd_tmode_lstate **lstate, + int notfound_failure); +#ifndef AHD_TMODE_ENABLE +#define AHD_TMODE_ENABLE 0 +#endif +#endif +/******************************* Debug ***************************************/ +#ifdef AHD_DEBUG +extern uint32_t ahd_debug; +#define AHD_SHOW_MISC 0x0001 +#define AHD_SHOW_SENSE 0x0002 +#define AHD_DUMP_SEEPROM 0x0004 +#define AHD_SHOW_TERMCTL 0x0008 +#define AHD_SHOW_MEMORY 0x0010 +#define AHD_SHOW_MESSAGES 0x0020 +#define AHD_SHOW_MODEPTR 0x0040 +#define AHD_SHOW_SELTO 0x0080 +#define AHD_SHOW_FIFOS 0x0100 +#define AHD_SHOW_QFULL 0x0200 +#define AHD_SHOW_QUEUE 0x0400 +#define AHD_SHOW_TQIN 0x0800 +#define AHD_DEBUG_SEQUENCER 0x1000 +#endif +void ahd_print_scb(struct scb *scb); +void ahd_dump_sglist(struct scb *scb); +void ahd_dump_all_cards_state(void); +void ahd_dump_card_state(struct ahd_softc *ahd); +void ahd_dump_scbs(struct ahd_softc *ahd); +#endif /* _AIC79XX_H_ */ diff --git a/sys/dev/aic7xxx/aic79xx.reg b/sys/dev/aic7xxx/aic79xx.reg new file mode 100644 index 0000000..b681bd3 --- /dev/null +++ b/sys/dev/aic7xxx/aic79xx.reg @@ -0,0 +1,3716 @@ +/* + * Aic79xx register and scratch ram definitions. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#34 $" + +/* + * This file is processed by the aic7xxx_asm utility for use in assembling + * firmware for the aic79xx family of SCSI host adapters as well as to generate + * a C header file for use in the kernel portion of the Aic79xx driver. + */ + +/* Register window Modes */ +#define M_DFF0 0 +#define M_DFF1 1 +#define M_CCHAN 2 +#define M_SCSI 3 +#define M_CFG 4 +#define M_DST_SHIFT 4 + +#define MK_MODE(src, dst) ((src) | ((dst) << M_DST_SHIFT)) +#define SET_MODE(src, dst) \ + SET_SRC_MODE src; \ + SET_DST_MODE dst; \ + mvi MK_MODE(src, dst) call set_mode_work_around + +/* + * Mode Pointer + * Controls which of the 5, 512byte, address spaces should be used + * as the source and destination of any register accesses in our + * register window. + */ +register MODE_PTR { + address 0x000 + access_mode RW + mask DST_MODE 0x70 + mask SRC_MODE 0x07 + mode_pointer +} + +const SRC_MODE_SHIFT 0 +const DST_MODE_SHIFT 4 + +/* + * Host Interrupt Status + */ +register INTSTAT { + address 0x001 + access_mode RW + bit HWERRINT 0x80 + bit BRKADRINT 0x40 + bit SWTMINT 0x20 + bit PCIINT 0x10 + bit SCSIINT 0x08 + bit SEQINT 0x04 + bit CMDCMPLT 0x02 + bit SPLTINT 0x01 + mask INT_PEND 0xFF +} + +/* + * Sequencer Interrupt Code + */ +register SEQINTCODE { + address 0x002 + access_mode RW + mask BAD_PHASE 1 /* unknown scsi bus phase */ + mask SEND_REJECT 2 /* sending a message reject */ + mask PROTO_VIOLATION 3 /* Protocol Violation */ + mask NO_MATCH 4 /* no cmd match for reconnect */ + mask IGN_WIDE_RES 5 /* Complex IGN Wide Res Msg */ + mask PDATA_REINIT 6 /* + * Returned to data phase + * that requires data + * transfer pointers to be + * recalculated from the + * transfer residual. + */ + mask HOST_MSG_LOOP 7 /* + * The bus is ready for the + * host to perform another + * message transaction. This + * mechanism is used for things + * like sync/wide negotiation + * that require a kernel based + * message state engine. + */ + mask BAD_STATUS 8 /* Bad status from target */ + mask DATA_OVERRUN 9 /* + * Target attempted to write + * beyond the bounds of its + * command. + */ + mask MKMSG_FAILED 10 /* + * Target completed command + * without honoring our ATN + * request to issue a message. + */ + mask MISSED_BUSFREE 11 /* + * The sequencer never saw + * the bus go free after + * either a command complete + * or disconnect message. + */ + mask SCB_MISMATCH 12 /* + * Downloaded SCB's tag does + * not match the entry we + * intended to download. + */ + mask NO_FREE_SCB 13 /* + * get_free_or_disc_scb failed. 
+ */ + mask OUT_OF_RANGE 14 + mask NO_FREE_FIFO 15 + mask DUMP_CARD_STATE 16 + mask ILLEGAL_PHASE 17 + mask INVALID_SEQINT 18 + mask CFG4ISTAT_INTR 19 + mask STATUS_OVERRUN 20 + mask CFG4OVERRUN 21 + mask SNAPSHOTCLRCHN 22 + mask MONITORDRAIN 23 + mask ENTERING_NONPACK 24 + mask PCIX_ARBITOR_WW 25 +} + +/* + * Clear Host Interrupt + */ +register CLRINT { + address 0x003 + access_mode WO + bit CLRBRKADRINT 0x40 + bit CLRSWTMINT 0x20 + bit CLRSCSIINT 0x08 + bit CLRSEQINT 0x04 + bit CLRCMDINT 0x02 + bit CLRSPLTINT 0x01 +} + +/* + * Error Register + */ +register ERROR { + address 0x004 + access_mode RO + bit CIOPARERR 0x80 + bit MPARERR 0x20 + bit DPARERR 0x10 + bit SQPARERR 0x08 + bit ILLOPCODE 0x04 + bit DSCTMOUT 0x02 +} + +/* + * Clear Error + */ +register CLRERR { + address 0x004 + access_mode WO + bit CLRCIOPARERR 0x80 + bit CLRMPARERR 0x20 + bit CLRDPARERR 0x10 + bit CLRSQPARERR 0x08 + bit CLRILLOPCODE 0x04 + bit CLRDSCTMOUT 0x02 +} + +/* + * Host Control Register + * Overall host control of the device. + */ +register HCNTRL { + address 0x005 + access_mode RW + bit POWRDN 0x40 + bit SWINT 0x10 + bit HCNTRL3 0x08 + bit PAUSE 0x04 + bit INTEN 0x02 + bit CHIPRST 0x01 + bit CHIPRSTACK 0x01 +} + +/* + * Host New SCB Queue Offset + */ +register HNSCB_QOFF { + address 0x006 + access_mode RW + size 2 +} + +/* + * Host Empty SCB Queue Offset + */ +register HESCB_QOFF { + address 0x008 + access_mode RW +} + +/* + * Host Mailbox + */ +register HS_MAILBOX { + address 0x0B + access_mode RW + mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ +} + +/* + * Sequencer Interupt Status + */ +register SEQINTSTAT { + address 0x0C + access_mode RO + bit SEQ_SWTMRTO 0x10 + bit SEQ_SEQINT 0x08 + bit SEQ_SCSIINT 0x04 + bit SEQ_PCIINT 0x02 + bit SEQ_SPLTINT 0x01 +} + +/* + * Clear SEQ Interrupt + */ +register CLRSEQINTSTAT { + address 0x0C0 + access_mode WO + bit CLRSEQ_SWTMRTO 0x10 + bit CLRSEQ_SEQINT 0x08 + bit CLRSEQ_SCSIINT 0x04 + bit CLRSEQ_PCIINT 0x02 + bit CLRSEQ_SPLTINT 0x01 +} + +/* + * Software Timer + */ +register SWTIMER { + address 0x0E0 + access_mode RW + size 2 +} + +/* + * SEQ New SCB Queue Offset + */ +register SNSCB_QOFF { + address 0x010 + access_mode RW + size 2 + modes M_CCHAN +} + +/* + * SEQ Empty SCB Queue Offset + */ +register SESCB_QOFF { + address 0x012 + access_mode RW + modes M_CCHAN +} + +/* + * SEQ Done SCB Queue Offset + */ +register SDSCB_QOFF { + address 0x014 + access_mode RW + modes M_CCHAN + size 2 +} + +/* + * Queue Offset Control & Status + */ +register QOFF_CTLSTA { + address 0x016 + access_mode RW + modes M_CCHAN + bit EMPTY_SCB_AVAIL 0x80 + bit NEW_SCB_AVAIL 0x40 + bit SDSCB_ROLLOVR 0x20 + bit HS_MAILBOX_ACT 0x10 + mask SCB_QSIZE 0x0F + mask SCB_QSIZE_4 0x00 + mask SCB_QSIZE_8 0x01 + mask SCB_QSIZE_16 0x02 + mask SCB_QSIZE_32 0x03 + mask SCB_QSIZE_64 0x04 + mask SCB_QSIZE_128 0x05 + mask SCB_QSIZE_256 0x06 + mask SCB_QSIZE_512 0x07 + mask SCB_QSIZE_1024 0x08 + mask SCB_QSIZE_2048 0x09 + mask SCB_QSIZE_4096 0x0A + mask SCB_QSIZE_8192 0x0B + mask SCB_QSIZE_16384 0x0C +} + +/* + * Interrupt Control + */ +register INTCTL { + address 0x018 + access_mode RW + bit SWTMINTMASK 0x80 + bit SWTMINTEN 0x40 + bit SWTIMER_START 0x20 + bit AUTOCLRCMDINT 0x10 + bit PCIINTEN 0x08 + bit SCSIINTEN 0x04 + bit SEQINTEN 0x02 + bit SPLTINTEN 0x01 +} + +/* + * Data FIFO Control + */ +register DFCNTRL { + address 0x019 + access_mode RW + modes M_DFF0, M_DFF1 + bit PRELOADEN 0x80 + bit SCSIEN 0x20 + bit SCSIENACK 0x20 + bit HDMAEN 0x08 + bit HDMAENACK 0x08 + bit DIRECTION 0x04 + bit 
DIRECTIONACK 0x04
+	bit FIFOFLUSH 0x02
+	bit FIFOFLUSHACK 0x02
+	bit DIRECTIONEN 0x01
+}
+
+/*
+ * Device Space Command 0
+ */
+register DSCOMMAND0 {
+	address 0x019
+	access_mode RW
+	modes M_CFG
+	bit CACHETHEN 0x80	/* Cache Threshold enable */
+	bit DPARCKEN 0x40	/* Data Parity Check Enable */
+	bit MPARCKEN 0x20	/* Memory Parity Check Enable */
+	bit EXTREQLCK 0x10	/* External Request Lock */
+	bit CIOPARCKEN 0x01	/* Internal bus parity error enable */
+}
+
+/*
+ * Data FIFO Status
+ */
+register DFSTATUS {
+	address 0x01A
+	access_mode RO
+	modes M_DFF0, M_DFF1
+	bit PRELOAD_AVAIL 0x80
+	bit PKT_PRELOAD_AVAIL 0x40
+	bit MREQPEND 0x10
+	bit HDONE 0x08
+	bit DFTHRESH 0x04
+	bit FIFOFULL 0x02
+	bit FIFOEMP 0x01
+}
+
+/*
+ * S/G Cache Pointer
+ */
+register SG_CACHE_PRE {
+	address 0x01B
+	access_mode WO
+	modes M_DFF0, M_DFF1
+	mask SG_ADDR_MASK 0xf8
+	bit ODD_SEG 0x04
+	bit LAST_SEG 0x02
+}
+
+register SG_CACHE_SHADOW {
+	address 0x01B
+	access_mode RO
+	modes M_DFF0, M_DFF1
+	mask SG_ADDR_MASK 0xf8
+	bit ODD_SEG 0x04
+	bit LAST_SEG 0x02
+	bit LAST_SEG_DONE 0x01
+}
+
+/*
+ * Arbiter Control
+ */
+register ARBCTL {
+	address 0x01B
+	access_mode RW
+	modes M_CFG
+	bit RESET_HARB 0x80
+	bit RETRY_SWEN 0x08
+	mask USE_TIME 0x07
+}
+
+/*
+ * Data Channel Host Address
+ */
+register HADDR {
+	address 0x070
+	access_mode RW
+	size 8
+	modes M_DFF0, M_DFF1
+}
+
+/*
+ * Host Overlay DMA Address
+ */
+register HODMAADR {
+	address 0x070
+	access_mode RW
+	size 8
+	modes M_SCSI
+}
+
+/*
+ * Data Channel Host Count
+ */
+register HCNT {
+	address 0x078
+	access_mode RW
+	size 3
+	modes M_DFF0, M_DFF1
+}
+
+/*
+ * Host Overlay DMA Count
+ */
+register HODMACNT {
+	address 0x078
+	access_mode RW
+	size 2
+	modes M_SCSI
+}
+
+/*
+ * Host Overlay DMA Enable
+ */
+register HODMAEN {
+	address 0x07A
+	access_mode RW
+	modes M_SCSI
+}
+
+/*
+ * Scatter/Gather Host Address
+ */
+register SGHADDR {
+	address 0x07C
+	access_mode RW
+	size 8
+	modes M_DFF0, M_DFF1
+}
+
+/*
+ * SCB Host Address
+ */
+register SCBHADDR {
+	address 0x07C
+	access_mode RW
+	size 8
+	modes M_CCHAN
+}
+
+/*
+ * Scatter/Gather Host Count
+ */
+register SGHCNT {
+	address 0x084
+	access_mode RW
+	modes M_DFF0, M_DFF1
+}
+
+/*
+ * SCB Host Count
+ */
+register SCBHCNT {
+	address 0x084
+	access_mode RW
+	modes M_CCHAN
+}
+
+/*
+ * Data FIFO Threshold
+ */
+register DFF_THRSH {
+	address 0x088
+	access_mode RW
+	modes M_CFG
+	mask WR_DFTHRSH 0x70
+	mask RD_DFTHRSH 0x07
+	mask RD_DFTHRSH_MIN 0x00
+	mask RD_DFTHRSH_25 0x01
+	mask RD_DFTHRSH_50 0x02
+	mask RD_DFTHRSH_63 0x03
+	mask RD_DFTHRSH_75 0x04
+	mask RD_DFTHRSH_85 0x05
+	mask RD_DFTHRSH_90 0x06
+	mask RD_DFTHRSH_MAX 0x07
+	mask WR_DFTHRSH_MIN 0x00
+	mask WR_DFTHRSH_25 0x10
+	mask WR_DFTHRSH_50 0x20
+	mask WR_DFTHRSH_63 0x30
+	mask WR_DFTHRSH_75 0x40
+	mask WR_DFTHRSH_85 0x50
+	mask WR_DFTHRSH_90 0x60
+	mask WR_DFTHRSH_MAX 0x70
+}
+
+/*
+ * ROM Address
+ */
+register ROMADDR {
+	address 0x08A
+	access_mode RW
+	size 3
+}
+
+/*
+ * ROM Control
+ */
+register ROMCNTRL {
+	address 0x08D
+	access_mode RW
+	mask ROMOP 0xE0
+	mask ROMSPD 0x18
+	bit REPEAT 0x02
+	bit RDY 0x01
+}
+
+/*
+ * ROM Data
+ */
+register ROMDATA {
+	address 0x08E
+	access_mode RW
+}
+
+/*
+ * Data Channel Receive Message 0
+ */
+register DCHRXMSG0 {
+	address 0x090
+	access_mode RO
+	modes M_DFF0, M_DFF1
+	mask CDNUM 0xF8
+	mask CFNUM 0x07
+}
+
+/*
+ * CMC Receive Message 0
+ */
+register CMCRXMSG0 {
+	address 0x090
+	access_mode RO
+	modes M_CCHAN
+	mask CDNUM 0xF8
+	mask CFNUM 0x07
+}
+
+/*
+ * Overlay Receive 
Message 0
+ */
+register OVLYRXMSG0 {
+	address 0x090
+	access_mode RO
+	modes M_SCSI
+	mask CDNUM 0xF8
+	mask CFNUM 0x07
+}
+
+/*
+ * Relaxed Order Enable
+ */
+register ROENABLE {
+	address 0x090
+	access_mode RW
+	modes M_CFG
+	bit MSIROEN 0x20
+	bit OVLYROEN 0x10
+	bit CMCROEN 0x08
+	bit SGROEN 0x04
+	bit DCH1ROEN 0x02
+	bit DCH0ROEN 0x01
+}
+
+/*
+ * Data Channel Receive Message 1
+ */
+register DCHRXMSG1 {
+	address 0x091
+	access_mode RO
+	modes M_DFF0, M_DFF1
+	mask CBNUM 0xFF
+}
+
+/*
+ * CMC Receive Message 1
+ */
+register CMCRXMSG1 {
+	address 0x091
+	access_mode RO
+	modes M_CCHAN
+	mask CBNUM 0xFF
+}
+
+/*
+ * Overlay Receive Message 1
+ */
+register OVLYRXMSG1 {
+	address 0x091
+	access_mode RO
+	modes M_SCSI
+	mask CBNUM 0xFF
+}
+
+/*
+ * No Snoop Enable
+ */
+register NSENABLE {
+	address 0x091
+	access_mode RW
+	modes M_CFG
+	bit MSINSEN 0x20
+	bit OVLYNSEN 0x10
+	bit CMCNSEN 0x08
+	bit SGNSEN 0x04
+	bit DCH1NSEN 0x02
+	bit DCH0NSEN 0x01
+}
+
+/*
+ * Data Channel Receive Message 2
+ */
+register DCHRXMSG2 {
+	address 0x092
+	access_mode RO
+	modes M_DFF0, M_DFF1
+	mask MINDEX 0xFF
+}
+
+/*
+ * CMC Receive Message 2
+ */
+register CMCRXMSG2 {
+	address 0x092
+	access_mode RO
+	modes M_CCHAN
+	mask MINDEX 0xFF
+}
+
+/*
+ * Overlay Receive Message 2
+ */
+register OVLYRXMSG2 {
+	address 0x092
+	access_mode RO
+	modes M_SCSI
+	mask MINDEX 0xFF
+}
+
+/*
+ * Outstanding Split Transactions
+ */
+register OST {
+	address 0x092
+	access_mode RW
+	modes M_CFG
+}
+
+/*
+ * Data Channel Receive Message 3
+ */
+register DCHRXMSG3 {
+	address 0x093
+	access_mode RO
+	modes M_DFF0, M_DFF1
+	mask MCLASS 0x0F
+}
+
+/*
+ * CMC Receive Message 3
+ */
+register CMCRXMSG3 {
+	address 0x093
+	access_mode RO
+	modes M_CCHAN
+	mask MCLASS 0x0F
+}
+
+/*
+ * Overlay Receive Message 3
+ */
+register OVLYRXMSG3 {
+	address 0x093
+	access_mode RO
+	modes M_SCSI
+	mask MCLASS 0x0F
+}
+
+/*
+ * PCI-X Control
+ */
+register PCIXCTL {
+	address 0x093
+	access_mode RW
+	modes M_CFG
+	bit SERRPULSE 0x80
+	bit UNEXPSCIEN 0x20
+	bit SPLTSMADIS 0x10
+	bit SPLTSTADIS 0x08
+	bit SRSPDPEEN 0x04
+	bit TSCSERREN 0x02
+	bit CMPABCDIS 0x01
+}
+
+/*
+ * CMC Sequencer Byte Count
+ */
+register CMCSEQBCNT {
+	address 0x094
+	access_mode RO
+	modes M_CCHAN
+}
+
+/*
+ * Overlay Sequencer Byte Count
+ */
+register OVLYSEQBCNT {
+	address 0x094
+	access_mode RO
+	modes M_SCSI
+}
+
+/*
+ * Data Channel Sequencer Byte Count
+ */
+register DCHSEQBCNT {
+	address 0x094
+	access_mode RO
+	size 2
+	modes M_DFF0, M_DFF1
+}
+
+/*
+ * Data Channel Split Status 0
+ */
+register DCHSPLTSTAT0 {
+	address 0x096
+	access_mode RW
+	modes M_DFF0, M_DFF1
+	bit STAETERM 0x80
+	bit SCBCERR 0x40
+	bit SCADERR 0x20
+	bit SCDATBUCKET 0x10
+	bit CNTNOTCMPLT 0x08
+	bit RXOVRUN 0x04
+	bit RXSCEMSG 0x02
+	bit RXSPLTRSP 0x01
+}
+
+/*
+ * CMC Split Status 0
+ */
+register CMCSPLTSTAT0 {
+	address 0x096
+	access_mode RW
+	modes M_CCHAN
+	bit STAETERM 0x80
+	bit SCBCERR 0x40
+	bit SCADERR 0x20
+	bit SCDATBUCKET 0x10
+	bit CNTNOTCMPLT 0x08
+	bit RXOVRUN 0x04
+	bit RXSCEMSG 0x02
+	bit RXSPLTRSP 0x01
+}
+
+/*
+ * Overlay Split Status 0
+ */
+register OVLYSPLTSTAT0 {
+	address 0x096
+	access_mode RW
+	modes M_SCSI
+	bit STAETERM 0x80
+	bit SCBCERR 0x40
+	bit SCADERR 0x20
+	bit SCDATBUCKET 0x10
+	bit CNTNOTCMPLT 0x08
+	bit RXOVRUN 0x04
+	bit RXSCEMSG 0x02
+	bit RXSPLTRSP 0x01
+}
+
+/*
+ * Data Channel Split Status 1
+ */
+register DCHSPLTSTAT1 {
+	address 0x097
+	access_mode RW
+	modes M_DFF0, M_DFF1
+	bit RXDATABUCKET 0x01
+}
+
+/*
+ * CMC Split Status 1
+ 
+ */
+register CMCSPLTSTAT1 {
+	address			0x097
+	access_mode		RW
+	modes			M_CCHAN
+	bit	RXDATABUCKET	0x01
+}
+
+/*
+ * Overlay Split Status 1
+ */
+register OVLYSPLTSTAT1 {
+	address			0x097
+	access_mode		RW
+	modes			M_SCSI
+	bit	RXDATABUCKET	0x01
+}
+
+/*
+ * S/G Receive Message 0
+ */
+register SGRXMSG0 {
+	address			0x098
+	access_mode		RO
+	modes			M_DFF0, M_DFF1
+	mask	CDNUM		0xF8
+	mask	CFNUM		0x07
+}
+
+/*
+ * S/G Receive Message 1
+ */
+register SGRXMSG1 {
+	address			0x099
+	access_mode		RO
+	modes			M_DFF0, M_DFF1
+	mask	CBNUM		0xFF
+}
+
+/*
+ * S/G Receive Message 2
+ */
+register SGRXMSG2 {
+	address			0x09A
+	access_mode		RO
+	modes			M_DFF0, M_DFF1
+	mask	MINDEX		0xFF
+}
+
+/*
+ * S/G Receive Message 3
+ */
+register SGRXMSG3 {
+	address			0x09B
+	access_mode		RO
+	modes			M_DFF0, M_DFF1
+	mask	MCLASS		0x0F
+}
+
+/*
+ * Slave Split Out Address 0
+ */
+register SLVSPLTOUTADR0 {
+	address			0x098
+	access_mode		RO
+	modes			M_SCSI
+	mask	LOWER_ADDR	0x7F
+}
+
+/*
+ * Slave Split Out Address 1
+ */
+register SLVSPLTOUTADR1 {
+	address			0x099
+	access_mode		RO
+	modes			M_SCSI
+	mask	REQ_DNUM	0xF8
+	mask	REQ_FNUM	0x07
+}
+
+/*
+ * Slave Split Out Address 2
+ */
+register SLVSPLTOUTADR2 {
+	address			0x09A
+	access_mode		RO
+	modes			M_SCSI
+	mask	REQ_BNUM	0xFF
+}
+
+/*
+ * Slave Split Out Address 3
+ */
+register SLVSPLTOUTADR3 {
+	address			0x09B
+	access_mode		RO
+	modes			M_SCSI
+	bit	RLXORD		0x10
+	mask	TAG_NUM		0x1F
+}
+
+/*
+ * SG Sequencer Byte Count
+ */
+register SGSEQBCNT {
+	address			0x09C
+	access_mode		RO
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Slave Split Out Attribute 0
+ */
+register SLVSPLTOUTATTR0 {
+	address			0x09C
+	access_mode		RO
+	modes			M_SCSI
+	mask	LOWER_BCNT	0xFF
+}
+
+/*
+ * Slave Split Out Attribute 1
+ */
+register SLVSPLTOUTATTR1 {
+	address			0x09D
+	access_mode		RO
+	modes			M_SCSI
+	mask	CMPLT_DNUM	0xF8
+	mask	CMPLT_FNUM	0x07
+}
+
+/*
+ * Slave Split Out Attribute 2
+ */
+register SLVSPLTOUTATTR2 {
+	address			0x09E
+	access_mode		RO
+	size			2
+	modes			M_SCSI
+	mask	CMPLT_BNUM	0xFF
+}
+
+/*
+ * S/G Split Status 0
+ */
+register SGSPLTSTAT0 {
+	address			0x09E
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+	bit	STAETERM	0x80
+	bit	SCBCERR		0x40
+	bit	SCADERR		0x20
+	bit	SCDATBUCKET	0x10
+	bit	CNTNOTCMPLT	0x08
+	bit	RXOVRUN		0x04
+	bit	RXSCEMSG	0x02
+	bit	RXSPLTRSP	0x01
+}
+
+/*
+ * S/G Split Status 1
+ */
+register SGSPLTSTAT1 {
+	address			0x09F
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+	bit	RXDATABUCKET	0x01
+}
+
+/*
+ * Special Function
+ */
+register SFUNCT {
+	address			0x09f
+	access_mode		RW
+	modes			M_CFG
+	mask	TEST_GROUP	0xF0
+	mask	TEST_NUM	0x0F
+}
+
+/*
+ * Data FIFO 0 PCI Status
+ */
+register DF0PCISTAT {
+	address			0x0A0
+	access_mode		RW
+	modes			M_CFG
+	bit	DPE		0x80
+	bit	SSE		0x40
+	bit	RMA		0x20
+	bit	RTA		0x10
+	bit	SCAAPERR	0x08
+	bit	RDPERR		0x04
+	bit	TWATERR		0x02
+	bit	DPR		0x01
+}
+
+/*
+ * Data FIFO 1 PCI Status
+ */
+register DF1PCISTAT {
+	address			0x0A1
+	access_mode		RW
+	modes			M_CFG
+	bit	DPE		0x80
+	bit	SSE		0x40
+	bit	RMA		0x20
+	bit	RTA		0x10
+	bit	SCAAPERR	0x08
+	bit	RDPERR		0x04
+	bit	TWATERR		0x02
+	bit	DPR		0x01
+}
+
+/*
+ * S/G PCI Status
+ */
+register SGPCISTAT {
+	address			0x0A2
+	access_mode		RW
+	modes			M_CFG
+	bit	DPE		0x80
+	bit	SSE		0x40
+	bit	RMA		0x20
+	bit	RTA		0x10
+	bit	SCAAPERR	0x08
+	bit	RDPERR		0x04
+	bit	DPR		0x01
+}
+
+/*
+ * CMC PCI Status
+ */
+register CMCPCISTAT {
+	address			0x0A3
+	access_mode		RW
+	modes			M_CFG
+	bit	DPE		0x80
+	bit	SSE		0x40
+	bit	RMA		0x20
+	bit	RTA		0x10
+	bit	SCAAPERR	0x08
+	bit	RDPERR		0x04
+	bit	TWATERR		0x02
+	bit	DPR		0x01
+}
+
+/*
+ * Overlay PCI Status
+ */
+register OVLYPCISTAT {
+	address			0x0A4
+	access_mode		RW
+	modes			M_CFG
+	bit	DPE		0x80
+	bit	SSE		0x40
+	bit	RMA		0x20
+	bit	RTA		0x10
+	bit	SCAAPERR	0x08
+	bit	RDPERR		0x04
+	bit	DPR		0x01
+}
+
+/*
+ * PCI Status for MSI Master DMA Transfer
+ */
+register MSIPCISTAT {
+	address			0x0A6
+	access_mode		RW
+	modes			M_CFG
+	bit	SSE		0x40
+	bit	RMA		0x20
+	bit	RTA		0x10
+	bit	CLRPENDMSI	0x08
+	bit	TWATERR		0x02
+	bit	DPR		0x01
+}
+
+/*
+ * PCI Status for Target
+ */
+register TARGPCISTAT {
+	address			0x0A6
+	access_mode		RW
+	modes			M_CFG
+	bit	DPE		0x80
+	bit	SSE		0x40
+	bit	STA		0x08
+	bit	TWATERR		0x02
+}
+
+/*
+ * LQ Packet In
+ * The last LQ Packet received
+ */
+register LQIN {
+	address			0x020
+	access_mode		RW
+	size			20
+	modes			M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * SCB Type Pointer
+ * SCB offset for Target Mode SCB type information
+ */
+register TYPEPTR {
+	address			0x020
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Queue Tag Pointer
+ * SCB offset to the two byte tag identifier used for target mode.
+ */
+register TAGPTR {
+	address			0x021
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Logical Unit Number Pointer
+ * SCB offset to the LSB (little endian) of the lun field.
+ */
+register LUNPTR {
+	address			0x022
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Data Length Pointer
+ * SCB offset for the 4 byte data length field in target mode.
+ */
+register DATALENPTR {
+	address			0x023
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Status Length Pointer
+ * SCB offset to the two byte status field in target SCBs.
+ */
+register STATLENPTR {
+	address			0x024
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Command Length Pointer
+ * Scb offset for the CDB length field in initiator SCBs.
+ */
+register CMDLENPTR {
+	address			0x025
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Task Attribute Pointer
+ * Scb offset for the byte field specifying the attribute byte
+ * to be used in command packets.
+ */
+register ATTRPTR {
+	address			0x026
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Task Management Flags Pointer
+ * Scb offset for the byte field specifying the attribute flags
+ * byte to be used in command packets.
+ */
+register FLAGPTR {
+	address			0x027
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Command Pointer
+ * Scb offset for the first byte in the CDB for initiator SCBs.
+ */
+register CMDPTR {
+	address			0x028
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Queue Next Pointer
+ * Scb offset for the 2 byte "next scb link".
+ */
+register QNEXTPTR {
+	address			0x029
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * SCSI ID Pointer
+ * Scb offset to the value to place in the SCSIID register
+ * during target mode connections.
+ */
+register IDPTR {
+	address			0x02A
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Command Aborted Byte Pointer
+ * Offset to the SCB flags field that includes the
+ * "SCB aborted" status bit.
+ */
+register ABRTBYTEPTR {
+	address			0x02B
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Command Aborted Bit Pointer
+ * Bit offset in the SCB flags field for "SCB aborted" status.
+ */
+register ABRTBITPTR {
+	address			0x02C
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Logical Unit Number Length
+ * The length, in bytes, of the SCB lun field.
+ */
+register LUNLEN {
+	address			0x030
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * CDB Limit
+ * The size, in bytes, of the embedded CDB field in initiator SCBs.
+ */
+register CDBLIMIT {
+	address			0x031
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Maximum Commands
+ * The maximum number of commands to issue during a
+ * single packetized connection.
+ */
+register MAXCMD {
+	address			0x032
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * Maximum Command Counter
+ * The number of commands already sent during this connection
+ */
+register MAXCMDCNT {
+	address			0x033
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * LQ Packet Reserved Bytes
+ * The bytes to be sent in the currently reserved fields
+ * of all LQ packets.
+ */
+register LQRSVD01 {
+	address			0x034
+	access_mode		RW
+	modes			M_SCSI
+}
+register LQRSVD16 {
+	address			0x035
+	access_mode		RW
+	modes			M_SCSI
+}
+register LQRSVD17 {
+	address			0x036
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * Command Reserved 0
+ * The byte to be sent for the reserved byte 0 of
+ * outgoing command packets.
+ */
+register CMDRSVD0 {
+	address			0x037
+	access_mode		RW
+	modes			M_CFG
+}
+
+/*
+ * LQ Manager Control 0
+ */
+register LQCTL0 {
+	address			0x038
+	access_mode		RW
+	modes			M_CFG
+	mask	LQITARGCLT	0xC0
+	mask	LQIINITGCLT	0x30
+	mask	LQ0TARGCLT	0x0C
+	mask	LQ0INITGCLT	0x03
+}
+
+/*
+ * LQ Manager Control 1
+ */
+register LQCTL1 {
+	address			0x038
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	PCI2PCI		0x04
+	bit	SINGLECMD	0x02
+	bit	ABORTPENDING	0x01
+}
+
+/*
+ * LQ Manager Control 2
+ */
+register LQCTL2 {
+	address			0x039
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	LQIRETRY	0x80
+	bit	LQICONTINUE	0x40
+	bit	LQITOIDLE	0x20
+	bit	LQIPAUSE	0x10
+	bit	LQORETRY	0x08
+	bit	LQOCONTINUE	0x04
+	bit	LQOTOIDLE	0x02
+	bit	LQOPAUSE	0x01
+}
+
+/*
+ * SCSI RAM BIST0
+ */
+register SCSBIST0 {
+	address			0x039
+	access_mode		RW
+	modes			M_CFG
+	bit	GSBISTERR	0x40
+	bit	GSBISTDONE	0x20
+	bit	GSBISTRUN	0x10
+	bit	OSBISTERR	0x04
+	bit	OSBISTDONE	0x02
+	bit	OSBISTRUN	0x01
+}
+
+/*
+ * SCSI Sequence Control 0
+ */
+register SCSISEQ0 {
+	address			0x03A
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	TEMODEO		0x80
+	bit	ENSELO		0x40
+	bit	ENARBO		0x20
+	bit	FORCEBUSFREE	0x10
+	bit	SCSIRSTO	0x01
+}
+
+/*
+ * SCSI RAM BIST 1
+ */
+register SCSBIST1 {
+	address			0x03A
+	access_mode		RW
+	modes			M_CFG
+	bit	NTBISTERR	0x04
+	bit	NTBISTDONE	0x02
+	bit	NTBISTRUN	0x01
+}
+
+/*
+ * SCSI Sequence Control 1
+ */
+register SCSISEQ1 {
+	address			0x03B
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	MANUALCTL	0x40
+	bit	ENSELI		0x20
+	bit	ENRSELI		0x10
+	mask	MANUALP		0x0C
+	bit	ENAUTOATNP	0x02
+	bit	ALTSTIM		0x01
+}
+
+/*
+ * SCSI Transfer Control 0
+ */
+register SXFRCTL0 {
+	address			0x03C
+	access_mode		RW
+	modes			M_SCSI
+	bit	DFON		0x80
+	bit	DFPEXP		0x40
+	bit	BIOSCANCELEN	0x10
+	bit	SPIOEN		0x08
+}
+
+/*
+ * SCSI Transfer Control 1
+ */
+register SXFRCTL1 {
+	address			0x03D
+	access_mode		RW
+	modes			M_SCSI
+	bit	BITBUCKET	0x80
+	bit	ENSACHK		0x40
+	bit	ENSPCHK		0x20
+	mask	STIMESEL	0x18
+	bit	ENSTIMER	0x04
+	bit	ACTNEGEN	0x02
+	bit	STPWEN		0x01
+}
+
+/*
+ * SCSI Transfer Control 2
+ */
+register SXFRCTL2 {
+	address			0x03E
+	access_mode		RW
+	modes			M_SCSI
+	bit	AUTORSTDIS	0x10
+	bit	CMDDMAEN	0x08
+	mask	ASU		0x07
+}
+
+/*
+ * SCSI Bus Initiator IDs
+ * Bitmask of observed initiators on the bus.
+ */
+register BUSINITID {
+	address			0x03C
+	access_mode		RW
+	modes			M_CFG
+	size			2
+}
+
+/*
+ * Data Length Counters
+ * Packet byte counter.
+ */
+register DLCOUNT {
+	address			0x03C
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+	size			3
+}
+
+/*
+ * Data FIFO Status
+ */
+register DFFSTAT {
+	address			0x03F
+	access_mode		RW
+	modes			M_SCSI
+	bit	FIFO1FREE	0x20
+	bit	FIFO0FREE	0x10
+	bit	CURRFIFO	0x01
+}
+
+/*
+ * SCSI Bus Target IDs
+ * Bitmask of observed targets on the bus.
+ */
+register BUSTARGID {
+	address			0x03E
+	access_mode		RW
+	modes			M_CFG
+	size			2
+}
+
+/*
+ * SCSI Control Signal Out
+ */
+register SCSISIGO {
+	address			0x040
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CDO		0x80
+	bit	IOO		0x40
+	bit	MSGO		0x20
+	bit	ATNO		0x10
+	bit	SELO		0x08
+	bit	BSYO		0x04
+	bit	REQO		0x02
+	bit	ACKO		0x01
+/*
+ * Possible phases to write into SCSISIGO
+ */
+	mask	PHASE_MASK	CDO|IOO|MSGO
+	mask	P_DATAOUT	0x00
+	mask	P_DATAIN	IOO
+	mask	P_DATAOUT_DT	P_DATAOUT|MSGO
+	mask	P_DATAIN_DT	P_DATAIN|MSGO
+	mask	P_COMMAND	CDO
+	mask	P_MESGOUT	CDO|MSGO
+	mask	P_STATUS	CDO|IOO
+	mask	P_MESGIN	CDO|IOO|MSGO
+}
+
+register SCSISIGI {
+	address			0x041
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CDI		0x80
+	bit	IOI		0x40
+	bit	MSGI		0x20
+	bit	ATNI		0x10
+	bit	SELI		0x08
+	bit	BSYI		0x04
+	bit	REQI		0x02
+	bit	ACKI		0x01
+/*
+ * Possible phases in SCSISIGI
+ */
+	mask	PHASE_MASK	CDI|IOI|MSGI
+	mask	P_DATAOUT	0x00
+	mask	P_DATAIN	IOI
+	mask	P_DATAOUT_DT	P_DATAOUT|MSGI
+	mask	P_DATAIN_DT	P_DATAIN|MSGI
+	mask	P_COMMAND	CDI
+	mask	P_MESGOUT	CDI|MSGI
+	mask	P_STATUS	CDI|IOI
+	mask	P_MESGIN	CDI|IOI|MSGI
+}
+
+/*
+ * Multiple Target IDs
+ * Bitmask of ids to respond as a target.
+ */
+register MULTARGID {
+	address			0x040
+	access_mode		RW
+	modes			M_CFG
+	size			2
+}
+
+/*
+ * SCSI Phase
+ */
+register SCSIPHASE {
+	address			0x042
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	STATUS_PHASE	0x20
+	bit	COMMAND_PHASE	0x10
+	bit	MSG_IN_PHASE	0x08
+	bit	MSG_OUT_PHASE	0x04
+	bit	DATA_IN_PHASE	0x02
+	bit	DATA_OUT_PHASE	0x01
+	mask	DATA_PHASE_MASK	0x03
+}
+
+/*
+ * SCSI Data 0 Image
+ */
+register SCSIDAT0_IMG {
+	address			0x043
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * SCSI Latched Data
+ */
+register SCSIDAT {
+	address			0x044
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	size			2
+}
+
+/*
+ * SCSI Data Bus
+ */
+register SCSIBUS {
+	address			0x046
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	size			2
+}
+
+/*
+ * Target ID In
+ */
+register TARGIDIN {
+	address			0x048
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLKOUT		0x80
+	mask	TARGID		0x0F
+}
+
+/*
+ * Selection/Reselection ID
+ * Upper four bits are the device id.  The ONEBIT is set when the re/selecting
+ * device did not set its own ID.
+ */
+register SELID {
+	address			0x049
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	mask	SELID_MASK	0xf0
+	bit	ONEBIT		0x08
+}
+
+/*
+ * SCSI Block Control
+ * Controls Bus type and channel selection.  SELWIDE allows for the
+ * coexistence of 8bit and 16bit devices on a wide bus.
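+ *
+ * A hedged usage sketch (assumes the ahd_inb() accessor from
+ * aic79xx_inline.h; illustrative only):
+ *
+ *	if (ahd_inb(ahd, SBLKCTL) & ENAB40)
+ *		printf("LVD transceivers active\n");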
+ */
+register SBLKCTL {
+	address			0x04A
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	DIAGLEDEN	0x80
+	bit	DIAGLEDON	0x40
+	bit	ENAB40		0x08	/* LVD transceiver active */
+	bit	ENAB20		0x04	/* SE/HVD transceiver active */
+	bit	SELWIDE		0x02
+}
+
+/*
+ * Option Mode
+ */
+register OPTIONMODE {
+	address			0x04A
+	access_mode		RW
+	modes			M_CFG
+	bit	BIOSCANCTL	0x80
+	bit	AUTOACKEN	0x40
+	bit	BIASCANCTL	0x20
+	bit	BUSFREEREV	0x10
+	bit	ENDGFORMCHK	0x04
+	bit	AUTO_MSGOUT_DE	0x02
+	mask	OPTIONMODE_DEFAULTS	AUTO_MSGOUT_DE
+}
+
+/*
+ * SCSI Status 0
+ */
+register SSTAT0 {
+	address			0x04B
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	TARGET		0x80	/* Board acting as target */
+	bit	SELDO		0x40	/* Selection Done */
+	bit	SELDI		0x20	/* Board has been selected */
+	bit	SELINGO		0x10	/* Selection In Progress */
+	bit	IOERR		0x08	/* LVD Transceiver mode changed */
+	bit	OVERRUN		0x04	/* SCSI Offset overrun detected */
+	bit	SPIORDY		0x02	/* SCSI PIO Ready */
+	bit	ARBDO		0x01	/* Arbitration Done Out */
+}
+
+/*
+ * Clear SCSI Interrupt 0
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
+ */
+register CLRSINT0 {
+	address			0x04B
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLRSELDO	0x40
+	bit	CLRSELDI	0x20
+	bit	CLRSELINGO	0x10
+	bit	CLRIOERR	0x08
+	bit	CLROVERRUN	0x04
+	bit	CLRSPIORDY	0x02
+	bit	CLRARBDO	0x01
+}
+
+/*
+ * SCSI Interrupt Mode 0
+ * Setting any bit will enable the corresponding function
+ * in SIMODE0 to interrupt via the IRQ pin.
+ */
+register SIMODE0 {
+	address			0x04B
+	access_mode		RW
+	modes			M_CFG
+	bit	ENSELDO		0x40
+	bit	ENSELDI		0x20
+	bit	ENSELINGO	0x10
+	bit	ENIOERR		0x08
+	bit	ENOVERRUN	0x04
+	bit	ENSPIORDY	0x02
+	bit	ENARBDO		0x01
+}
+
+/*
+ * SCSI Status 1
+ */
+register SSTAT1 {
+	address			0x04C
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	SELTO		0x80
+	bit	ATNTARG		0x40
+	bit	SCSIRSTI	0x20
+	bit	PHASEMIS	0x10
+	bit	BUSFREE		0x08
+	bit	SCSIPERR	0x04
+	bit	STRB2FAST	0x02
+	bit	REQINIT		0x01
+}
+
+/*
+ * Clear SCSI Interrupt 1
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
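+ *
+ * For example, a handler that has serviced a bus free event seen in
+ * SSTAT1 might acknowledge it as follows (a sketch, assuming the
+ * ahd_outb() accessor from aic79xx_inline.h):
+ *
+ *	ahd_outb(ahd, CLRSINT1, CLRBUSFREE);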
+ */
+register CLRSINT1 {
+	address			0x04c
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLRSELTIMEO	0x80
+	bit	CLRATNO		0x40
+	bit	CLRSCSIRSTI	0x20
+	bit	CLRBUSFREE	0x08
+	bit	CLRSCSIPERR	0x04
+	bit	CLRSTRB2FAST	0x02
+	bit	CLRREQINIT	0x01
+}
+
+/*
+ * SCSI Status 2
+ */
+register SSTAT2 {
+	address			0x04d
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	mask	BUSFREETIME	0xc0
+	mask	BUSFREE_LQO	0x40
+	mask	BUSFREE_DFF0	0x80
+	mask	BUSFREE_DFF1	0xC0
+	bit	NONPACKREQ	0x20
+	bit	EXP_ACTIVE	0x10	/* SCSI Expander Active */
+	bit	BSYX		0x08	/* Busy Expander */
+	bit	WIDE_RES	0x04	/* Modes 0 and 1 only */
+	bit	SDONE		0x02	/* Modes 0 and 1 only */
+	bit	DMADONE		0x01	/* Modes 0 and 1 only */
+}
+
+/*
+ * Clear SCSI Interrupt 2
+ */
+register CLRSINT2 {
+	address			0x04D
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLRNONPACKREQ	0x20
+	bit	CLRWIDE_RES	0x04	/* Modes 0 and 1 only */
+	bit	CLRSDONE	0x02	/* Modes 0 and 1 only */
+	bit	CLRDMADONE	0x01	/* Modes 0 and 1 only */
+}
+
+/*
+ * SCSI Interrupt Mode 2
+ */
+register SIMODE2 {
+	address			0x04D
+	access_mode		RW
+	modes			M_CFG
+	bit	ENWIDE_RES	0x04
+	bit	ENSDONE		0x02
+	bit	ENDMADONE	0x01
+}
+
+/*
+ * Physical Error Diagnosis
+ */
+register PERRDIAG {
+	address			0x04E
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	HIZERO		0x80
+	bit	HIPERR		0x40
+	bit	PREVPHASE	0x20
+	bit	PARITYERR	0x10
+	bit	AIPERR		0x08
+	bit	CRCERR		0x04
+	bit	DGFORMERR	0x02
+	bit	DTERR		0x01
+}
+
+/*
+ * LQI Manager Current State
+ */
+register LQISTATE {
+	address			0x04E
+	access_mode		RO
+	modes			M_CFG
+}
+
+/*
+ * SCSI Offset Count
+ */
+register SOFFCNT {
+	address			0x04F
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * LQO Manager Current State
+ */
+register LQOSTATE {
+	address			0x04F
+	access_mode		RO
+	modes			M_CFG
+}
+
+/*
+ * LQI Manager Status
+ */
+register LQISTAT0 {
+	address			0x050
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	LQIATNQAS	0x20
+	bit	LQICRCT1	0x10
+	bit	LQICRCT2	0x08
+	bit	LQIBADLQT	0x04
+	bit	LQIATNLQ	0x02
+	bit	LQIATNCMD	0x01
+}
+
+/*
+ * Clear LQI Interrupts 0
+ */
+register CLRLQIINT0 {
+	address			0x050
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLRLQIATNQAS	0x20
+	bit	CLRLQICRCT1	0x10
+	bit	CLRLQICRCT2	0x08
+	bit	CLRLQIBADLQT	0x04
+	bit	CLRLQIATNLQ	0x02
+	bit	CLRLQIATNCMD	0x01
+}
+
+/*
+ * LQI Manager Interrupt Mode 0
+ */
+register LQIMODE0 {
+	address			0x050
+	access_mode		RW
+	modes			M_CFG
+	bit	ENLQIATNQASK	0x20
+	bit	ENLQICRCT1	0x10
+	bit	ENLQICRCT2	0x08
+	bit	ENLQIBADLQT	0x04
+	bit	ENLQIATNLQ	0x02
+	bit	ENLQIATNCMD	0x01
+}
+
+/*
+ * LQI Manager Status 1
+ */
+register LQISTAT1 {
+	address			0x051
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	mask	LQIPHASE_LQ	0x80
+	mask	LQIPHASE_NLQ	0x40
+	bit	LQIABORT	0x20
+	mask	LQICRCI_LQ	0x10
+	mask	LQICRCI_NLQ	0x08
+	bit	LQIBADLQI	0x04
+	mask	LQIOVERI_LQ	0x02
+	mask	LQIOVERI_NLQ	0x01
+}
+
+/*
+ * Clear LQI Manager Interrupts 1
+ */
+register CLRLQIINT1 {
+	address			0x051
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	mask	CLRLQIPHASE_LQ	0x80
+	mask	CLRLQIPHASE_NLQ	0x40
+	bit	CLRLQIABORT	0x20
+	mask	CLRLQICRCI_LQ	0x10
+	mask	CLRLQICRCI_NLQ	0x08
+	bit	CLRLQIBADLQI	0x04
+	mask	CLRLQIOVERI_LQ	0x02
+	mask	CLRLQIOVERI_NLQ	0x01
+}
+
+/*
+ * LQI Manager Interrupt Mode 1
+ */
+register LQIMODE1 {
+	address			0x051
+	access_mode		RW
+	modes			M_CFG
+	mask	ENLQIPHASE_LQ	0x80
+	mask	ENLQIPHASE_NLQ	0x40
+	bit	ENLQIABORT	0x20
+	mask	ENLQICRCI_LQ	0x10
+	mask	ENLQICRCI_NLQ	0x08
+	bit	ENLQIBADLQI	0x04
+	mask	ENLQIOVERI_LQ	0x02
+	mask	ENLQIOVERI_NLQ	0x01
+}
+
+/*
+ * LQI Manager Status 2
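+ *
+ * Note that the LQI registers above form status/clear/enable triples
+ * sharing an address: LQISTAT<n> (RO) reports, CLRLQIINT<n> (WO)
+ * acknowledges, and LQIMODE<n> (RW, M_CFG) gates the interrupt.  A
+ * sketch of enabling two sources, assuming the driver's
+ * ahd_set_modes() and ahd_outb() helpers:
+ *
+ *	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ *	ahd_outb(ahd, LQIMODE0, ENLQIATNLQ|ENLQIATNCMD);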
+ */
+register LQISTAT2 {
+	address			0x052
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	PACKETIZED	0x80
+	bit	LQIPHASE_OUTPKT	0x40
+	bit	LQIWORKONLQ	0x20
+	bit	LQIWAITFIFO	0x10
+	bit	LQISTOPPKT	0x08
+	bit	LQISTOPLQ	0x04
+	bit	LQISTOPCMD	0x02
+	bit	LQIGSAVAIL	0x01
+}
+
+/*
+ * SCSI Status 3
+ */
+register SSTAT3 {
+	address			0x053
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	NTRAMPERR	0x02
+	bit	OSRAMPERR	0x01
+}
+
+/*
+ * Clear SCSI Status 3
+ */
+register CLRSINT3 {
+	address			0x053
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLRNTRAMPERR	0x02
+	bit	CLROSRAMPERR	0x01
+}
+
+/*
+ * SCSI Interrupt Mode 3
+ */
+register SIMODE3 {
+	address			0x053
+	access_mode		RW
+	modes			M_CFG
+	bit	ENNTRAMPERR	0x02
+	bit	ENOSRAMPERR	0x01
+}
+
+/*
+ * LQO Manager Status 0
+ */
+register LQOSTAT0 {
+	address			0x054
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	LQOTARGSCBPERR	0x10
+	bit	LQOSTOPT2	0x08
+	bit	LQOATNLQ	0x04
+	bit	LQOATNPKT	0x02
+	bit	LQOTCRC		0x01
+}
+
+/*
+ * Clear LQO Manager interrupt 0
+ */
+register CLRLQOINT0 {
+	address			0x054
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLRLQOTARGSCBPERR	0x10
+	bit	CLRLQOSTOPT2		0x08
+	bit	CLRLQOATNLQ		0x04
+	bit	CLRLQOATNPKT		0x02
+	bit	CLRLQOTCRC		0x01
+}
+
+/*
+ * LQO Manager Interrupt Mode 0
+ */
+register LQOMODE0 {
+	address			0x054
+	access_mode		RW
+	modes			M_CFG
+	bit	ENLQOTARGSCBPERR	0x10
+	bit	ENLQOSTOPT2		0x08
+	bit	ENLQOATNLQ		0x04
+	bit	ENLQOATNPKT		0x02
+	bit	ENLQOTCRC		0x01
+}
+
+/*
+ * LQO Manager Status 1
+ */
+register LQOSTAT1 {
+	address			0x055
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	LQOINITSCBPERR	0x10
+	bit	LQOSTOPI2	0x08
+	bit	LQOBADQAS	0x04
+	bit	LQOBUSFREE	0x02
+	bit	LQOPHACHGINPKT	0x01
+}
+
+/*
+ * Clear LQO Interrupt 1
+ */
+register CLRLQOINT1 {
+	address			0x055
+	access_mode		WO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	CLRLQOINITSCBPERR	0x10
+	bit	CLRLQOSTOPI2		0x08
+	bit	CLRLQOBADQAS		0x04
+	bit	CLRLQOBUSFREE		0x02
+	bit	CLRLQOPHACHGINPKT	0x01
+}
+
+/*
+ * LQO Manager Interrupt Mode 1
+ */
+register LQOMODE1 {
+	address			0x055
+	access_mode		RW
+	modes			M_CFG
+	bit	ENLQOINITSCBPERR	0x10
+	bit	ENLQOSTOPI2		0x08
+	bit	ENLQOBADQAS		0x04
+	bit	ENLQOBUSFREE		0x02
+	bit	ENLQOPHACHGINPKT	0x01
+}
+
+/*
+ * LQO Manager Status 2
+ */
+register LQOSTAT2 {
+	address			0x056
+	access_mode		RO
+	modes			M_DFF0, M_DFF1, M_SCSI
+	mask	LQOPKT		0xE0
+	bit	LQOWAITFIFO	0x10
+	bit	LQOPHACHGOUTPKT	0x02	/* outside of packet boundaries. */
+	bit	LQOSTOP0	0x01	/* Stopped after sending all packets */
+}
+
+/*
+ * Output Synchronizer Space Count
+ */
+register OS_SPACE_CNT {
+	address			0x056
+	access_mode		RO
+	modes			M_CFG
+}
+
+/*
+ * SCSI Interrupt Mode 1
+ * Setting any bit will enable the corresponding function
+ * in SIMODE1 to interrupt via the IRQ pin.
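+ *
+ * e.g. a typical initiator enable set could be written as follows (a
+ * sketch assuming the ahd_outb() accessor; the exact mask the driver
+ * programs may differ):
+ *
+ *	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);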
+ */
+register SIMODE1 {
+	address			0x057
+	access_mode		RW
+	modes			M_DFF0, M_DFF1, M_SCSI
+	bit	ENSELTIMO	0x80
+	bit	ENATNTARG	0x40
+	bit	ENSCSIRST	0x20
+	bit	ENPHASEMIS	0x10
+	bit	ENBUSFREE	0x08
+	bit	ENSCSIPERR	0x04
+	bit	ENSTRB2FAST	0x02
+	bit	ENREQINIT	0x01
+}
+
+/*
+ * Good Status FIFO
+ */
+register GSFIFO {
+	address			0x058
+	access_mode		RO
+	size			2
+	modes			M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * Data FIFO SCSI Transfer Control
+ */
+register DFFSXFRCTL {
+	address			0x05A
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+	bit	CLRSHCNT	0x04
+	bit	CLRCHN		0x02
+	bit	RSTCHN		0x01
+}
+
+/*
+ * Next SCSI Control Block
+ */
+register NEXTSCB {
+	address			0x05A
+	access_mode		RW
+	size			2
+	modes			M_SCSI
+}
+
+/*
+ * SEQ Interrupts
+ */
+register SEQINTSRC {
+	address			0x05B
+	access_mode		RO
+	modes			M_DFF0, M_DFF1
+	bit	CTXTDONE	0x40
+	bit	SAVEPTRS	0x20
+	bit	CFG4DATA	0x10
+	bit	CFG4ISTAT	0x08
+	bit	CFG4TSTAT	0x04
+	bit	CFG4ICMD	0x02
+	bit	CFG4TCMD	0x01
+}
+
+/*
+ * Clear Arp Interrupts
+ */
+register CLRSEQINTSRC {
+	address			0x05B
+	access_mode		WO
+	modes			M_DFF0, M_DFF1
+	bit	CLRCTXTDONE	0x40
+	bit	CLRSAVEPTRS	0x20
+	bit	CLRCFG4DATA	0x10
+	bit	CLRCFG4ISTAT	0x08
+	bit	CLRCFG4TSTAT	0x04
+	bit	CLRCFG4ICMD	0x02
+	bit	CLRCFG4TCMD	0x01
+}
+
+/*
+ * SEQ Interrupt Enabled (Shared)
+ */
+register SEQIMODE {
+	address			0x05C
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+	bit	ENCTXTDONE	0x40
+	bit	ENSAVEPTRS	0x20
+	bit	ENCFG4DATA	0x10
+	bit	ENCFG4ISTAT	0x08
+	bit	ENCFG4TSTAT	0x04
+	bit	ENCFG4ICMD	0x02
+	bit	ENCFG4TCMD	0x01
+}
+
+/*
+ * Current SCSI Control Block
+ */
+register CURRSCB {
+	address			0x05C
+	access_mode		RW
+	size			2
+	modes			M_SCSI
+}
+
+/*
+ * Data FIFO Status
+ */
+register MDFFSTAT {
+	address			0x05D
+	access_mode		RO
+	modes			M_DFF0, M_DFF1
+	bit	LASTSDONE	0x10
+	bit	SHVALID		0x08
+	bit	DLZERO		0x04	/* FIFO data ends on packet boundary. */
+	bit	DATAINFIFO	0x02
+	bit	FIFOFREE	0x01
+}
+
+/*
+ * CRC Control
+ */
+register CRCCONTROL {
+	address			0x05d
+	access_mode		RW
+	modes			M_CFG
+	bit	CRCVALCHKEN	0x40
+}
+
+/*
+ * SCSI Test Control
+ */
+register SCSITEST {
+	address			0x05E
+	access_mode		RW
+	modes			M_CFG
+	bit	CNTRTEST	0x08
+	bit	SEL_TXPLL_DEBUG	0x04
+}
+
+/*
+ * Data FIFO Queue Tag
+ */
+register DFFTAG {
+	address			0x05E
+	access_mode		RW
+	size			2
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Last SCSI Control Block
+ */
+register LASTSCB {
+	address			0x05E
+	access_mode		RW
+	size			2
+	modes			M_SCSI
+}
+
+/*
+ * SCSI I/O Cell Power-down Control
+ */
+register IOPDNCTL {
+	address			0x05F
+	access_mode		RW
+	modes			M_CFG
+	bit	DISABLE_OE	0x80
+	bit	PDN_IDIST	0x04
+	bit	PDN_DIFFSENSE	0x01
+}
+
+/*
+ * Shadow Host Address.
+ */
+register SHADDR {
+	address			0x060
+	access_mode		RO
+	size			8
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Data Group CRC Interval.
+ */
+register DGRPCRCI {
+	address			0x060
+	access_mode		RW
+	size			2
+	modes			M_CFG
+}
+
+/*
+ * Data Transfer Negotiation Address
+ */
+register NEGOADDR {
+	address			0x060
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * Data Transfer Negotiation Data - Period Byte
+ */
+register NEGPERIOD {
+	address			0x061
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * Packetized CRC Interval
+ */
+register PACKCRCI {
+	address			0x062
+	access_mode		RW
+	size			2
+	modes			M_CFG
+}
+
+/*
+ * Data Transfer Negotiation Data - Offset Byte
+ */
+register NEGOFFSET {
+	address			0x062
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * Data Transfer Negotiation Data - PPR Options
+ */
+register NEGPPROPTS {
+	address			0x063
+	access_mode		RW
+	modes			M_SCSI
+	bit	PPROPT_PACE	0x08
+	bit	PPROPT_QAS	0x04
+	bit	PPROPT_DT	0x02
+	bit	PPROPT_IUT	0x01
+}
+
+/*
+ * Data Transfer Negotiation Data - Connection Options
+ */
+register NEGCONOPTS {
+	address			0x064
+	access_mode		RW
+	modes			M_SCSI
+	bit	ENAIP		0x08
+	bit	ENAUTOATNI	0x04
+	bit	ENAUTOATNO	0x02
+	bit	WIDEXFER	0x01
+}
+
+/*
+ * Negotiation Table Annex Column Index.
+ */
+register ANNEXCOL {
+	address			0x065
+	access_mode		RW
+	modes			M_SCSI
+}
+
+const AHD_ANNEXCOL_PRECOMP	4
+const AHD_PRECOMP_MASK		0x07
+const AHD_PRECOMP_CUTBACK_17	0x04
+const AHD_PRECOMP_CUTBACK_29	0x06
+const AHD_PRECOMP_CUTBACK_37	0x07
+const AHD_PRECOMP_FASTSLEW	0x40
+const AHD_NUM_ANNEXCOLS		4
+
+/*
+ * Negotiation Table Annex Data Port.
+ */
+register ANNEXDAT {
+	address			0x066
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * Initiator's Own Id.
+ * The SCSI ID to use for Selection Out and seen during a reselection.
+ */
+register IOWNID {
+	address			0x067
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * 960MHz Phase-Locked Loop Control 0
+ */
+register PLL960CTL0 {
+	address			0x068
+	access_mode		RW
+	modes			M_CFG
+	bit	PLL_VCOSEL	0x80
+	bit	PLL_PWDN	0x40
+	mask	PLL_NS		0x30
+	bit	PLL_ENLUD	0x08
+	bit	PLL_ENLPF	0x04
+	bit	PLL_DLPF	0x02
+	bit	PLL_ENFBM	0x01
+}
+
+/*
+ * Target Own Id
+ */
+register TOWNID {
+	address			0x069
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * 960MHz Phase-Locked Loop Control 1
+ */
+register PLL960CTL1 {
+	address			0x069
+	access_mode		RW
+	modes			M_CFG
+	bit	PLL_CNTEN	0x80
+	bit	PLL_CNTCLR	0x40
+	bit	PLL_RST		0x01
+}
+
+/*
+ * Expander Signature
+ */
+register XSIG {
+	address			0x06A
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * Shadow Byte Count
+ */
+register SHCNT {
+	address			0x068
+	access_mode		RW
+	size			3
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Selection Out ID
+ */
+register SELOID {
+	address			0x06B
+	access_mode		RW
+	modes			M_SCSI
+}
+
+/*
+ * 960-MHz Phase-Locked Loop Test Count
+ */
+register PLL960CNT0 {
+	address			0x06A
+	access_mode		RO
+	size			2
+	modes			M_CFG
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Control 0
+ */
+register PLL400CTL0 {
+	address			0x06C
+	access_mode		RW
+	modes			M_CFG
+	bit	PLL_VCOSEL	0x80
+	bit	PLL_PWDN	0x40
+	mask	PLL_NS		0x30
+	bit	PLL_ENLUD	0x08
+	bit	PLL_ENLPF	0x04
+	bit	PLL_DLPF	0x02
+	bit	PLL_ENFBM	0x01
+}
+
+/*
+ * Arbitration Fairness
+ */
+register FAIRNESS {
+	address			0x06C
+	access_mode		RW
+	size			2
+	modes			M_SCSI
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Control 1
+ */
+register PLL400CTL1 {
+	address			0x06D
+	access_mode		RW
+	modes			M_CFG
+	bit	PLL_CNTEN	0x80
+	bit	PLL_CNTCLR	0x40
+	bit	PLL_RST		0x01
+}
+
+/*
+ * Arbitration Unfairness
+ */
+register UNFAIRNESS {
+	address			0x06E
+	access_mode		RW
+	size			2
+	modes			M_SCSI
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Test Count
+ */
+register PLL400CNT0 {
+	address			0x06E
+	access_mode		RO
+	size			2
+	modes			M_CFG
+}
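+
+/*
+ * Illustrative sketch, not from the original file: programming one
+ * negotiation-table annex entry through the ANNEXCOL/ANNEXDAT pair
+ * defined above.  Assumes the ahd_outb() accessor from
+ * aic79xx_inline.h; "targ_id" is a hypothetical variable and the
+ * precomp value is illustrative only.
+ *
+ *	ahd_outb(ahd, NEGOADDR, targ_id);
+ *	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP);
+ *	ahd_outb(ahd, ANNEXDAT, AHD_PRECOMP_CUTBACK_29);
+ */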
+/*
+ * SCB Page Pointer
+ */
+register SCBPTR {
+	address			0x0A8
+	access_mode		RW
+	size			2
+	modes			M_DFF0, M_DFF1, M_CCHAN, M_SCSI
+}
+
+/*
+ * CMC SCB Array Count
+ * Number of bytes to transfer between CMC SCB memory and SCBRAM.
+ * Transfers must be 8-byte aligned and sized.
+ */
+register CCSCBACNT {
+	address			0x0AB
+	access_mode		RW
+	modes			M_CCHAN
+}
+
+/*
+ * SCB Autopointer
+ * SCB-Next Address Snooping logic.  When an SCB is transferred to
+ * the card, the next SCB address to be used by the CMC array can
+ * be autoloaded from that transfer.
+ */
+register SCBAUTOPTR {
+	address			0x0AB
+	access_mode		RW
+	modes			M_CFG
+	bit	AUSCBPTR_EN	0x80
+	mask	SCBPTR_ADDR	0x38
+	mask	SCBPTR_OFF	0x07
+}
+
+/*
+ * CMC SG RAM Address Pointer
+ */
+register CCSGADDR {
+	address			0x0AC
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * CMC SCB RAM Address Pointer
+ */
+register CCSCBADDR {
+	address			0x0AC
+	access_mode		RW
+	modes			M_CCHAN
+}
+
+/*
+ * CMC SCB RAM Back-up Address Pointer
+ * Indicates the true stop location of transfers halted prior
+ * to SCBHCNT going to 0.
+ */
+register CCSCBADR_BK {
+	address			0x0AC
+	access_mode		RO
+	modes			M_CFG
+}
+
+/*
+ * CMC SG Control
+ */
+register CCSGCTL {
+	address			0x0AD
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+	bit	CCSGDONE	0x80
+	bit	SG_CACHE_AVAIL	0x10
+	bit	CCSGEN		0x08
+	bit	SG_FETCH_REQ	0x02
+	bit	CCSGRESET	0x01
+}
+
+/*
+ * CMC SCB Control
+ */
+register CCSCBCTL {
+	address			0x0AD
+	access_mode		RW
+	modes			M_CCHAN
+	bit	CCSCBDONE	0x80
+	bit	ARRDONE		0x40
+	bit	CCARREN		0x10
+	bit	CCSCBEN		0x08
+	bit	CCSCBDIR	0x04
+	bit	CCSCBRESET	0x01
+}
+
+/*
+ * CMC RAM BIST
+ */
+register CMC_RAMBIST {
+	address			0x0AD
+	access_mode		RW
+	modes			M_CFG
+	bit	SG_ELEMENT_SIZE		0x80
+	bit	SCBRAMBIST_FAIL		0x40
+	bit	SG_BIST_FAIL		0x20
+	bit	SG_BIST_EN		0x10
+	bit	CMC_BUFFER_BIST_FAIL	0x02
+	bit	CMC_BUFFER_BIST_EN	0x01
+}
+
+/*
+ * CMC SG RAM Data Port
+ */
+register CCSGRAM {
+	address			0x0B0
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * CMC SCB RAM Data Port
+ */
+register CCSCBRAM {
+	address			0x0B0
+	access_mode		RW
+	modes			M_CCHAN
+}
+
+/*
+ * Flex DMA Address.
+ */ +register FLEXADR { + address 0x0B0 + access_mode RW + size 3 + modes M_SCSI +} + +/* + * Flex DMA Byte Count + */ +register FLEXCNT { + address 0x0B3 + access_mode RW + size 2 + modes M_SCSI +} + +/* + * Flex DMA Status + */ +register FLEXDMASTAT { + address 0x0B5 + access_mode RW + modes M_SCSI + bit FLEXDMAERR 0x02 + bit FLEXDMADONE 0x01 +} + +/* + * Flex DMA Data Port + */ +register FLEXDATA { + address 0x0B6 + access_mode RW + modes M_SCSI +} + +/* + * Board Data + */ +register BRDDAT { + address 0x0B8 + access_mode RW + modes M_SCSI +} + +/* + * Board Control + */ +register BRDCTL { + address 0x0B9 + access_mode RW + modes M_SCSI + bit FLXARBACK 0x80 + bit FLXARBREQ 0x40 + mask BRDADDR 0x38 + bit BRDEN 0x04 + bit BRDRW 0x02 + bit BRDSTB 0x01 +} + +/* + * Serial EEPROM Address + */ +register SEEADR { + address 0x0BA + access_mode RW + modes M_SCSI +} + +/* + * Serial EEPROM Data + */ +register SEEDAT { + address 0x0BC + access_mode RW + size 2 + modes M_SCSI +} + +/* + * Serial EEPROM Status + */ +register SEESTAT { + address 0x0BE + access_mode RO + modes M_SCSI + bit INIT_DONE 0x80 + mask SEEOPCODE 0x70 + bit LDALTID_L 0x08 + bit SEEARBACK 0x04 + bit SEEBUSY 0x02 + bit SEESTART 0x01 +} + +/* + * Serial EEPROM Control + */ +register SEECTL { + address 0x0BE + access_mode RW + modes M_SCSI + mask SEEOPCODE 0x70 + mask SEEOP_ERASE 0x70 + mask SEEOP_READ 0x60 + mask SEEOP_WRITE 0x50 + /* + * The following four commands use special + * addresses for differentiation. + */ + mask SEEOP_ERAL 0x40 + mask SEEOP_EWEN 0x40 + mask SEEOP_WALL 0x40 + mask SEEOP_EWDS 0x40 + bit SEERST 0x02 + bit SEESTART 0x01 +} + +const SEEOP_ERAL_ADDR 0x80 +const SEEOP_EWEN_ADDR 0xC0 +const SEEOP_WRAL_ADDR 0x40 +const SEEOP_EWDS_ADDR 0x00 + +/* + * SCB Counter + */ +register SCBCNT { + address 0x0BF + access_mode RW + modes M_SCSI +} + +/* + * Data FIFO Write Address + * Pointer to the next QWD location to be written to the data FIFO. + */ +register DFWADDR { + address 0x0C0 + access_mode RW + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * DSP Filter Control + */ +register DSPFLTRCTL { + address 0x0C0 + access_mode RW + modes M_CFG + bit FLTRDISABLE 0x20 + bit EDGESENSE 0x10 + mask DSPFCNTSEL 0x0F +} + +/* + * DSP Data Channel Control + */ +register DSPDATACTL { + address 0x0C1 + access_mode RW + modes M_CFG + bit BYPASSENAB 0x80 + bit DESQDIS 0x10 + bit RCVROFFSTDIS 0x04 + bit XMITOFFSTDIS 0x02 +} + +/* + * Data FIFO Read Address + * Pointer to the next QWD location to be read from the data FIFO. + */ +register DFRADDR { + address 0x0C2 + access_mode RW + size 2 + modes M_DFF0, M_DFF1 +} + +/* + * DSP REQ Control + */ +register DSPREQCTL { + address 0x0C2 + access_mode RW + modes M_CFG + mask MANREQCTL 0xC0 + mask MANREQDLY 0x3F +} + +/* + * DSP ACK Control + */ +register DSPACKCTL { + address 0x0C3 + access_mode RW + modes M_CFG + mask MANACKCTL 0xC0 + mask MANACKDLY 0x3F +} + +/* + * Data FIFO Data + * Read/Write byte port into the data FIFO. The read and write + * FIFO pointers increment with each read and write respectively + * to this port. 
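+ *
+ * A drain sketch (assumes the ahd_inb()/ahd_inw() accessors and a
+ * caller-provided buffer "buf"; DFBCNT is defined below; illustrative
+ * only, not a verbatim driver excerpt):
+ *
+ *	while (ahd_inw(ahd, DFBCNT) != 0)
+ *		*buf++ = ahd_inb(ahd, DFDAT);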
+ */
+register DFDAT {
+	address			0x0C4
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * DSP Channel Select
+ */
+register DSPSELECT {
+	address			0x0C4
+	access_mode		RW
+	modes			M_CFG
+	bit	AUTOINCEN	0x80
+	mask	DSPSEL		0x1F
+}
+
+const NUMDSPS 0x14
+
+/*
+ * Write Bias Control
+ */
+register WRTBIASCTL {
+	address			0x0C5
+	access_mode		WO
+	modes			M_CFG
+	bit	AUTOXBCDIS	0x80
+	mask	XMITMANVAL	0x3F
+}
+
+const WRTBIASCTL_CPQ_DEFAULT 0x97
+
+/*
+ * Receiver Bias Control
+ */
+register RCVRBIASCTL {
+	address			0x0C6
+	access_mode		WO
+	modes			M_CFG
+	bit	AUTORBCDIS	0x80
+	mask	RCVRMANVAL	0x3F
+}
+
+/*
+ * Write Bias Calculator
+ */
+register WRTBIASCALC {
+	address			0x0C7
+	access_mode		RO
+	modes			M_CFG
+}
+
+/*
+ * Data FIFO Pointers
+ * Contains the byte offset from DFWADDR and DFRADDR to the current
+ * FIFO write/read locations.
+ */
+register DFPTRS {
+	address			0x0C8
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Receiver Bias Calculator
+ */
+register RCVRBIASCALC {
+	address			0x0C8
+	access_mode		RO
+	modes			M_CFG
+}
+
+/*
+ * Data FIFO Debug Control
+ */
+register DFDBCTL {
+	address			0x0C8
+	access_mode		RW
+	modes			M_DFF0, M_DFF1
+	bit	DFF_CIO_WR_RDY		0x20
+	bit	DFF_CIO_RD_RDY		0x10
+	bit	DFF_DIR_ERR		0x08
+	bit	DFF_RAMBIST_FAIL	0x04
+	bit	DFF_RAMBIST_DONE	0x02
+	bit	DFF_RAMBIST_EN		0x01
+}
+
+/*
+ * Data FIFO Backup Read Pointer
+ * Contains the data FIFO address to be restored if the last
+ * data accessed from the data FIFO was not transferred successfully.
+ */
+register DFBKPTR {
+	address			0x0C9
+	access_mode		RW
+	size			2
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Skew Calculator
+ */
+register SKEWCALC {
+	address			0x0C9
+	access_mode		RO
+	modes			M_CFG
+}
+
+/*
+ * Data FIFO Space Count
+ * Number of FIFO locations that are free.
+ */
+register DFSCNT {
+	address			0x0CC
+	access_mode		RO
+	size			2
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Data FIFO Byte Count
+ * Number of filled FIFO locations.
+ */
+register DFBCNT {
+	address			0x0CE
+	access_mode		RO
+	size			2
+	modes			M_DFF0, M_DFF1
+}
+
+/*
+ * Sequencer Program Overlay Address.
+ * Low address must be written prior to high address.
+ */
+register OVLYADDR {
+	address			0x0D4
+	modes			M_SCSI
+	size			2
+	access_mode		RW
+}
+
+/*
+ * Sequencer Control 0
+ * Error detection mode, speed configuration,
+ * single step, breakpoints and program load.
+ */
+register SEQCTL0 {
+	address			0x0D6
+	access_mode		RW
+	bit	PERRORDIS	0x80
+	bit	PAUSEDIS	0x40
+	bit	FAILDIS		0x20
+	bit	FASTMODE	0x10
+	bit	BRKADRINTEN	0x08
+	bit	STEP		0x04
+	bit	SEQRESET	0x02
+	bit	LOADRAM		0x01
+}
+
+/*
+ * Sequencer Control 1
+ * Instruction RAM Diagnostics
+ */
+register SEQCTL1 {
+	address			0x0D7
+	access_mode		RW
+	bit	OVRLAY_DATA_CHK	0x08
+	bit	RAMBIST_DONE	0x04
+	bit	RAMBIST_FAIL	0x02
+	bit	RAMBIST_EN	0x01
+}
+
+/*
+ * Sequencer Flags
+ * Zero and Carry state of the ALU.
+ */
+register FLAGS {
+	address			0x0D8
+	access_mode		RO
+	bit	ZERO		0x02
+	bit	CARRY		0x01
+}
+
+/*
+ * Sequencer Interrupt Control
+ */
+register SEQINTCTL {
+	address			0x0D9
+	access_mode		RW
+	bit	INTVEC1DSL	0x80
+	bit	INT1_CONTEXT	0x20
+	bit	SCS_SEQ_INT1M1	0x10
+	bit	SCS_SEQ_INT1M0	0x08
+	mask	INTMASK		0x06
+	bit	IRET		0x01
+}
+
+/*
+ * Sequencer RAM Data Port
+ * Single byte window into the Sequencer Instruction Ram area starting
+ * at the address specified by OVLYADDR.  To write a full instruction word,
+ * simply write four bytes in succession.  OVLYADDR will increment after the
+ * most significant instruction byte (the byte with the parity bit) is written.
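+ *
+ * A download sketch (assumes the ahd_outb() accessor and a seqprog[]
+ * byte array; the kernel's actual load routine may manage additional
+ * SEQCTL0 flags):
+ *
+ *	ahd_outb(ahd, SEQCTL0, PERRORDIS|LOADRAM);
+ *	ahd_outb(ahd, OVLYADDR, 0);		/* low byte first */
+ *	ahd_outb(ahd, OVLYADDR+1, 0);
+ *	for (i = 0; i < sizeof(seqprog); i++)
+ *		ahd_outb(ahd, SEQRAM, seqprog[i]);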
+ */
+register SEQRAM {
+	address			0x0DA
+	access_mode		RW
+}
+
+/*
+ * Sequencer Program Counter
+ * Low byte must be written prior to high byte.
+ */
+register PRGMCNT {
+	address			0x0DE
+	access_mode		RW
+	size			2
+}
+
+/*
+ * Accumulator
+ */
+register ACCUM {
+	address			0x0E0
+	access_mode		RW
+	accumulator
+}
+
+/*
+ * Source Index Register
+ * Incrementing index for reads of SINDIR and the destination (low byte only)
+ * for any immediate operands passed in jmp, jc, jnc, call instructions.
+ * Example:
+ *	mvi	0xFF	call some_routine;
+ *
+ * Will set SINDEX[0] to 0xFF and call the routine "some_routine".
+ */
+register SINDEX {
+	address			0x0E2
+	access_mode		RW
+	size			2
+	sindex
+}
+
+/*
+ * Destination Index Register
+ * Incrementing index for writes to DINDIR.  Can be used as a scratch register.
+ */
+register DINDEX {
+	address			0x0E4
+	access_mode		RW
+	size			2
+}
+
+/*
+ * Break Address
+ * Sequencer instruction breakpoint address.
+ */
+register BRKADDR0 {
+	address			0x0E6
+	access_mode		RW
+}
+
+register BRKADDR1 {
+	address			0x0E7
+	access_mode		RW
+	bit	BRKDIS		0x80	/* Disable Breakpoint */
+}
+
+/*
+ * All Ones
+ * All reads to this register return the value 0xFF.
+ */
+register ALLONES {
+	address			0x0E8
+	access_mode		RO
+	allones
+}
+
+/*
+ * All Zeros
+ * All reads to this register return the value 0.
+ */
+register ALLZEROS {
+	address			0x0EA
+	access_mode		RO
+	allzeros
+}
+
+/*
+ * No Destination
+ * Writes to this register have no effect.
+ */
+register NONE {
+	address			0x0EA
+	access_mode		WO
+	none
+}
+
+/*
+ * Source Index Indirect
+ * Reading this register is equivalent to reading (register_base + SINDEX) and
+ * incrementing SINDEX by 1.
+ */
+register SINDIR {
+	address			0x0EC
+	access_mode		RO
+}
+
+/*
+ * Destination Index Indirect
+ * Writing this register is equivalent to writing to (register_base + DINDEX)
+ * and incrementing DINDEX by 1.
+ */
+register DINDIR {
+	address			0x0ED
+	access_mode		WO
+}
+
+/*
+ * Function One
+ * 2's complement to bit value conversion.  Write the 2's complement value
+ * (0-7 only) to the top nibble and retrieve the bit indexed by that value
+ * on the next read of this register.
+ * Example:
+ *	Write	0x60
+ *	Read	0x40
+ */
+register FUNCTION1 {
+	address			0x0F0
+	access_mode		RW
+}
+
+/*
+ * Stack
+ * Window into the stack.  Each stack location is 10 bits wide reported
+ * low byte followed by high byte.  There are 8 stack locations.
+ */
+register STACK {
+	address			0x0F2
+	access_mode		RW
+}
+
+/*
+ * Interrupt Vector 1 Address
+ * Interrupt branch address for SCS SEQ_INT1 mode 0 and 1 interrupts.
+ */
+register INTVEC1_ADDR {
+	address			0x0F4
+	access_mode		RW
+	size			2
+	modes			M_CFG
+}
+
+/*
+ * Current Address
+ * Address of the SEQRAM instruction currently executing.
+ */
+register CURADDR {
+	address			0x0F4
+	access_mode		RW
+	size			2
+	modes			M_SCSI
+}
+
+/*
+ * Interrupt Vector 2 Address
+ * Interrupt branch address for HST_SEQ_INT2 interrupts.
+ */
+register INTVEC2_ADDR {
+	address			0x0F6
+	access_mode		RW
+	size			2
+	modes			M_CFG
+}
+
+/*
+ * Last Address
+ * Address of the SEQRAM instruction executed prior to the current instruction.
+ */
+register LASTADDR {
+	address			0x0F6
+	access_mode		RW
+	size			2
+	modes			M_SCSI
+}
+
+register AHD_PCI_CONFIG_BASE {
+	address			0x100
+	access_mode		RW
+	size			256
+	modes			M_CFG
+}
+
+/* ---------------------- Scratch RAM Offsets ------------------------- */
+scratch_ram {
+	/* Mode Specific */
+	address			0x0A0
+	size			8
+	modes			0, 1, 2, 3
+	REG0 {
+		size		2
+	}
+	REG1 {
+		size		2
+	}
+	REG2 {
+		size		2
+	}
+	SG_STATE {
+		size		1
+		bit	SEGS_AVAIL	0x01
+		bit	LOADING_NEEDED	0x02
+		bit	FETCH_INPROG	0x04
+	}
+	/*
+	 * Track whether the transfer byte count for
+	 * the current data phase is odd.
+	 */
+	DATA_COUNT_ODD {
+		size		1
+	}
+}
+
+scratch_ram {
+	/* Mode Specific */
+	address			0x0F8
+	size			8
+	modes			0, 1, 2, 3
+	LONGJMP_ADDR {
+		size		2
+	}
+	LONGJMP_SCB {
+		size		2
+	}
+	ACCUM_SAVE {
+		size		1
+	}
+}
+
+
+scratch_ram {
+	address			0x100
+	size			128
+	modes			0, 1, 2, 3
+	/*
+	 * Per "other-id" execution queues.  We use an array of
+	 * tail pointers into lists of SCBs sorted by "other-id".
+	 * The execution head pointer threads the head SCBs for
+	 * each list.
+	 */
+	WAITING_SCB_TAILS {
+		size		32
+	}
+	WAITING_TID_HEAD {
+		size		2
+	}
+	WAITING_TID_TAIL {
+		size		2
+	}
+	/*
+	 * SCBID of the next SCB in the new SCB queue.
+	 */
+	NEXT_QUEUED_SCB_ADDR {
+		size		4
+	}
+	/*
+	 * Head of the list of SCBs that have
+	 * completed but have not been
+	 * put into the qoutfifo.
+	 */
+	COMPLETE_SCB_HEAD {
+		size		2
+	}
+	/*
+	 * The list of completed SCBs in
+	 * the active DMA.
+	 */
+	COMPLETE_SCB_DMAINPROG_HEAD {
+		size		2
+	}
+	/*
+	 * Head of the list of SCBs that have
+	 * completed but need to be uploaded
+	 * to the host prior to being completed.
+	 */
+	COMPLETE_DMA_SCB_HEAD {
+		size		2
+	}
+	/* Counting semaphore to prevent new select-outs */
+	QFREEZE_COUNT {
+		size		2
+	}
+	/*
+	 * Mode to restore on idle_loop exit.
+	 */
+	SAVED_MODE {
+		size		1
+	}
+	/*
+	 * Single byte buffer used to designate the type of message
+	 * to send to a target.
+	 */
+	MSG_OUT {
+		size		1
+	}
+	/* Parameters for DMA Logic */
+	DMAPARAMS {
+		size		1
+		bit	PRELOADEN	0x80
+		bit	WIDEODD		0x40
+		bit	SCSIEN		0x20
+		bit	SDMAEN		0x10
+		bit	SDMAENACK	0x10
+		bit	HDMAEN		0x08
+		bit	HDMAENACK	0x08
+		bit	DIRECTION	0x04	/* Set indicates PCI->SCSI */
+		bit	FIFOFLUSH	0x02
+		bit	FIFORESET	0x01
+	}
+	SEQ_FLAGS {
+		size		1
+		bit	NOT_IDENTIFIED		0x80
+		bit	TARGET_CMD_IS_TAGGED	0x40
+		bit	NO_CDB_SENT		0x40
+		bit	DPHASE			0x20
+		/* Target flags */
+		bit	TARG_CMD_PENDING	0x10
+		bit	CMDPHASE_PENDING	0x08
+		bit	DPHASE_PENDING		0x04
+		bit	SPHASE_PENDING		0x02
+		bit	NO_DISCONNECT		0x01
+	}
+	/*
+	 * Temporary storage for the
+	 * target/channel/lun of a
+	 * reconnecting target
+	 */
+	SAVED_SCSIID {
+		size		1
+	}
+	SAVED_LUN {
+		size		1
+	}
+	/*
+	 * The last bus phase as seen by the sequencer.
+	 */
+	LASTPHASE {
+		size		1
+		bit	CDI		0x80
+		bit	IOI		0x40
+		bit	MSGI		0x20
+		mask	PHASE_MASK	CDI|IOI|MSGI
+		mask	P_DATAOUT	0x00
+		mask	P_DATAIN	IOI
+		mask	P_DATAOUT_DT	P_DATAOUT|MSGI
+		mask	P_DATAIN_DT	P_DATAIN|MSGI
+		mask	P_COMMAND	CDI
+		mask	P_MESGOUT	CDI|MSGI
+		mask	P_STATUS	CDI|IOI
+		mask	P_MESGIN	CDI|IOI|MSGI
+		mask	P_BUSFREE	0x01
+	}
+	/*
+	 * Base address of our shared data with the kernel driver in host
+	 * memory.  This includes the qoutfifo and target mode
+	 * incoming command queue.
+	 */
+	SHARED_DATA_ADDR {
+		size		4
+	}
+	/*
+	 * Pointer to location in host memory for next
+	 * position in the qoutfifo.
+	 */
+	QOUTFIFO_NEXT_ADDR {
+		size		4
+	}
+	/*
+	 * Kernel and sequencer offsets into the queue of
+	 * incoming target mode command descriptors.  The
+	 * queue is full when the KERNEL_TQINPOS == TQINPOS.
+ */ + KERNEL_TQINPOS { + size 1 + } + TQINPOS { + size 1 + } + ARG_1 { + size 1 + mask SEND_MSG 0x80 + mask SEND_SENSE 0x40 + mask SEND_REJ 0x20 + mask MSGOUT_PHASEMIS 0x10 + mask EXIT_MSG_LOOP 0x08 + mask CONT_MSG_LOOP_WRITE 0x04 + mask CONT_MSG_LOOP_READ 0x03 + mask CONT_MSG_LOOP_TARG 0x02 + alias RETURN_1 + } + ARG_2 { + size 1 + alias RETURN_2 + } + + /* + * Snapshot of MSG_OUT taken after each message is sent. + */ + LAST_MSG { + size 1 + } + + /* + * Sequences the kernel driver has okayed for us. This allows + * the driver to do things like prevent initiator or target + * operations. + */ + SCSISEQ_TEMPLATE { + size 1 + bit MANUALCTL 0x40 + bit ENSELI 0x20 + bit ENRSELI 0x10 + mask MANUALP 0x0C + bit ENAUTOATNP 0x02 + bit ALTSTIM 0x01 + } + + /* + * The initiator specified tag for this target mode transaction. + */ + INITIATOR_TAG { + size 1 + } + + SEQ_FLAGS2 { + size 1 + bit SCB_DMA 0x01 + bit TARGET_MSG_PENDING 0x02 + bit SELECTOUT_QFROZEN 0x04 + } + /* + * Target-mode CDB type to CDB length table used + * in non-packetized operation. + */ + CMDSIZE_TABLE { + size 8 + } +} + +/************************* Hardware SCB Definition ****************************/ +scb { + address 0x180 + size 64 + modes 0, 1, 2, 3 + SCB_RESIDUAL_DATACNT { + size 4 + alias SCB_CDB_STORE + } + SCB_RESIDUAL_SGPTR { + size 4 + alias SCB_CDB_PTR + mask SG_ADDR_MASK 0xf8 /* In the last byte */ + bit SG_OVERRUN_RESID 0x02 /* In the first byte */ + bit SG_LIST_NULL 0x01 /* In the first byte */ + } + SCB_SCSI_STATUS { + size 1 + } + SCB_TARGET_PHASES { + size 1 + } + SCB_TARGET_DATA_DIR { + size 1 + } + SCB_TARGET_ITAG { + size 1 + } + SCB_SENSE_BUSADDR { + /* + * Only valid if CDB length is less than 13 bytes or + * we are using a CDB pointer. Otherwise contains + * the last 4 bytes of embedded cdb information. + */ + size 4 + alias SCB_NEXT_COMPLETE + } + SCB_CDB_LEN { + size 1 + bit SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */ + } + SCB_TASK_MANAGEMENT { + size 1 + } + SCB_TAG { + size 2 + } + SCB_NEXT { + alias SCB_NEXT_SCB_BUSADDR + size 2 + } + SCB_NEXT2 { + size 2 + } + SCB_DATAPTR { + size 8 + } + SCB_DATACNT { + /* + * The last byte is really the high address bits for + * the data address. 
+	 */
+		size		4
+		bit	SG_LAST_SEG		0x80	/* In the fourth byte */
+		mask	SG_HIGH_ADDR_BITS	0x7F	/* In the fourth byte */
+	}
+	SCB_SGPTR {
+		size		4
+		bit	SG_STATUS_VALID	0x04	/* In the first byte */
+		bit	SG_FULL_RESID	0x02	/* In the first byte */
+		bit	SG_LIST_NULL	0x01	/* In the first byte */
+	}
+	SCB_CONTROL {
+		size		1
+		bit	TARGET_SCB	0x80
+		bit	DISCENB		0x40
+		bit	TAG_ENB		0x20
+		bit	MK_MESSAGE	0x10
+		bit	STATUS_RCVD	0x08
+		bit	DISCONNECTED	0x04
+		mask	SCB_TAG_TYPE	0x03
+	}
+	SCB_SCSIID {
+		size		1
+		mask	TID	0xF0
+		mask	OID	0x0F
+	}
+	SCB_LUN {
+		size		1
+		mask	LID	0xff
+	}
+	SCB_TASK_ATTRIBUTE {
+		size		1
+		alias	SCB_NONPACKET_TAG
+	}
+	SCB_BUSADDR {
+		size		4
+	}
+	SCB_DISCONNECTED_LISTS {
+		size		16
+	}
+}
+
+/*********************************** Constants ********************************/
+const SEQ_STACK_SIZE		8
+const MK_MESSAGE_BIT_OFFSET	4
+const TID_SHIFT			4
+const TARGET_CMD_CMPLT		0xfe
+const INVALID_ADDR		0x80
+#define SCB_LIST_NULL		0xff
+
+const CCSGADDR_MAX	0x80
+const CCSCBADDR_MAX	0x80
+const CCSGRAM_MAXSEGS	16
+
+/* Selection Timeout Timer Constants */
+const STIMESEL_SHIFT	3
+const STIMESEL_MIN	0x18
+const STIMESEL_BUG_ADJ	0x8
+
+/* WDTR Message values */
+const BUS_8_BIT		0x00
+const BUS_16_BIT	0x01
+const BUS_32_BIT	0x02
+
+/* Offset maximums */
+const MAX_OFFSET	0xfe
+const MAX_OFFSET_PACED	0x7f
+const HOST_MSG		0xff
+
+/*
+ * The size of our sense buffers.
+ * Sense buffer mapping can be handled in either of two ways.
+ * The first is to allocate a dmamap for each transaction.
+ * Depending on the architecture, dmamaps can be costly.  The
+ * alternative is to statically map the buffers in much the same
+ * way we handle our scatter gather lists.  The driver implements
+ * the latter.
+ */
+const AHD_SENSE_BUFSIZE	256
+
+/* Target mode command processing constants */
+const CMD_GROUP_CODE_SHIFT	0x05
+
+const STATUS_BUSY		0x08
+const STATUS_QUEUE_FULL		0x28
+const STATUS_PKT_SENSE		0xFF
+const TARGET_DATA_IN		1
+
+const SCB_TRANSFER_SIZE		48
+/* PKT_OVERRUN_BUFSIZE must be a multiple of 256 less than 64K */
+const PKT_OVERRUN_BUFSIZE	512
+
+/*
+ * Downloaded (kernel inserted) constants
+ */
+const SG_PREFETCH_CNT download
+const SG_PREFETCH_CNT_LIMIT download
+const SG_PREFETCH_ALIGN_MASK download
+const SG_PREFETCH_ADDR_MASK download
+const SG_SIZEOF download
+const PKT_OVERRUN_BUFOFFSET download
+
+/*
+ * BIOS SCB offsets
+ */
+const NVRAM_SCB_OFFSET	0x2C
diff --git a/sys/dev/aic7xxx/aic79xx.seq b/sys/dev/aic7xxx/aic79xx.seq
new file mode 100644
index 0000000..0fb6575
--- /dev/null
+++ b/sys/dev/aic7xxx/aic79xx.seq
@@ -0,0 +1,1723 @@
+/*
+ * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3.
Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#42 $"
+PATCH_ARG_LIST = "struct ahd_softc *ahd"
+
+#include "aic79xx.reg"
+#include "scsi_message.h"
+
+idle_loop:
+	SET_MODE(M_SCSI, M_SCSI);
+	test SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus;
+	test SEQ_FLAGS2, SELECTOUT_QFROZEN jnz idle_loop_checkbus;
+	cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus;
+	/*
+	 * ENSELO is cleared by a SELDO, so we must test for SELDO
+	 * one last time.
+	 */
+BEGIN_CRITICAL;
+	test SSTAT0, SELDO jnz select_out;
+END_CRITICAL;
+	call start_selection;
+idle_loop_checkbus:
+BEGIN_CRITICAL;
+	test SSTAT0, SELDO jnz select_out;
+END_CRITICAL;
+	test SSTAT0, SELDI jnz select_in;
+	test SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq;
+	test SCSISIGO, ATNO jz idle_loop_check_nonpackreq;
+	call unexpected_nonpkt_phase_find_ctxt;
+idle_loop_check_nonpackreq:
+	test SSTAT2, NONPACKREQ jz idle_loop_scsi;
+	call unexpected_nonpkt_phase_find_ctxt;
+idle_loop_scsi:
+BEGIN_CRITICAL;
+	test LQISTAT2, LQIGSAVAIL jz idle_loop_service_fifos;
+	/*
+	 * We have received good status for this transaction.  There may
+	 * still be data in our FIFOs draining to the host.  Set up
+	 * monitoring of the draining process or complete the SCB.
+	 */
+good_status_IU_done:
+	bmov SCBPTR, GSFIFO, 2;
+	clr SCB_SCSI_STATUS;
+	or SCB_CONTROL, STATUS_RCVD;
+
+	/*
+	 * Since this status did not consume a FIFO, we have to
+	 * be a bit more diligent in how we check for FIFOs pertaining
+	 * to this transaction.  There are three states that a FIFO still
+	 * transferring data may be in.
+	 *
+	 * 1) Configured and draining to the host, with a pending CLRCHN.
+	 * 2) Configured and draining to the host, no pending CLRCHN.
+	 * 3) Pending cfg4data, fifo not empty.
+	 *
+	 * For case 1, we assume that our DMA post of the completed command
+	 * will occur after the FIFO finishes draining due to the higher
+	 * priority of data FIFO transfers relative to command channel
+	 * transfers.
+	 *
+	 * Case 2 can be detected by noticing that a longjmp is active for the
+	 * FIFO and LONGJMP_SCB matches our SCB.  In this case, we allow
+	 * the routine servicing the FIFO to complete the SCB.
+	 *
+	 * Case 3 implies either a pending or yet to occur save data
+	 * pointers for this same context in the other FIFO. 
So, if + * we detect case 2, we will properly defer the post of the SCB + * and achieve the desired result. The pending cfg4data will + * notice that status has been received and complete the SCB. + */ + test SCB_SGPTR, SG_LIST_NULL jz good_status_check_fifos; + /* + * All segments have been loaded (or no data transfer), so + * it is safe to complete the command. Since this was a + * cheap command to check for completion, loop to see if + * more entries can be removed from the GSFIFO. + */ + call complete; +END_CRITICAL; + jmp idle_loop_scsi; +BEGIN_CRITICAL; +good_status_check_fifos: + clc; + bmov ARG_1, SCBPTR, 2; + SET_MODE(M_DFF0, M_DFF0); + call check_fifo; + jc idle_loop_service_fifos; + SET_MODE(M_DFF1, M_DFF1); + call check_fifo; + jc idle_loop_service_fifos; + SET_MODE(M_SCSI, M_SCSI); + call queue_scb_completion; +END_CRITICAL; +idle_loop_service_fifos: + SET_MODE(M_DFF0, M_DFF0); + test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo; + call longjmp; +idle_loop_next_fifo: + SET_MODE(M_DFF1, M_DFF1); + test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_last_fifo_done; + call longjmp; +idle_loop_last_fifo_done: + call idle_loop_cchan; + jmp idle_loop; + +idle_loop_cchan: + SET_MODE(M_CCHAN, M_CCHAN); + test CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle; + test CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog; + test CCSCBCTL, CCSCBDONE jz return; + /* FALLTHROUGH */ +scbdma_tohost_done: + test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone; + /* + * A complete SCB upload requires no intervention. + * The SCB is already on the COMPLETE_SCB list + * and its completion notification will now be + * handled just like any other SCB. + */ + and CCSCBCTL, ~(CCARREN|CCSCBEN) ret; +fill_qoutfifo_dmadone: + and CCSCBCTL, ~(CCARREN|CCSCBEN); + mvi INTSTAT, CMDCMPLT; + mvi COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL; + bmov QOUTFIFO_NEXT_ADDR, SCBHADDR, 4; + test QOFF_CTLSTA, SDSCB_ROLLOVR jz return; + bmov QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4 ret; + +fetch_new_scb_inprog: + test CCSCBCTL, ARRDONE jz return; +fetch_new_scb_done: + and CCSCBCTL, ~(CCARREN|CCSCBEN); + bmov REG0, SCBPTR, 2; + /* Update the next SCB address to download. */ + bmov NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4; + mvi SCB_NEXT[1], SCB_LIST_NULL; + mvi SCB_NEXT2[1], SCB_LIST_NULL; + /* + * SCBs that want to send messages are always + * queued independently. This ensures that they + * are at the head of the SCB list to select out + * to a target and we will see the MK_MESSAGE flag. + */ + test SCB_CONTROL, MK_MESSAGE jnz first_new_target_scb; + shr SINDEX, 3, SCB_SCSIID; + and SINDEX, ~0x1; + mvi SINDEX[1], (WAITING_SCB_TAILS >> 8); + bmov DINDEX, SINDEX, 2; + bmov SCBPTR, SINDIR, 2; + bmov DINDIR, REG0, 2; + cmp SCBPTR[1], SCB_LIST_NULL je first_new_target_scb; + bmov SCB_NEXT, REG0, 2; +fetch_new_scb_fini: + /* Increment our position in the QINFIFO. */ + mov NONE, SNSCB_QOFF ret; +first_new_target_scb: + cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb; + bmov SCBPTR, WAITING_TID_TAIL, 2; + bmov SCB_NEXT2, REG0, 2; + bmov WAITING_TID_TAIL, REG0, 2; + /* Increment our position in the QINFIFO. */ + mov NONE, SNSCB_QOFF ret; +first_new_scb: + bmov WAITING_TID_HEAD, REG0, 2; + bmov WAITING_TID_TAIL, REG0, 2; + /* Increment our position in the QINFIFO. */ + mov NONE, SNSCB_QOFF ret; + +scbdma_idle: + /* + * Give precedence to downloading new SCBs to execute + * unless select-outs are currently frozen. + * XXX Use a timer to prevent completion starvation. + */ + test SEQ_FLAGS2, SELECTOUT_QFROZEN jnz . 
+ 2;
+BEGIN_CRITICAL;
+	test QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb;
+	cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb;
+	cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return;
+	/* FALLTHROUGH */
+fill_qoutfifo:
+	/*
+	 * Keep track of the SCBs we are dmaing just
+	 * in case the DMA fails or is aborted.
+	 */
+	bmov COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2;
+	mvi CCSCBCTL, CCSCBRESET;
+	bmov SCBHADDR, QOUTFIFO_NEXT_ADDR, 4;
+	bmov CCSCBRAM, COMPLETE_SCB_HEAD, 2;
+	bmov SCBPTR, COMPLETE_SCB_HEAD, 2;
+	jmp fill_qoutfifo_first_entry;
+fill_qoutfifo_loop:
+	bmov CCSCBRAM, SCB_NEXT_COMPLETE, 2;
+	bmov SCBPTR, SCB_NEXT_COMPLETE, 2;
+fill_qoutfifo_first_entry:
+	mov NONE, SDSCB_QOFF;
+	cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done;
+	cmp CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done;
+	test QOFF_CTLSTA, SDSCB_ROLLOVR jz fill_qoutfifo_loop;
+fill_qoutfifo_done:
+	mov SCBHCNT, CCSCBADDR;
+	mvi CCSCBCTL, CCSCBEN|CCSCBRESET;
+	bmov COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
+	mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret;
+
+fetch_new_scb:
+	bmov SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4;
+	mvi CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb;
+dma_complete_scb:
+	bmov SCBPTR, COMPLETE_DMA_SCB_HEAD, 2;
+	bmov SCBHADDR, SCB_BUSADDR, 4;
+	mvi CCARREN|CCSCBEN|CCSCBRESET call dma_scb;
+	/*
+	 * Now that we've started the DMA, push us onto
+	 * the normal completion queue to have our SCBID
+	 * posted to the kernel.
+	 */
+	bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
+	bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
+	bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
+END_CRITICAL;
+
+/*
+ * Either post or fetch an SCB from host memory.  The caller
+ * is responsible for polling for transfer completion.
+ *
+ * Prerequisites: Mode == M_CCHAN
+ *		  SINDEX contains CCSCBCTL flags
+ *		  SCBHADDR set to Host SCB address
+ *		  SCBPTR set to SCB src location on "push" operations
+ */
+SET_SRC_MODE	M_CCHAN;
+SET_DST_MODE	M_CCHAN;
+dma_scb:
+	mvi SCBHCNT, SCB_TRANSFER_SIZE;
+	mov CCSCBCTL, SINDEX;
+	or SEQ_FLAGS2, SCB_DMA ret;
+
+BEGIN_CRITICAL;
+setjmp_setscb:
+	bmov LONGJMP_SCB, SCBPTR, 2;
+setjmp:
+	bmov LONGJMP_ADDR, STACK, 2 ret;
+setjmp_inline:
+	bmov LONGJMP_ADDR, STACK, 2;
+longjmp:
+	bmov STACK, LONGJMP_ADDR, 2 ret;
+END_CRITICAL;
+
+/************************ Packetized LongJmp Routines *************************/
+/*
+ * Must disable interrupts when setting the mode pointer
+ * register as an interrupt occurring mid update will
+ * fail to store the new mode value for restoration on
+ * an iret.
+ */
+set_mode_work_around:
+	mvi SEQINTCTL, INTVEC1DSL;
+	mov MODE_PTR, SINDEX;
+	clr SEQINTCTL ret;
+
+SET_SRC_MODE	M_SCSI;
+SET_DST_MODE	M_SCSI;
+start_selection:
+BEGIN_CRITICAL;
+	if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
+		/*
+		 * Razor #494
+		 * Rev A hardware fails to update LAST/CURR/NEXTSCB
+		 * correctly after a packetized selection in several
+		 * situations:
+		 *
+		 * 1) If only one command existed in the queue, the
+		 *    LAST/CURR/NEXTSCB are unchanged.
+		 *
+		 * 2) In a non QAS, protocol allowed phase change,
+		 *    the queue is shifted 1 too far.  LASTSCB is
+		 *    the last SCB that was correctly processed.
+		 *
+		 * 3) In the QAS case, if the full list of commands
+		 *    was successfully sent, NEXTSCB is NULL and neither
+		 *    CURRSCB nor LASTSCB can be trusted.  We must
+		 *    manually walk the list counting MAXCMDCNT elements
+		 *    to find the last SCB that was sent correctly.
+ *
+ * To simplify the workaround for this bug in SELDO
+ * handling, we initialize LASTSCB prior to enabling
+ * selection so we can rely on it even for case #1 above.
+ */
+ bmov LASTSCB, WAITING_TID_HEAD, 2;
+ }
+ bmov CURRSCB, WAITING_TID_HEAD, 2;
+ bmov SCBPTR, WAITING_TID_HEAD, 2;
+ shr SELOID, 4, SCB_SCSIID;
+ /*
+ * If we want to send a message to the device, ensure
+ * we are selecting with atn regardless of our packetized
+ * agreement. Since SPI4 only allows target reset or PPR
+ * messages if this is a packetized connection, the change
+ * to our negotiation table entry for this selection will
+ * be cleared when the message is acted on.
+ */
+ test SCB_CONTROL, MK_MESSAGE jz . + 3;
+ mov NEGOADDR, SELOID;
+ or NEGCONOPTS, ENAUTOATNO;
+ or SCSISEQ0, ENSELO ret;
+END_CRITICAL;
+
+/*
+ * Allocate a FIFO for a non-packetized transaction.
+ * For some reason unknown to me, both FIFOs must be free before we
+ * can allocate a FIFO for a non-packetized transaction. This
+ * may be fixed in Rev B.
+ */
+allocate_fifo_loop:
+ /*
+ * Do whatever work is required to free a FIFO.
+ */
+ SET_MODE(M_DFF0, M_DFF0);
+ test LONGJMP_ADDR[1], INVALID_ADDR jnz . + 2;
+ call longjmp;
+ SET_MODE(M_DFF1, M_DFF1);
+ test LONGJMP_ADDR[1], INVALID_ADDR jnz . + 2;
+ call longjmp;
+ SET_MODE(M_SCSI, M_SCSI);
+allocate_fifo:
+ and A, FIFO0FREE|FIFO1FREE, DFFSTAT;
+ cmp A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop;
+take_fifo:
+ bmov ARG_1, SCBPTR, 2;
+ or DFFSTAT, CURRFIFO;
+ SET_MODE(M_DFF1, M_DFF1);
+ bmov SCBPTR, ARG_1, 2 ret;
+
+/*
+ * We have been reselected as an initiator
+ * or selected as a target.
+ */
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+select_in:
+ or SXFRCTL0, SPIOEN;
+ and SAVED_SCSIID, SELID_MASK, SELID;
+ and A, OID, IOWNID;
+ or SAVED_SCSIID, A;
+ mvi CLRSINT0, CLRSELDI;
+ jmp ITloop;
+
+/*
+ * We have successfully selected out.
+ *
+ * Clear SELDO.
+ * Dequeue all SCBs sent from the waiting queue
+ * Requeue all SCBs *not* sent to the tail of the waiting queue
+ * Take Razor #494 into account for above.
+ *
+ * In Packetized Mode:
+ * Return to the idle loop. Our interrupt handler will take
+ * care of any incoming L_Qs.
+ *
+ * In Non-Packetized Mode:
+ * Continue to our normal state machine.
+ */
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+select_out:
+BEGIN_CRITICAL;
+ /* Clear out all SCBs that have been successfully sent. */
+ if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
+ /*
+ * For packetized, the LQO manager clears ENSELO on
+ * the assertion of SELDO. If we are non-packetized,
+ * LASTSCB and CURRSCB are accurate.
+ */
+ test SCSISEQ0, ENSELO jnz use_lastscb;
+
+ /*
+ * The update is correct for LQOSTAT1 errors. All
+ * but LQOBUSFREE are handled by kernel interrupts.
+ * If we see LQOBUSFREE, return to the idle loop.
+ * Once we are out of the select_out critical section,
+ * the kernel will cleanup the LQOBUSFREE and we will
+ * eventually restart the selection if appropriate.
+ */
+ test LQOSTAT1, LQOBUSFREE jnz idle_loop;
+
+ /*
+ * On a phase change outside of packet boundaries,
+ * LASTSCB points to the currently active SCB context
+ * on the bus.
+ */
+ test LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb;
+
+ /*
+ * If the hardware has traversed the whole list, NEXTSCB
+ * will be NULL, CURRSCB and LASTSCB cannot be trusted,
+ * but MAXCMDCNT is accurate. If we stop part way through
+ * the list or only had one command to issue, NEXTSCB[1] is
+ * not NULL and LASTSCB is the last command to go out.
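+ *
+ * Restated as C-like pseudocode (sketch only; each branch
+ * mirrors one of the tests below):
+ *
+ *	if (ENSELO still set)		use LASTSCB;
+ *	else if (LQOBUSFREE)		return to idle loop;
+ *	else if (LQOPHACHGOUTPKT)	use LASTSCB;
+ *	else if (NEXTSCB[1] != NULL)	use LASTSCB;
+ *	else				walk MAXCMDCNT entries
+ *					from WAITING_TID_HEAD;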
+ */
+ cmp NEXTSCB[1], SCB_LIST_NULL jne use_lastscb;
+
+ /*
+ * Brute force walk.
+ */
+ bmov SCBPTR, WAITING_TID_HEAD, 2;
+ mvi SEQINTCTL, INTVEC1DSL;
+ mvi MODE_PTR, MK_MODE(M_CFG, M_CFG);
+ mov A, MAXCMDCNT;
+ mvi MODE_PTR, MK_MODE(M_SCSI, M_SCSI);
+ clr SEQINTCTL;
+find_lastscb_loop:
+ dec A;
+ test A, 0xFF jz found_last_sent_scb;
+ bmov SCBPTR, SCB_NEXT, 2;
+ jmp find_lastscb_loop;
+use_lastscb:
+ bmov SCBPTR, LASTSCB, 2;
+found_last_sent_scb:
+ bmov CURRSCB, SCBPTR, 2;
+curscb_ww_done:
+ } else {
+ /*
+ * Untested - Verify with Rev B.
+ */
+ bmov SCBPTR, CURRSCB, 2;
+ }
+ /*
+ * Requeue any SCBs not sent, to the tail of the waiting Q.
+ */
+ cmp SCB_NEXT[1], SCB_LIST_NULL je select_out_list_done;
+
+ /*
+ * We know that neither the per-TID list nor the list of
+ * TIDs is empty. Use this knowledge to our advantage.
+ */
+ bmov REG0, SCB_NEXT, 2;
+ bmov SCBPTR, WAITING_TID_TAIL, 2;
+ bmov SCB_NEXT2, REG0, 2;
+ bmov WAITING_TID_TAIL, REG0, 2;
+ jmp select_out_inc_tid_q;
+
+select_out_list_done:
+ /*
+ * The whole list made it. Just clear our TID's tail pointer
+ * unless we were queued independently due to our need to
+ * send a message.
+ */
+ test SCB_CONTROL, MK_MESSAGE jnz select_out_inc_tid_q;
+ shr DINDEX, 3, SCB_SCSIID;
+/* XXX When we switch to SCB_SELOID, put +1 in addition below. */
+ or DINDEX, 1; /* Want only the second byte */
+ mvi DINDEX[1], ((WAITING_SCB_TAILS) >> 8);
+ mvi DINDIR, SCB_LIST_NULL;
+select_out_inc_tid_q:
+ bmov SCBPTR, WAITING_TID_HEAD, 2;
+ bmov WAITING_TID_HEAD, SCB_NEXT2, 2;
+ cmp WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2;
+ mvi WAITING_TID_TAIL[1], SCB_LIST_NULL;
+ bmov SCBPTR, CURRSCB, 2;
+END_CRITICAL;
+
+ mvi CLRSINT0, CLRSELDO;
+
+ test LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_phase;
+ test LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_phase;
+
+ /*
+ * If this is a packetized connection, return to our
+ * idle_loop and let our interrupt handler deal with
+ * any connection setup/teardown issues. The only
+ * exception is the case of MK_MESSAGE SCBs. In the
+ * Rev A, the LQO manager transitions to LQOSTOP0 even if
+ * we have selected out with ATN asserted and the target
+ * REQs in a non-packet phase.
+ */
+ if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) {
+ test SCB_CONTROL, MK_MESSAGE jz select_out_no_message;
+ test SCSISIGO, ATNO jnz select_out_non_packetized;
+select_out_no_message:
+ }
+ test LQOSTAT2, LQOSTOP0 jnz idle_loop;
+
+select_out_non_packetized:
+ /* Non-packetized request. */
+ and SCSISEQ0, ~ENSELO;
+ mov SAVED_SCSIID, SCB_SCSIID;
+ mov SAVED_LUN, SCB_LUN;
+ or SXFRCTL0, SPIOEN;
+
+ /*
+ * As soon as we get a successful selection, the target
+ * should go into the message out phase since we have ATN
+ * asserted.
+ */
+ mvi MSG_OUT, MSG_IDENTIFYFLAG;
+ mvi SEQ_FLAGS, NO_CDB_SENT;
+
+ /*
+ * Main loop for information transfer phases. Wait for the
+ * target to assert REQ before checking MSG, C/D and I/O for
+ * the bus phase.
+ */
+mesgin_phasemis:
+ITloop:
+ call phase_lock;
+
+ mov A, LASTPHASE;
+
+ test A, ~P_DATAIN_DT jz p_data;
+ cmp A,P_COMMAND je p_command;
+ cmp A,P_MESGOUT je p_mesgout;
+ cmp A,P_STATUS je p_status;
+ cmp A,P_MESGIN je p_mesgin;
+
+ mvi SEQINTCODE, BAD_PHASE;
+ jmp ITloop; /* Try reading the bus again. */
+
+/*
+ * Command phase. Set up the DMA registers and let 'er rip.
+ */
+p_command:
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+ jnz p_command_allocate_fifo;
+ /*
+ * Command retry. Free our current FIFO and
+ * re-allocate a FIFO so transfer state is
+ * reset.
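+ *
+ * For reference, the CDB delivery decision just below is,
+ * in C terms (sketch only; the 16 byte limit reflects the
+ * size of SCB_CDB_STORE):
+ *
+ *	if (scb->cdb_len > 16)
+ *		DMA the CDB from host memory at SCB_CDB_PTR;
+ *	else
+ *		PIO the CDB embedded in SCB_CDB_STORE;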
+ */
+ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
+p_command_allocate_fifo:
+ call allocate_fifo;
+ add NONE, -17, SCB_CDB_LEN;
+ jnc p_command_embedded;
+p_command_from_host:
+ bmov HADDR[0], SCB_CDB_PTR, 11;
+ mvi SG_CACHE_PRE, LAST_SEG;
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+ jmp p_command_loop;
+p_command_embedded:
+ bmov SHCNT[0], SCB_CDB_LEN, 1;
+ bmov SHCNT[1], ALLZEROS, 2;
+ bmov DFDAT, SCB_CDB_STORE, 16;
+ mvi DFCNTRL, SCSIEN;
+p_command_loop:
+ test DFCNTRL, SCSIEN jnz p_command_loop;
+ /*
+ * DMA Channel automatically disabled.
+ * Don't allow a data phase if the command
+ * was not fully transferred. Make sure that
+ * we clear the IDENTIFY SEEN flag if a retry
+ * falls short too.
+ */
+ and SEQ_FLAGS, ~NO_CDB_SENT;
+ test SSTAT2, SDONE jnz ITloop;
+ or SEQ_FLAGS, NO_CDB_SENT;
+ jmp ITloop;
+
+
+/*
+ * Status phase. Wait for the data byte to appear, then read it
+ * and store it into the SCB.
+ */
+SET_SRC_MODE M_SCSI;
+SET_DST_MODE M_SCSI;
+p_status:
+ test SEQ_FLAGS,NOT_IDENTIFIED jz p_status_okay;
+ mvi SEQINTCODE, PROTO_VIOLATION;
+ jmp mesgin_done;
+p_status_okay:
+ mov SCB_SCSI_STATUS, SCSIDAT;
+ or SCB_CONTROL, STATUS_RCVD;
+ jmp ITloop;
+
+/*
+ * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full
+ * identify message sequence and send it to the target. The host may
+ * override this behavior by setting the MK_MESSAGE bit in the SCB
+ * control byte. This will cause us to interrupt the host and allow
+ * it to handle the message phase completely on its own. If the bit
+ * associated with this target is set, we will also interrupt the host,
+ * thereby allowing it to send a message on the next selection regardless
+ * of the transaction being sent.
+ *
+ * If MSG_OUT == HOST_MSG, also interrupt the host and take a message.
+ * This is done to allow the host to send messages outside of an identify
+ * sequence while protecting the sequencer from testing the MK_MESSAGE bit
+ * on an SCB that might not be for the current nexus. (For example, a
+ * BDR message in response to a bad reselection would leave us pointed to
+ * an SCB that doesn't have anything to do with the current target).
+ *
+ * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
+ * bus device reset).
+ *
+ * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
+ * in case the target decides to put us in this phase for some strange
+ * reason.
+ */
+p_mesgout_retry:
+ /* Turn on ATN for the retry */
+ mvi SCSISIGO, ATNO;
+p_mesgout:
+ mov SINDEX, MSG_OUT;
+ cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
+ test SCB_CONTROL,MK_MESSAGE jnz host_message_loop;
+p_mesgout_identify:
+ or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN;
+ test SCB_CONTROL, DISCENB jnz . + 2;
+ and SINDEX, ~DISCENB;
+/*
+ * Send a tag message if TAG_ENB is set in the SCB control block.
+ * Use SCB_NONPACKET_TAG as the tag value.
+ */
+p_mesgout_tag:
+ test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte;
+ mov SCSIDAT, SINDEX; /* Send the identify message */
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
+ and SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT jne p_mesgout_done;
+ mov SCB_NONPACKET_TAG jmp p_mesgout_onebyte;
+/*
+ * Interrupt the driver, and allow it to handle this message
+ * phase and any required retries.
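+ *
+ * The kernel's side of this handshake behaves roughly as
+ * follows (sketch; RETURN_1/RETURN_2 are the mailbox
+ * registers tested in host_message_loop below):
+ *
+ *	for (;;) {
+ *		sequencer posts HOST_MSG_LOOP;
+ *		if (RETURN_1 == EXIT_MSG_LOOP)
+ *			break;
+ *		if (RETURN_1 == CONT_MSG_LOOP_WRITE)
+ *			put RETURN_2 on the bus;
+ *		else
+ *			ack the latched byte;
+ *	}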
+ */
+p_mesgout_from_host:
+ cmp SINDEX, HOST_MSG jne p_mesgout_onebyte;
+ jmp host_message_loop;
+
+p_mesgout_onebyte:
+ mvi CLRSINT1, CLRATNO;
+ mov SCSIDAT, SINDEX;
+
+/*
+ * If the next bus phase after ATN drops is message out, it means
+ * that the target is requesting that the last message(s) be resent.
+ */
+ call phase_lock;
+ cmp LASTPHASE, P_MESGOUT je p_mesgout_retry;
+
+p_mesgout_done:
+ mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */
+ mov LAST_MSG, MSG_OUT;
+ mvi MSG_OUT, MSG_NOOP; /* No message left */
+ jmp ITloop;
+
+/*
+ * Message in phase. Bytes are read using Automatic PIO mode.
+ */
+p_mesgin:
+ /* read the 1st message byte */
+ mvi ACCUM call inb_first;
+
+ test A,MSG_IDENTIFYFLAG jnz mesgin_identify;
+ cmp A,MSG_DISCONNECT je mesgin_disconnect;
+ cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs;
+ cmp ALLZEROS,A je mesgin_complete;
+ cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs;
+ cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue;
+ cmp A,MSG_NOOP je mesgin_done;
+
+/*
+ * Pushed message loop to allow the kernel to
+ * run its own message state engine. To avoid an
+ * extra nop instruction after signaling the kernel,
+ * we perform the phase_lock before checking to see
+ * if we should exit the loop and skip the phase_lock
+ * in the ITloop. Performing back to back phase_locks
+ * shouldn't hurt, but why do it twice...
+ */
+host_message_loop:
+ call phase_lock; /* Benign the first time through. */
+ mvi SEQINTCODE, HOST_MSG_LOOP;
+ cmp RETURN_1, EXIT_MSG_LOOP je ITloop;
+ cmp RETURN_1, CONT_MSG_LOOP_WRITE jne . + 3;
+ mov SCSIDAT, RETURN_2;
+ jmp host_message_loop;
+ /* Must be CONT_MSG_LOOP_READ */
+ mov NONE, SCSIDAT; /* ACK Byte */
+ jmp host_message_loop;
+
+mesgin_ign_wide_residue:
+ shr NEGOADDR, 4, SAVED_SCSIID;
+ test NEGCONOPTS, WIDEXFER jz mesgin_reject;
+ /* Pull the residue byte */
+ mvi REG0 call inb_next;
+ cmp REG0, 0x01 jne mesgin_reject;
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
+ test DATA_COUNT_ODD, 0x1 jz mesgin_done;
+ mvi SEQINTCODE, IGN_WIDE_RES;
+ jmp mesgin_done;
+
+mesgin_reject:
+ mvi MSG_MESSAGE_REJECT call mk_mesg;
+mesgin_done:
+ mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
+ jmp ITloop;
+
+#define INDEX_DISC_LIST_SCB(target, lun) \
+ mov SCBPTR, lun; \
+ shr SCBPTR[1], 3, target
+
+#define INDEX_DISC_LIST(target, lun) \
+ INDEX_DISC_LIST_SCB(target, lun); \
+ and SINDEX, 0x7, target; \
+ shl SINDEX, 1; \
+ add SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF); \
+ mvi SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF)
+
+mesgin_identify:
+ /*
+ * Determine whether a target is using tagged or non-tagged
+ * transactions by first looking at the transaction stored in
+ * the per-device, disconnected array. If there is no untagged
+ * transaction for this target, this must be a tagged transaction.
+ */
+ shr SINDEX, 4, SAVED_SCSIID;
+ and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
+ INDEX_DISC_LIST(SINDEX, SAVED_LUN);
+ bmov DINDEX, SINDEX, 2;
+ bmov SCBPTR, SINDIR, 2;
+ cmp SCBPTR[1], SCB_LIST_NULL je snoop_tag;
+ test SCB_CONTROL, TAG_ENB jnz snoop_tag;
+ /* Untagged. Setup the SCB. */
+ bmov REG1, SCB_TAG, 4; /* Save SCB_TAG and SCB_NEXT */
+ jmp dequeue_first_scb;
+
+/*
+ * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
+ * If we get one, we use the tag returned to find the proper
+ * SCB. The disconnected list contains any outstanding tagged transactions
+ * where SCB_TAG != SCB_NONPACKET_TAG or SCB_NONPACKET_TAG + 256.
+ * After receiving the tag, look for the SCB at SCB locations tag and
+ * tag + 256. If those SCBs do not match, traverse the
+ * disconnected list until we find the correct SCB.
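+ *
+ * In C terms, the lookup amounts to (sketch only; match()
+ * stands for comparing SCB_NONPACKET_TAG, SCB_SCSIID and
+ * SCB_LUN against the reconnecting nexus):
+ *
+ *	scbptr = tag;
+ *	if (!match(scbptr))
+ *		scbptr = tag ^ 0x100;	(the tag + 256 slot)
+ *	if (!match(scbptr))
+ *		walk the disconnected list for a matching
+ *		SCB_NONPACKET_TAG;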
+ */
+snoop_tag:
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x80;
+ }
+ mov NONE, SCSIDAT; /* ACK Identify MSG */
+ call phase_lock;
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x1;
+ }
+ cmp LASTPHASE, P_MESGIN jne not_found_ITloop;
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x2;
+ }
+ cmp SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found;
+get_tag:
+ /* Save disconnected list head. */
+ bmov REG0, SCBPTR, 2;
+ clr SCBPTR[1];
+ mvi SCBPTR call inb_next; /* tag value */
+ mov REG1, SCBPTR;
+verify_scb:
+ mov A, REG1;
+ cmp SCB_NONPACKET_TAG, A jne verify_other_scb;
+ mov A, SAVED_SCSIID;
+ cmp SCB_SCSIID, A jne verify_other_scb;
+ mov A, SAVED_LUN;
+ cmp SCB_LUN, A je setup_SCB;
+verify_other_scb:
+ xor SCBPTR[1], 1;
+ test SCBPTR[1], 0xFF jnz verify_scb;
+
+search_disc_list:
+ /* Restore disconnected list head. */
+ bmov SCBPTR, REG0, 2;
+ mvi REG0[1], SCB_LIST_NULL;
+ mov A, REG1;
+search_disc_list_loop:
+ cmp SCB_NONPACKET_TAG, A je dequeue_scb;
+ bmov REG0, SCBPTR, 2;
+ bmov SCBPTR, SCB_NEXT, 2;
+ cmp SCBPTR[1], SCB_LIST_NULL jne search_disc_list_loop;
+ jmp not_found;
+
+dequeue_scb:
+ bmov REG1, SCB_TAG, 4; /* Save SCB_TAG and SCB_NEXT */
+ cmp REG0[1], SCB_LIST_NULL jne dequeue_intermediate_SCB;
+dequeue_first_scb:
+ shr SINDEX, 4, SAVED_SCSIID;
+ INDEX_DISC_LIST_SCB(SINDEX, SAVED_LUN);
+ /* Update list head. */
+ bmov DINDIR, REG2, 2;
+ jmp dequeue_restore;
+dequeue_intermediate_SCB:
+ bmov SCBPTR, REG0, 2;
+ bmov SCB_NEXT, REG2, 2;
+dequeue_restore:
+ bmov SCBPTR, REG1, 2;
+
+/*
+ * Ensure that the SCB the tag points to is for
+ * an SCB transaction to the reconnecting target.
+ */
+setup_SCB:
+ if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+ or SEQ_FLAGS, 0x10;
+ }
+ test SCB_CONTROL,DISCONNECTED jz not_found;
+ and SCB_CONTROL,~DISCONNECTED;
+ clr SEQ_FLAGS; /* make note of IDENTIFY */
+ test SCB_SGPTR, SG_LIST_NULL jnz . + 2;
+ call allocate_fifo;
+/* mvi SEQINTCODE, PRINT_RESIDUALS; */
+ /* See if the host wants to send a message upon reconnection */
+ test SCB_CONTROL, MK_MESSAGE jz mesgin_done;
+ mvi HOST_MSG call mk_mesg;
+ jmp mesgin_done;
+
+not_found:
+ mvi SEQINTCODE, NO_MATCH;
+ jmp mesgin_done;
+
+not_found_ITloop:
+ mvi SEQINTCODE, NO_MATCH;
+ jmp ITloop;
+
+/*
+ * We received a "command complete" message. Put the SCB on the complete
+ * queue and trigger a completion interrupt via the idle loop. Before doing
+ * so, check to see if there
+ * is a residual or the status byte is something other than STATUS_GOOD (0).
+ * In either of these conditions, we upload the SCB back to the host so it can
+ * process this information. In the case of a non-zero status byte, we
+ * additionally interrupt the kernel driver synchronously, allowing it to
+ * decide if sense should be retrieved. If the kernel driver wishes to request
+ * sense, it will fill the kernel SCB with a request sense command, requeue
+ * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting
+ * RETURN_1 to SEND_SENSE.
+ */
+mesgin_complete:
+
+ /*
+ * If ATN is raised, we still want to give the target a message.
+ * Perhaps there was a parity error on this last message byte.
+ * Either way, the target should take us to message out phase
+ * and then attempt to complete the command again.
+ We should use a
+ * critical section here to guard against a timeout triggering
+ * for this command and setting ATN while we are still processing
+ * the completion.
+ test SCSISIGI, ATNI jnz mesgin_done;
+ */
+
+ /*
+ * If the target never sent an identify message but instead went
+ * to mesgin to give an invalid message, let the host abort us.
+ */
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz . + 3;
+ mvi SEQINTCODE, PROTO_VIOLATION;
+ jmp mesgin_done;
+
+ /*
+ * If the target never gave us status information, have
+ * the host abort the command.
+ */
+ test SCB_CONTROL, STATUS_RCVD jz . - 2;
+
+ /*
+ * See if we attempted to deliver a message but the target ignored us.
+ */
+ test SCB_CONTROL, MK_MESSAGE jz . + 2;
+ mvi SEQINTCODE, MKMSG_FAILED;
+ call queue_scb_completion;
+ jmp await_busfree;
+
+freeze_queue:
+ /* Cancel any pending select-out. */
+ test SSTAT0, SELDO jnz . + 2;
+ and SCSISEQ0, ~ENSELO;
+ mov ACCUM_SAVE, A;
+ clr A;
+ add QFREEZE_COUNT, 1;
+ adc QFREEZE_COUNT[1], A;
+ or SEQ_FLAGS2, SELECTOUT_QFROZEN;
+ mov A, ACCUM_SAVE ret;
+
+queue_arg1_scb_completion:
+ SET_MODE(M_SCSI, M_SCSI);
+ bmov SCBPTR, ARG_1, 2;
+queue_scb_completion:
+ test SCB_SCSI_STATUS,0xff jnz bad_status;
+ /*
+ * Check for residuals
+ */
+ test SCB_SGPTR, SG_LIST_NULL jnz complete; /* No xfer */
+ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
+ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
+complete:
+ bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
+ bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
+bad_status:
+ cmp SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb;
+ call freeze_queue;
+upload_scb:
+ bmov SCB_NEXT_COMPLETE, COMPLETE_DMA_SCB_HEAD, 2;
+ bmov COMPLETE_DMA_SCB_HEAD, SCBPTR, 2;
+ or SCB_SGPTR, SG_STATUS_VALID ret;
+
+/*
+ * Is it a disconnect message? Set a flag in the SCB to remind us
+ * and await the bus going free. If this is an untagged transaction
+ * store the SCB id for it in our untagged target table for lookup on
+ * a reselection.
+ */
+mesgin_disconnect:
+ /*
+ * If ATN is raised, we still want to give the target a message.
+ * Perhaps there was a parity error on this last message byte
+ * or we want to abort this command. Either way, the target
+ * should take us to message out phase and then attempt to
+ * disconnect again.
+ * XXX - Wait for more testing.
+ test SCSISIGI, ATNI jnz mesgin_done;
+ */
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz disconnect_allowed;
+ mvi SEQINTCODE, PROTO_VIOLATION;
+ jmp mesgin_done;
+disconnect_allowed:
+ or SCB_CONTROL,DISCONNECTED;
+ test SCB_CONTROL, TAG_ENB jz queue_disc_scb;
+ mov A, SCB_NONPACKET_TAG;
+ cmp SCBPTR, A je await_busfree;
+queue_disc_scb:
+ bmov REG0, SCBPTR, 2;
+ shr SINDEX, 4, SCB_SCSIID;
+ INDEX_DISC_LIST(SINDEX, SCB_LUN);
+ bmov DINDEX, SINDEX, 2;
+ bmov REG1, SINDIR, 2;
+ bmov DINDIR, REG0, 2;
+ bmov SCBPTR, REG0, 2;
+ bmov SCB_NEXT, REG1, 2;
+ /* FALLTHROUGH */
+await_busfree:
+ and SIMODE1, ~ENBUSFREE;
+ mov NONE, SCSIDAT; /* Ack the last byte */
+ call clear_target_state;
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+ jnz await_busfree_not_m_dff;
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+await_busfree_clrchn:
+ mvi DFFSXFRCTL, CLRCHN;
+await_busfree_not_m_dff:
+ test SSTAT1,REQINIT|BUSFREE jz .;
+ test SSTAT1, BUSFREE jnz idle_loop;
+ mvi SEQINTCODE, MISSED_BUSFREE;
+
+
+/*
+ * Save data pointers message:
+ * Copying RAM values back to SCB, for Save Data Pointers message, but
+ * only if we've actually been into a data phase to change them. This
+ * protects against bogus data in scratch ram and the residual counts
+ * since they are only initialized when we go into data_in or data_out.
+ * Ack the message as soon as possible. For chips without S/G pipelining,
+ * we can only ack the message after SHADDR has been saved. On these
+ * chips, SHADDR increments with every bus transaction, even PIO.
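+ *
+ * The full save below is, in C terms (sketch only; field
+ * names follow this driver's SCB layout):
+ *
+ *	if (scb->residual_sgptr & SG_LIST_NULL)
+ *		scb->sgptr |= SG_LIST_NULL;	(at transfer end)
+ *	else {
+ *		scb->dataptr = SHADDR;
+ *		scb->datacnt = scb->residual_datacnt;
+ *	}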
+ */
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+mesgin_sdptrs:
+ mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
+ test SEQ_FLAGS, DPHASE jz ITloop;
+ call save_pointers;
+ jmp ITloop;
+
+save_pointers:
+ /*
+ * If we are asked to save our position at the end of the
+ * transfer, just mark us at the end rather than perform a
+ * full save.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full;
+ or SCB_SGPTR, SG_LIST_NULL ret;
+
+save_pointers_full:
+ /*
+ * The SCB_DATAPTR becomes the current SHADDR.
+ * All other information comes directly from our residual
+ * state.
+ */
+ bmov SCB_DATAPTR, SHADDR, 8;
+ bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret;
+
+/*
+ * Restore pointers message? Data pointers are recopied from the
+ * SCB anytime we enter a data phase for the first time, so all
+ * we need to do is clear the DPHASE flag and let the data phase
+ * code do the rest. We also reset/reallocate the FIFO to make
+ * sure we have a clean start for the next data phase.
+ */
+mesgin_rdptrs:
+ and SEQ_FLAGS, ~DPHASE;
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo;
+ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
+msgin_rdptrs_get_fifo:
+ call allocate_fifo;
+ jmp mesgin_done;
+
+clear_target_state:
+ mvi LASTPHASE, P_BUSFREE;
+ /* clear target specific flags */
+ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret;
+
+phase_lock:
+ test SCSIPHASE, 0xFF jz .;
+phase_lock_latch_phase:
+ and LASTPHASE, PHASE_MASK, SCSISIGI ret;
+
+/*
+ * Functions to read data in Automatic PIO mode.
+ *
+ * An ACK is not sent on input from the target until SCSIDATL is read from.
+ * So we wait until SCSIDATL is latched (the usual way), then read the data
+ * byte directly off the bus using SCSIBUS. When we have pulled the ATN
+ * line, or we just want to acknowledge the byte, then we do a dummy read
+ * from SCSIDATL. The SCSI spec guarantees that the target will hold the
+ * data byte on the bus until we send our ACK.
+ *
+ * The assumption here is that these are called in a particular sequence,
+ * and that REQ is already set when inb_first is called. inb_{first,next}
+ * use the same calling convention as inb.
+ */
+inb_next:
+ mov NONE,SCSIDAT; /*dummy read from latch to ACK*/
+inb_next_wait:
+ /*
+ * If there is a parity error, wait for the kernel to
+ * see the interrupt and prepare our message response
+ * before continuing.
+ */
+ test SCSIPHASE, 0xFF jz .;
+inb_next_check_phase:
+ and LASTPHASE, PHASE_MASK, SCSISIGI;
+ cmp LASTPHASE, P_MESGIN jne mesgin_phasemis;
+inb_first:
+ clr DINDEX[1];
+ mov DINDEX,SINDEX;
+ mov DINDIR,SCSIBUS ret; /*read byte directly from bus*/
+inb_last:
+ mov NONE,SCSIDAT ret; /*dummy read from latch to ACK*/
+
+mk_mesg:
+ mvi SCSISIGO, ATNO;
+ mov MSG_OUT,SINDEX ret;
+
+SET_SRC_MODE M_DFF1;
+SET_DST_MODE M_DFF1;
+disable_ccsgen:
+ test SG_STATE, FETCH_INPROG jz return;
+ clr SG_STATE;
+disable_ccsgen_fetch_done:
+ clr CCSGCTL ret;
+
+toggle_dff_mode:
+ mvi SEQINTCTL, INTVEC1DSL;
+ xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
+ clr SEQINTCTL ret;
+
+data_group_idle_loop:
+ mov SAVED_MODE, MODE_PTR;
+ test SG_STATE, LOADING_NEEDED jz .
+ 2;
+ call service_fifo;
+ call toggle_dff_mode;
+ test SG_STATE, LOADING_NEEDED jz . + 2;
+ call service_fifo;
+ call idle_loop_cchan;
+ mov SAVED_MODE jmp set_mode_work_around;
+
+service_fifo:
+ /*
+ * Do we have any prefetch left???
+ */
+ test SG_STATE, SEGS_AVAIL jnz idle_sg_avail;
+
+ /*
+ * Can this FIFO have access to the S/G cache yet?
+ */
+ test CCSGCTL, SG_CACHE_AVAIL jz return;
+
+ /* Did we just finish fetching segs? */
+ cmp CCSGCTL, CCSGEN|SG_CACHE_AVAIL|CCSGDONE
+ je idle_sgfetch_complete;
+
+ /* Are we actively fetching segments? */
+ test CCSGCTL, CCSGEN jnz return;
+
+ /*
+ * We fetch a "cacheline aligned" and sized amount of data
+ * so we don't end up referencing a non-existent page.
+ * Cacheline aligned is in quotes because the kernel will
+ * set the prefetch amount to a reasonable level if the
+ * cacheline size is unknown.
+ */
+ mvi SGHCNT, SG_PREFETCH_CNT;
+ and SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
+ bmov SGHADDR[1], SCB_RESIDUAL_SGPTR[1], 3;
+ mvi CCSGCTL, CCSGEN|SG_CACHE_AVAIL|CCSGRESET;
+ or SG_STATE, FETCH_INPROG ret;
+idle_sgfetch_complete:
+ /*
+ * Guard against SG_CACHE_AVAIL activating during sg fetch
+ * request in the other FIFO.
+ */
+ test SG_STATE, FETCH_INPROG jz return;
+ call disable_ccsgen_fetch_done;
+ and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
+ mvi SG_STATE, SEGS_AVAIL|LOADING_NEEDED;
+idle_sg_avail:
+ /* Does the hardware have space for another SG entry? */
+ test DFSTATUS, PRELOAD_AVAIL jz return;
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ bmov HADDR, CCSGRAM, 8;
+ } else {
+ bmov HADDR, CCSGRAM, 4;
+ }
+ bmov HCNT, CCSGRAM, 3;
+ test HCNT[0], 0x1 jz . + 2;
+ xor DATA_COUNT_ODD, 0x1;
+ bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
+ if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+ and HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3];
+ }
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ /* Skip 4 bytes of pad. */
+ add CCSGADDR, 4;
+ }
+sg_advance:
+ clr A; /* add sizeof(struct scatter) */
+ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
+ adc SCB_RESIDUAL_SGPTR[1],A;
+ adc SCB_RESIDUAL_SGPTR[2],A;
+ adc SCB_RESIDUAL_SGPTR[3],A;
+ mov SINDEX, SCB_RESIDUAL_SGPTR[0];
+ test DATA_COUNT_ODD, 0x1 jz . + 2;
+ or SINDEX, ODD_SEG;
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3;
+ or SINDEX, LAST_SEG;
+ clr SG_STATE;
+ mov SG_CACHE_PRE, SINDEX;
+ /*
+ * Load the segment. Or in HDMAEN here too
+ * just in case HDMAENACK has not come true
+ * by the time this segment is loaded. If
+ * HDMAENACK is not true, this or will disable
+ * HDMAEN mid-transfer. We do not want to simply
+ * mvi our original settings as SCSIEN automatically
+ * de-asserts and we don't want to accidentally
+ * re-enable it.
+ */
+ or DFCNTRL, PRELOADEN|HDMAEN;
+ /*
+ * Do we have another segment in the cache?
+ */
+ add NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR;
+ jnc return;
+ and SG_STATE, ~SEGS_AVAIL ret;
+
+/*
+ * Initialize the DMA address and counter from the SCB.
+ */
+load_first_seg:
+ bmov HADDR, SCB_DATAPTR, 11;
+ and DATA_COUNT_ODD, 0x1, SCB_DATACNT[0];
+ and REG0, ~SG_FULL_RESID, SCB_SGPTR[0];
+ test SCB_DATACNT[3], SG_LAST_SEG jz . + 2;
+ or REG0, LAST_SEG;
+ test DATA_COUNT_ODD, 0x1 jz . + 2;
+ or REG0, ODD_SEG;
+ mov SG_CACHE_PRE, REG0;
+ mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+ /*
+ * Since we are entering a data phase, we will
+ * rely on the SCB_RESID* fields. Initialize the
+ * residual and clear the full residual flag.
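+ *
+ * In C terms (sketch only):
+ *
+ *	scb->sgptr &= ~SG_FULL_RESID;
+ *	scb->residual_datacnt = scb->datacnt;
+ *	if ((scb->datacnt & SG_LAST_SEG) == 0)
+ *		SG_STATE = LOADING_NEEDED;	(more S/G to load)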
+ */
+ and SCB_SGPTR[0], ~SG_FULL_RESID;
+ bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
+ /* If we need more S/G elements, tell the idle loop */
+ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2;
+ mvi SG_STATE, LOADING_NEEDED ret;
+ clr SG_STATE ret;
+
+p_data:
+ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed;
+ mvi SEQINTCODE, PROTO_VIOLATION;
+p_data_allowed:
+
+ test SEQ_FLAGS, DPHASE jz data_phase_initialize;
+
+ /*
+ * If we re-enter the data phase after going through another
+ * phase, our transfer location has almost certainly been
+ * corrupted by the intervening, non-data transfers. Ask
+ * the host driver to fix us up based on the transfer residual
+ * unless we already know that we should be bitbucketing.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
+ mvi SEQINTCODE, PDATA_REINIT;
+
+p_data_bitbucket:
+ /*
+ * Turn on `Bit Bucket' mode, wait until the target takes
+ * us to another phase, and then notify the host.
+ */
+ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+ jnz bitbucket_not_m_dff;
+ /*
+ * Ensure that any FIFO contents are cleared out and the
+ * FIFO free'd prior to starting the BITBUCKET. BITBUCKET
+ * doesn't discard data already in the FIFO.
+ */
+ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT;
+ SET_MODE(M_SCSI, M_SCSI);
+bitbucket_not_m_dff:
+ or SXFRCTL1,BITBUCKET;
+ test SCSIPHASE, DATA_PHASE_MASK jnz .;
+ and SXFRCTL1, ~BITBUCKET;
+ SET_MODE(M_DFF1, M_DFF1);
+ mvi SEQINTCODE, DATA_OVERRUN;
+ jmp ITloop;
+
+data_phase_initialize:
+ test SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
+ call load_first_seg;
+data_phase_inbounds:
+ /* We have seen a data phase at least once. */
+ or SEQ_FLAGS, DPHASE;
+data_group_dma_loop:
+ /*
+ * The transfer is complete if either the last segment
+ * completes or the target changes phase. Both conditions
+ * will clear SCSIEN. We test SCSIEN twice during our
+ * "idle loop" to avoid long delays before we notice the
+ * SCSIEN transition.
+ */
+ call data_group_idle_loop;
+ test DFCNTRL, SCSIEN jnz data_group_dma_loop;
+
+data_group_dmafinish:
+ /*
+ * The transfer has terminated either due to a phase
+ * change, and/or the completion of the last segment.
+ * We have two goals here. Do as much other work
+ * as possible while the data fifo drains on a read
+ * and respond as quickly as possible to the standard
+ * messages (save data pointers/disconnect and command
+ * complete) that usually follow a data phase.
+ */
+ call calc_residual;
+
+ /*
+ * Go ahead and shut down the DMA engine now.
+ */
+ test DFCNTRL, DIRECTION jnz data_phase_finish;
+data_group_fifoflush:
+ if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ /*
+ * We have enabled the auto-ack feature. This means
+ * that the controller may have already transferred
+ * some overrun bytes into the data FIFO and acked them
+ * on the bus. The only way to detect this situation is
+ * to wait for LAST_SEG_DONE to come true on a completed
+ * transfer and then test to see if the data FIFO is
+ * non-empty. We know there is more data yet to transfer
+ * if SG_LIST_NULL is not yet set, thus there cannot be
+ * an overrun.
+ */
+ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jz .;
+ test DFSTATUS, FIFOEMP jnz data_phase_finish;
+ /* Overrun */
+ jmp p_data;
+data_phase_finish:
+ /*
+ * If the target has left us in data phase, loop through
+ * the dma code again. We will only loop if there is a
+ * data overrun.
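+ *
+ * For the initiator role this is simply (sketch):
+ *
+ *	wait for REQINIT;
+ *	if (SCSIPHASE still indicates a data phase)
+ *		goto p_data;		(overrun path)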
+ */
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ test SSTAT0, TARGET jnz data_phase_done;
+ }
+ if ((ahd->flags & AHD_INITIATORROLE) != 0) {
+ test SSTAT1, REQINIT jz .;
+ test SCSIPHASE, DATA_PHASE_MASK jnz p_data;
+ }
+
+data_phase_done:
+ /* Kill off any pending prefetch */
+ call disable_ccsgen;
+
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ test SEQ_FLAGS, DPHASE_PENDING jz ITloop;
+ /*
+ and SEQ_FLAGS, ~DPHASE_PENDING;
+ * For data-in phases, wait for any pending acks from the
+ * initiator before changing phase. We only need to
+ * send Ignore Wide Residue messages for data-in phases.
+ test DFCNTRL, DIRECTION jz target_ITloop;
+ test SSTAT1, REQINIT jnz .;
+ test DATA_COUNT_ODD, 0x1 jz target_ITloop;
+ SET_MODE(M_SCSI, M_SCSI);
+ test NEGCONOPTS, WIDEXFER jz target_ITloop;
+ */
+ /*
+ * Issue an Ignore Wide Residue Message.
+ mvi P_MESGIN|BSYO call change_phase;
+ mvi MSG_IGN_WIDE_RESIDUE call target_outb;
+ mvi 1 call target_outb;
+ jmp target_ITloop;
+ */
+ } else {
+ jmp ITloop;
+ }
+
+/*
+ * We assume that, even though data may still be
+ * transferring to the host, the SCSI side of
+ * the DMA engine is now in a static state. This
+ * allows us to update our notion of where we are
+ * in this transfer.
+ *
+ * If, by chance, we stopped before being able
+ * to fetch additional segments for this transfer,
+ * yet the last S/G was completely exhausted,
+ * call our idle loop until it is able to load
+ * another segment. This will allow us to immediately
+ * pick up the next segment on the next data phase.
+ *
+ * If we happened to stop on the last segment, then
+ * our residual information is still correct from
+ * the idle loop and there is no need to perform
+ * any fixups.
+ */
+calc_residual:
+ test SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg;
+ /* Record if we've consumed all S/G entries */
+ test MDFFSTAT, SHVALID jz . + 2;
+ bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
+ or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret;
+residual_before_last_seg:
+ test MDFFSTAT, SHVALID jnz sgptr_fixup;
+ /*
+ * Can never happen from an interrupt as the packetized
+ * hardware will only interrupt us once SHVALID or
+ * LAST_SEG_DONE.
+ */
+ call data_group_idle_loop;
+ jmp calc_residual;
+
+sgptr_fixup:
+ /*
+ * Fixup the residual next S/G pointer. The S/G preload
+ * feature of the chip allows us to load two elements
+ * in addition to the currently active element. We
+ * store the bottom byte of the next S/G pointer in
+ * the SG_CACHE_PTR register so we can restore the
+ * correct value when the DMA completes. If the next
+ * sg ptr value has advanced to the point where higher
+ * bytes in the address have been affected, fix them
+ * too.
+ */
+ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done;
+ test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done;
+ add SCB_RESIDUAL_SGPTR[1], -1;
+ adc SCB_RESIDUAL_SGPTR[2], -1;
+ adc SCB_RESIDUAL_SGPTR[3], -1;
+sgptr_fixup_done:
+ and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW;
+ clr DATA_COUNT_ODD;
+ test SG_CACHE_SHADOW, ODD_SEG jz . + 2;
+ or DATA_COUNT_ODD, 0x1;
+ clr SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */
+ bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
+
+export seq_isr:
+ nop; /* Jumps in the first ISR instruction fail on Rev A.
*/
+ test SEQINTSRC, SAVEPTRS jnz saveptr_intr;
+ test SEQINTSRC, CFG4DATA jnz cfg4data_intr;
+ test SEQINTSRC, CFG4ISTAT jnz cfg4istat_intr;
+ test SEQINTSRC, CFG4ICMD jnz cfg4icmd_intr;
+ mvi SEQINTCODE, INVALID_SEQINT;
+
+/*
+ * There are two types of save pointers interrupts:
+ * The first is a snapshot save pointers where the current FIFO is not
+ * active and contains a snapshot of the current pointer information.
+ * This happens between packets in a stream for a single L_Q. Since we
+ * are not performing a pointer save, we can safely clear the channel
+ * so it can be used for other transactions.
+ *
+ * The second case is a save pointers on an active FIFO which occurs
+ * if the target changes to a new L_Q or busfrees/QAS' and the transfer
+ * has a residual. This should occur coincident with a ctxtdone. We
+ * disable the interrupt and allow our active routine to handle the
+ * save.
+ */
+saveptr_intr:
+ test DFCNTRL, HDMAENACK jz snapshot_saveptr;
+ and SEQIMODE, ~ENSAVEPTRS;
+ or SEQINTCTL, IRET ret;
+snapshot_saveptr:
+ mvi DFFSXFRCTL, CLRCHN;
+ or SEQINTCTL, IRET ret;
+
+cfg4data_intr:
+ test SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun;
+ call load_first_seg;
+ call pkt_handle_xfer;
+ or SEQINTCTL, IRET ret;
+
+cfg4istat_intr:
+ call freeze_queue;
+ add NONE, -13, SCB_CDB_LEN;
+ jnc cfg4istat_have_sense_addr;
+ test SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr;
+ /*
+ * Host sets up address/count and enables transfer.
+ */
+ mvi SEQINTCODE, CFG4ISTAT_INTR;
+ jmp cfg4istat_setup_handler;
+cfg4istat_have_sense_addr:
+ bmov HADDR, SCB_SENSE_BUSADDR, 4;
+ mvi HCNT[1], (AHD_SENSE_BUFSIZE >> 8);
+ mvi SG_CACHE_PRE, LAST_SEG;
+ mvi DFCNTRL, PRELOADEN|SCSIEN|HDMAEN;
+cfg4istat_setup_handler:
+ /*
+ * Status pkt is transferring to host.
+ * Wait in idle loop for transfer to complete.
+ */
+ call pkt_handle_status;
+ or SEQINTCTL, IRET ret;
+
+/*
+ * See if the target has gone on in this context creating an
+ * overrun condition. For the write case, the hardware cannot
+ * ack bytes until data is provided. So, if the target begins
+ * another packet without changing contexts, implying we are
+ * not sitting on a packet boundary, we are in an overrun
+ * situation. For the read case, the hardware will continue to
+ * ack bytes into the FIFO, and may even ack the last overrun packet
+ * into the FIFO. If the FIFO should become non-empty, we are in
+ * a read overrun case.
+ */
+#define check_overrun \
+ /* Not on a packet boundary. */ \
+ test MDFFSTAT, DLZERO jz pkt_handle_overrun; \
+ test DFSTATUS, FIFOEMP jz pkt_handle_overrun
+
+pkt_handle_xfer:
+ bmov LONGJMP_SCB, SCBPTR, 2;
+ test SG_STATE, LOADING_NEEDED jz pkt_last_seg;
+ call setjmp;
+ test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
+ test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
+ test SCSISIGO, ATNO jnz . + 2;
+ test SSTAT2, NONPACKREQ jz pkt_service_fifo;
+ /*
+ * Defer handling of this NONPACKREQ until we
+ * can be sure it pertains to this FIFO. SAVEPTRS
+ * will not be asserted if the NONPACKREQ is for us,
+ * so we must simulate it if shadow is valid. If
+ * shadow is not valid, keep running this FIFO until we
+ * have satisfied the transfer by loading segments and
+ * waiting for either shadow valid or last_seg_done.
+ */
+ test MDFFSTAT, SHVALID jnz pkt_saveptrs;
+pkt_service_fifo:
+ test SG_STATE, LOADING_NEEDED jnz service_fifo;
+pkt_last_seg:
+ call setjmp;
+ test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
+ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz last_pkt_done;
+ test SCSIPHASE, ~DATA_PHASE_MASK jz .
+ 2;
+ test SCSISIGO, ATNO jnz . + 2;
+ test SSTAT2, NONPACKREQ jz return;
+ test MDFFSTAT, SHVALID jnz pkt_saveptrs;
+ jmp return;
+last_pkt_done:
+BEGIN_CRITICAL;
+ if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ test SCB_CONTROL, STATUS_RCVD jz wait_pkt_end;
+ check_overrun;
+ or SCB_SGPTR, SG_LIST_NULL;
+ /*
+ * I think it is safe to skip the FIFO check
+ * in this case as LAST_SEG_DONE implies
+ * the other FIFO, if ever active for this transfer,
+ * has completed.
+ */
+last_pkt_queue_scb:
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ bmov ARG_1, SCBPTR, 2;
+ mvi DFFSXFRCTL, CLRCHN;
+ jmp queue_arg1_scb_completion;
+
+last_pkt_complete:
+ bmov ARG_1, SCBPTR, 2;
+ mvi DFFSXFRCTL, CLRCHN;
+check_other_fifo:
+ clc;
+ call toggle_dff_mode;
+ call check_fifo;
+ jnc queue_arg1_scb_completion;
+return:
+ ret;
+
+wait_pkt_end:
+ call setjmp;
+END_CRITICAL;
+wait_pkt_end_loop:
+ test SEQINTSRC, CTXTDONE jnz pkt_end;
+ check_overrun;
+ test SSTAT2, NONPACKREQ jz return;
+ test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
+pkt_end:
+BEGIN_CRITICAL;
+ check_overrun;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ or SCB_SGPTR, SG_LIST_NULL;
+ test SCB_CONTROL, STATUS_RCVD jnz last_pkt_complete;
+ mvi DFFSXFRCTL, CLRCHN ret;
+END_CRITICAL;
+
+/*
+ * Either a SAVEPTRS interrupt condition is pending for this FIFO
+ * or we have a pending nonpackreq for this FIFO. We differentiate
+ * between the two by capturing the state of the SAVEPTRS interrupt
+ * prior to clearing it and then handling the common code of these
+ * two cases.
+ */
+pkt_saveptrs:
+BEGIN_CRITICAL;
+ if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ mov REG0, SEQINTSRC;
+ mvi CLRSEQINTSRC, CLRSAVEPTRS;
+ call calc_residual;
+ call save_pointers;
+ call disable_ccsgen;
+ or SEQIMODE, ENSAVEPTRS;
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+pkt_saveptrs_check_status:
+ test REG0, SAVEPTRS jz unexpected_nonpkt_phase;
+ test SCB_CONTROL, STATUS_RCVD jz pkt_saveptrs_clrchn;
+ jmp last_pkt_complete;
+pkt_saveptrs_clrchn:
+ mvi DFFSXFRCTL, CLRCHN ret;
+END_CRITICAL;
+
+check_status_overrun:
+ test SHCNT[2], 0xFF jz status_IU_done;
+ mvi SEQINTCODE, STATUS_OVERRUN;
+ jmp status_IU_done;
+pkt_handle_status:
+ call setjmp_setscb;
+ test MDFFSTAT, LASTSDONE jnz check_status_overrun;
+ test SEQINTSRC, CTXTDONE jz return;
+status_IU_done:
+BEGIN_CRITICAL;
+ if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+ or DFCNTRL, FIFOFLUSH;
+ }
+ or LONGJMP_ADDR[1], INVALID_ADDR;
+ mvi SCB_SCSI_STATUS, STATUS_PKT_SENSE;
+ or SCB_CONTROL, STATUS_RCVD;
+ jmp last_pkt_complete;
+END_CRITICAL;
+
+SET_SRC_MODE M_DFF0;
+SET_DST_MODE M_DFF0;
+BEGIN_CRITICAL;
+check_fifo:
+ test LONGJMP_ADDR[1], INVALID_ADDR jnz return;
+ mov A, ARG_2;
+ cmp LONGJMP_SCB[1], A jne return;
+ mov A, ARG_1;
+ cmp LONGJMP_SCB[0], A jne return;
+ stc ret;
+END_CRITICAL;
+
+/*
+ * Nonpackreq is a polled status. It can come true in three situations:
+ * we have received an L_Q, we have sent one or more L_Qs, or there is no
+ * L_Q context associated with this REQ (REQ occurs immediately after a
+ * (re)selection). Routines that know the context responsible for this
+ * nonpackreq call directly into unexpected_nonpkt_phase. In the case of the
+ * top level idle loop, we exhaust all active contexts prior to determining that
+ * we simply do not have the full I_T_L_Q for this phase.
+ */
+unexpected_nonpkt_phase_find_ctxt:
+ /*
+ * This nonpackreq is most likely associated with one of the tags
+ * in a FIFO or an outgoing LQ.
Only treat it as an I_T only + * nonpackreq if we've cleared out the FIFOs and handled any + * pending SELDO. + */ +SET_SRC_MODE M_SCSI; +SET_DST_MODE M_SCSI; + and A, FIFO1FREE|FIFO0FREE, DFFSTAT; + cmp A, FIFO1FREE|FIFO0FREE jne return; + test SSTAT0, SELDO jnz return; + mvi SCBPTR[1], SCB_LIST_NULL; +unexpected_nonpkt_phase: + test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz . + 3; +SET_SRC_MODE M_DFF0; +SET_DST_MODE M_DFF0; + or LONGJMP_ADDR[1], INVALID_ADDR; + mvi DFFSXFRCTL, CLRCHN; + mvi CLRSINT2, CLRNONPACKREQ; + test SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase; + mvi SEQINTCODE, ENTERING_NONPACK; + jmp ITloop; + +illegal_phase: + mvi SEQINTCODE, ILLEGAL_PHASE; + jmp ITloop; + +/* + * We have entered an overrun situation. If we have working + * BITBUCKET, flip that on and let the hardware eat any overrun + * data. Otherwise use an overrun buffer in the host to simulate + * BITBUCKET. + */ +pkt_handle_overrun: + mvi SEQINTCODE, CFG4OVERRUN; + call freeze_queue; + if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) { + SET_MODE(M_SCSI, M_SCSI); + or SXFRCTL1,BITBUCKET; +SET_SRC_MODE M_DFF1; +SET_DST_MODE M_DFF1; + } else { + call load_overrun_buf; + mvi DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN); + } + call setjmp; + if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { + test DFSTATUS, PKT_PRELOAD_AVAIL jz overrun_load_done; + call load_overrun_buf; + or DFCNTRL, PRELOADEN; +overrun_load_done: + } + test SEQINTSRC, CTXTDONE jnz pkt_overrun_end; + test SSTAT2, NONPACKREQ jz return; +pkt_overrun_end: + or SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID; + test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; + test SCB_CONTROL, STATUS_RCVD jnz last_pkt_queue_scb; + mvi DFFSXFRCTL, CLRCHN ret; + +if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { +load_overrun_buf: + /* + * Load a dummy segment if preload space is available. + */ + mov HADDR[0], SHARED_DATA_ADDR; + add HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1]; + mov ACCUM_SAVE, A; + clr A; + adc HADDR[2], A, SHARED_DATA_ADDR[2]; + adc HADDR[3], A, SHARED_DATA_ADDR[3]; + mov A, ACCUM_SAVE; + bmov HADDR[4], ALLZEROS, 4; + /* PKT_OVERRUN_BUFSIZE is a multiple of 256 */ + clr HCNT[0]; + mvi HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF); + clr HCNT[2]; +} + +cfg4icmd_intr: diff --git a/sys/dev/aic7xxx/aic79xx_inline.h b/sys/dev/aic7xxx/aic79xx_inline.h new file mode 100644 index 0000000..5d0c79d --- /dev/null +++ b/sys/dev/aic7xxx/aic79xx_inline.h @@ -0,0 +1,997 @@ +/* + * Inline routines shareable across OS platforms. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#27 $ + * + * $FreeBSD$ + */ + +#ifndef _AIC79XX_INLINE_H_ +#define _AIC79XX_INLINE_H_ + +/******************************** Debugging ***********************************/ +static __inline char *ahd_name(struct ahd_softc *ahd); + +static __inline char * +ahd_name(struct ahd_softc *ahd) +{ + return (ahd->name); +} + +/************************ Sequencer Execution Control *************************/ +static __inline void ahd_known_modes(struct ahd_softc *ahd, + ahd_mode src, ahd_mode dst); +static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd, + ahd_mode src, + ahd_mode dst); +static __inline void ahd_extract_mode_state(struct ahd_softc *ahd, + ahd_mode_state state, + ahd_mode *src, ahd_mode *dst); +static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, + ahd_mode dst); +static __inline void ahd_update_modes(struct ahd_softc *ahd); +static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, + ahd_mode dstmode, const char *file, + int line); +static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd); +static __inline void ahd_restore_modes(struct ahd_softc *ahd, + ahd_mode_state state); +static __inline int ahd_is_paused(struct ahd_softc *ahd); +static __inline void ahd_pause(struct ahd_softc *ahd); +static __inline void ahd_unpause(struct ahd_softc *ahd); + +static __inline void +ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) +{ + ahd->src_mode = src; + ahd->dst_mode = dst; + ahd->saved_src_mode = src; + ahd->saved_dst_mode = dst; +} + +static __inline ahd_mode_state +ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) +{ + return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT)); +} + +static __inline void +ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state, + ahd_mode *src, ahd_mode *dst) +{ + *src = (state & SRC_MODE) >> SRC_MODE_SHIFT; + *dst = (state & DST_MODE) >> DST_MODE_SHIFT; +} + +static __inline void +ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) +{ + if (ahd->src_mode == src && ahd->dst_mode == dst) + return; +#ifdef AHD_DEBUG + if (ahd->src_mode == AHD_MODE_UNKNOWN + || ahd->dst_mode == AHD_MODE_UNKNOWN) + panic("Setting mode prior to saving it.\n"); + if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) + printf("Setting mode 0x%x\n", + ahd_build_mode_state(ahd, src, dst)); +#endif + ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); + ahd->src_mode = src; + ahd->dst_mode = dst; +} + +static __inline void +ahd_update_modes(struct 
ahd_softc *ahd) +{ + ahd_mode_state mode_ptr; + ahd_mode src; + ahd_mode dst; + + mode_ptr = ahd_inb(ahd, MODE_PTR); +#ifdef AHD_DEBUG + if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) + printf("Reading mode 0x%x\n", mode_ptr); +#endif + ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); + ahd_known_modes(ahd, src, dst); +} + +static __inline void +ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, + ahd_mode dstmode, const char *file, int line) +{ +#ifdef AHD_DEBUG + if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0 + || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) { + panic("%s:%s:%d: Mode assertion failed.\n", + ahd_name(ahd), file, line); + } +#endif +} + +static __inline ahd_mode_state +ahd_save_modes(struct ahd_softc *ahd) +{ + if (ahd->src_mode == AHD_MODE_UNKNOWN + || ahd->dst_mode == AHD_MODE_UNKNOWN) + ahd_update_modes(ahd); + + return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode)); +} + +static __inline void +ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) +{ + ahd_mode src; + ahd_mode dst; + + ahd_extract_mode_state(ahd, state, &src, &dst); + ahd_set_modes(ahd, src, dst); +} + +#define AHD_ASSERT_MODES(ahd, source, dest) \ + ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__); + +/* + * Determine whether the sequencer has halted code execution. + * Returns non-zero status if the sequencer is stopped. + */ +static __inline int +ahd_is_paused(struct ahd_softc *ahd) +{ + return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); +} + +/* + * Request that the sequencer stop and wait, indefinitely, for it + * to stop. The sequencer will only acknowledge that it is paused + * once it has reached an instruction boundary and PAUSEDIS is + * cleared in the SEQCTL register. The sequencer may use PAUSEDIS + * for critical sections. + */ +static __inline void +ahd_pause(struct ahd_softc *ahd) +{ + ahd_outb(ahd, HCNTRL, ahd->pause); + + /* + * Since the sequencer can disable pausing in a critical section, we + * must loop until it actually stops. + */ + while (ahd_is_paused(ahd) == 0) + ; +} + +/* + * Allow the sequencer to continue program execution. + * We check here to ensure that no additional interrupt + * sources that would cause the sequencer to halt have been + * asserted. If, for example, a SCSI bus reset is detected + * while we are fielding a different, pausing, interrupt type, + * we don't want to release the sequencer before going back + * into our interrupt handler and dealing with this new + * condition. + */ +static __inline void +ahd_unpause(struct ahd_softc *ahd) +{ + /* + * Automatically restore our modes to those saved + * prior to the first change of the mode. + */ + if (ahd->saved_src_mode != AHD_MODE_UNKNOWN + && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) + ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); + + if ((ahd_inb(ahd, INTSTAT) & ~(SWTMINT | CMDCMPLT)) == 0) + ahd_outb(ahd, HCNTRL, ahd->unpause); + + ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); +} + +/*********************** Untagged Transaction Routines ************************/ +static __inline void ahd_freeze_untagged_queues(struct ahd_softc *ahd); +static __inline void ahd_release_untagged_queues(struct ahd_softc *ahd); + +/* + * Block our completion routine from starting the next untagged + * transaction for this target or target lun. + */ +static __inline void +ahd_freeze_untagged_queues(struct ahd_softc *ahd) +{ + /* + * Assume we have enough space in the card's SCB + * to obviate the need for a per target untagged + * transaction limit. 
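 *
 * Illustrative pairing in a hypothetical caller (sketch
 * only; the lock bodies are compiled out below):
 *
 *	ahd_freeze_untagged_queues(ahd);
 *	... modify untagged transaction state ...
 *	ahd_release_untagged_queues(ahd);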
+ */
+#if 0
+ ahd->untagged_queue_lock++;
+#endif
+}
+
+/*
+ * Allow the next untagged transaction for this target or target lun
+ * to be executed. We use a counting semaphore to allow the lock
+ * to be acquired recursively. Once the count drops to zero, the
+ * transaction queues will be run.
+ */
+static __inline void
+ahd_release_untagged_queues(struct ahd_softc *ahd)
+{
+ /*
+ * Assume we have enough space in the card's SCB
+ * to obviate the need for a per target untagged
+ * transaction limit.
+ */
+#if 0
+ ahd->untagged_queue_lock--;
+ if (ahd->untagged_queue_lock == 0)
+ ahd_run_untagged_queues(ahd);
+#endif
+}
+
+/*********************** Scatter Gather List Handling *************************/
+static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, bus_addr_t addr,
+ bus_size_t len, int last);
+static __inline void ahd_setup_scb_common(struct ahd_softc *ahd,
+ struct scb *scb);
+static __inline void ahd_setup_data_scb(struct ahd_softc *ahd,
+ struct scb *scb);
+static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd,
+ struct scb *scb);
+
+static __inline void *
+ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, bus_addr_t addr, bus_size_t len, int last)
+{
+ scb->sg_count++;
+ if (sizeof(bus_addr_t) > 4
+ && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)sgptr;
+ sg->addr = ahd_htole64(addr);
+ sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ } else {
+ struct ahd_dma_seg *sg;
+
+ sg = (struct ahd_dma_seg *)sgptr;
+ sg->addr = ahd_htole32(addr);
+ sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
+ | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ }
+}
+
+static __inline void
+ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
+{
+ /* XXX Handle target mode SCBs. */
+ if ((scb->flags & SCB_PACKETIZED) != 0) {
+ /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
+ scb->hscb->task_attribute_nonpkt_tag =
+ scb->hscb->control & SCB_TAG_TYPE;
+ scb->hscb->task_management = 0;
+ } else {
+ scb->hscb->task_attribute_nonpkt_tag = SCB_GET_TAG(scb);
+ }
+
+ if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
+ || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
+ scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
+ ahd_htole32(scb->sense_busaddr);
+}
+
+static __inline void
+ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ /*
+ * Copy the first SG into the "current" data pointer area.
+ */
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)scb->sg_list;
+ scb->hscb->dataptr = sg->addr;
+ scb->hscb->datacnt = sg->len;
+ } else {
+ struct ahd_dma_seg *sg;
+
+ sg = (struct ahd_dma_seg *)scb->sg_list;
+ scb->hscb->dataptr = sg->addr;
+ if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+ uint64_t high_addr;
+
+ high_addr = (ahd_le32toh(sg->len) & 0x7F000000) << 8;
+ scb->hscb->dataptr |= ahd_htole64(high_addr);
+ }
+ scb->hscb->datacnt = sg->len;
+ }
+ /*
+ * Note where to find the SG entries in bus space.
+ * We also set the full residual flag which the
+ * sequencer will clear as soon as a data transfer
+ * occurs.
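+ *
+ * A typical caller builds the list first, e.g. (sketch
+ * only; addr/len describe an already-mapped data buffer):
+ *
+ *	sg = scb->sg_list;
+ *	sg = ahd_sg_setup(ahd, scb, sg, addr, len, 1);
+ *	ahd_setup_data_scb(ahd, scb);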
+ */
+ scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
+}
+
+static __inline void
+ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
+ scb->hscb->dataptr = 0;
+ scb->hscb->datacnt = 0;
+}
+
+/************************** Memory mapping routines ***************************/
+static __inline size_t ahd_sg_size(struct ahd_softc *ahd);
+static __inline void *
+ ahd_sg_bus_to_virt(struct ahd_softc *ahd,
+ struct scb *scb,
+ uint32_t sg_busaddr);
+static __inline uint32_t
+ ahd_sg_virt_to_bus(struct ahd_softc *ahd,
+ struct scb *scb,
+ void *sg);
+static __inline void ahd_sync_scb(struct ahd_softc *ahd,
+ struct scb *scb, int op);
+static __inline void ahd_sync_sglist(struct ahd_softc *ahd,
+ struct scb *scb, int op);
+static __inline void ahd_sync_sense(struct ahd_softc *ahd,
+ struct scb *scb, int op);
+static __inline uint32_t
+ ahd_targetcmd_offset(struct ahd_softc *ahd,
+ u_int index);
+
+static __inline size_t
+ahd_sg_size(struct ahd_softc *ahd)
+{
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+ return (sizeof(struct ahd_dma64_seg));
+ return (sizeof(struct ahd_dma_seg));
+}
+
+static __inline void *
+ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
+{
+ bus_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
+ return ((uint8_t *)scb->sg_list + sg_offset);
+}
+
+static __inline uint32_t
+ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
+{
+ bus_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
+ - ahd_sg_size(ahd);
+
+ return (scb->sg_list_busaddr + sg_offset);
+}
+
+static __inline void
+ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
+ scb->hscb_map->dmamap,
+ /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
+ /*len*/sizeof(*scb->hscb), op);
+}
+
+static __inline void
+ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ if (scb->sg_count == 0)
+ return;
+
+ ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
+ scb->sg_map->dmamap,
+ /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
+ /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
+}
+
+static __inline void
+ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
+ scb->sense_map->dmamap,
+ /*offset*/scb->sense_busaddr,
+ /*len*/AHD_SENSE_BUFSIZE, op);
+}
+
+static __inline uint32_t
+ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
+{
+ return (((uint8_t *)&ahd->targetcmds[index])
+ - (uint8_t *)ahd->qoutfifo);
+}
+
+/*********************** Miscellaneous Support Functions **********************/
+static __inline void ahd_complete_scb(struct ahd_softc *ahd,
+ struct scb *scb);
+static __inline void ahd_update_residual(struct ahd_softc *ahd,
+ struct scb *scb);
+static __inline struct ahd_initiator_tinfo *
+ ahd_fetch_transinfo(struct ahd_softc *ahd,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahd_tmode_tstate **tstate);
+static __inline struct scb*
+ ahd_get_scb(struct ahd_softc *ahd);
+static __inline void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb);
+static __inline uint16_t
+ ahd_inw(struct ahd_softc *ahd, u_int port);
+static __inline void ahd_outw(struct ahd_softc *ahd, u_int port,
+ u_int value);
+static __inline uint32_t
+ ahd_inl(struct ahd_softc *ahd, u_int port);
+static __inline
void ahd_outl(struct ahd_softc *ahd, u_int port, + uint32_t value); +static __inline uint64_t + ahd_inq(struct ahd_softc *ahd, u_int port); +static __inline void ahd_outq(struct ahd_softc *ahd, u_int port, + uint64_t value); +static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd); +static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr); +static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd); +static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value); +static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd); +static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value); +static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd); +static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value); +static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd); +static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value); +static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd); +static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value); +static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset); +static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset); +static __inline uint32_t + ahd_inl_scbram(struct ahd_softc *ahd, u_int offset); +static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd, + struct scb *scb); +static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb); +static __inline uint8_t * + ahd_get_sense_buf(struct ahd_softc *ahd, + struct scb *scb); +static __inline uint32_t + ahd_get_sense_bufaddr(struct ahd_softc *ahd, + struct scb *scb); + +static __inline void +ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) +{ + uint32_t sgptr; + + sgptr = ahd_le32toh(scb->hscb->sgptr); + if ((sgptr & SG_STATUS_VALID) != 0) + ahd_handle_scb_status(ahd, scb); + else + ahd_done(ahd, scb); +} + +/* + * Determine whether the sequencer reported a residual + * for this SCB/transaction. + */ +static __inline void +ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) +{ + uint32_t sgptr; + + sgptr = ahd_le32toh(scb->hscb->sgptr); + if ((sgptr & SG_STATUS_VALID) != 0) + ahd_calc_residual(ahd, scb); +} + +/* + * Return pointers to the transfer negotiation information + * for the specified our_id/remote_id pair. + */ +static __inline struct ahd_initiator_tinfo * +ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, + u_int remote_id, struct ahd_tmode_tstate **tstate) +{ + /* + * Transfer data structures are stored from the perspective + * of the target role. Since the parameters for a connection + * in the initiator role to a given target are the same as + * when the roles are reversed, we pretend we are the target. + */ + if (channel == 'B') + our_id += 8; + *tstate = ahd->enabled_targets[our_id]; + return (&(*tstate)->transinfo[remote_id]); +} + +/* + * Get a free scb. If there are none, see if we can allocate a new SCB. + */ +static __inline struct scb * +ahd_get_scb(struct ahd_softc *ahd) +{ + struct scb *scb; + + if ((scb = SLIST_FIRST(&ahd->scb_data.free_scbs)) == NULL) { + ahd_alloc_scbs(ahd); + scb = SLIST_FIRST(&ahd->scb_data.free_scbs); + if (scb == NULL) + return (NULL); + } + SLIST_REMOVE_HEAD(&ahd->scb_data.free_scbs, links.sle); + return (scb); +} + +/* + * Return an SCB resource to the free list. 
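
ahd_complete_scb() and ahd_update_residual() above both key off SG_STATUS_VALID folded into the hscb sgptr field (the SG list address is aligned, so its low bits are free to carry flags), letting a single 32-bit load decide between the fast completion path and status post-processing. A minimal model of that dispatch, with an assumed flag value standing in for the real definition:

#include <stdint.h>
#include <stdio.h>

#define SG_STATUS_VALID	0x4	/* stand-in for the flag tested above */

/* Dispatch shape of ahd_complete_scb(): one load, one branch. */
static void
complete(uint32_t sgptr)
{
	if ((sgptr & SG_STATUS_VALID) != 0)
		printf("handle status for sgptr 0x%x\n", (unsigned)sgptr);
	else
		printf("done, sgptr 0x%x\n", (unsigned)sgptr);
}

int
main(void)
{
	complete(0x1000);			/* clean completion */
	complete(0x1000 | SG_STATUS_VALID);	/* needs status handling */
	return (0);
}
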
+ */ +static __inline void +ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) +{ + + /* Clean up for the next user */ + ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; + scb->flags = SCB_FREE; + scb->hscb->control = 0; + + SLIST_INSERT_HEAD(&ahd->scb_data.free_scbs, scb, links.sle); + + /* Notify the OSM that a resource is now available. */ + ahd_platform_scb_free(ahd, scb); +} + +static __inline uint16_t +ahd_inw(struct ahd_softc *ahd, u_int port) +{ + return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port)); +} + +static __inline void +ahd_outw(struct ahd_softc *ahd, u_int port, u_int value) +{ + ahd_outb(ahd, port, value & 0xFF); + ahd_outb(ahd, port+1, (value >> 8) & 0xFF); +} + +static __inline uint32_t +ahd_inl(struct ahd_softc *ahd, u_int port) +{ + return ((ahd_inb(ahd, port)) + | (ahd_inb(ahd, port+1) << 8) + | (ahd_inb(ahd, port+2) << 16) + | (ahd_inb(ahd, port+3) << 24)); +} + +static __inline void +ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value) +{ + ahd_outb(ahd, port, (value) & 0xFF); + ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF); + ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF); + ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF); +} + +static __inline uint64_t +ahd_inq(struct ahd_softc *ahd, u_int port) +{ + return ((ahd_inb(ahd, port)) + | (ahd_inb(ahd, port+1) << 8) + | (ahd_inb(ahd, port+2) << 16) + | (ahd_inb(ahd, port+3) << 24) + | (((uint64_t)ahd_inb(ahd, port+4)) << 32) + | (((uint64_t)ahd_inb(ahd, port+5)) << 40) + | (((uint64_t)ahd_inb(ahd, port+6)) << 48) + | (((uint64_t)ahd_inb(ahd, port+7)) << 56)); +} + +static __inline void +ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value) +{ + ahd_outb(ahd, port, value & 0xFF); + ahd_outb(ahd, port+1, (value >> 8) & 0xFF); + ahd_outb(ahd, port+2, (value >> 16) & 0xFF); + ahd_outb(ahd, port+3, (value >> 24) & 0xFF); + ahd_outb(ahd, port+4, (value >> 32) & 0xFF); + ahd_outb(ahd, port+5, (value >> 40) & 0xFF); + ahd_outb(ahd, port+6, (value >> 48) & 0xFF); + ahd_outb(ahd, port+7, (value >> 56) & 0xFF); +} + +static __inline u_int +ahd_get_scbptr(struct ahd_softc *ahd) +{ + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8)); +} + +static __inline void +ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr) +{ + AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), + ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); + ahd_outb(ahd, SCBPTR, scbptr & 0xFF); + ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF); +} + +static __inline u_int +ahd_get_hnscb_qoff(struct ahd_softc *ahd) +{ + return (ahd_inw_atomic(ahd, HNSCB_QOFF)); +} + +static __inline void +ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value) +{ + ahd_outw_atomic(ahd, HNSCB_QOFF, value); +} + +static __inline u_int +ahd_get_hescb_qoff(struct ahd_softc *ahd) +{ + return (ahd_inb(ahd, HESCB_QOFF)); +} + +static __inline void +ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value) +{ + ahd_outb(ahd, HESCB_QOFF, value); +} + +static __inline u_int +ahd_get_snscb_qoff(struct ahd_softc *ahd) +{ + u_int oldvalue; + + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + oldvalue = ahd_inw(ahd, SNSCB_QOFF); + ahd_outw(ahd, SNSCB_QOFF, oldvalue); + return (oldvalue); +} + +static __inline void +ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + ahd_outw(ahd, SNSCB_QOFF, value); +} + +static __inline u_int +ahd_get_sescb_qoff(struct ahd_softc *ahd) +{ + 
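
ahd_inw()/ahd_outw() through ahd_inq()/ahd_outq() above all widen the byte-wide ahd_inb()/ahd_outb() primitives into little-endian 16-, 32-, and 64-bit accesses by shifting and masking. The same composition against a toy in-memory register file (names invented), runnable on its own:

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[256];	/* toy register file standing in for the chip */

static uint8_t  rd8(unsigned p)            { return (regs[p]); }
static void     wr8(unsigned p, uint8_t v) { regs[p] = v; }

/* Same shift-and-or composition as ahd_inl()/ahd_outl(). */
static uint32_t
rd32(unsigned port)
{
	return (rd8(port)
	      | (rd8(port + 1) << 8)
	      | (rd8(port + 2) << 16)
	      | ((uint32_t)rd8(port + 3) << 24));
}

static void
wr32(unsigned port, uint32_t value)
{
	wr8(port, value & 0xFF);
	wr8(port + 1, (value >> 8) & 0xFF);
	wr8(port + 2, (value >> 16) & 0xFF);
	wr8(port + 3, (value >> 24) & 0xFF);
}

int
main(void)
{
	wr32(0x10, 0xDEADBEEF);
	/* Byte 0 holds the LSB: the encoding is little-endian. */
	printf("read 0x%08x, byte0 0x%02x\n",
	    (unsigned)rd32(0x10), (unsigned)rd8(0x10));
	return (0);
}
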
AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + return (ahd_inb(ahd, SESCB_QOFF)); +} + +static __inline void +ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + ahd_outb(ahd, SESCB_QOFF, value); +} + +static __inline u_int +ahd_get_sdscb_qoff(struct ahd_softc *ahd) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8)); +} + +static __inline void +ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value) +{ + AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + ahd_outb(ahd, SDSCB_QOFF, value & 0xFF); + ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF); +} + +static __inline u_int +ahd_inb_scbram(struct ahd_softc *ahd, u_int offset) +{ + u_int value; + + /* + * Workaround PCI-X Rev A. hardware bug. + * After a host read of SCB memory, the chip + * may become confused into thinking prefetch + * was required. This starts the discard timer + * running and can cause an unexpected discard + * timer interrupt. The work around is to read + * a normal register prior to the exhaustion of + * the discard timer. The mode pointer register + * has no side effects and so serves well for + * this purpose. + * + * Razor #528 + */ + value = ahd_inb(ahd, offset); + ahd_inb(ahd, MODE_PTR); + return (value); +} + +static __inline u_int +ahd_inw_scbram(struct ahd_softc *ahd, u_int offset) +{ + return (ahd_inb_scbram(ahd, offset) + | (ahd_inb_scbram(ahd, offset+1) << 8)); +} + +static __inline uint32_t +ahd_inl_scbram(struct ahd_softc *ahd, u_int offset) +{ + return (ahd_inb_scbram(ahd, offset) + | (ahd_inb_scbram(ahd, offset+1) << 8) + | (ahd_inb_scbram(ahd, offset+2) << 16) + | (ahd_inb_scbram(ahd, offset+3) << 24)); +} + +static __inline struct scb * +ahd_lookup_scb(struct ahd_softc *ahd, u_int tag) +{ + struct scb* scb; + + if (tag >= AHD_SCB_MAX) + return (NULL); + scb = ahd->scb_data.scbindex[tag]; + if (scb != NULL) + ahd_sync_scb(ahd, scb, + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); + return (scb); +} + +static __inline void +ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb) +{ + struct hardware_scb *q_hscb; + uint32_t saved_hscb_busaddr; + + /* + * Our queuing method is a bit tricky. The card + * knows in advance which HSCB (by address) to download, + * and we can't disappoint it. To achieve this, the next + * SCB to download is saved off in ahd->next_queued_scb. + * When we are called to queue "an arbitrary scb", + * we copy the contents of the incoming HSCB to the one + * the sequencer knows about, swap HSCB pointers and + * finally assign the SCB to the tag indexed location + * in the scb_array. This makes sure that we can still + * locate the correct SCB by SCB_TAG. + */ + q_hscb = ahd->next_queued_scb->hscb; + saved_hscb_busaddr = q_hscb->hscb_busaddr; + memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); + q_hscb->hscb_busaddr = saved_hscb_busaddr; + q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; + + /* Now swap HSCB pointers. */ + ahd->next_queued_scb->hscb = scb->hscb; + scb->hscb = q_hscb; + + /* Now define the mapping from tag to SCB in the scbindex */ +/* XXX This should be constant now. Can we avoid the mapping? */ + ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; +} + +/* + * Tell the sequencer about a new transaction to execute. 
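
The swap in ahd_swap_with_next_hscb() above is easiest to see with concrete slots: the controller has already been promised a fixed DMA address for the next download, so the new command's payload is copied into that slot (each slot keeping its own bus address) while the in-core SCBs trade hscb pointers. A toy model with invented field names mirroring the ones above:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct toy_hscb {
	uint32_t busaddr;	/* fixed DMA address of this slot */
	uint32_t next_busaddr;
	uint8_t  cdb[16];	/* rest of the command payload */
};

struct toy_scb {
	struct toy_hscb *hscb;
};

static void
swap_with_next(struct toy_scb *next_queued, struct toy_scb *scb)
{
	struct toy_hscb *q_hscb = next_queued->hscb;
	uint32_t saved = q_hscb->busaddr;

	/* Move the payload into the slot the chip expects... */
	memcpy(q_hscb, scb->hscb, sizeof(*q_hscb));
	q_hscb->busaddr = saved;		/* slot keeps its address */
	q_hscb->next_busaddr = scb->hscb->busaddr;

	/* ...then trade pointers so the freed slot is reused next. */
	next_queued->hscb = scb->hscb;
	scb->hscb = q_hscb;
}

int
main(void)
{
	struct toy_hscb slot_a = { 0x1000, 0, { 0x28 } };	/* READ(10) */
	struct toy_hscb slot_b = { 0x2000, 0, { 0 } };
	struct toy_scb next_queued = { &slot_b };	/* chip expects 0x2000 */
	struct toy_scb scb = { &slot_a };

	swap_with_next(&next_queued, &scb);
	/* Payload now lives at the address the chip was promised... */
	assert(slot_b.cdb[0] == 0x28 && slot_b.busaddr == 0x2000);
	/* ...and the vacated slot becomes the next download target. */
	assert(next_queued.hscb == &slot_a);
	return (0);
}
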
+ */
+static __inline void
+ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+	ahd_swap_with_next_hscb(ahd, scb);
+
+	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
+		panic("Attempt to queue invalid SCB tag %x\n",
+		      SCB_GET_TAG(scb));
+
+	/*
+	 * Keep a history of SCBs we've downloaded in the qinfifo.
+	 */
+	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+	ahd->qinfifonext++;
+
+	if (scb->sg_count != 0)
+		ahd_setup_data_scb(ahd, scb);
+	else
+		ahd_setup_noxfer_scb(ahd, scb);
+	ahd_setup_scb_common(ahd, scb);
+
+	/*
+	 * Make sure our data is consistent from the
+	 * perspective of the adapter.
+	 */
+	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
+		printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
+		       ahd_name(ahd),
+		       SCB_GET_TAG(scb), scb->hscb->hscb_busaddr,
+		       (u_int)((scb->hscb->dataptr >> 32) & 0xFFFFFFFF),
+		       (u_int)(scb->hscb->dataptr & 0xFFFFFFFF),
+		       scb->hscb->datacnt);
+	}
+#endif
+	/* Tell the adapter about the newly queued SCB */
+	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+}
+
+static __inline uint8_t *
+ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
+{
+	return (scb->sense_data);
+}
+
+static __inline uint32_t
+ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
+{
+	return (scb->sense_busaddr);
+}
+
+/************************** Interrupt Processing ******************************/
+static __inline void	ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
+static __inline void	ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
+static __inline u_int	ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
+static __inline void	ahd_intr(struct ahd_softc *ahd);
+
+static __inline void
+ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
+{
+	ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap,
+			/*offset*/0, /*len*/AHC_SCB_MAX * sizeof(uint16_t), op);
+}
+
+static __inline void
+ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
+{
+#ifdef AHD_TARGET_MODE
+	if ((ahd->flags & AHD_TARGETROLE) != 0) {
+		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+				ahd->shared_data_dmamap,
+				ahd_targetcmd_offset(ahd, 0),
+				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
+				op);
+	}
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHD_RUN_QOUTFIFO 0x1
+#define AHD_RUN_TQINFIFO 0x2
+static __inline u_int
+ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
+{
+	u_int retval;
+
+	retval = 0;
+	ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap,
+			/*offset*/ahd->qoutfifonext, /*len*/2,
+			BUS_DMASYNC_POSTREAD);
+	if (ahd->qoutfifo[ahd->qoutfifonext] != SCB_LIST_NULL_LE)
+		retval |= AHD_RUN_QOUTFIFO;
+#ifdef AHD_TARGET_MODE
+	if ((ahd->flags & AHD_TARGETROLE) != 0
+	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
+		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+				ahd->shared_data_dmamap,
+				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
+				/*len*/sizeof(struct target_cmd),
+				BUS_DMASYNC_POSTREAD);
+		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
+			retval |= AHD_RUN_TQINFIFO;
+	}
+#endif
+	return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+static __inline void
+ahd_intr(struct ahd_softc *ahd)
+{
+	u_int	intstat;
+
+	/*
+	 * Instead of directly reading the interrupt status register,
+	 * infer the cause of the interrupt by checking our in-core
+	 * completion queues.  This avoids a costly PCI bus read in
+	 * most cases.
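
ahd_check_cmdcmpltqueues() above turns "did the firmware complete anything?" into a memory test: the next qoutfifo slot holds a null tag until the firmware overwrites it with a completed SCB's tag. A host-side model of that protocol; the fifo size and null value are stand-ins, and the real code also bus_dmamap_sync()s the slot before looking:

#include <stdint.h>
#include <stdio.h>

#define QOUT_ENTRIES	16
#define TAG_NULL	0xFFFF		/* stand-in for SCB_LIST_NULL_LE */

static uint16_t qoutfifo[QOUT_ENTRIES];	/* DMA-visible completion ring */
static unsigned qoutfifonext;

/* A completion is pending exactly when the next slot is non-null. */
static int
completions_pending(void)
{
	return (qoutfifo[qoutfifonext % QOUT_ENTRIES] != TAG_NULL);
}

static void
drain(void)
{
	while (completions_pending()) {
		unsigned slot = qoutfifonext % QOUT_ENTRIES;

		printf("SCB tag %u complete\n", qoutfifo[slot]);
		qoutfifo[slot] = TAG_NULL;	/* re-arm the slot */
		qoutfifonext++;
	}
}

int
main(void)
{
	for (unsigned i = 0; i < QOUT_ENTRIES; i++)
		qoutfifo[i] = TAG_NULL;
	/* "Firmware" posts two completions. */
	qoutfifo[0] = 7;
	qoutfifo[1] = 3;
	drain();		/* no device register read was needed */
	return (0);
}
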
+ */ + if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0 + && (ahd_check_cmdcmpltqueues(ahd) != 0)) + intstat = CMDCMPLT; + else + intstat = ahd_inb(ahd, INTSTAT); + + if (intstat & CMDCMPLT) { + ahd_outb(ahd, CLRINT, CLRCMDINT); + + /* + * Ensure that the chip sees that we've cleared + * this interrupt before we walk the output fifo. + * Otherwise, we may, due to posted bus writes, + * clear the interrupt after we finish the scan, + * and after the sequencer has added new entries + * and asserted the interrupt again. + */ + ahd_flush_device_writes(ahd); + ahd_run_qoutfifo(ahd); +#ifdef AHD_TARGET_MODE + if ((ahd->flags & AHD_TARGETROLE) != 0) + ahd_run_tqinfifo(ahd, /*paused*/FALSE); +#endif + } + + if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) + /* Hot eject */ + return; + + if ((intstat & INT_PEND) == 0) + return; + + if (intstat & HWERRINT) { + ahd_handle_hwerrint(ahd); + return; + } + + if ((intstat & (PCIINT|SPLTINT)) != 0) { + ahd->bus_intr(ahd); + return; + } + + if ((intstat & SEQINT) != 0) + ahd_handle_seqint(ahd, intstat); + + if ((intstat & SCSIINT) != 0) + ahd_handle_scsiint(ahd, intstat); +} + +#endif /* _AIC79XX_INLINE_H_ */ diff --git a/sys/dev/aic7xxx/aic79xx_osm.c b/sys/dev/aic7xxx/aic79xx_osm.c new file mode 100644 index 0000000..51071e6 --- /dev/null +++ b/sys/dev/aic7xxx/aic79xx_osm.c @@ -0,0 +1,1992 @@ +/* + * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $Id$ + * + * $FreeBSD$ + */ + +#include <dev/aic7xxx/aic79xx_osm.h> +#include <dev/aic7xxx/aic79xx_inline.h> + +#include "opt_ddb.h" +#ifdef DDB +#include <ddb/ddb.h> +#endif + +#ifndef AHD_TMODE_ENABLE +#define AHD_TMODE_ENABLE 0 +#endif + +#define ccb_scb_ptr spriv_ptr0 + +#if UNUSED +static void ahd_dump_targcmd(struct target_cmd *cmd); +#endif +static int ahd_modevent(module_t mod, int type, void *data); +static void ahd_action(struct cam_sim *sim, union ccb *ccb); +static void ahd_set_tran_settings(struct ahd_softc *ahd, + int our_id, char channel, + struct ccb_trans_settings *cts); +static void ahd_get_tran_settings(struct ahd_softc *ahd, + int our_id, char channel, + struct ccb_trans_settings *cts); +static void ahd_async(void *callback_arg, uint32_t code, + struct cam_path *path, void *arg); +static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, + int nsegments, int error); +static void ahd_poll(struct cam_sim *sim); +static void ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim, + struct ccb_scsiio *csio, struct scb *scb); +static void ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, + union ccb *ccb); +static int ahd_create_path(struct ahd_softc *ahd, + char channel, u_int target, u_int lun, + struct cam_path **path); + +#if NOT_YET +static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb); +#endif + +static int +ahd_create_path(struct ahd_softc *ahd, char channel, u_int target, + u_int lun, struct cam_path **path) +{ + path_id_t path_id; + + if (channel == 'B') + path_id = cam_sim_path(ahd->platform_data->sim_b); + else + path_id = cam_sim_path(ahd->platform_data->sim); + + return (xpt_create_path(path, /*periph*/NULL, + path_id, target, lun)); +} + +int +ahd_map_int(struct ahd_softc *ahd) +{ + int error; + + /* Hook up our interrupt handler */ + error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq, + INTR_TYPE_CAM, ahd_platform_intr, ahd, + &ahd->platform_data->ih); + if (error != 0) + device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n", + error); + return (error); +} + +/* + * Attach all the sub-devices we can find + */ +int +ahd_attach(struct ahd_softc *ahd) +{ + char ahd_info[256]; + struct ccb_setasync csa; + struct cam_devq *devq; + struct cam_sim *sim; + struct cam_path *path; + long s; + int count; + + count = 0; + sim = NULL; + + ahd_controller_info(ahd, ahd_info); + printf("%s\n", ahd_info); + ahd_lock(ahd, &s); + + /* + * Create the device queue for our SIM(s). 
+ */ + devq = cam_simq_alloc(AHD_MAX_QUEUE); + if (devq == NULL) + goto fail; + + /* + * Construct our SIM entry + */ + sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd, + device_get_unit(ahd->dev_softc), + 1, /*XXX*/256, devq); + if (sim == NULL) { + cam_simq_free(devq); + goto fail; + } + + if (xpt_bus_register(sim, /*bus_id*/0) != CAM_SUCCESS) { + cam_sim_free(sim, /*free_devq*/TRUE); + sim = NULL; + goto fail; + } + + if (xpt_create_path(&path, /*periph*/NULL, + cam_sim_path(sim), CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + xpt_bus_deregister(cam_sim_path(sim)); + cam_sim_free(sim, /*free_devq*/TRUE); + sim = NULL; + goto fail; + } + + xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); + csa.ccb_h.func_code = XPT_SASYNC_CB; + csa.event_enable = AC_LOST_DEVICE; + csa.callback = ahd_async; + csa.callback_arg = sim; + xpt_action((union ccb *)&csa); + count++; + +fail: + ahd->platform_data->sim = sim; + ahd->platform_data->path = path; + ahd_unlock(ahd, &s); + + if (count != 0) + /* We have to wait until after any system dumps... */ + ahd->platform_data->eh = + EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown, + ahd, SHUTDOWN_PRI_DEFAULT); + + return (count); +} + +/* + * Catch an interrupt from the adapter + */ +void +ahd_platform_intr(void *arg) +{ + struct ahd_softc *ahd; + + ahd = (struct ahd_softc *)arg; + ahd_intr(ahd); +} + +/* + * We have an scb which has been processed by the + * adaptor, now we look to see how the operation + * went. + */ +void +ahd_done(struct ahd_softc *ahd, struct scb *scb) +{ + union ccb *ccb; + + CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE, + ("ahd_done - scb %d\n", SCB_GET_TAG(scb))); + + ccb = scb->io_ctx; + LIST_REMOVE(scb, pending_links); + if ((scb->flags & SCB_UNTAGGEDQ) != 0) { + struct scb_tailq *untagged_q; + int target_offset; + + target_offset = SCB_GET_TARGET_OFFSET(ahd, scb); + untagged_q = &ahd->untagged_queues[target_offset]; + TAILQ_REMOVE(untagged_q, scb, links.tqe); + scb->flags &= ~SCB_UNTAGGEDQ; + ahd_run_untagged_queue(ahd, untagged_q); + } + + untimeout(ahd_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); + + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { + bus_dmasync_op_t op; + + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) + op = BUS_DMASYNC_POSTREAD; + else + op = BUS_DMASYNC_POSTWRITE; + bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op); + bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap); + } + +#ifdef AHD_TARGET_MODE + if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { + struct cam_path *ccb_path; + + /* + * If we have finally disconnected, clean up our + * pending device state. + * XXX - There may be error states that cause where + * we will remain connected. + */ + ccb_path = ccb->ccb_h.path; + if (ahd->pending_device != NULL + && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) { + + if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { + ahd->pending_device = NULL; + } else { + xpt_print_path(ccb->ccb_h.path); + printf("Still disconnected\n"); + ahd_freeze_ccb(ccb); + } + } + + if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) + ccb->ccb_h.status |= CAM_REQ_CMP; + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + ahd_free_scb(ahd, scb); + xpt_done(ccb); + return; + } +#endif + + /* + * If the recovery SCB completes, we have to be + * out of our timeout. + */ + if ((scb->flags & SCB_RECOVERY_SCB) != 0) { + struct scb *list_scb; + + /* + * We were able to complete the command successfully, + * so reinstate the timeouts for all other pending + * commands. 
+ */ + LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) { + union ccb *ccb; + uint64_t time; + + ccb = list_scb->io_ctx; + if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) + continue; + + time = ccb->ccb_h.timeout; + time *= hz; + time /= 1000; + ccb->ccb_h.timeout_ch = + timeout(ahd_timeout, list_scb, time); + } + + if (ahd_get_transaction_status(scb) == CAM_BDR_SENT + || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED) + ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); + ahd_print_path(ahd, scb); + printf("no longer in timeout, status = %x\n", + ccb->ccb_h.status); + } + + /* Don't clobber any existing error state */ + if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) { + ccb->ccb_h.status |= CAM_REQ_CMP; + } else if ((scb->flags & SCB_SENSE) != 0) { + /* + * We performed autosense retrieval. + * + * Zero any sense not transferred by the + * device. The SCSI spec mandates that any + * untransfered data should be assumed to be + * zero. Complete the 'bounce' of sense information + * through buffers accessible via bus-space by + * copying it into the clients csio. + */ + memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); + memcpy(&ccb->csio.sense_data, + ahd_get_sense_buf(ahd, scb), +/* XXX What size do we want to use??? */ + sizeof(ccb->csio.sense_data) + - ccb->csio.sense_resid); + scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID; + } else if ((scb->flags & SCB_PKT_SENSE) != 0) { + struct scsi_status_iu_header *siu; + u_int sense_len; + int i; + + /* + * Copy only the sense data into the provided buffer. + */ + siu = (struct scsi_status_iu_header *)scb->sense_data; + sense_len = MIN(scsi_4btoul(siu->sense_length), + sizeof(ccb->csio.sense_data)); + memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); + memcpy(&ccb->csio.sense_data, + ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu), + sense_len); + printf("Copied %d bytes of sense data offset %d:", sense_len, + SIU_SENSE_OFFSET(siu)); + for (i = 0; i < sense_len; i++) + printf(" 0x%x", ((uint8_t *)&ccb->csio.sense_data)[i]); + printf("\n"); + scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID; + } + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + ahd_free_scb(ahd, scb); + xpt_done(ccb); +} + +static void +ahd_action(struct cam_sim *sim, union ccb *ccb) +{ + struct ahd_softc *ahd; +#ifdef AHD_TARGET_MODE + struct ahd_tmode_lstate *lstate; +#endif + u_int target_id; + u_int our_id; + long s; + + CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n")); + + ahd = (struct ahd_softc *)cam_sim_softc(sim); + + target_id = ccb->ccb_h.target_id; + our_id = SIM_SCSI_ID(ahd, sim); + + switch (ccb->ccb_h.func_code) { + /* Common cases first */ +#ifdef AHD_TARGET_MODE + case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ + case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ + { + struct ahd_tmode_tstate *tstate; + cam_status status; + + status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, + &lstate, TRUE); + + if (status != CAM_REQ_CMP) { + if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { + /* Response from the black hole device */ + tstate = NULL; + lstate = ahd->black_hole; + } else { + ccb->ccb_h.status = status; + xpt_done(ccb); + break; + } + } + if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { + + ahd_lock(ahd, &s); + SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, + sim_links.sle); + ccb->ccb_h.status = CAM_REQ_INPROG; + if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0) + ahd_run_tqinfifo(ahd, /*paused*/FALSE); + ahd_unlock(ahd, &s); + break; + } + + /* + * The target_id represents the 
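
Both autosense branches in ahd_done() above follow the same bounce discipline: zero the client's sense buffer first, since the SCSI spec says untransferred sense bytes must read as zero, then copy no more than either side can hold. The same shape in isolation, with an assumed client buffer size:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CLIENT_SENSE_SIZE 32	/* assumed size of the client's buffer */

static void
copy_sense(uint8_t *client, const uint8_t *hw_sense, size_t hw_len)
{
	size_t n = hw_len < CLIENT_SENSE_SIZE ? hw_len : CLIENT_SENSE_SIZE;

	memset(client, 0, CLIENT_SENSE_SIZE);	/* untransferred bytes read 0 */
	memcpy(client, hw_sense, n);		/* bounded copy */
}

int
main(void)
{
	uint8_t client[CLIENT_SENSE_SIZE];
	uint8_t hw[18] = { 0x70, 0, 0x05 };	/* fixed-format, ILLEGAL REQUEST */

	copy_sense(client, hw, sizeof(hw));
	printf("key 0x%x, byte 20 = %u\n", client[2] & 0xF, client[20]);
	return (0);
}
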
target we attempt to + * select. In target mode, this is the initiator of + * the original command. + */ + our_id = target_id; + target_id = ccb->csio.init_id; + /* FALLTHROUGH */ + } +#endif + case XPT_SCSI_IO: /* Execute the requested I/O operation */ + case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ + { + struct scb *scb; + struct hardware_scb *hscb; + + if ((ahd->flags & AHD_INITIATORROLE) == 0 + && (ccb->ccb_h.func_code == XPT_SCSI_IO + || ccb->ccb_h.func_code == XPT_RESET_DEV)) { + ccb->ccb_h.status = CAM_PROVIDE_FAIL; + xpt_done(ccb); + return; + } + + /* + * get an scb to use. + */ + ahd_lock(ahd, &s); + if ((scb = ahd_get_scb(ahd)) == NULL) { + + xpt_freeze_simq(sim, /*count*/1); + ahd->flags |= AHD_RESOURCE_SHORTAGE; + ahd_unlock(ahd, &s); + ccb->ccb_h.status = CAM_REQUEUE_REQ; + xpt_done(ccb); + return; + } + ahd_unlock(ahd, &s); + + hscb = scb->hscb; + + CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, + ("start scb(%p)\n", scb)); + scb->io_ctx = ccb; + /* + * So we can find the SCB when an abort is requested + */ + ccb->ccb_h.ccb_scb_ptr = scb; + + /* + * Put all the arguments for the xfer in the scb + */ + hscb->control = 0; + hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id); + hscb->lun = ccb->ccb_h.target_lun; + if (ccb->ccb_h.func_code == XPT_RESET_DEV) { + hscb->cdb_len = 0; + scb->flags |= SCB_DEVICE_RESET; + hscb->control |= MK_MESSAGE; + ahd_execute_scb(scb, NULL, 0, 0); + } else { +#ifdef AHD_TARGET_MODE + if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { + struct target_data *tdata; + + tdata = &hscb->shared_data.tdata; + if (ahd->pending_device == lstate) + scb->flags |= SCB_TARGET_IMMEDIATE; + hscb->control |= TARGET_SCB; + tdata->target_phases = 0; + if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { + tdata->target_phases |= SPHASE_PENDING; + tdata->scsi_status = + ccb->csio.scsi_status; + } + if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) + tdata->target_phases |= NO_DISCONNECT; + + tdata->initiator_tag = + ahd_htole16(ccb->csio.tag_id); + } +#endif + if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) + hscb->control |= ccb->csio.tag_action; + + ahd_setup_data(ahd, sim, &ccb->csio, scb); + } + break; + } +#ifdef AHD_TARGET_MODE + case XPT_NOTIFY_ACK: + case XPT_IMMED_NOTIFY: + { + struct ahd_tmode_tstate *tstate; + struct ahd_tmode_lstate *lstate; + cam_status status; + + status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, + &lstate, TRUE); + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + xpt_done(ccb); + break; + } + SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, + sim_links.sle); + ccb->ccb_h.status = CAM_REQ_INPROG; + ahd_send_lstate_events(ahd, lstate); + break; + } + case XPT_EN_LUN: /* Enable LUN as a target */ + ahd_handle_en_lun(ahd, sim, ccb); + xpt_done(ccb); + break; +#endif + case XPT_ABORT: /* Abort the specified CCB */ + { + ahd_abort_ccb(ahd, sim, ccb); + break; + } + case XPT_SET_TRAN_SETTINGS: + { + ahd_lock(ahd, &s); + ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim), + SIM_CHANNEL(ahd, sim), &ccb->cts); + ahd_unlock(ahd, &s); + xpt_done(ccb); + break; + } + case XPT_GET_TRAN_SETTINGS: + /* Get default/user set transfer settings for the target */ + { + ahd_lock(ahd, &s); + ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim), + SIM_CHANNEL(ahd, sim), &ccb->cts); + ahd_unlock(ahd, &s); + xpt_done(ccb); + break; + } + case XPT_CALC_GEOMETRY: + { + struct ccb_calc_geometry *ccg; + uint32_t size_mb; + uint32_t secs_per_cylinder; + int extended; + + ccg = &ccb->ccg; + size_mb = ccg->volume_size + / ((1024L * 
1024L) / ccg->block_size); + extended = ahd->flags & AHD_EXTENDED_TRANS_A; + + if (size_mb > 1024 && extended) { + ccg->heads = 255; + ccg->secs_per_track = 63; + } else { + ccg->heads = 64; + ccg->secs_per_track = 32; + } + secs_per_cylinder = ccg->heads * ccg->secs_per_track; + ccg->cylinders = ccg->volume_size / secs_per_cylinder; + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_done(ccb); + break; + } + case XPT_RESET_BUS: /* Reset the specified SCSI bus */ + { + int found; + + ahd_lock(ahd, &s); + found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim), + /*initiate reset*/TRUE); + ahd_unlock(ahd, &s); + if (bootverbose) { + xpt_print_path(SIM_PATH(ahd, sim)); + printf("SCSI bus reset delivered. " + "%d SCBs aborted.\n", found); + } + ccb->ccb_h.status = CAM_REQ_CMP; + xpt_done(ccb); + break; + } + case XPT_TERM_IO: /* Terminate the I/O process */ + /* XXX Implement */ + ccb->ccb_h.status = CAM_REQ_INVALID; + xpt_done(ccb); + break; + case XPT_PATH_INQ: /* Path routing inquiry */ + { + struct ccb_pathinq *cpi = &ccb->cpi; + + cpi->version_num = 1; /* XXX??? */ + cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; + if ((ahd->features & AHD_WIDE) != 0) + cpi->hba_inquiry |= PI_WIDE_16; + if ((ahd->features & AHD_TARGETMODE) != 0) { + cpi->target_sprt = PIT_PROCESSOR + | PIT_DISCONNECT + | PIT_TERM_IO; + } else { + cpi->target_sprt = 0; + } + cpi->hba_misc = 0; + cpi->hba_eng_cnt = 0; + cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7; + cpi->max_lun = AHD_NUM_LUNS - 1; + cpi->initiator_id = ahd->our_id; + if ((ahd->flags & AHD_RESET_BUS_A) == 0) { + cpi->hba_misc |= PIM_NOBUSRESET; + } + cpi->bus_id = cam_sim_bus(sim); + cpi->base_transfer_speed = 3300; + strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); + strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); + strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); + cpi->unit_number = cam_sim_unit(sim); +#ifdef AHD_NEW_TRAN_SETTINGS + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_2; + cpi->transport = XPORT_SPI; + cpi->transport_version = 2; + cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; + cpi->transport_version = 4; + cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST; +#endif + cpi->ccb_h.status = CAM_REQ_CMP; + xpt_done(ccb); + break; + } + default: + ccb->ccb_h.status = CAM_PROVIDE_FAIL; + xpt_done(ccb); + break; + } +} + + +static void +ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel, + struct ccb_trans_settings *cts) +{ +#ifdef AHD_NEW_TRAN_SETTINGS + struct ahd_devinfo devinfo; + struct ccb_trans_settings_scsi *scsi; + struct ccb_trans_settings_spi *spi; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + uint16_t *discenable; + uint16_t *tagenable; + u_int update_type; + + scsi = &cts->proto_specific.scsi; + spi = &cts->xport_specific.spi; + ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim), + cts->ccb_h.target_id, + cts->ccb_h.target_lun, + SIM_CHANNEL(ahd, sim), + ROLE_UNKNOWN); + tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, + devinfo.our_scsiid, + devinfo.target, &tstate); + update_type = 0; + if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { + update_type |= AHD_TRANS_GOAL; + discenable = &tstate->discenable; + tagenable = &tstate->tagenable; + tinfo->curr.protocol_version = cts->protocol_version; + tinfo->curr.transport_version = cts->transport_version; + tinfo->goal.protocol_version = cts->protocol_version; + tinfo->goal.transport_version = cts->transport_version; + } else if (cts->type == CTS_TYPE_USER_SETTINGS) { + update_type |= AHD_TRANS_USER; + discenable = 
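
The XPT_CALC_GEOMETRY case earlier in ahd_action() is pure arithmetic: megabytes are derived from the block count, and volumes over 1GB with extended translation enabled get the 255-head/63-sector fiction. The same computation as a standalone function:

#include <stdint.h>
#include <stdio.h>

static void
calc_geometry(uint32_t volume_size, uint32_t block_size, int extended,
    uint32_t *heads, uint32_t *secs, uint32_t *cyls)
{
	uint32_t size_mb = volume_size / ((1024L * 1024L) / block_size);

	if (size_mb > 1024 && extended) {
		*heads = 255;		/* extended translation */
		*secs = 63;
	} else {
		*heads = 64;
		*secs = 32;
	}
	*cyls = volume_size / (*heads * *secs);
}

int
main(void)
{
	uint32_t h, s, c;

	/* 4GB disk, 512-byte blocks, extended translation on. */
	calc_geometry(8388608UL, 512, 1, &h, &s, &c);
	printf("C/H/S = %u/%u/%u\n", (unsigned)c, (unsigned)h, (unsigned)s);
	return (0);
}
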
&ahd->user_discenable; + tagenable = &ahd->user_tagenable; + tinfo->user.protocol_version = cts->protocol_version; + tinfo->user.transport_version = cts->transport_version; + } else { + cts->ccb_h.status = CAM_REQ_INVALID; + return; + } + + if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { + if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) + *discenable |= devinfo.target_mask; + else + *discenable &= ~devinfo.target_mask; + } + + if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { + if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) + *tagenable |= devinfo.target_mask; + else + *tagenable &= ~devinfo.target_mask; + } + + if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { + ahd_validate_width(ahd, /*tinfo limit*/NULL, + &spi->bus_width, ROLE_UNKNOWN); + ahd_set_width(ahd, &devinfo, spi->bus_width, + update_type, /*paused*/FALSE); + } + + if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) { + if (update_type == AHD_TRANS_USER) + spi->ppr_options = tinfo->user.ppr_options; + else + spi->ppr_options = tinfo->goal.ppr_options; + } + + if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) { + if (update_type == AHD_TRANS_USER) + spi->sync_offset = tinfo->user.offset; + else + spi->sync_offset = tinfo->goal.offset; + } + + if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { + if (update_type == AHD_TRANS_USER) + spi->sync_period = tinfo->user.period; + else + spi->sync_period = tinfo->goal.period; + } + + if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) + || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { + u_int maxsync; + + maxsync = AHD_SYNCRATE_MAX; + + if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT) + spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; + + if ((*discenable & devinfo.target_mask) == 0) + spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ; + + ahd_find_syncrate(ahd, &spi->sync_period, + &spi->ppr_options, maxsync); + ahd_validate_offset(ahd, /*tinfo limit*/NULL, + spi->sync_period, &spi->sync_offset, + spi->bus_width, ROLE_UNKNOWN); + + /* We use a period of 0 to represent async */ + if (spi->sync_offset == 0) { + spi->sync_period = 0; + spi->ppr_options = 0; + } + + ahd_set_syncrate(ahd, &devinfo, spi->sync_period, + spi->sync_offset, spi->ppr_options, + update_type, /*paused*/FALSE); + } + cts->ccb_h.status = CAM_REQ_CMP; +#else + struct ahd_devinfo devinfo; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + uint16_t *discenable; + uint16_t *tagenable; + u_int update_type; + + ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim), + cts->ccb_h.target_id, + cts->ccb_h.target_lun, + SIM_CHANNEL(ahd, sim), + ROLE_UNKNOWN); + tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, + devinfo.our_scsiid, + devinfo.target, &tstate); + update_type = 0; + if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { + update_type |= AHD_TRANS_GOAL; + discenable = &tstate->discenable; + tagenable = &tstate->tagenable; + } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { + update_type |= AHD_TRANS_USER; + discenable = &ahd->user_discenable; + tagenable = &ahd->user_tagenable; + } else { + cts->ccb_h.status = CAM_REQ_INVALID; + return; + } + + if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { + if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) + *discenable |= devinfo.target_mask; + else + *discenable &= ~devinfo.target_mask; + } + + if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { + if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) + *tagenable |= devinfo.target_mask; + else + *tagenable &= ~devinfo.target_mask; + } + + if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { + ahd_validate_width(ahd, /*tinfo limit*/NULL, + 
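
The negotiation block above enforces three dependencies before programming a rate: DT clocking requires a 16-bit bus, information units require disconnection, and a zero offset collapses everything to async. A sketch of just those clamping rules, with invented flag values standing in for the MSG_EXT_PPR_* definitions:

#include <stdio.h>

#define PPR_DT_REQ	0x02	/* stand-ins for MSG_EXT_PPR_* bits */
#define PPR_IU_REQ	0x01
#define WIDTH_16BIT	1

static void
clamp_spi(unsigned *period, unsigned *offset, unsigned *ppr,
    unsigned width, int disc_enabled)
{
	if (width != WIDTH_16BIT)
		*ppr &= ~PPR_DT_REQ;	/* DT needs a wide bus */
	if (!disc_enabled)
		*ppr &= ~PPR_IU_REQ;	/* IU needs disconnection */
	if (*offset == 0) {		/* zero offset means async */
		*period = 0;
		*ppr = 0;
	}
}

int
main(void)
{
	unsigned period = 9, offset = 127, ppr = PPR_DT_REQ | PPR_IU_REQ;

	clamp_spi(&period, &offset, &ppr, /*narrow*/0, /*disc*/1);
	printf("period %u offset %u ppr 0x%x\n", period, offset, ppr);
	return (0);
}
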
&cts->bus_width, ROLE_UNKNOWN); + ahd_set_width(ahd, &devinfo, cts->bus_width, + update_type, /*paused*/FALSE); + } + + if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { + if (update_type == AHD_TRANS_USER) + cts->sync_offset = tinfo->user.offset; + else + cts->sync_offset = tinfo->goal.offset; + } + + if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { + if (update_type == AHD_TRANS_USER) + cts->sync_period = tinfo->user.period; + else + cts->sync_period = tinfo->goal.period; + } + + if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) + || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { + u_int ppr_options; + u_int maxsync; + + maxsync = AHD_SYNCRATE_MAX; + ppr_options = 0; + if (cts->sync_period <= AHD_SYNCRATE_DT + && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) { + ppr_options = tinfo->user.ppr_options + | MSG_EXT_PPR_DT_REQ; + } + + ahd_find_syncrate(ahd, &cts->sync_period, + &ppr_options, maxsync); + ahd_validate_offset(ahd, /*tinfo limit*/NULL, + cts->sync_period, &cts->sync_offset, + MSG_EXT_WDTR_BUS_8_BIT, + ROLE_UNKNOWN); + + /* We use a period of 0 to represent async */ + if (cts->sync_offset == 0) { + cts->sync_period = 0; + ppr_options = 0; + } + + if (ppr_options != 0 + && tinfo->user.transport_version >= 3) { + tinfo->goal.transport_version = + tinfo->user.transport_version; + tinfo->curr.transport_version = + tinfo->user.transport_version; + } + + ahd_set_syncrate(ahd, &devinfo, cts->sync_period, + cts->sync_offset, ppr_options, + update_type, /*paused*/FALSE); + } + cts->ccb_h.status = CAM_REQ_CMP; +#endif +} + +static void +ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel, + struct ccb_trans_settings *cts) +{ +#ifdef AHD_NEW_TRAN_SETTINGS + struct ahd_devinfo devinfo; + struct ccb_trans_settings_scsi *scsi; + struct ccb_trans_settings_spi *spi; + struct ahd_initiator_tinfo *targ_info; + struct ahd_tmode_tstate *tstate; + struct ahd_transinfo *tinfo; + + scsi = &cts->proto_specific.scsi; + spi = &cts->xport_specific.spi; + ahd_compile_devinfo(&devinfo, our_id, + cts->ccb_h.target_id, + cts->ccb_h.target_lun, + channel, ROLE_UNKNOWN); + targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, + devinfo.our_scsiid, + devinfo.target, &tstate); + + if (cts->type == CTS_TYPE_CURRENT_SETTINGS) + tinfo = &targ_info->curr; + else + tinfo = &targ_info->user; + + scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; + spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; + if (cts->type == CTS_TYPE_USER_SETTINGS) { + if ((ahd->user_discenable & devinfo.target_mask) != 0) + spi->flags |= CTS_SPI_FLAGS_DISC_ENB; + + if ((ahd->user_tagenable & devinfo.target_mask) != 0) + scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; + } else { + if ((tstate->discenable & devinfo.target_mask) != 0) + spi->flags |= CTS_SPI_FLAGS_DISC_ENB; + + if ((tstate->tagenable & devinfo.target_mask) != 0) + scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; + } + cts->protocol_version = tinfo->protocol_version; + cts->transport_version = tinfo->transport_version; + + spi->sync_period = tinfo->period; + spi->sync_offset = tinfo->offset; + spi->bus_width = tinfo->width; + spi->ppr_options = tinfo->ppr_options; + + cts->protocol = PROTO_SCSI; + cts->transport = XPORT_SPI; + spi->valid = CTS_SPI_VALID_SYNC_RATE + | CTS_SPI_VALID_SYNC_OFFSET + | CTS_SPI_VALID_BUS_WIDTH + | CTS_SPI_VALID_PPR_OPTIONS; + + if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { + scsi->valid = CTS_SCSI_VALID_TQ; + spi->valid |= CTS_SPI_VALID_DISC; + } else { + scsi->valid = 0; + } + + cts->ccb_h.status = CAM_REQ_CMP; +#else + struct ahd_devinfo devinfo; + struct 
ahd_initiator_tinfo *targ_info; + struct ahd_tmode_tstate *tstate; + struct ahd_transinfo *tinfo; + + ahd_compile_devinfo(&devinfo, our_id, + cts->ccb_h.target_id, + cts->ccb_h.target_lun, + channel, ROLE_UNKNOWN); + targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, + devinfo.our_scsiid, + devinfo.target, &tstate); + + if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) + tinfo = &targ_info->curr; + else + tinfo = &targ_info->user; + + cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); + if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) { + if ((ahd->user_discenable & devinfo.target_mask) != 0) + cts->flags |= CCB_TRANS_DISC_ENB; + + if ((ahd->user_tagenable & devinfo.target_mask) != 0) + cts->flags |= CCB_TRANS_TAG_ENB; + } else { + if ((tstate->discenable & devinfo.target_mask) != 0) + cts->flags |= CCB_TRANS_DISC_ENB; + + if ((tstate->tagenable & devinfo.target_mask) != 0) + cts->flags |= CCB_TRANS_TAG_ENB; + } + cts->sync_period = tinfo->period; + cts->sync_offset = tinfo->offset; + cts->bus_width = tinfo->width; + + cts->valid = CCB_TRANS_SYNC_RATE_VALID + | CCB_TRANS_SYNC_OFFSET_VALID + | CCB_TRANS_BUS_WIDTH_VALID; + + if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) + cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID; + + cts->ccb_h.status = CAM_REQ_CMP; +#endif +} + +static void +ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) +{ + struct ahd_softc *ahd; + struct cam_sim *sim; + + sim = (struct cam_sim *)callback_arg; + ahd = (struct ahd_softc *)cam_sim_softc(sim); + switch (code) { + case AC_LOST_DEVICE: + { + struct ahd_devinfo devinfo; + long s; + + ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim), + xpt_path_target_id(path), + xpt_path_lun_id(path), + SIM_CHANNEL(ahd, sim), + ROLE_UNKNOWN); + + /* + * Revert to async/narrow transfers + * for the next device. 
+ */ + ahd_lock(ahd, &s); + ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, + AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE); + ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, + /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR, + /*paused*/FALSE); + ahd_unlock(ahd, &s); + break; + } + default: + break; + } +} + +static void +ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, + int error) +{ + struct scb *scb; + union ccb *ccb; + struct ahd_softc *ahd; + struct ahd_initiator_tinfo *tinfo; + struct ahd_tmode_tstate *tstate; + u_int mask; + u_long s; + + scb = (struct scb *)arg; + ccb = scb->io_ctx; + ahd = scb->ahd_softc; + + if (error != 0) { + if (error == EFBIG) + ahd_set_transaction_status(scb, CAM_REQ_TOO_BIG); + else + ahd_set_transaction_status(scb, CAM_REQ_CMP_ERR); + if (nsegments != 0) + bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap); + ahd_lock(ahd, &s); + ahd_free_scb(ahd, scb); + ahd_unlock(ahd, &s); + xpt_done(ccb); + return; + } + scb->sg_count = 0; + if (nsegments != 0) { + void *sg; + bus_dmasync_op_t op; + u_int i; + + /* Copy the segments into our SG list */ + for (i = nsegments, sg = scb->sg_list; i > 0; i--) { + + sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr, + dm_segs->ds_len, + /*last*/i == 1); + dm_segs++; + } + + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) + op = BUS_DMASYNC_PREREAD; + else + op = BUS_DMASYNC_PREWRITE; + + bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op); + + if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { + struct target_data *tdata; + + tdata = &scb->hscb->shared_data.tdata; + tdata->target_phases |= DPHASE_PENDING; + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) + tdata->data_phase = P_DATAOUT; + else + tdata->data_phase = P_DATAIN; + } + } + + ahd_lock(ahd, &s); + + /* + * Last time we need to check if this SCB needs to + * be aborted. + */ + if (ahd_get_transaction_status(scb) != CAM_REQ_INPROG) { + if (nsegments != 0) + bus_dmamap_unload(ahd->buffer_dmat, + scb->dmamap); + ahd_free_scb(ahd, scb); + ahd_unlock(ahd, &s); + xpt_done(ccb); + return; + } + + tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid), + SCSIID_OUR_ID(scb->hscb->scsiid), + SCSIID_TARGET(ahd, scb->hscb->scsiid), + &tstate); + + mask = SCB_GET_TARGET_MASK(ahd, scb); + + if ((tstate->discenable & mask) != 0 + && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) + scb->hscb->control |= DISCENB; + + if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) + scb->flags |= SCB_PACKETIZED; + + if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0 + && (tinfo->goal.width != 0 + || tinfo->goal.period != 0 + || tinfo->goal.ppr_options != 0)) { + scb->flags |= SCB_NEGOTIATE; + scb->hscb->control |= MK_MESSAGE; + } else if ((tstate->auto_negotiate & mask) != 0) { + scb->flags |= SCB_AUTO_NEGOTIATE; + scb->hscb->control |= MK_MESSAGE; + } + + LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); + + ccb->ccb_h.status |= CAM_SIM_QUEUED; + + if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { + uint64_t time; + + if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) + ccb->ccb_h.timeout = 5 * 1000; + + time = ccb->ccb_h.timeout; + time *= hz; + time /= 1000; + ccb->ccb_h.timeout_ch = + timeout(ahd_timeout, (caddr_t)scb, time); + } + + scb->flags |= SCB_ACTIVE; + + if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) { + /* Define a mapping from our tag to the SCB. 
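
The segment-copy loop in ahd_execute_scb() above counts i down from nsegments so that i == 1 marks the final entry, the only one flagged "last" for the sequencer. The same loop shape in isolation:

#include <stdio.h>

struct seg { unsigned addr, len; };

static void
build_sg(const struct seg *dm_segs, int nsegments)
{
	/* Count down, as above, so the last-segment test is i == 1. */
	for (int i = nsegments; i > 0; i--) {
		int last = (i == 1);

		printf("sg: addr=0x%x len=%u%s\n",
		    dm_segs->addr, dm_segs->len, last ? " [last]" : "");
		dm_segs++;
	}
}

int
main(void)
{
	struct seg segs[] = { { 0x1000, 4096 }, { 0x8000, 512 } };

	build_sg(segs, 2);
	return (0);
}
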
*/ + ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; + ahd_pause(ahd); + ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); + ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); + ahd_unpause(ahd); + } else { + ahd_queue_scb(ahd, scb); + } + + ahd_unlock(ahd, &s); +} + +static void +ahd_poll(struct cam_sim *sim) +{ + ahd_intr(cam_sim_softc(sim)); +} + +static void +ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim, + struct ccb_scsiio *csio, struct scb *scb) +{ + struct hardware_scb *hscb; + struct ccb_hdr *ccb_h; + + hscb = scb->hscb; + ccb_h = &csio->ccb_h; + + csio->resid = 0; + csio->sense_resid = 0; + if (ccb_h->func_code == XPT_SCSI_IO) { + hscb->cdb_len = csio->cdb_len; + if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { + + if (hscb->cdb_len > MAX_CDB_LEN + && (ccb_h->flags & CAM_CDB_PHYS) == 0) { + u_long s; + + ahd_set_transaction_status(scb, + CAM_REQ_INVALID); + ahd_lock(ahd, &s); + ahd_free_scb(ahd, scb); + ahd_unlock(ahd, &s); + xpt_done((union ccb *)csio); + return; + } + if ((ccb_h->flags & CAM_CDB_PHYS) != 0) { + hscb->shared_data.idata.cdbptr = + ahd_htole64((uintptr_t)csio->cdb_io.cdb_ptr); + } else { + memcpy(hscb->shared_data.idata.cdb, + csio->cdb_io.cdb_ptr, + hscb->cdb_len); + } + } else { + if (hscb->cdb_len > MAX_CDB_LEN) { + u_long s; + + ahd_set_transaction_status(scb, + CAM_REQ_INVALID); + ahd_lock(ahd, &s); + ahd_free_scb(ahd, scb); + ahd_unlock(ahd, &s); + xpt_done((union ccb *)csio); + return; + } + memcpy(hscb->shared_data.idata.cdb, + csio->cdb_io.cdb_bytes, hscb->cdb_len); + } + } + + /* Only use S/G if there is a transfer */ + if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { + if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { + /* We've been given a pointer to a single buffer */ + if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { + int s; + int error; + + s = splsoftvm(); + error = bus_dmamap_load(ahd->buffer_dmat, + scb->dmamap, + csio->data_ptr, + csio->dxfer_len, + ahd_execute_scb, + scb, /*flags*/0); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, + * freeze the controller queue + * until our mapping is + * returned. + */ + xpt_freeze_simq(sim, + /*count*/1); + scb->io_ctx->ccb_h.status |= + CAM_RELEASE_SIMQ; + } + splx(s); + } else { + struct bus_dma_segment seg; + + /* Pointer to physical buffer */ + if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE) + panic("ahd_setup_data - Transfer size " + "larger than can device max"); + + seg.ds_addr = (bus_addr_t)csio->data_ptr; + seg.ds_len = csio->dxfer_len; + ahd_execute_scb(scb, &seg, 1, 0); + } + } else { + struct bus_dma_segment *segs; + + if ((ccb_h->flags & CAM_DATA_PHYS) != 0) + panic("ahd_setup_data - Physical segment " + "pointers unsupported"); + + if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) + panic("ahd_setup_data - Virtual segment " + "addresses unsupported"); + + /* Just use the segments provided */ + segs = (struct bus_dma_segment *)csio->data_ptr; + ahd_execute_scb(scb, segs, csio->sglist_cnt, 0); + } + } else { + ahd_execute_scb(scb, NULL, 0, 0); + } +} + +#if NOT_YET +static void +ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb) { + + if ((scb->flags & SCB_RECOVERY_SCB) == 0) { + struct scb *list_scb; + + scb->flags |= SCB_RECOVERY_SCB; + + /* + * Take all queued, but not sent SCBs out of the equation. + * Also ensure that no new CCBs are queued to us while we + * try to fix this problem. 
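
ahd_setup_data() above guards every CDB copy with a MAX_CDB_LEN check before touching the fixed-size field in the hardware SCB, failing the CCB as invalid rather than truncating. The guard-then-copy shape on its own:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CDB_LEN 16

static int
copy_cdb(uint8_t *hscb_cdb, const uint8_t *cdb, size_t cdb_len)
{
	if (cdb_len > MAX_CDB_LEN)
		return (-1);	/* caller fails the CCB: CAM_REQ_INVALID */
	memcpy(hscb_cdb, cdb, cdb_len);
	return (0);
}

int
main(void)
{
	uint8_t hscb_cdb[MAX_CDB_LEN];
	uint8_t read10[10] = { 0x28 };	/* READ(10) */

	if (copy_cdb(hscb_cdb, read10, sizeof(read10)) == 0)
		printf("cdb staged, opcode 0x%x\n", hscb_cdb[0]);
	return (0);
}
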
+ */ + if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { + xpt_freeze_simq(SCB_GET_SIM(ahd, scb), /*count*/1); + scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; + } + + /* + * Go through all of our pending SCBs and remove + * any scheduled timeouts for them. We will reschedule + * them after we've successfully fixed this problem. + */ + LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) { + union ccb *ccb; + + ccb = list_scb->io_ctx; + untimeout(ahd_timeout, list_scb, ccb->ccb_h.timeout_ch); + } + } +} +#endif + +void +ahd_timeout(void *arg) +{ + struct scb *scb; + struct ahd_softc *ahd; + ahd_mode_state saved_modes; + long s; + int target; + int lun; + char channel; + +#if NOT_YET + int i; + int found; + u_int last_phase; +#endif + + scb = (struct scb *)arg; + ahd = (struct ahd_softc *)scb->ahd_softc; + + ahd_lock(ahd, &s); + + ahd_pause_and_flushwork(ahd); + + saved_modes = ahd_save_modes(ahd); +#if 0 + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + ahd_outb(ahd, SCSISIGO, ACKO); + printf("set ACK\n"); + ahd_outb(ahd, SCSISIGO, 0); + printf("clearing Ack\n"); + ahd_restore_modes(ahd, saved_modes); +#endif + if ((scb->flags & SCB_ACTIVE) == 0) { + /* Previous timeout took care of me already */ + printf("%s: Timedout SCB already complete. " + "Interrupts may not be functioning.\n", ahd_name(ahd)); + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + return; + } + + target = SCB_GET_TARGET(ahd, scb); + channel = SCB_GET_CHANNEL(ahd, scb); + lun = SCB_GET_LUN(scb); + + ahd_print_path(ahd, scb); + printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb)); + ahd_dump_card_state(ahd); + ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim), + /*initiate reset*/TRUE); + ahd_unlock(ahd, &s); + return; +#if NOT_YET + last_phase = ahd_inb(ahd, LASTPHASE); + if (scb->sg_count > 0) { + for (i = 0; i < scb->sg_count; i++) { + printf("sg[%d] - Addr 0x%x : Length %d\n", + i, + ((struct ahd_dma_seg *)scb->sg_list)[i].addr, + ((struct ahd_dma_seg *)scb->sg_list)[i].len + & AHD_SG_LEN_MASK); + } + } + if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { + /* + * Been down this road before. + * Do a full bus reset. + */ +bus_reset: + ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); + found = ahd_reset_channel(ahd, channel, /*Initiate Reset*/TRUE); + printf("%s: Issued Channel %c Bus Reset. " + "%d SCBs aborted\n", ahd_name(ahd), channel, found); + } else { + /* + * If we are a target, transition to bus free and report + * the timeout. + * + * The target/initiator that is holding up the bus may not + * be the same as the one that triggered this timeout + * (different commands have different timeout lengths). + * If the bus is idle and we are actiing as the initiator + * for this request, queue a BDR message to the timed out + * target. Otherwise, if the timed out transaction is + * active: + * Initiator transaction: + * Stuff the message buffer with a BDR message and assert + * ATN in the hopes that the target will let go of the bus + * and go to the mesgout phase. If this fails, we'll + * get another timeout 2 seconds later which will attempt + * a bus reset. + * + * Target transaction: + * Transition to BUS FREE and report the error. + * It's good to be the target! 
+ */ + u_int active_scb_index; + u_int saved_scbptr; + + saved_scbptr = ahd_get_scbptr(ahd); + active_scb_index = saved_scbptr; + + if (last_phase != P_BUSFREE + && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0 + && (active_scb_index < ahd->scb_data.numscbs)) { + struct scb *active_scb; + + /* + * If the active SCB is not us, assume that + * the active SCB has a longer timeout than + * the timedout SCB, and wait for the active + * SCB to timeout. + */ + active_scb = ahd_lookup_scb(ahd, active_scb_index); + if (active_scb != scb) { + struct ccb_hdr *ccbh; + uint64_t newtimeout; + + ahd_print_path(ahd, scb); + printf("Other SCB Timeout%s", + (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 + ? " again\n" : "\n"); + scb->flags |= SCB_OTHERTCL_TIMEOUT; + newtimeout = + MAX(active_scb->io_ctx->ccb_h.timeout, + scb->io_ctx->ccb_h.timeout); + newtimeout *= hz; + newtimeout /= 1000; + ccbh = &scb->io_ctx->ccb_h; + scb->io_ctx->ccb_h.timeout_ch = + timeout(ahd_timeout, scb, newtimeout); + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + return; + } + + /* It's us */ + if ((scb->hscb->control & TARGET_SCB) != 0) { + + /* + * Send back any queued up transactions + * and properly record the error condition. + */ + ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), + SCB_GET_CHANNEL(ahd, scb), + SCB_GET_LUN(scb), + SCB_GET_TAG(scb), + ROLE_TARGET, + CAM_CMD_TIMEOUT); + + /* Will clear us from the bus */ + ahd_restart(ahd); + ahd_unlock(ahd, &s); + return; + } + + ahd_set_recoveryscb(ahd, active_scb); + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_outb(ahd, SCSISIGO, last_phase|ATNO); + ahd_print_path(ahd, active_scb); + printf("BDR message in message buffer\n"); + active_scb->flags |= SCB_DEVICE_RESET; + active_scb->io_ctx->ccb_h.timeout_ch = + timeout(ahd_timeout, (caddr_t)active_scb, 2 * hz); + ahd_unpause(ahd); + } else { + int disconnected; + + /* XXX Shouldn't panic. Just punt instead? */ + if ((scb->hscb->control & TARGET_SCB) != 0) + panic("Timed-out target SCB but bus idle"); + + if (last_phase != P_BUSFREE + && (ahd_inb(ahd, SSTAT0) & TARGET) != 0) { + /* XXX What happened to the SCB? */ + /* Hung target selection. Goto busfree */ + printf("%s: Hung target selection\n", + ahd_name(ahd)); + ahd_restart(ahd); + ahd_unlock(ahd, &s); + return; + } + + if (ahd_search_qinfifo(ahd, target, channel, lun, + SCB_GET_TAG(scb), ROLE_INITIATOR, + /*status*/0, SEARCH_COUNT) > 0) { + disconnected = FALSE; + } else { + disconnected = TRUE; + } + + if (disconnected) { + + ahd_set_recoveryscb(ahd, scb); + /* + * Actually re-queue this SCB in an attempt + * to select the device before it reconnects. + * In either case (selection or reselection), + * we will now issue a target reset to the + * timed-out device. + * + * Set the MK_MESSAGE control bit indicating + * that we desire to send a message. We + * also set the disconnected flag since + * in the paging case there is no guarantee + * that our SCB control byte matches the + * version on the card. We don't want the + * sequencer to abort the command thinking + * an unsolicited reselection occurred. + */ + scb->hscb->control |= MK_MESSAGE|DISCONNECTED; + scb->flags |= SCB_DEVICE_RESET; + + /* + * The sequencer will never re-reference the + * in-core SCB. To make sure we are notified + * during reslection, set the MK_MESSAGE flag + * in the card's copy of the SCB. 
+ */ + ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); + ahd_outb(ahd, SCB_CONTROL, + ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE); + + /* + * Clear out any entries in the QINFIFO first + * so we are the next SCB for this target + * to run. + */ + ahd_search_qinfifo(ahd, + SCB_GET_TARGET(ahd, scb), + channel, SCB_GET_LUN(scb), + SCB_LIST_NULL, + ROLE_INITIATOR, + CAM_REQUEUE_REQ, + SEARCH_COMPLETE); + ahd_print_path(ahd, scb); + printf("Queuing a BDR SCB\n"); + ahd_qinfifo_requeue_tail(ahd, scb); + ahd_set_scbptr(ahd, saved_scbptr); + scb->io_ctx->ccb_h.timeout_ch = + timeout(ahd_timeout, (caddr_t)scb, 2 * hz); + ahd_unpause(ahd); + } else { + /* Go "immediatly" to the bus reset */ + /* This shouldn't happen */ + ahd_set_recoveryscb(ahd, scb); + ahd_print_path(ahd, scb); + printf("SCB %d: Immediate reset. " + "Flags = 0x%x\n", SCB_GET_TAG(scb), + scb->flags); + goto bus_reset; + } + } + } + ahd_unlock(ahd, &s); +#endif +} + +static void +ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) +{ + union ccb *abort_ccb; + + abort_ccb = ccb->cab.abort_ccb; + switch (abort_ccb->ccb_h.func_code) { +#ifdef AHD_TARGET_MODE + case XPT_ACCEPT_TARGET_IO: + case XPT_IMMED_NOTIFY: + case XPT_CONT_TARGET_IO: + { + struct ahd_tmode_tstate *tstate; + struct ahd_tmode_lstate *lstate; + struct ccb_hdr_slist *list; + cam_status status; + + status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate, + &lstate, TRUE); + + if (status != CAM_REQ_CMP) { + ccb->ccb_h.status = status; + break; + } + + if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) + list = &lstate->accept_tios; + else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) + list = &lstate->immed_notifies; + else + list = NULL; + + if (list != NULL) { + struct ccb_hdr *curelm; + int found; + + curelm = SLIST_FIRST(list); + found = 0; + if (curelm == &abort_ccb->ccb_h) { + found = 1; + SLIST_REMOVE_HEAD(list, sim_links.sle); + } else { + while(curelm != NULL) { + struct ccb_hdr *nextelm; + + nextelm = + SLIST_NEXT(curelm, sim_links.sle); + + if (nextelm == &abort_ccb->ccb_h) { + found = 1; + SLIST_NEXT(curelm, + sim_links.sle) = + SLIST_NEXT(nextelm, + sim_links.sle); + break; + } + curelm = nextelm; + } + } + + if (found) { + abort_ccb->ccb_h.status = CAM_REQ_ABORTED; + xpt_done(abort_ccb); + ccb->ccb_h.status = CAM_REQ_CMP; + } else { + xpt_print_path(abort_ccb->ccb_h.path); + printf("Not found\n"); + ccb->ccb_h.status = CAM_PATH_INVALID; + } + break; + } + /* FALLTHROUGH */ + } +#endif + case XPT_SCSI_IO: + /* XXX Fully implement the hard ones */ + ccb->ccb_h.status = CAM_UA_ABORT; + break; + default: + ccb->ccb_h.status = CAM_REQ_INVALID; + break; + } + xpt_done(ccb); +} + +void +ahd_send_async(struct ahd_softc *ahd, char channel, u_int target, + u_int lun, ac_code code, void *opt_arg) +{ + struct ccb_trans_settings cts; + struct cam_path *path; + void *arg; + int error; + + arg = NULL; + error = ahd_create_path(ahd, channel, target, lun, &path); + + if (error != CAM_REQ_CMP) + return; + + switch (code) { + case AC_TRANSFER_NEG: + { +#ifdef AHD_NEW_TRAN_SETTINGS + struct ccb_trans_settings_scsi *scsi; + + cts.type = CTS_TYPE_CURRENT_SETTINGS; + scsi = &cts.proto_specific.scsi; +#else + cts.flags = CCB_TRANS_CURRENT_SETTINGS; +#endif + cts.ccb_h.path = path; + cts.ccb_h.target_id = target; + cts.ccb_h.target_lun = lun; + ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts); + arg = &cts; +#ifdef AHD_NEW_TRAN_SETTINGS + scsi->valid &= ~CTS_SCSI_VALID_TQ; + scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; +#else + cts.valid &= ~CCB_TRANS_TQ_VALID; 
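
The abort scan in ahd_abort_ccb() above removes a CCB from a singly-linked SLIST by hand: the head is a special case, and interior nodes are spliced out by repointing the predecessor. A generic version of that walk:

#include <stdio.h>

struct node { int id; struct node *next; };

static int
list_remove(struct node **head, struct node *victim)
{
	struct node *cur = *head;

	if (cur == victim) {		/* head: just advance the list */
		*head = cur->next;
		return (1);
	}
	while (cur != NULL) {		/* interior: splice around it */
		if (cur->next == victim) {
			cur->next = victim->next;
			return (1);
		}
		cur = cur->next;
	}
	return (0);			/* not found: CAM_PATH_INVALID */
}

int
main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	printf("removed: %d\n", list_remove(&head, &b));
	printf("head->next->id: %d\n", head->next->id);	/* 3 */
	return (0);
}
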
+ cts.flags &= ~CCB_TRANS_TAG_ENB; +#endif + if (opt_arg == NULL) + break; + if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED) +#ifdef AHD_NEW_TRAN_SETTINGS + scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB; + scsi->valid |= CTS_SCSI_VALID_TQ; +#else + cts.flags |= CCB_TRANS_TAG_ENB; + cts.valid |= CCB_TRANS_TQ_VALID; +#endif + break; + } + case AC_SENT_BDR: + case AC_BUS_RESET: + break; + default: + panic("ahd_send_async: Unexpected async event"); + } + xpt_async(code, path, arg); + xpt_free_path(path); +} + +void +ahd_platform_set_tags(struct ahd_softc *ahd, + struct ahd_devinfo *devinfo, int enable) +{ +} + +int +ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg) +{ + ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF, + M_NOWAIT | M_ZERO); + if (ahd->platform_data == NULL) + return (ENOMEM); + return (0); +} + +void +ahd_platform_free(struct ahd_softc *ahd) +{ + struct ahd_platform_data *pdata; + + pdata = ahd->platform_data; + if (pdata != NULL) { + if (pdata->regs[0] != NULL) + bus_release_resource(ahd->dev_softc, + pdata->regs_res_type[0], + pdata->regs_res_id[0], + pdata->regs[0]); + + if (pdata->regs[1] != NULL) + bus_release_resource(ahd->dev_softc, + pdata->regs_res_type[1], + pdata->regs_res_id[1], + pdata->regs[1]); + + if (pdata->irq != NULL) + bus_release_resource(ahd->dev_softc, + pdata->irq_res_type, + 0, pdata->irq); + + if (pdata->sim_b != NULL) { + xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL); + xpt_free_path(pdata->path_b); + xpt_bus_deregister(cam_sim_path(pdata->sim_b)); + cam_sim_free(pdata->sim_b, /*free_devq*/TRUE); + } + if (pdata->sim != NULL) { + xpt_async(AC_LOST_DEVICE, pdata->path, NULL); + xpt_free_path(pdata->path); + xpt_bus_deregister(cam_sim_path(pdata->sim)); + cam_sim_free(pdata->sim, /*free_devq*/TRUE); + } + if (pdata->eh != NULL) + EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh); + free(ahd->platform_data, M_DEVBUF); + } +} + +int +ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd) +{ + /* We don't sort softcs under FreeBSD so report equal always */ + return (0); +} + +int +ahd_detach(device_t dev) +{ + struct ahd_softc *ahd; + u_long l; + u_long s; + + ahd_list_lock(&l); + device_printf(dev, "detaching device\n"); + ahd = device_get_softc(dev); + ahd = ahd_find_softc(ahd); + if (ahd == NULL) { + device_printf(dev, "aic7xxx already detached\n"); + ahd_list_unlock(&l); + return (ENOENT); + } + ahd_lock(ahd, &s); + ahd_intr_enable(ahd, FALSE); + bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih); + ahd_unlock(ahd, &s); + ahd_free(ahd); + ahd_list_unlock(&l); + return (0); +} + +#if UNUSED +static void +ahd_dump_targcmd(struct target_cmd *cmd) +{ + uint8_t *byte; + uint8_t *last_byte; + int i; + + byte = &cmd->initiator_channel; + /* Debugging info for received commands */ + last_byte = &cmd[1].initiator_channel; + + i = 0; + while (byte < last_byte) { + if (i == 0) + printf("\t"); + printf("%#x", *byte++); + i++; + if (i == 8) { + printf("\n"); + i = 0; + } else { + printf(", "); + } + } +} +#endif + +static int +ahd_modevent(module_t mod, int type, void *data) +{ + /* XXX Deal with busy status on unload. 
*/ + return 0; +} + +static moduledata_t ahd_mod = { + "ahd", + ahd_modevent, + NULL +}; + +/********************************** DDB Hooks *********************************/ +#ifdef DDB +static struct ahd_softc *ahd_ddb_softc; +static int ahd_ddb_paused; +static int ahd_ddb_paused_on_entry; +DB_COMMAND(ahd_set_unit, ahd_ddb_set_unit) +{ + struct ahd_softc *list_ahd; + + ahd_ddb_softc = NULL; + TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { + if (list_ahd->unit == addr) + ahd_ddb_softc = list_ahd; + } + if (ahd_ddb_softc == NULL) + db_error("No matching softc found!\n"); +} + +DB_COMMAND(ahd_pause, ahd_ddb_pause) +{ + if (ahd_ddb_softc == NULL) { + db_error("Must set unit with ahd_set_unit first!\n"); + return; + } + if (ahd_ddb_paused == 0) { + ahd_ddb_paused++; + if (ahd_is_paused(ahd_ddb_softc)) { + ahd_ddb_paused_on_entry++; + return; + } + ahd_pause(ahd_ddb_softc); + } +} + +DB_COMMAND(ahd_unpause, ahd_ddb_unpause) +{ + if (ahd_ddb_softc == NULL) { + db_error("Must set unit with ahd_set_unit first!\n"); + return; + } + if (ahd_ddb_paused != 0) { + ahd_ddb_paused = 0; + if (ahd_ddb_paused_on_entry) + return; + ahd_unpause(ahd_ddb_softc); + } else if (ahd_ddb_paused_on_entry != 0) { + /* Two unpauses to clear a paused on entry. */ + ahd_ddb_paused_on_entry = 0; + ahd_unpause(ahd_ddb_softc); + } +} + +DB_COMMAND(ahd_in, ahd_ddb_in) +{ + int c; + int size; + + if (ahd_ddb_softc == NULL) { + db_error("Must set unit with ahd_set_unit first!\n"); + return; + } + if (have_addr == 0) + return; + + size = 1; + while ((c = *modif++) != '\0') { + switch (c) { + case 'b': + size = 1; + break; + case 'w': + size = 2; + break; + case 'l': + size = 4; + break; + } + } + + if (count <= 0) + count = 1; + while (--count >= 0) { + db_printf("%04x (M)%x: \t", addr, + ahd_inb(ahd_ddb_softc, MODE_PTR)); + switch (size) { + case 1: + db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr)); + break; + case 2: + db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr)); + break; + case 4: + db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr)); + break; + } + } +} + +DB_SET(ahd_out, ahd_ddb_out, db_cmd_set, CS_MORE, NULL) +{ + db_expr_t old_value; + db_expr_t new_value; + int size; + + if (ahd_ddb_softc == NULL) { + db_error("Must set unit with ahd_set_unit first!\n"); + return; + } + + switch (modif[0]) { + case '\0': + case 'b': + size = 1; + break; + case 'h': + size = 2; + break; + case 'l': + size = 4; + break; + default: + db_error("Unknown size\n"); + return; + } + + while (db_expression(&new_value)) { + switch (size) { + default: + case 1: + old_value = ahd_inb(ahd_ddb_softc, addr); + ahd_outb(ahd_ddb_softc, addr, new_value); + break; + case 2: + old_value = ahd_inw(ahd_ddb_softc, addr); + ahd_outw(ahd_ddb_softc, addr, new_value); + break; + case 4: + old_value = ahd_inl(ahd_ddb_softc, addr); + ahd_outl(ahd_ddb_softc, addr, new_value); + break; + } + db_printf("%04x (M)%x: \t0x%x\t=\t0x%x", + addr, ahd_inb(ahd_ddb_softc, MODE_PTR), + old_value, new_value); + addr += size; + } + db_skip_to_eol(); +} + +#endif + + +DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); +MODULE_DEPEND(ahd, cam, 1, 1, 1); +MODULE_VERSION(ahd, 1); diff --git a/sys/dev/aic7xxx/aic79xx_osm.h b/sys/dev/aic7xxx/aic79xx_osm.h new file mode 100644 index 0000000..4012aa7 --- /dev/null +++ b/sys/dev/aic7xxx/aic79xx_osm.h @@ -0,0 +1,577 @@ +/* + * FreeBSD platform specific driver option settings, data structures, + * function declarations and includes. + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU Public License ("GPL"). + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id$ + * + * $FreeBSD$ + */ + +#ifndef _AIC79XX_FREEBSD_H_ +#define _AIC79XX_FREEBSD_H_ + +#include <opt_aic79xx.h> /* for config options */ +#ifndef NPCI +#include <pci.h> +#endif + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> /* For device_t */ +#include <sys/eventhandler.h> +#include <sys/kernel.h> +#include <sys/malloc.h> +#include <sys/queue.h> + +#define AHD_PCI_CONFIG 1 +#include <machine/bus_memio.h> +#include <machine/bus_pio.h> +#include <machine/bus.h> +#include <machine/endian.h> +#include <machine/clock.h> +#include <machine/resource.h> + +#include <sys/rman.h> + +#if NPCI > 0 +#include <pci/pcireg.h> +#include <pci/pcivar.h> +#endif + +#include <cam/cam.h> +#include <cam/cam_ccb.h> +#include <cam/cam_debug.h> +#include <cam/cam_sim.h> +#include <cam/cam_xpt_sim.h> + +#include <cam/scsi/scsi_all.h> +#include <cam/scsi/scsi_message.h> +#include <cam/scsi/scsi_iu.h> + +#ifdef CAM_NEW_TRAN_CODE +#define AHD_NEW_TRAN_SETTINGS +#endif /* CAM_NEW_TRAN_CODE */ + +/****************************** Platform Macros *******************************/ +#define SIM_IS_SCSIBUS_B(ahd, sim) \ + (0) +#define SIM_CHANNEL(ahd, sim) \ + ('A') +#define SIM_SCSI_ID(ahd, sim) \ + (ahd->our_id) +#define SIM_PATH(ahd, sim) \ + (ahd->platform_data->path) +#define BUILD_SCSIID(ahd, sim, target_id, our_id) \ + ((((target_id) << TID_SHIFT) & TID) | (our_id)) + + +#define SCB_GET_SIM(ahd, scb) \ + ((ahd)->platform_data->sim) + +#ifndef offsetof +#define offsetof(type, member) ((size_t)(&((type *)0)->member)) +#endif +/************************* Forward Declarations *******************************/ +typedef device_t ahd_dev_softc_t; +typedef union ccb *ahd_io_ctx_t; + +/***************************** Bus Space/DMA **********************************/ +#define ahd_dma_tag_create(ahd, parent_tag, alignment, boundary, \ + lowaddr, highaddr, filter, filterarg, \ + maxsize, nsegments, maxsegsz, flags, \ + dma_tagp) \ + bus_dma_tag_create(parent_tag, alignment, boundary, \ + lowaddr, highaddr, filter, filterarg, \ + maxsize, nsegments, maxsegsz, flags, \ + dma_tagp) + +#define ahd_dma_tag_destroy(ahd, tag) \ + bus_dma_tag_destroy(tag) + +#define 
ahd_dmamem_alloc(ahd, dmat, vaddr, flags, mapp) \ + bus_dmamem_alloc(dmat, vaddr, flags, mapp) + +#define ahd_dmamem_free(ahd, dmat, vaddr, map) \ + bus_dmamem_free(dmat, vaddr, map) + +#define ahd_dmamap_create(ahd, tag, flags, mapp) \ + bus_dmamap_create(tag, flags, mapp) + +#define ahd_dmamap_destroy(ahd, tag, map) \ + bus_dmamap_destroy(tag, map) + +#define ahd_dmamap_load(ahd, dmat, map, addr, buflen, callback, \ + callback_arg, flags) \ + bus_dmamap_load(dmat, map, addr, buflen, callback, callback_arg, flags) + +#define ahd_dmamap_unload(ahd, tag, map) \ + bus_dmamap_unload(tag, map) + +/* XXX Need to update Bus DMA for partial map syncs */ +#define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op) \ + bus_dmamap_sync(dma_tag, dmamap, op) + +/************************ Tunable Driver Parameters **************************/ +/* + * The number of dma segments supported. The sequencer can handle any number + * of physically contiguous S/G entrys. To reduce the driver's memory + * consumption, we limit the number supported to be sufficient to handle + * the largest mapping supported by the kernel, MAXPHYS. Assuming the + * transfer is as fragmented as possible and unaligned, this turns out to + * be the number of paged sized transfers in MAXPHYS plus an extra element + * to handle any unaligned residual. The sequencer fetches SG elements + * in cacheline sized chucks, so make the number per-transaction an even + * multiple of 16 which should align us on even the largest of cacheline + * boundaries. + */ +#define AHD_NSEG (roundup(btoc(MAXPHYS) + 1, 16)) + +/* This driver supports target mode */ +#if NOT_YET +#define AHD_TARGET_MODE 1 +#endif + +/************************** Softc/SCB Platform Data ***************************/ +struct ahd_platform_data { + /* + * Hooks into the XPT. + */ + struct cam_sim *sim; + struct cam_sim *sim_b; + struct cam_path *path; + struct cam_path *path_b; + + int regs_res_type[2]; + int regs_res_id[2]; + int irq_res_type; + struct resource *regs[2]; + struct resource *irq; + void *ih; + eventhandler_tag eh; +}; + +struct scb_platform_data { +}; + +/********************************* Byte Order *********************************/ +/* + * XXX Waiting for FreeBSD byte swapping functions. + * For now assume host is Little Endian. 
+ */ +#define ahd_htobe16(x) x +#define ahd_htobe32(x) x +#define ahd_htobe64(x) x +#define ahd_htole16(x) x +#define ahd_htole32(x) x +#define ahd_htole64(x) x + +#define ahd_be16toh(x) x +#define ahd_be32toh(x) x +#define ahd_be64toh(x) x +#define ahd_le16toh(x) x +#define ahd_le32toh(x) x +#define ahd_le64toh(x) x + +/************************** Timer DataStructures ******************************/ +typedef struct callout ahd_timer_t; + +/***************************** Core Includes **********************************/ +#include <dev/aic7xxx/aic79xx.h> + +/***************************** Timer Facilities *******************************/ +#define ahd_timer_init callout_init +#define ahd_timer_stop callout_stop + +static __inline void +ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg) +{ + callout_reset(timer, (usec * hz)/1000000, func, arg); +} + +/*************************** Device Access ************************************/ +#define ahd_inb(ahd, port) \ + bus_space_read_1((ahd)->tags[(port) >> 8], \ + (ahd)->bshs[(port) >> 8], (port) & 0xFF) + +#define ahd_outb(ahd, port, value) \ + bus_space_write_1((ahd)->tags[(port) >> 8], \ + (ahd)->bshs[(port) >> 8], (port) & 0xFF, value) + +#define ahd_inw_atomic(ahd, port) \ + ahd_le16toh(bus_space_read_2((ahd)->tags[(port) >> 8], \ + (ahd)->bshs[(port) >> 8], (port) & 0xFF)) + +#define ahd_outw_atomic(ahd, port, value) \ + bus_space_write_2((ahd)->tags[(port) >> 8], \ + (ahd)->bshs[(port) >> 8], \ + (port & 0xFF), ahd_htole16(value)) + +#define ahd_outsb(ahd, port, valp, count) \ + bus_space_write_multi_1((ahd)->tags[(port) >> 8], \ + (ahd)->bshs[(port) >> 8], \ + (port & 0xFF), valp, count) + +#define ahd_insb(ahd, port, valp, count) \ + bus_space_read_multi_1((ahd)->tags[(port) >> 8], \ + (ahd)->bshs[(port) >> 8], \ + (port & 0xFF), valp, count) + +static __inline void ahd_flush_device_writes(struct ahd_softc *); + +static __inline void +ahd_flush_device_writes(struct ahd_softc *ahd) +{ + /* XXX Is this sufficient for all architectures??? 
*/ + ahd_inb(ahd, INTSTAT); +} + +/**************************** Locking Primitives ******************************/ +/* Lock protecting internal data structures */ +static __inline void ahd_lockinit(struct ahd_softc *); +static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags); +static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags); + +/* Lock held during command compeletion to the upper layer */ +static __inline void ahd_done_lockinit(struct ahd_softc *); +static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags); +static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags); + +/* Lock held during ahc_list manipulation and ahc softc frees */ +static __inline void ahd_list_lockinit(void); +static __inline void ahd_list_lock(unsigned long *flags); +static __inline void ahd_list_unlock(unsigned long *flags); + +static __inline void +ahd_lockinit(struct ahd_softc *ahd) +{ +} + +static __inline void +ahd_lock(struct ahd_softc *ahd, unsigned long *flags) +{ + *flags = splcam(); +} + +static __inline void +ahd_unlock(struct ahd_softc *ahd, unsigned long *flags) +{ + splx(*flags); +} + +/* Lock held during command compeletion to the upper layer */ +static __inline void +ahd_done_lockinit(struct ahd_softc *ahd) +{ +} + +static __inline void +ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags) +{ +} + +static __inline void +ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags) +{ +} + +/* Lock held during ahc_list manipulation and ahc softc frees */ +static __inline void +ahd_list_lockinit() +{ +} + +static __inline void +ahd_list_lock(unsigned long *flags) +{ +} + +static __inline void +ahd_list_unlock(unsigned long *flags) +{ +} +/****************************** OS Primitives *********************************/ +#define ahd_delay DELAY + +/************************** Transaction Operations ****************************/ +static __inline void ahd_set_transaction_status(struct scb *, uint32_t); +static __inline void ahd_set_scsi_status(struct scb *, uint32_t); +static __inline uint32_t ahd_get_transaction_status(struct scb *); +static __inline uint32_t ahd_get_scsi_status(struct scb *); +static __inline void ahd_set_transaction_tag(struct scb *, int, u_int); +static __inline u_long ahd_get_transfer_length(struct scb *); +static __inline int ahd_get_transfer_dir(struct scb *); +static __inline void ahd_set_residual(struct scb *, u_long); +static __inline void ahd_set_sense_residual(struct scb *, u_long); +static __inline u_long ahd_get_residual(struct scb *); +static __inline int ahd_perform_autosense(struct scb *); +static __inline uint32_t ahd_get_sense_bufsize(struct ahd_softc*, struct scb*); +static __inline void ahd_freeze_simq(struct ahd_softc *); +static __inline void ahd_release_simq(struct ahd_softc *); +static __inline void ahd_freeze_ccb(union ccb *ccb); +static __inline void ahd_freeze_scb(struct scb *scb); +static __inline void ahd_platform_freeze_devq(struct ahd_softc *, struct scb *); +static __inline int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status); + +static __inline +void ahd_set_transaction_status(struct scb *scb, uint32_t status) +{ + scb->io_ctx->ccb_h.status &= ~CAM_STATUS_MASK; + scb->io_ctx->ccb_h.status |= status; +} + +static __inline +void ahd_set_scsi_status(struct scb *scb, uint32_t status) +{ + scb->io_ctx->csio.scsi_status = status; +} + +static __inline +uint32_t ahd_get_transaction_status(struct scb *scb) 
+{ + return (scb->io_ctx->ccb_h.status & CAM_STATUS_MASK); +} + +static __inline +uint32_t ahd_get_scsi_status(struct scb *scb) +{ + return (scb->io_ctx->csio.scsi_status); +} + +static __inline +void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type) +{ + scb->io_ctx->csio.tag_action = type; + if (enabled) + scb->io_ctx->ccb_h.flags |= CAM_TAG_ACTION_VALID; + else + scb->io_ctx->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; +} + +static __inline +u_long ahd_get_transfer_length(struct scb *scb) +{ + return (scb->io_ctx->csio.dxfer_len); +} + +static __inline +int ahd_get_transfer_dir(struct scb *scb) +{ + return (scb->io_ctx->ccb_h.flags & CAM_DIR_MASK); +} + +static __inline +void ahd_set_residual(struct scb *scb, u_long resid) +{ + scb->io_ctx->csio.resid = resid; +} + +static __inline +void ahd_set_sense_residual(struct scb *scb, u_long resid) +{ + scb->io_ctx->csio.sense_resid = resid; +} + +static __inline +u_long ahd_get_residual(struct scb *scb) +{ + return (scb->io_ctx->csio.resid); +} + +static __inline +int ahd_perform_autosense(struct scb *scb) +{ + return (!(scb->io_ctx->ccb_h.flags & CAM_DIS_AUTOSENSE)); +} + +static __inline uint32_t +ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb) +{ + return (sizeof(struct scsi_sense_data)); +} + +static __inline void +ahd_freeze_simq(struct ahd_softc *ahd) +{ + xpt_freeze_simq(ahd->platform_data->sim, /*count*/1); +} + +static __inline void +ahd_release_simq(struct ahd_softc *ahd) +{ + xpt_release_simq(ahd->platform_data->sim, /*run queue*/TRUE); +} + +static __inline void +ahd_freeze_ccb(union ccb *ccb) +{ + if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { + ccb->ccb_h.status |= CAM_DEV_QFRZN; + xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); + } +} + +static __inline void +ahd_freeze_scb(struct scb *scb) +{ + ahd_freeze_ccb(scb->io_ctx); +} + +static __inline void +ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb) +{ + /* Nothing to do here for FreeBSD */ +} + +static __inline int +ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, + char channel, int lun, u_int tag, + role_t role, uint32_t status) +{ + /* Nothing to do here for FreeBSD */ + return (0); +} + +static __inline void +ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb) +{ + /* What do we do to generically handle driver resource shortages??? 
*/ + if ((ahd->flags & AHD_RESOURCE_SHORTAGE) != 0 + && scb->io_ctx != NULL + && (scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { + scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; + ahd->flags &= ~AHD_RESOURCE_SHORTAGE; + } + scb->io_ctx = NULL; +} + +/********************************** PCI ***************************************/ +#ifdef AHD_PCI_CONFIG +static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci, + int reg, int width); +static __inline void ahd_pci_write_config(ahd_dev_softc_t pci, + int reg, uint32_t value, + int width); +static __inline int ahd_get_pci_function(ahd_dev_softc_t); +static __inline int ahd_get_pci_slot(ahd_dev_softc_t); +static __inline int ahd_get_pci_bus(ahd_dev_softc_t); + +int ahd_pci_map_registers(struct ahd_softc *ahd); +int ahd_pci_map_int(struct ahd_softc *ahd); + +static __inline uint32_t +ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width) +{ + return (pci_read_config(pci, reg, width)); +} + +static __inline void +ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width) +{ + pci_write_config(pci, reg, value, width); +} + +static __inline int +ahd_get_pci_function(ahd_dev_softc_t pci) +{ + return (pci_get_function(pci)); +} + +static __inline int +ahd_get_pci_slot(ahd_dev_softc_t pci) +{ + return (pci_get_slot(pci)); +} + +static __inline int +ahd_get_pci_bus(ahd_dev_softc_t pci) +{ + return (pci_get_bus(pci)); +} + +typedef enum +{ + AHD_POWER_STATE_D0, + AHD_POWER_STATE_D1, + AHD_POWER_STATE_D2, + AHD_POWER_STATE_D3 +} ahd_power_state; + +void ahd_power_state_change(struct ahd_softc *ahd, + ahd_power_state new_state); +#endif +/******************************** VL/EISA *************************************/ +int aic7770_map_registers(struct ahd_softc *ahd); +int aic7770_map_int(struct ahd_softc *ahd, int irq); + +/********************************* Debug **************************************/ +static __inline void ahd_print_path(struct ahd_softc *, struct scb *); +static __inline void ahd_platform_dump_card_state(struct ahd_softc *ahd); + +static __inline void +ahd_print_path(struct ahd_softc *ahd, struct scb *scb) +{ + xpt_print_path(scb->io_ctx->ccb_h.path); +} + +static __inline void +ahd_platform_dump_card_state(struct ahd_softc *ahd) +{ + /* Nothing to do here for FreeBSD */ +} +/**************************** Transfer Settings *******************************/ +void ahd_notify_xfer_settings_change(struct ahd_softc *, + struct ahd_devinfo *); +void ahd_platform_set_tags(struct ahd_softc *, struct ahd_devinfo *, + int /*enable*/); + +/************************* Initialization/Teardown ****************************/ +int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg); +void ahd_platform_free(struct ahd_softc *ahd); +int ahd_map_int(struct ahd_softc *ahd); +int ahd_attach(struct ahd_softc *); +int ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd); +int ahd_detach(device_t); + +/****************************** Interrupts ************************************/ +void ahd_platform_intr(void *); +static __inline void ahd_platform_flushwork(struct ahd_softc *ahd); +static __inline void +ahd_platform_flushwork(struct ahd_softc *ahd) +{ +} + +/************************ Misc Function Declarations **************************/ +timeout_t ahd_timeout; +void ahd_done(struct ahd_softc *ahd, struct scb *scb); +void ahd_send_async(struct ahd_softc *, char /*channel*/, + u_int /*target*/, u_int /*lun*/, ac_code, void *arg); +#endif /* _AIC79XX_FREEBSD_H_ */ diff --git a/sys/dev/aic7xxx/aic79xx_pci.c 
b/sys/dev/aic7xxx/aic79xx_pci.c new file mode 100644 index 0000000..f4fd826 --- /dev/null +++ b/sys/dev/aic7xxx/aic79xx_pci.c @@ -0,0 +1,792 @@ +/* + * Product specific probe and attach routines for: + * aic7901 and aic7902 SCSI controllers + * + * Copyright (c) 1994-2001 Justin T. Gibbs. + * Copyright (c) 2000-2001 Adaptec Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#32 $ + * + * $FreeBSD$ + */ + +#ifdef __linux__ +#include "aic79xx_osm.h" +#include "aic79xx_inline.h" +#else +#include <dev/aic7xxx/aic79xx_osm.h> +#include <dev/aic7xxx/aic79xx_inline.h> +#endif + +static __inline uint64_t +ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) +{ + uint64_t id; + + id = subvendor + | (subdevice << 16) + | ((uint64_t)vendor << 32) + | ((uint64_t)device << 48); + + return (id); +} + +#define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull +#define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull +#define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull + +#define ID_AIC7901 0x800F9005FFFF9005ull +#define ID_AIC7901_IROC 0x80089005FFFF9005ull +#define ID_AHA_29320 0x8000900500609005ull +#define ID_AHA_29320LP 0x8000900500409005ull + +#define ID_AIC7902 0x801F9005FFFF9005ull +#define ID_AIC7902_IROC 0x80189005FFFF9005ull +#define ID_AHA_39320 0x8010900500409005ull +#define ID_AHA_39320D 0x8011900500419005ull +#define ID_AHA_39320D_CPQ 0x8011900500AC0E11ull +#define ID_AIC7902_PCI_REV_A3 0x2 +#define ID_AIC7902_PCI_REV_A4 0x3 +#define ID_AIC7902_PCI_REV_B0 0xFF /* Rev Id not yet known. 
*/ +#define SUBID_CPQ 0x0E11 + +#define DEVID_9005_TYPE(id) ((id) & 0xF) +#define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ +#define DEVID_9005_TYPE_HBA_2EXT 0x1 /* 2 External Ports */ +#define DEVID_9005_TYPE_IROC 0x8 /* Raid(0,1,10) Card */ +#define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ + +#define DEVID_9005_MFUNC(id) ((id) & 0x10) + +#define DEVID_9005_PACKETIZED(id) ((id) & 0x8000) + +#define SUBID_9005_TYPE(id) ((id) & 0xF) +#define SUBID_9005_TYPE_HBA 0x0 /* Standard Card */ +#define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ + +#define SUBID_9005_AUTOTERM(id) (((id) & 0x10) == 0) + +#define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20) + +#define SUBID_9005_SEEPTYPE(id) ((id) & 0x0C0) >> 6) +#define SUBID_9005_SEEPTYPE_NONE 0x0 +#define SUBID_9005_SEEPTYPE_4K 0x1 + +static ahd_device_setup_t ahd_aic7901_setup; +static ahd_device_setup_t ahd_aic7902_setup; + +struct ahd_pci_identity ahd_pci_ident_table [] = +{ + /* aic7901 based controllers */ + { + ID_AHA_29320, + ID_ALL_MASK, + "Adaptec 29320 Ultra320 SCSI adapter", + ahd_aic7901_setup + }, + { + ID_AHA_29320LP, + ID_ALL_MASK, + "Adaptec 29320LP Ultra320 SCSI adapter", + ahd_aic7901_setup + }, + /* aic7902 based controllers */ + { + ID_AHA_39320, + ID_ALL_MASK, + "Adaptec 39320 Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320D, + ID_ALL_MASK, + "Adaptec 39320D Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + { + ID_AHA_39320D_CPQ, + ID_ALL_MASK, + "Adaptec (Compaq OEM) 39320D Ultra320 SCSI adapter", + ahd_aic7902_setup + }, + /* Generic chip probes for devices we don't know 'exactly' */ + { + ID_AIC7901 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec aic7901 Ultra320 SCSI adapter", + ahd_aic7901_setup + }, + { + ID_AIC7902 & ID_9005_GENERIC_MASK, + ID_9005_GENERIC_MASK, + "Adaptec aic7902 Ultra320 SCSI adapter", + ahd_aic7902_setup + } +}; + +const u_int ahd_num_pci_devs = NUM_ELEMENTS(ahd_pci_ident_table); + +#define DEVCONFIG 0x40 +#define PCIXINITPAT 0x0000E000ul +#define PCIXINIT_PCI33_66 0x0000E000ul +#define PCIXINIT_PCIX50_66 0x0000C000ul +#define PCIXINIT_PCIX66_100 0x0000A000ul +#define PCIXINIT_PCIX100_133 0x00008000ul +#define PCI_BUS_MODES_INDEX(devconfig) \ + (((devconfig) & PCIXINITPAT) >> 13) +static const char *pci_bus_modes[] = +{ + "PCI bus mode unknown", + "PCI bus mode unknown", + "PCI bus mode unknown", + "PCI bus mode unknown", + "PCI-X 101-133Mhz", + "PCI-X 67-100Mhz", + "PCI-X 50-66Mhz", + "PCI 33 or 66Mhz" +}; + +#define TESTMODE 0x00000800ul +#define IRDY_RST 0x00000200ul +#define FRAME_RST 0x00000100ul +#define PCI64BIT 0x00000080ul +#define MRDCEN 0x00000040ul +#define ENDIANSEL 0x00000020ul +#define MIXQWENDIANEN 0x00000008ul +#define DACEN 0x00000004ul +#define STPWLEVEL 0x00000002ul +#define QWENDIANSEL 0x00000001ul + +#define DEVCONFIG1 0x44 +#define PREQDIS 0x01 + +#define CSIZE_LATTIME 0x0c +#define CACHESIZE 0x000000fful +#define LATTIME 0x0000ff00ul + +static int ahd_check_extport(struct ahd_softc *ahd); +static void ahd_configure_termination(struct ahd_softc *ahd, + u_int adapter_control); +static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat); + +struct ahd_pci_identity * +ahd_find_pci_device(ahd_dev_softc_t pci) +{ + uint64_t full_id; + uint16_t device; + uint16_t vendor; + uint16_t subdevice; + uint16_t subvendor; + struct ahd_pci_identity *entry; + u_int i; + + vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); + device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); + subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, 
/*bytes*/2); + subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); + full_id = ahd_compose_id(device, + vendor, + subdevice, + subvendor); + + for (i = 0; i < ahd_num_pci_devs; i++) { + entry = &ahd_pci_ident_table[i]; + if (entry->full_id == (full_id & entry->id_mask)) { + /* Honor exclusion entries. */ + if (entry->name == NULL) + return (NULL); + return (entry); + } + } + return (NULL); +} + +int +ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) +{ + struct scb_data *shared_scb_data; + u_long l; + u_long s; + u_int command; + uint32_t devconfig; + uint16_t subvendor; + int error; + + shared_scb_data = NULL; + error = entry->setup(ahd); + if (error != 0) + return (error); + + ahd->description = entry->name; + devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); + if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) { + ahd->chip |= AHD_PCI; + /* Disable PCIX workarounds when running in PCI mode. */ + ahd->bugs &= ~AHD_PCIX_BUG_MASK; + } else { + ahd->chip |= AHD_PCIX; + } + ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)]; + + /* + * Record if this is a Compaq board. + */ + subvendor = ahd_pci_read_config(ahd->dev_softc, + PCIR_SUBVEND_0, /*bytes*/2); + if (subvendor == SUBID_CPQ) + ahd->flags |= AHD_CPQ_BOARD; + + ahd_power_state_change(ahd, AHD_POWER_STATE_D0); + + error = ahd_pci_map_registers(ahd); + if (error != 0) + return (error); + + /* + * If we need to support high memory, enable dual + * address cycles. This bit must be set to enable + * high address bit generation even if we are on a + * 64bit bus (PCI64BIT set in devconfig). + */ + if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { + uint32_t devconfig; + + if (bootverbose) + printf("%s: Enabling 39Bit Addressing\n", + ahd_name(ahd)); + devconfig = ahd_pci_read_config(ahd->dev_softc, + DEVCONFIG, /*bytes*/4); + devconfig |= DACEN; + ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, + devconfig, /*bytes*/4); + } + + /* Ensure busmastering is enabled */ + command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1); + command |= PCIM_CMD_BUSMASTEREN; + ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/1); + + error = ahd_softc_init(ahd); + if (error != 0) + return (error); + + ahd->bus_intr = ahd_pci_intr; + + error = ahd_reset(ahd); + if (error != 0) + return (ENXIO); + + ahd->pci_cachesize = + ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, + /*bytes*/1) & CACHESIZE; + ahd->pci_cachesize *= 4; + + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + /* See if we have a SEEPROM and perform auto-term */ + error = ahd_check_extport(ahd); + if (error != 0) + return (error); + + /* Core initialization */ + error = ahd_init(ahd); + if (error != 0) + return (error); + + /* + * Allow interrupts now that we are completely setup. + */ + error = ahd_pci_map_int(ahd); + if (error != 0) + return (error); + + ahd_list_lock(&l); + /* + * Link this softc in with all other ahd instances. + */ + ahd_softc_insert(ahd); + + ahd_lock(ahd, &s); + ahd_intr_enable(ahd, TRUE); + ahd_unlock(ahd, &s); + + ahd_list_unlock(&l); + return (0); +} + +/* + * Check the external port logic for a serial eeprom + * and termination/cable detection contrls. 
+ */ +static int +ahd_check_extport(struct ahd_softc *ahd) +{ + struct seeprom_config *sc; + u_int adapter_control; + int have_seeprom; + int error; + + sc = ahd->seep_config; + have_seeprom = ahd_acquire_seeprom(ahd); + if (have_seeprom) { + u_int start_addr; + + if (bootverbose) + printf("%s: Reading SEEPROM...", ahd_name(ahd)); + + /* Address is always in units of 16bit words */ + start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A'); + + error = ahd_read_seeprom(ahd, (uint16_t *)sc, + start_addr, sizeof(*sc)/2); + + if (error != 0) { + printf("Unable to read SEEPROM\n"); + have_seeprom = 0; + } else { + have_seeprom = ahd_verify_cksum(sc); + + if (bootverbose) { + if (have_seeprom == 0) + printf ("checksum error\n"); + else + printf ("done.\n"); + } + } + ahd_release_seeprom(ahd); + } + + if (!have_seeprom) { + u_int nvram_scb; + + /* + * Pull scratch ram settings and treat them as + * if they are the contents of an seeprom if + * the 'ADPT', 'BIOS', or 'ASPI' signature is found + * in SCB 0xFF. We manually compose the data as 16bit + * values to avoid endian issues. + */ + ahd_set_scbptr(ahd, 0xFF); + nvram_scb = ahd_inb_scbram(ahd, SCB_BASE + NVRAM_SCB_OFFSET); + printf("nvram_scb == 0x%x\n", nvram_scb); + printf("SCBPTR == 0x%x\n", ahd_get_scbptr(ahd)); + printf("Signature = %c%c%c%c\n", + ahd_inb_scbram(ahd, SCB_BASE + 0), + ahd_inb_scbram(ahd, SCB_BASE + 1), + ahd_inb_scbram(ahd, SCB_BASE + 2), + ahd_inb_scbram(ahd, SCB_BASE + 3)); + if (nvram_scb != 0xFF + && ((ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' + && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'D' + && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' + && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'T') + || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'B' + && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'I' + && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'O' + && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'S') + || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' + && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'S' + && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' + && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'I'))) { + uint16_t *sc_data; + int i; + + ahd_set_scbptr(ahd, nvram_scb); + sc_data = (uint16_t *)sc; + for (i = 0; i < 64; i += 2) + *sc_data++ = ahd_inw_scbram(ahd, SCB_BASE+i); + have_seeprom = ahd_verify_cksum(sc); + if (have_seeprom) + ahd->flags |= AHD_SCB_CONFIG_USED; + } + } + +#if AHD_DEBUG + if (have_seeprom != 0 + && (ahd_debug & AHD_DUMP_SEEPROM) != 0) { + uint8_t *sc_data; + int i; + + printf("%s: Seeprom Contents:", ahd_name(ahd)); + sc_data = (uint8_t *)sc; + for (i = 0; i < (sizeof(*sc)); i += 2) + printf("\n\t0x%.4x", + sc_data[i] | (sc_data[i+1] << 8)); + printf("\n"); + } +#endif + + if (!have_seeprom) { + if (bootverbose) + printf("%s: No SEEPROM available.\n", ahd_name(ahd)); + ahd->flags |= AHD_USEDEFAULTS; + error = ahd_default_config(ahd); + adapter_control = CFAUTOTERM|CFSEAUTOTERM; + free(ahd->seep_config, M_DEVBUF); + ahd->seep_config = NULL; + } else { + error = ahd_parse_cfgdata(ahd, sc); + adapter_control = sc->adapter_control; + } + if (error != 0) + return (error); + + ahd_configure_termination(ahd, adapter_control); + + return (0); +} + +static void +ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control) +{ + int error; + u_int sxfrctl1; + uint8_t termctl; + uint32_t devconfig; + + devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); + devconfig &= ~STPWLEVEL; + if ((ahd->flags & AHD_STPWLEVEL_A) != 0) { + devconfig |= STPWLEVEL; + } + ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); + + /* Make 
sure current sensing is off. */ + if ((ahd->flags & AHD_CURRENT_SENSING) != 0) { + (void)ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); + } + + /* + * Read to sense. Write to set. + */ + error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl); + if ((adapter_control & CFAUTOTERM) == 0) { + if (bootverbose) + printf("%s: Manual Primary Termination\n", + ahd_name(ahd)); + termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH); + if ((adapter_control & CFSTERM) != 0) + termctl |= FLX_TERMCTL_ENPRILOW; + if ((adapter_control & CFWSTERM) != 0) + termctl |= FLX_TERMCTL_ENPRIHIGH; + } else if (error != 0) { + printf("%s: Primary Auto-Term Sensing failed! " + "Using Defaults.\n", ahd_name(ahd)); + termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH; + } + + if ((adapter_control & CFSEAUTOTERM) == 0) { + if (bootverbose) + printf("%s: Manual Secondary Termination\n", + ahd_name(ahd)); + termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH); + if ((adapter_control & CFSELOWTERM) != 0) + termctl |= FLX_TERMCTL_ENSECLOW; + if ((adapter_control & CFSEHIGHTERM) != 0) + termctl |= FLX_TERMCTL_ENSECHIGH; + } else if (error != 0) { + printf("%s: Secondary Auto-Term Sensing failed! " + "Using Defaults.\n", ahd_name(ahd)); + termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH; + } + + /* + * Now set the termination based on what we found. + */ + sxfrctl1 = ahd_inb(ahd, SXFRCTL1) & ~STPWEN; + if ((termctl & FLX_TERMCTL_ENPRILOW) != 0) { + ahd->flags |= AHD_TERM_ENB_A; + sxfrctl1 |= STPWEN; + } + /* Must set the latch once in order to be effective. */ + ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); + ahd_outb(ahd, SXFRCTL1, sxfrctl1); + + error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl); + if (error != 0) { + printf("%s: Unable to set termination settings!\n", + ahd_name(ahd)); + } else if (bootverbose) { + printf("%s: Primary High byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis"); + + printf("%s: Primary Low byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis"); + + printf("%s: Secondary High byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis"); + + printf("%s: Secondary Low byte termination %sabled\n", + ahd_name(ahd), + (termctl & FLX_TERMCTL_ENSECLOW) ? 
"En" : "Dis"); + } + return; +} + +#define DPE 0x80 +#define SSE 0x40 +#define RMA 0x20 +#define RTA 0x10 +#define STA 0x08 +#define DPR 0x01 + +static const char *split_status_source[] = +{ + "DFF0", + "DFF1", + "OVLY", + "CMC", +}; + +static const char *pci_status_source[] = +{ + "DFF0", + "DFF1", + "SG", + "CMC", + "OVLY", + "NONE", + "MSI", + "TARG" +}; + +static const char *split_status_strings[] = +{ + "%s: Received split response in %s.\n" + "%s: Received split completion error message in %s\n", + "%s: Receive overrun in %s\n", + "%s: Count not complete in %s\n", + "%s: Split completion data bucket in %s\n", + "%s: Split completion address error in %s\n", + "%s: Split completion byte count error in %s\n", + "%s: Signaled Target-abort to early terminate a split in %s\n", +}; + +static const char *pci_status_strings[] = +{ + "%s: Data Parity Error has been reported via PERR# in %s\n", + "%s: Target initial wait state error in %s\n", + "%s: Split completion read data parity error in %s\n", + "%s: Split completion address attribute parity error in %s\n", + "%s: Received a Target Abort in %s\n", + "%s: Received a Master Abort in %s\n", + "%s: Signal System Error Detected in %s\n", + "%s: Address or Write Phase Parity Error Detected in %s.\n" +}; + +void +ahd_pci_intr(struct ahd_softc *ahd) +{ + uint8_t pci_status[8]; + ahd_mode_state saved_modes; + u_int pci_status1; + u_int intstat; + u_int i; + u_int reg; + + intstat = ahd_inb(ahd, INTSTAT); + + if ((intstat & SPLTINT) != 0) + ahd_pci_split_intr(ahd, intstat); + + if ((intstat & PCIINT) == 0) + return; + + printf("%s: PCI error Interrupt\n", ahd_name(ahd)); + saved_modes = ahd_save_modes(ahd); + ahd_dump_card_state(ahd); + ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); + for (i = 0, reg = DF0PCISTAT; i < 8; i++, reg++) { + + if (i == 5) + continue; + pci_status[i] = ahd_inb(ahd, reg); + /* Clear latched errors. So our interupt deasserts. */ + ahd_outb(ahd, reg, pci_status[i]); + } + + for (i = 0; i < 8; i++) { + u_int bit; + + if (i == 5) + continue; + + for (bit = 0; bit < 8; bit++) { + + if ((pci_status[i] & (0x1 << bit)) != 0) { + static const char *s; + + s = pci_status_strings[bit]; + if (i == 7/*TARG*/ && bit == 3) + s = "%s: Signal Target Abort\n"; + printf(s, ahd_name(ahd), pci_status_source[i]); + } + } + } + pci_status1 = ahd_pci_read_config(ahd->dev_softc, + PCIR_STATUS + 1, /*bytes*/1); + ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + pci_status1, /*bytes*/1); + ahd_restore_modes(ahd, saved_modes); + ahd_unpause(ahd); +} + +static void +ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) +{ + uint8_t split_status[4]; + uint8_t split_status1[4]; + uint8_t sg_split_status[2]; + uint8_t sg_split_status1[2]; + ahd_mode_state saved_modes; + u_int i; + uint16_t pcix_status; + + /* + * Check for splits in all modes. Modes 0 and 1 + * additionally have SG engine splits to look at. + */ + pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS, + /*bytes*/2); + printf("%s: PCI Split Interrupt - PCI-X status = 0x%x\n", + ahd_name(ahd), pcix_status); + saved_modes = ahd_save_modes(ahd); + for (i = 0; i < 4; i++) { + ahd_set_modes(ahd, i, i); + + split_status[i] = ahd_inb(ahd, DCHSPLTSTAT0); + split_status1[i] = ahd_inb(ahd, DCHSPLTSTAT1); + /* Clear latched errors. So our interupt deasserts. 
*/ + ahd_outb(ahd, DCHSPLTSTAT0, split_status[i]); + ahd_outb(ahd, DCHSPLTSTAT1, split_status1[i]); + if (i != 0) + continue; + sg_split_status[i] = ahd_inb(ahd, SGSPLTSTAT0); + sg_split_status1[i] = ahd_inb(ahd, SGSPLTSTAT1); + /* Clear latched errors. So our interupt deasserts. */ + ahd_outb(ahd, SGSPLTSTAT0, sg_split_status[i]); + ahd_outb(ahd, SGSPLTSTAT1, sg_split_status1[i]); + } + + for (i = 0; i < 4; i++) { + u_int bit; + + for (bit = 0; bit < 8; bit++) { + + if ((split_status[i] & (0x1 << bit)) != 0) { + static const char *s; + + s = split_status_strings[bit]; + printf(s, ahd_name(ahd), + split_status_source[i]); + } + + if (i != 0) + continue; + + if ((sg_split_status[i] & (0x1 << bit)) != 0) { + static const char *s; + + s = split_status_strings[bit]; + printf(s, ahd_name(ahd), "SG"); + } + } + } + /* + * Clear PCI-X status bits. + */ + ahd_pci_write_config(ahd->dev_softc, PCIXR_STATUS, + pcix_status, /*bytes*/2); + ahd_restore_modes(ahd, saved_modes); +} + +static int +ahd_aic7901_setup(struct ahd_softc *ahd) +{ + ahd_dev_softc_t pci; + + pci = ahd->dev_softc; + ahd->channel = 'A'; + ahd->chip = AHD_AIC7901; + ahd->features = AHD_AIC7901_FE; + return (0); +} + +static int +ahd_aic7902_setup(struct ahd_softc *ahd) +{ + ahd_dev_softc_t pci; + u_int rev; + u_int devconfig1; + + pci = ahd->dev_softc; + rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + if (rev < ID_AIC7902_PCI_REV_A3) { + printf("%s: Unable to attach to unsupported chip revision %d\n", + ahd_name(ahd), rev); + ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/1); + return (ENXIO); + } + if (rev < ID_AIC7902_PCI_REV_B0) { + /* + * Pending request assertion does not work on the A if we have + * DMA requests outstanding on both channels. See H2A3 Razors + * #327 and #365. + */ + devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); + ahd_pci_write_config(pci, DEVCONFIG1, + devconfig1|PREQDIS, /*bytes*/1); + devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); + /* + * Enable A series workarounds. + */ + ahd->bugs |= AHD_SENT_SCB_UPDATE_BUG|AHD_ABORT_LQI_BUG + | AHD_PKT_BITBUCKET_BUG|AHD_LONG_SETIMO_BUG + | AHD_NLQICRC_DELAYED_BUG|AHD_SCSIRST_BUG + | AHD_LQO_ATNO_BUG|AHD_AUTOFLUSH_BUG + | AHD_CLRLQO_AUTOCLR_BUG|AHD_PCIX_MMAPIO_BUG + | AHD_PCIX_CHIPRST_BUG|AHD_PKTIZED_STATUS_BUG; + if (rev < ID_AIC7902_PCI_REV_A4) + ahd->bugs |= AHD_PCIX_ARBITER_BUG|AHD_PCIX_SPLIT_BUG; + } + + ahd->channel = ahd_get_pci_function(pci) + 'A'; + ahd->chip = AHD_AIC7902; + ahd->features = AHD_AIC7902_FE; + return (0); +} |