author:    msmith <msmith@FreeBSD.org>    2001-02-25 22:48:34 +0000
committer: msmith <msmith@FreeBSD.org>    2001-02-25 22:48:34 +0000
commit:    e5b2c99d71956a1a73efdfbefb3ed19bd7d00d3b
tree:      92c016de15d655b019e93776c4543e7d28f396b8 /sys/dev/mly
parent:    4e1aac88bdd20610856392647e751f1cd18eef78
Major update and bugfix for the 'mly' driver.
- Convert to a more efficient queueing implementation.
- Don't allocate command buffers on the fly; simply work from a
static pool.
- Add a control device interface, for later use.
- Handle controller overload better as a consequence of the
improved queue implementation.
- Add support for the XPT_GET_TRAN_SETTINGS ccb, and correctly
set the virtual SCSI channels up for multiple outstanding I/Os.
- Update copyrights for 2001.
- Some whitespace fixes to improve readability.
Due to a misunderstanding on my part, previous versions of the
driver were limited to a single outstanding I/O per virtual drive.
Needless to say, this update improves performance substantially.
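
The "more efficient queueing implementation" mentioned above refers to the macro-generated, statistics-gathering queue primitives added to mlyvar.h (see MLYQ_COMMAND_QUEUE, MLYQ_ADD and MLYQ_REMOVE in the diff below). The following is a minimal, standalone sketch of that pattern, assuming a userland build with <sys/queue.h>; the type and function names here are simplified stand-ins rather than the driver's own, and the splcam()/splx() interrupt protection of the real code is omitted.

```c
#include <sys/queue.h>
#include <stdio.h>

struct cmd {
    int              id;
    TAILQ_ENTRY(cmd) link;
};

struct qstat {
    unsigned length;
    unsigned max;
};

#define Q_FREE  0
#define Q_READY 1
#define Q_COUNT 2

struct softc {
    TAILQ_HEAD(, cmd) free_q;
    TAILQ_HEAD(, cmd) ready_q;
    struct qstat      qstat[Q_COUNT];
};

/*
 * One macro generates init/enqueue/dequeue helpers for a named queue and
 * keeps per-queue length and high-water statistics, mirroring the pattern
 * used by MLYQ_COMMAND_QUEUE in mlyvar.h.
 */
#define CMD_QUEUE(name, index)                                          \
static inline void                                                      \
initq_ ## name(struct softc *sc)                                        \
{                                                                       \
    TAILQ_INIT(&sc->name ## _q);                                        \
    sc->qstat[index].length = sc->qstat[index].max = 0;                 \
}                                                                       \
static inline void                                                      \
enqueue_ ## name(struct softc *sc, struct cmd *c)                       \
{                                                                       \
    TAILQ_INSERT_TAIL(&sc->name ## _q, c, link);                        \
    if (++sc->qstat[index].length > sc->qstat[index].max)               \
        sc->qstat[index].max = sc->qstat[index].length;                 \
}                                                                       \
static inline struct cmd *                                              \
dequeue_ ## name(struct softc *sc)                                      \
{                                                                       \
    struct cmd *c;                                                      \
                                                                        \
    if ((c = TAILQ_FIRST(&sc->name ## _q)) != NULL) {                   \
        TAILQ_REMOVE(&sc->name ## _q, c, link);                         \
        sc->qstat[index].length--;                                      \
    }                                                                   \
    return (c);                                                         \
}                                                                       \
struct hack

CMD_QUEUE(free, Q_FREE);
CMD_QUEUE(ready, Q_READY);

int
main(void)
{
    struct softc sc;
    struct cmd pool[4], *c;
    int i;

    initq_free(&sc);
    initq_ready(&sc);
    for (i = 0; i < 4; i++) {           /* work from a static pool, no malloc */
        pool[i].id = i;
        enqueue_free(&sc, &pool[i]);
    }
    while ((c = dequeue_free(&sc)) != NULL) /* "allocate" and ready each command */
        enqueue_ready(&sc, c);
    printf("free high-water %u, ready high-water %u\n",
        sc.qstat[Q_FREE].max, sc.qstat[Q_READY].max);
    return (0);
}
```

Generating the primitives from a single macro keeps every queue's accounting consistent, which is what makes the mly_print_controller() DDB statistics dump in the diff below cheap to maintain.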
Diffstat (limited to 'sys/dev/mly')
 sys/dev/mly/mly.c     | 466
 sys/dev/mly/mly_cam.c | 107
 sys/dev/mly/mly_pci.c |  41
 sys/dev/mly/mlyio.h   |  72
 sys/dev/mly/mlyvar.h  | 267
 5 files changed, 599 insertions(+), 354 deletions(-)
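
Before the diff itself, a note on the core structural change: commands now live in a fixed array allocated at attach time, each permanently bound to the slot number the controller echoes back on completion, so mly_done() can index straight into the array instead of searching a busy-command table. A minimal sketch of that lookup scheme follows, using hypothetical simplified names and constants (the driver's own are MLY_SLOT_START and MLY_MAXCOMMANDS):

```c
#include <assert.h>

#define SLOT_START  3       /* hypothetical value; low slots are reserved */
#define MAXCOMMANDS 256

struct command {
    unsigned short slot;    /* command_id the controller echoes back */
    int            status;
};

struct controller {
    struct command command[MAXCOMMANDS];    /* fixed pool, allocated once */
};

static void
init_commands(struct controller *sc)
{
    int i;

    /* Bind each command to its slot for the life of the driver. */
    for (i = 0; i < MAXCOMMANDS; i++)
        sc->command[i].slot = SLOT_START + i;
}

/* Completion: constant-time lookup from the slot in the status mailbox. */
static struct command *
lookup_by_slot(struct controller *sc, unsigned short slot)
{
    assert(slot >= SLOT_START && slot < SLOT_START + MAXCOMMANDS);
    return (&sc->command[slot - SLOT_START]);
}

int
main(void)
{
    static struct controller sc;

    init_commands(&sc);
    assert(lookup_by_slot(&sc, 42) == &sc.command[42 - SLOT_START]);
    assert(lookup_by_slot(&sc, 42)->slot == 42);
    return (0);
}
```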
diff --git a/sys/dev/mly/mly.c b/sys/dev/mly/mly.c index 71ad591..fd3d357 100644 --- a/sys/dev/mly/mly.c +++ b/sys/dev/mly/mly.c @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2000 Michael Smith + * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * @@ -34,6 +34,8 @@ #include <sys/bus.h> #include <sys/conf.h> #include <sys/ctype.h> +#include <sys/ioccom.h> +#include <sys/stat.h> #include <machine/bus_memio.h> #include <machine/bus.h> @@ -43,6 +45,7 @@ #include <cam/scsi/scsi_all.h> #include <dev/mly/mlyreg.h> +#include <dev/mly/mlyio.h> #include <dev/mly/mlyvar.h> #define MLY_DEFINE_TABLES #include <dev/mly/mly_tables.h> @@ -65,9 +68,8 @@ static int mly_immediate_command(struct mly_command *mc); static int mly_start(struct mly_command *mc); static void mly_complete(void *context, int pending); -static int mly_get_slot(struct mly_command *mc); -static void mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); -static void mly_alloc_command_cluster(struct mly_softc *sc); +static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); +static int mly_alloc_commands(struct mly_softc *sc); static void mly_map_command(struct mly_command *mc); static void mly_unmap_command(struct mly_command *mc); @@ -80,6 +82,32 @@ static void mly_print_command(struct mly_command *mc); static void mly_print_packet(struct mly_command *mc); static void mly_panic(struct mly_softc *sc, char *reason); #endif +void mly_print_controller(int controller); + +static d_open_t mly_user_open; +static d_close_t mly_user_close; +static d_ioctl_t mly_user_ioctl; +static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc); +static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh); + +#define MLY_CDEV_MAJOR 158 + +static struct cdevsw mly_cdevsw = { + mly_user_open, + mly_user_close, + noread, + nowrite, + mly_user_ioctl, + nopoll, + nommap, + nostrategy, + "mly", + MLY_CDEV_MAJOR, + nodump, + nopsize, + 0, + -1 +}; /******************************************************************************** ******************************************************************************** @@ -100,10 +128,10 @@ mly_attach(struct mly_softc *sc) /* * Initialise per-controller queues. */ - TAILQ_INIT(&sc->mly_freecmds); - TAILQ_INIT(&sc->mly_ready); - TAILQ_INIT(&sc->mly_completed); - TAILQ_INIT(&sc->mly_clusters); + mly_initq_free(sc); + mly_initq_ready(sc); + mly_initq_busy(sc); + mly_initq_complete(sc); #if __FreeBSD_version >= 500005 /* @@ -124,10 +152,10 @@ mly_attach(struct mly_softc *sc) return(error); /* - * Initialise the slot allocator so that we can issue commands. + * Allocate command buffers */ - sc->mly_max_commands = MLY_SLOT_MAX; - sc->mly_last_slot = MLY_SLOT_START; + if ((error = mly_alloc_commands(sc))) + return(error); /* * Obtain controller feature information @@ -136,11 +164,6 @@ mly_attach(struct mly_softc *sc) return(error); /* - * Update the slot allocator limit based on the controller inquiry. - */ - sc->mly_max_commands = imin(sc->mly_controllerinfo->maximum_parallel_commands, MLY_SLOT_MAX); - - /* * Get the current event counter for health purposes, populate the initial * health status buffer. */ @@ -177,6 +200,13 @@ mly_attach(struct mly_softc *sc) */ mly_periodic((void *)sc); + /* + * Create the control device. 
+ */ + sc->mly_dev_t = make_dev(&mly_cdevsw, device_get_unit(sc->mly_dev), UID_ROOT, GID_OPERATOR, + S_IRUSR | S_IWUSR, "mly%d", device_get_unit(sc->mly_dev)); + sc->mly_dev_t->si_drv1 = sc; + /* enable interrupts now */ MLY_UNMASK_INTERRUPTS(sc); @@ -370,6 +400,8 @@ mly_complete_rescan(struct mly_command *mc) sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL; /* clears all other flags */ sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL; sc->mly_btl[bus][target].mb_state = pdi->state; + sc->mly_btl[bus][target].mb_speed = pdi->speed; + sc->mly_btl[bus][target].mb_width = pdi->width; if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED) sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED; debug(2, "BTL rescan for %d:%d returns %s", bus, target, @@ -438,9 +470,12 @@ mly_enable_mmbox(struct mly_softc *sc) bzero(&mci, sizeof(mci)); mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX; /* set buffer addresses */ - mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command); - mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status); - mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health); + mci.param.setmemorymailbox.command_mailbox_physaddr = + sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command); + mci.param.setmemorymailbox.status_mailbox_physaddr = + sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status); + mci.param.setmemorymailbox.health_buffer_physaddr = + sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health); /* set buffer sizes - abuse of data_size field is revolting */ sp = (u_int8_t *)&mci.data_size; @@ -451,7 +486,8 @@ mly_enable_mmbox(struct mly_softc *sc) debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox, mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0], mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1], - mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size); + mci.param.setmemorymailbox.health_buffer_physaddr, + mci.param.setmemorymailbox.health_buffer_size); if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) return(error); @@ -806,13 +842,14 @@ mly_immediate_command(struct mly_command *mc) if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) { /* sleep on the command */ - while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE) { + while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { tsleep(mc, PRIBIO, "mlywait", 0); } } else { /* spin and collect status while we do */ - while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE) + while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { mly_done(mc->mc_sc); + } } splx(s); return(0); @@ -864,14 +901,13 @@ mly_start(struct mly_command *mc) debug_called(2); /* - * Set the command up for delivery to the controller. This may fail - * due to resource shortages. + * Set the command up for delivery to the controller. */ - if (mly_get_slot(mc)) - return(EBUSY); mly_map_command(mc); + mc->mc_packet->generic.command_id = mc->mc_slot; s = splcam(); + /* * Do we have to use the hardware mailbox? */ @@ -883,7 +919,8 @@ mly_start(struct mly_command *mc) splx(s); return(EBUSY); } - + mc->mc_flags |= MLY_CMD_BUSY; + /* * It's ready, send the command. 
*/ @@ -894,11 +931,12 @@ mly_start(struct mly_command *mc) pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index]; - /* check to see if the next slot is free yet */ + /* check to see if the next index is free yet */ if (pkt->mmbox.flag != 0) { splx(s); return(EBUSY); } + mc->mc_flags |= MLY_CMD_BUSY; /* copy in new command */ bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data)); @@ -914,6 +952,7 @@ mly_start(struct mly_command *mc) sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS; } + mly_enqueue_busy(mc); splx(s); return(0); } @@ -936,17 +975,14 @@ mly_done(struct mly_softc *sc) if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) { slot = MLY_GET_REG2(sc, sc->mly_status_mailbox); if (slot < MLY_SLOT_MAX) { - mc = sc->mly_busycmds[slot]; - if (mc != NULL) { - mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2); - mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3); - mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4); - mly_enqueue_completed(mc); - sc->mly_busycmds[slot] = NULL; - worked = 1; - } else { - mly_printf(sc, "got HM completion for nonbusy slot %u\n", slot); - } + mc = &sc->mly_command[slot - MLY_SLOT_START]; + mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2); + mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3); + mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4); + mly_remove_busy(mc); + mc->mc_flags &= ~MLY_CMD_BUSY; + mly_enqueue_complete(mc); + worked = 1; } else { /* slot 0xffff may mean "extremely bogus command" */ mly_printf(sc, "got HM completion for illegal slot %u\n", slot); @@ -968,23 +1004,21 @@ mly_done(struct mly_softc *sc) /* get slot number */ slot = sp->status.command_id; if (slot < MLY_SLOT_MAX) { - mc = sc->mly_busycmds[slot]; - if (mc != NULL) { - mc->mc_status = sp->status.status; - mc->mc_sense = sp->status.sense_length; - mc->mc_resid = sp->status.residue; - mly_enqueue_completed(mc); - sc->mly_busycmds[slot] = NULL; - worked = 1; - } else { - mly_printf(sc, "got AM completion for nonbusy slot %u\n", slot); - } + mc = &sc->mly_command[slot - MLY_SLOT_START]; + mc->mc_status = sp->status.status; + mc->mc_sense = sp->status.sense_length; + mc->mc_resid = sp->status.residue; + mly_remove_busy(mc); + mc->mc_flags &= ~MLY_CMD_BUSY; + mly_enqueue_complete(mc); + worked = 1; } else { /* slot 0xffff may mean "extremely bogus command" */ - mly_printf(sc, "got AM completion for illegal slot %u at %d\n", slot, sc->mly_mmbox_status_index); + mly_printf(sc, "got AM completion for illegal slot %u at %d\n", + slot, sc->mly_mmbox_status_index); } - /* clear and move to next slot */ + /* clear and move to next index */ sp->mmbox.flag = 0; sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS; } @@ -1019,7 +1053,7 @@ mly_complete(void *context, int pending) /* * Spin pulling commands off the completed queue and processing them. */ - while ((mc = mly_dequeue_completed(sc)) != NULL) { + while ((mc = mly_dequeue_complete(sc)) != NULL) { /* * Free controller resources, mark command complete. @@ -1031,7 +1065,7 @@ mly_complete(void *context, int pending) */ mly_unmap_command(mc); mc_complete = mc->mc_complete; - MLY_CMD_SETSTATE(mc, MLY_CMD_COMPLETE); + mc->mc_flags |= MLY_CMD_COMPLETE; /* * Call completion handler or wake up sleeping consumer. 
@@ -1063,6 +1097,9 @@ mly_complete(void *context, int pending) debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change, sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event); sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event; + + /* wake up anyone that might be interested in this */ + wakeup(&sc->mly_event_change); } if (sc->mly_event_counter != sc->mly_event_waiting) mly_fetch_event(sc); @@ -1075,54 +1112,6 @@ mly_complete(void *context, int pending) ********************************************************************************/ /******************************************************************************** - * Give a command a slot in our lookup table, so that we can recover it when - * the controller returns the slot number. - * - * Slots are freed in mly_done(). - */ -static int -mly_get_slot(struct mly_command *mc) -{ - struct mly_softc *sc = mc->mc_sc; - u_int16_t slot; - int tries; - - debug_called(3); - - if (mc->mc_flags & MLY_CMD_SLOTTED) - return(0); - - /* - * Optimisation for the controller-busy case - check to see whether - * we are already over the limit and stop immediately. - */ - if (sc->mly_busy_count >= sc->mly_max_commands) - return(EBUSY); - - /* - * Scan forward from the last slot that we assigned looking for a free - * slot. Don't scan more than the maximum number of commands that we - * support (we should never reach the limit here due to the optimisation - * above) - */ - slot = sc->mly_last_slot; - for (tries = sc->mly_max_commands; tries > 0; tries--) { - if (sc->mly_busycmds[slot] == NULL) { - sc->mly_busycmds[slot] = mc; - mc->mc_slot = slot; - mc->mc_packet->generic.command_id = slot; - mc->mc_flags |= MLY_CMD_SLOTTED; - sc->mly_last_slot = slot; - return(0); - } - slot++; - if (slot >= MLY_SLOT_MAX) - slot = MLY_SLOT_START; - } - return(EBUSY); -} - -/******************************************************************************** * Allocate a command. */ int @@ -1132,17 +1121,9 @@ mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp) debug_called(3); - if ((mc = mly_dequeue_free(sc)) == NULL) { - mly_alloc_command_cluster(sc); - mc = mly_dequeue_free(sc); - } - if (mc != NULL) - TAILQ_REMOVE(&sc->mly_freecmds, mc, mc_link); - - if (mc == NULL) + if ((mc = mly_dequeue_free(sc)) == NULL) return(ENOMEM); - MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP); *mcp = mc; return(0); } @@ -1159,7 +1140,6 @@ mly_release_command(struct mly_command *mc) * Fill in parts of the command that may cause confusion if * a consumer doesn't when we are later allocated. */ - MLY_CMD_SETSTATE(mc, MLY_CMD_FREE); mc->mc_data = NULL; mc->mc_flags = 0; mc->mc_complete = NULL; @@ -1176,66 +1156,55 @@ mly_release_command(struct mly_command *mc) } /******************************************************************************** - * Map helper for command cluster allocation. - * - * Note that there are never more command packets in a cluster than will fit in - * a page, so there is no need to look at anything other than the base of the - * allocation (which will be page-aligned). + * Map helper for command allocation. 
*/ static void -mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) +mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { - struct mly_command_cluster *mcc = (struct mly_command_cluster *)arg; + struct mly_softc *sc = (struct mly_softc *)arg debug_called(2); - mcc->mcc_packetphys = segs[0].ds_addr; + sc->mly_packetphys = segs[0].ds_addr; } /******************************************************************************** - * Allocate and initialise a cluster of commands. + * Allocate and initialise command and packet structures. */ -static void -mly_alloc_command_cluster(struct mly_softc *sc) +static int +mly_alloc_commands(struct mly_softc *sc) { - struct mly_command_cluster *mcc; struct mly_command *mc; int i; - debug_called(1); - - mcc = malloc(sizeof(struct mly_command_cluster), M_DEVBUF, M_NOWAIT); - if (mcc != NULL) { - - /* - * Allocate enough space for all the command packets for this cluster and - * map them permanently into controller-visible space. - */ - if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&mcc->mcc_packet, - BUS_DMA_NOWAIT, &mcc->mcc_packetmap)) { - free(mcc, M_DEVBUF); - return; - } - bus_dmamap_load(sc->mly_packet_dmat, mcc->mcc_packetmap, mcc->mcc_packet, - MLY_CMD_CLUSTERCOUNT * sizeof(union mly_command_packet), - mly_alloc_command_cluster_map, mcc, 0); - - mly_enqueue_cluster(sc, mcc); - for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++) { - mc = &mcc->mcc_command[i]; - bzero(mc, sizeof(*mc)); - mc->mc_sc = sc; - mc->mc_packet = mcc->mcc_packet + i; - mc->mc_packetphys = mcc->mcc_packetphys + (i * sizeof(union mly_command_packet)); - if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap)) - mly_release_command(mc); - } + /* + * Allocate enough space for all the command packets in one chunk and + * map them permanently into controller-visible space. + */ + if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet, + BUS_DMA_NOWAIT, &sc->mly_packetmap)) { + return(ENOMEM); } + bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet, + MLY_MAXCOMMANDS * sizeof(union mly_command_packet), + mly_alloc_commands_map, sc, 0); + + for (i = 0; i < MLY_MAXCOMMANDS; i++) { + mc = &sc->mly_command[i]; + bzero(mc, sizeof(*mc)); + mc->mc_sc = sc; + mc->mc_slot = MLY_SLOT_START + i; + mc->mc_packet = sc->mly_packet + i; + mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet)); + if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap)) + mly_release_command(mc); + } + return(0); } /******************************************************************************** - * Command-mapping helper function - populate this command slot's s/g table - * with the s/g entries for this command. + * Command-mapping helper function - populate this command's s/g table + * with the s/g entries for its data. 
*/ static void mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) @@ -1253,7 +1222,7 @@ mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) sg = &gen->transfer.direct.sg[0]; gen->command_control.extended_sg_table = 0; } else { - tabofs = (mc->mc_slot * MLY_MAXSGENTRIES); + tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAXSGENTRIES); sg = sc->mly_sg_table + tabofs; gen->transfer.indirect.entries[0] = nseg; gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry)); @@ -1520,7 +1489,6 @@ mly_print_command(struct mly_command *mc) mly_printf(sc, "COMMAND @ %p\n", mc); mly_printf(sc, " slot %d\n", mc->mc_slot); - mly_printf(sc, " state %d\n", MLY_CMD_STATE(mc)); mly_printf(sc, " status 0x%x\n", mc->mc_status); mly_printf(sc, " sense len %d\n", mc->mc_sense); mly_printf(sc, " resid %d\n", mc->mc_resid); @@ -1528,7 +1496,7 @@ mly_print_command(struct mly_command *mc) if (mc->mc_packet != NULL) mly_print_packet(mc); mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length); - mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\11slotted\12mapped\13priority\14datain\15dataout\n"); + mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n"); mly_printf(sc, " complete %p\n", mc->mc_complete); mly_printf(sc, " private %p\n", mc->mc_private); } @@ -1706,3 +1674,187 @@ mly_panic(struct mly_softc *sc, char *reason) panic(reason); } #endif + +/******************************************************************************** + * Print queue statistics, callable from DDB. + */ +void +mly_print_controller(int controller) +{ + struct mly_softc *sc; + + if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) { + printf("mly: controller %d invalid\n", controller); + } else { + device_printf(sc->mly_dev, "queue curr max\n"); + device_printf(sc->mly_dev, "free %04d/%04d\n", + sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max); + device_printf(sc->mly_dev, "ready %04d/%04d\n", + sc->mly_qstat[MLYQ_READY].q_length, sc->mly_qstat[MLYQ_READY].q_max); + device_printf(sc->mly_dev, "busy %04d/%04d\n", + sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max); + device_printf(sc->mly_dev, "complete %04d/%04d\n", + sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max); + } +} + + +/******************************************************************************** + ******************************************************************************** + Control device interface + ******************************************************************************** + ********************************************************************************/ + +/******************************************************************************** + * Accept an open operation on the control device. + */ +static int +mly_user_open(dev_t dev, int flags, int fmt, struct proc *p) +{ + int unit = minor(dev); + struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); + + sc->mly_state |= MLY_STATE_OPEN; + return(0); +} + +/******************************************************************************** + * Accept the last close on the control device. 
+ */ +static int +mly_user_close(dev_t dev, int flags, int fmt, struct proc *p) +{ + int unit = minor(dev); + struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); + + sc->mly_state &= ~MLY_STATE_OPEN; + return (0); +} + +/******************************************************************************** + * Handle controller-specific control operations. + */ +static int +mly_user_ioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p) +{ + struct mly_softc *sc = (struct mly_softc *)dev->si_drv1; + struct mly_user_command *uc = (struct mly_user_command *)addr; + struct mly_user_health *uh = (struct mly_user_health *)addr; + + switch(cmd) { + case MLYIO_COMMAND: + return(mly_user_command(sc, uc)); + case MLYIO_HEALTH: + return(mly_user_health(sc, uh)); + default: + return(ENOIOCTL); + } +} + +/******************************************************************************** + * Execute a command passed in from userspace. + * + * The control structure contains the actual command for the controller, as well + * as the user-space data pointer and data size, and an optional sense buffer + * size/pointer. On completion, the data size is adjusted to the command + * residual, and the sense buffer size to the size of the returned sense data. + * + */ +static int +mly_user_command(struct mly_softc *sc, struct mly_user_command *uc) +{ + struct mly_command *mc; + int error, s; + + /* allocate a command */ + if (mly_alloc_command(sc, &mc)) { + error = ENOMEM; + goto out; /* XXX Linux version will wait for a command */ + } + + /* handle data size/direction */ + mc->mc_length = (uc->DataTransferLength >= 0) ? uc->DataTransferLength : -uc->DataTransferLength; + if (mc->mc_length > 0) { + if ((mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_NOWAIT)) == NULL) { + error = ENOMEM; + goto out; + } + } + if (uc->DataTransferLength > 0) { + mc->mc_flags |= MLY_CMD_DATAIN; + bzero(mc->mc_data, mc->mc_length); + } + if (uc->DataTransferLength < 0) { + mc->mc_flags |= MLY_CMD_DATAOUT; + if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0) + goto out; + } + + /* copy the controller command */ + bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox)); + + /* clear command completion handler so that we get woken up */ + mc->mc_complete = NULL; + + /* execute the command */ + s = splcam(); + mly_requeue_ready(mc); + mly_startio(sc); + while (!(mc->mc_flags & MLY_CMD_COMPLETE)) + tsleep(mc, PRIBIO, "mlyioctl", 0); + splx(s); + + /* return the data to userspace */ + if (uc->DataTransferLength > 0) + if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0) + goto out; + + /* return the sense buffer to userspace */ + if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) { + if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer, + min(uc->RequestSenseLength, mc->mc_sense))) != 0) + goto out; + } + + /* return command results to userspace (caller will copy out) */ + uc->DataTransferLength = mc->mc_resid; + uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense); + uc->CommandStatus = mc->mc_status; + error = 0; + + out: + if (mc->mc_data != NULL) + free(mc->mc_data, M_DEVBUF); + if (mc != NULL) + mly_release_command(mc); + return(error); +} + +/******************************************************************************** + * Return health status to userspace. 
If the health change index in the user + * structure does not match that currently exported by the controller, we + * return the current status immediately. Otherwise, we block until either + * interrupted or new status is delivered. + */ +static int +mly_user_health(struct mly_softc *sc, struct mly_user_health *uh) +{ + struct mly_health_status mh; + int error, s; + + /* fetch the current health status from userspace */ + if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0) + return(error); + + /* spin waiting for a status update */ + s = splcam(); + error = EWOULDBLOCK; + while ((error != 0) && (sc->mly_event_change == mh.change_counter)) + error = tsleep(&sc->mly_event_change, PRIBIO | PCATCH, "mlyhealth", 0); + splx(s); + + /* copy the controller's health status buffer out (there is a race here if it changes again) */ + error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer, + sizeof(uh->HealthStatusBuffer)); + return(error); +} diff --git a/sys/dev/mly/mly_cam.c b/sys/dev/mly/mly_cam.c index 9175b27..f27be2b 100644 --- a/sys/dev/mly/mly_cam.c +++ b/sys/dev/mly/mly_cam.c @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2000 Michael Smith + * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * @@ -41,11 +41,13 @@ #include <cam/cam_sim.h> #include <cam/cam_xpt_sim.h> #include <cam/scsi/scsi_all.h> +#include <cam/scsi/scsi_message.h> #include <machine/resource.h> #include <machine/bus.h> #include <dev/mly/mlyreg.h> +#include <dev/mly/mlyio.h> #include <dev/mly/mlyvar.h> #include <dev/mly/mly_tables.h> @@ -58,12 +60,20 @@ static struct cam_periph *mly_find_periph(struct mly_softc *sc, int bus, int tar * CAM-specific queue primitives */ static __inline void +mly_initq_ccb(struct mly_softc *sc) +{ + TAILQ_INIT(&sc->mly_cam_ccbq); + MLYQ_INIT(sc, MLYQ_CCB); +} + +static __inline void mly_enqueue_ccb(struct mly_softc *sc, union ccb *ccb) { int s; s = splcam(); TAILQ_INSERT_TAIL(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe); + MLYQ_ADD(sc, MLYQ_CCB); splx(s); } @@ -74,6 +84,7 @@ mly_requeue_ccb(struct mly_softc *sc, union ccb *ccb) s = splcam(); TAILQ_INSERT_HEAD(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe); + MLYQ_ADD(sc, MLYQ_CCB); splx(s); } @@ -84,8 +95,10 @@ mly_dequeue_ccb(struct mly_softc *sc) int s; s = splcam(); - if ((ccb = (union ccb *)TAILQ_FIRST(&sc->mly_cam_ccbq)) != NULL) + if ((ccb = (union ccb *)TAILQ_FIRST(&sc->mly_cam_ccbq)) != NULL) { TAILQ_REMOVE(&sc->mly_cam_ccbq, &ccb->ccb_h, sim_links.tqe); + MLYQ_REMOVE(sc, MLYQ_CCB); + } splx(s); return(ccb); } @@ -112,12 +125,12 @@ int mly_cam_attach(struct mly_softc *sc) { struct cam_devq *devq; - int chn, nchn; + int chn, i; debug_called(1); /* initialise the CCB queue */ - TAILQ_INIT(&sc->mly_cam_ccbq); + mly_initq_ccb(sc); /* * Allocate a devq for all our channels combined. @@ -129,11 +142,11 @@ mly_cam_attach(struct mly_softc *sc) /* * Iterate over channels, registering them with CAM. + * + * Physical channels are set up to support tagged commands and only a single + * untagged command. Virtual channels do not support tags, and don't need them. 
*/ - nchn = sc->mly_controllerinfo->physical_channels_present + - sc->mly_controllerinfo->virtual_channels_present; - for (chn = 0; chn < nchn; chn++) { - + for (i = 0, chn = 0; i < sc->mly_controllerinfo->physical_channels_present; i++, chn++) { /* allocate a sim */ if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, @@ -147,14 +160,30 @@ mly_cam_attach(struct mly_softc *sc) mly_printf(sc, "CAM SIM attach failed\n"); return(ENOMEM); } + } + for (i = 0; i < sc->mly_controllerinfo->virtual_channels_present; i++, chn++) { + /* allocate a sim */ + if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, + mly_cam_poll, + "mly", + sc, + device_get_unit(sc->mly_dev), + sc->mly_controllerinfo->maximum_parallel_commands, + 0, + devq)) == NULL) { + cam_simq_free(devq); + mly_printf(sc, "CAM SIM attach failed\n"); + return(ENOMEM); + } + } - /* register the bus ID so we can get it later */ - if (xpt_bus_register(sc->mly_cam_sim[chn], chn)) { + for (i = 0; i < chn; i++) { + /* register the bus IDs so we can get them later */ + if (xpt_bus_register(sc->mly_cam_sim[i], i)) { mly_printf(sc, "CAM XPT bus registration failed\n"); return(ENXIO); } - debug(1, "registered sim %p bus %d", sc->mly_cam_sim[chn], chn); - + debug(1, "registered sim %p bus %d", sc->mly_cam_sim[i], i); } return(0); @@ -312,6 +341,59 @@ mly_cam_action(struct cam_sim *sim, union ccb *ccb) break; } + case XPT_GET_TRAN_SETTINGS: + { + struct ccb_trans_settings *cts = &ccb->cts; + int bus, target; + + bus = cam_sim_bus(sim); + target = cts->ccb_h.target_id; + + debug(2, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); + cts->valid = 0; + + /* logical device? */ + if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) { + /* nothing special for these */ + + /* physical device? */ + } else if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PHYSICAL) { + /* allow CAM to try tagged transactions */ + cts->flags |= CCB_TRANS_TAG_ENB; + cts->valid |= CCB_TRANS_TQ_VALID; + + /* convert speed (MHz) to usec */ + cts->sync_period = 1000000 / sc->mly_btl[bus][target].mb_speed; + + /* convert bus width to CAM internal encoding */ + switch (sc->mly_btl[bus][target].mb_width) { + case 32: + cts->bus_width = MSG_EXT_WDTR_BUS_32_BIT; + break; + case 16: + cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; + break; + case 8: + default: + cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; + break; + } + cts->valid |= CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_BUS_WIDTH_VALID; + + /* not a device, bail out */ + } else { + cts->ccb_h.status = CAM_REQ_CMP_ERR; + break; + } + + /* disconnect always OK */ + cts->flags |= CCB_TRANS_DISC_ENB; + cts->valid |= CCB_TRANS_DISC_VALID; + + cts->ccb_h.status = CAM_REQ_CMP; + break; + } + default: /* we can't do this */ debug(2, "unspported func_code = 0x%x", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; @@ -362,7 +444,6 @@ mly_cam_command(struct mly_softc *sc, struct mly_command **mcp) } /* build the command */ - MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP); mc->mc_data = csio->data_ptr; mc->mc_length = csio->dxfer_len; mc->mc_complete = mly_cam_complete; diff --git a/sys/dev/mly/mly_pci.c b/sys/dev/mly/mly_pci.c index c0dbd45..e890a69 100644 --- a/sys/dev/mly/mly_pci.c +++ b/sys/dev/mly/mly_pci.c @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2000 Michael Smith + * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. 
* @@ -46,6 +46,7 @@ #include <pci/pcivar.h> #include <dev/mly/mlyreg.h> +#include <dev/mly/mlyio.h> #include <dev/mly/mlyvar.h> static int mly_pci_probe(device_t dev); @@ -60,7 +61,6 @@ static int mly_sg_map(struct mly_softc *sc); static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int mly_mmbox_map(struct mly_softc *sc); static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); -static void mly_free_command_cluster(struct mly_command_cluster *mcc); static device_method_t mly_methods[] = { /* Device interface */ @@ -236,7 +236,7 @@ mly_pci_attach(device_t dev) BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ - sizeof(union mly_command_packet) * MLY_CMD_CLUSTERCOUNT, 1, /* maxsize, nsegments */ + sizeof(union mly_command_packet) * MLY_MAXCOMMANDS, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ &sc->mly_packet_dmat)) { @@ -515,16 +515,22 @@ mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) void mly_free(struct mly_softc *sc) { - struct mly_command_cluster *mcc; - + struct mly_command *mc; + debug_called(1); /* detach from CAM */ mly_cam_detach(sc); - /* throw away any command buffers */ - while ((mcc = mly_dequeue_cluster(sc)) != NULL) - mly_free_command_cluster(mcc); + /* throw away command buffer DMA maps */ + while (mly_alloc_command(sc, &mc) == 0) + bus_dmamap_destroy(sc->mly_buffer_dmat, mc->mc_datamap); + + /* release the packet storage */ + if (sc->mly_packet != NULL) { + bus_dmamap_unload(sc->mly_packet_dmat, sc->mly_packetmap); + bus_dmamem_free(sc->mly_packet_dmat, sc->mly_packet, sc->mly_packetmap); + } /* throw away the controllerinfo structure */ if (sc->mly_controllerinfo != NULL) @@ -569,22 +575,3 @@ mly_free(struct mly_softc *sc) bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource); } -/******************************************************************************** - * Free a command cluster. - */ -static void -mly_free_command_cluster(struct mly_command_cluster *mcc) -{ - struct mly_softc *sc = mcc->mcc_command[0].mc_sc; - int i; - - debug_called(1); - - for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++) - bus_dmamap_destroy(sc->mly_buffer_dmat, mcc->mcc_command[i].mc_datamap); - - bus_dmamap_unload(sc->mly_packet_dmat, mcc->mcc_packetmap); - bus_dmamem_free(sc->mly_packet_dmat, mcc->mcc_packet, mcc->mcc_packetmap); - free(mcc, M_DEVBUF); -} - diff --git a/sys/dev/mly/mlyio.h b/sys/dev/mly/mlyio.h new file mode 100644 index 0000000..949b071 --- /dev/null +++ b/sys/dev/mly/mlyio.h @@ -0,0 +1,72 @@ +/*- + * Copyright (c) 2001 Michael Smith + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/******************************************************************************** + * Control structures exchanged through the GAM interface with userland + * management tools. + * + * The member naming here is consistent with the Linux driver, with which this + * interface is basically compatible. + */ +struct mly_user_command +{ + unsigned char ControllerNumber; + union mly_command_packet CommandMailbox; + int DataTransferLength; + int RequestSenseLength; + void *DataTransferBuffer; + void *RequestSenseBuffer; + int CommandStatus; /* not in the Linux structure */ +}; + +#define MLYIO_COMMAND _IOWR('M', 200, struct mly_user_command) + +struct mly_user_health +{ + unsigned char ControllerNumber; + void *HealthStatusBuffer; +}; + +#define MLYIO_HEALTH _IOW('M', 201, struct mly_user_health) + +/* + * Command queue statistics + */ + +#define MLYQ_FREE 0 +#define MLYQ_CCB 1 +#define MLYQ_READY 2 +#define MLYQ_BUSY 3 +#define MLYQ_COMPLETE 4 +#define MLYQ_COUNT 5 + +struct mly_qstat +{ + u_int32_t q_length; + u_int32_t q_max; +}; diff --git a/sys/dev/mly/mlyvar.h b/sys/dev/mly/mlyvar.h index 00a9c9b..565e5e5 100644 --- a/sys/dev/mly/mlyvar.h +++ b/sys/dev/mly/mlyvar.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2000 Michael Smith + * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * @@ -38,7 +38,7 @@ * table this size (256k) would be too expensive, so we cap ourselves at a * reasonable limit. */ -#define MLY_MAXCOMMANDS 256 /* max outstanding commands per controller, limit 65535 */ +#define MLY_MAXCOMMANDS 256 /* max commands per controller */ /* * The firmware interface allows for a 16-bit s/g list length. 
We limit @@ -48,7 +48,7 @@ /******************************************************************************** ******************************************************************************** - Driver Variable Definitions + Cross-version Compatibility ******************************************************************************** ********************************************************************************/ @@ -56,6 +56,18 @@ # include <sys/taskqueue.h> #endif +#if __FreeBSD_version <= 500014 +# include <machine/clock.h> +# undef offsetof +# define offsetof(type, field) ((size_t)(&((type *)0)->field)) +#endif + +/******************************************************************************** + ******************************************************************************** + Driver Variable Definitions + ******************************************************************************** + ********************************************************************************/ + /* * Debugging levels: * 0 - quiet, only emit warnings @@ -93,6 +105,10 @@ struct mly_btl { char mb_name[16]; /* peripheral attached to this device */ int mb_state; /* see 8.1 */ int mb_type; /* see 8.2 */ + + /* physical devices only */ + int mb_speed; /* interface transfer rate */ + int mb_width; /* interface width */ }; /* @@ -104,18 +120,11 @@ struct mly_command { struct mly_softc *mc_sc; /* controller that owns us */ u_int16_t mc_slot; /* command slot we occupy */ int mc_flags; -#define MLY_CMD_STATEMASK ((1<<8)-1) -#define MLY_CMD_STATE(mc) ((mc)->mc_flags & MLY_CMD_STATEMASK) -#define MLY_CMD_SETSTATE(mc, s) ((mc)->mc_flags = ((mc)->mc_flags &= ~MLY_CMD_STATEMASK) | (s)) -#define MLY_CMD_FREE 0 /* command is on the free list */ -#define MLY_CMD_SETUP 1 /* command is being built */ -#define MLY_CMD_BUSY 2 /* command is being run, or ready to run, or not completed */ -#define MLY_CMD_COMPLETE 3 /* command has been completed */ -#define MLY_CMD_SLOTTED (1<<8) /* command has a slot number */ -#define MLY_CMD_MAPPED (1<<9) /* command has had its data mapped */ -#define MLY_CMD_PRIORITY (1<<10) /* allow use of "priority" slots */ -#define MLY_CMD_DATAIN (1<<11) /* data moves controller->system */ -#define MLY_CMD_DATAOUT (1<<12) /* data moves system->controller */ +#define MLY_CMD_BUSY (1<<0) /* command is being run, or ready to run, or not completed */ +#define MLY_CMD_COMPLETE (1<<1) /* command has been completed */ +#define MLY_CMD_MAPPED (1<<3) /* command has had its data mapped */ +#define MLY_CMD_DATAIN (1<<4) /* data moves controller->system */ +#define MLY_CMD_DATAOUT (1<<5) /* data moves system->controller */ u_int16_t mc_status; /* command completion status */ u_int8_t mc_sense; /* sense data length */ int32_t mc_resid; /* I/O residual count */ @@ -141,36 +150,12 @@ struct mly_command { #define MLY_SLOT_MAX (MLY_SLOT_START + MLY_MAXCOMMANDS) /* - * Command/command packet cluster. - * - * Due to the difficulty of using the zone allocator to create a new - * zone from within a module, we use our own clustering to reduce - * memory wastage caused by allocating lots of these small structures. - * - * Note that it is possible to require more than MLY_MAXCOMMANDS - * command structures. - * - * Since we may need to allocate extra clusters at any time, and since this - * process needs to allocate a physically contiguous slab of controller - * addressible memory in which to place the command packets, do not allow more - * command packets in a cluster than will fit in a page. 
- */ -#define MLY_CMD_CLUSTERCOUNT (PAGE_SIZE / sizeof(union mly_command_packet)) - -struct mly_command_cluster { - TAILQ_ENTRY(mly_command_cluster) mcc_link; - union mly_command_packet *mcc_packet; - bus_dmamap_t mcc_packetmap; - u_int64_t mcc_packetphys; - struct mly_command mcc_command[MLY_CMD_CLUSTERCOUNT]; -}; - -/* * Per-controller structure. */ struct mly_softc { /* bus connections */ device_t mly_dev; + dev_t mly_dev_t; struct resource *mly_regs_resource; /* register interface window */ int mly_regs_rid; /* resource ID */ bus_space_handle_t mly_bhandle; /* bus space handle */ @@ -203,8 +188,8 @@ struct mly_softc { u_int64_t mly_mmbox_busaddr; /* bus-space address of memory mailbox */ bus_dma_tag_t mly_mmbox_dmat; /* memory mailbox DMA tag */ bus_dmamap_t mly_mmbox_dmamap; /* memory mailbox DMA map */ - u_int32_t mly_mmbox_command_index; /* next slot to use */ - u_int32_t mly_mmbox_status_index; /* slot we next expect status in */ + u_int32_t mly_mmbox_command_index; /* next index to use */ + u_int32_t mly_mmbox_status_index; /* index we next expect status at */ /* controller features, limits and status */ int mly_state; @@ -212,20 +197,21 @@ struct mly_softc { #define MLY_STATE_OPEN (1<<1) #define MLY_STATE_INTERRUPTS_ON (1<<2) #define MLY_STATE_MMBOX_ACTIVE (1<<3) - int mly_max_commands; /* max parallel commands we allow */ struct mly_ioctl_getcontrollerinfo *mly_controllerinfo; struct mly_param_controller *mly_controllerparam; struct mly_btl mly_btl[MLY_MAX_CHANNELS][MLY_MAX_TARGETS]; /* command management */ - struct mly_command *mly_busycmds[MLY_SLOT_MAX]; /* busy commands */ - int mly_busy_count; - int mly_last_slot; - TAILQ_HEAD(,mly_command) mly_freecmds; /* commands available for reuse */ + struct mly_command mly_command[MLY_MAXCOMMANDS]; /* commands */ + union mly_command_packet *mly_packet; /* command packets */ + bus_dma_tag_t mly_packet_dmat; /* packet DMA tag */ + bus_dmamap_t mly_packetmap; /* packet DMA map */ + u_int64_t mly_packetphys; /* packet array base address */ + TAILQ_HEAD(,mly_command) mly_free; /* commands available for reuse */ TAILQ_HEAD(,mly_command) mly_ready; /* commands ready to be submitted */ - TAILQ_HEAD(,mly_command) mly_completed; /* commands which have been returned by the controller */ - TAILQ_HEAD(,mly_command_cluster) mly_clusters; /* command memory blocks */ - bus_dma_tag_t mly_packet_dmat; /* command packet DMA tag */ + TAILQ_HEAD(,mly_command) mly_busy; + TAILQ_HEAD(,mly_command) mly_complete; /* commands which have been returned by the controller */ + struct mly_qstat mly_qstat[MLYQ_COUNT]; /* queue statistics */ /* health monitoring */ u_int32_t mly_event_change; /* event status change indicator */ @@ -311,113 +297,80 @@ extern int mly_name_device(struct mly_softc *sc, int bus, int target); /******************************************************************************** * Queue primitives - * - * These are broken out individually to make statistics gathering easier. 
*/ -static __inline void -mly_enqueue_ready(struct mly_command *mc) -{ - int s; - - s = splcam(); - TAILQ_INSERT_TAIL(&mc->mc_sc->mly_ready, mc, mc_link); - MLY_CMD_SETSTATE(mc, MLY_CMD_BUSY); - splx(s); -} - -static __inline void -mly_requeue_ready(struct mly_command *mc) -{ - int s; - - s = splcam(); - TAILQ_INSERT_HEAD(&mc->mc_sc->mly_ready, mc, mc_link); - splx(s); -} - -static __inline struct mly_command * -mly_dequeue_ready(struct mly_softc *sc) -{ - struct mly_command *mc; - int s; - - s = splcam(); - if ((mc = TAILQ_FIRST(&sc->mly_ready)) != NULL) - TAILQ_REMOVE(&sc->mly_ready, mc, mc_link); - splx(s); - return(mc); -} - -static __inline void -mly_enqueue_completed(struct mly_command *mc) -{ - int s; - - s = splcam(); - TAILQ_INSERT_TAIL(&mc->mc_sc->mly_completed, mc, mc_link); - /* don't set MLY_CMD_COMPLETE here to avoid wakeup race */ - splx(s); -} - -static __inline struct mly_command * -mly_dequeue_completed(struct mly_softc *sc) -{ - struct mly_command *mc; - int s; - - s = splcam(); - if ((mc = TAILQ_FIRST(&sc->mly_completed)) != NULL) - TAILQ_REMOVE(&sc->mly_completed, mc, mc_link); - splx(s); - return(mc); -} - -static __inline void -mly_enqueue_free(struct mly_command *mc) -{ - int s; - - s = splcam(); - TAILQ_INSERT_HEAD(&mc->mc_sc->mly_freecmds, mc, mc_link); - MLY_CMD_SETSTATE(mc, MLY_CMD_FREE); - splx(s); -} - -static __inline struct mly_command * -mly_dequeue_free(struct mly_softc *sc) -{ - struct mly_command *mc; - int s; - - s = splcam(); - if ((mc = TAILQ_FIRST(&sc->mly_freecmds)) != NULL) - TAILQ_REMOVE(&sc->mly_freecmds, mc, mc_link); - splx(s); - return(mc); -} - -static __inline void -mly_enqueue_cluster(struct mly_softc *sc, struct mly_command_cluster *mcc) -{ - int s; - - s = splcam(); - TAILQ_INSERT_HEAD(&sc->mly_clusters, mcc, mcc_link); - splx(s); -} - -static __inline struct mly_command_cluster * -mly_dequeue_cluster(struct mly_softc *sc) -{ - struct mly_command_cluster *mcc; - int s; - - s = splcam(); - if ((mcc = TAILQ_FIRST(&sc->mly_clusters)) != NULL) - TAILQ_REMOVE(&sc->mly_clusters, mcc, mcc_link); - splx(s); - return(mcc); -} - +#define MLYQ_ADD(sc, qname) \ + do { \ + struct mly_qstat *qs = &(sc)->mly_qstat[qname]; \ + \ + qs->q_length++; \ + if (qs->q_length > qs->q_max) \ + qs->q_max = qs->q_length; \ + } while(0) + +#define MLYQ_REMOVE(sc, qname) (sc)->mly_qstat[qname].q_length-- +#define MLYQ_INIT(sc, qname) \ + do { \ + sc->mly_qstat[qname].q_length = 0; \ + sc->mly_qstat[qname].q_max = 0; \ + } while(0) + + +#define MLYQ_COMMAND_QUEUE(name, index) \ +static __inline void \ +mly_initq_ ## name (struct mly_softc *sc) \ +{ \ + TAILQ_INIT(&sc->mly_ ## name); \ + MLYQ_INIT(sc, index); \ +} \ +static __inline void \ +mly_enqueue_ ## name (struct mly_command *mc) \ +{ \ + int s; \ + \ + s = splcam(); \ + TAILQ_INSERT_TAIL(&mc->mc_sc->mly_ ## name, mc, mc_link); \ + MLYQ_ADD(mc->mc_sc, index); \ + splx(s); \ +} \ +static __inline void \ +mly_requeue_ ## name (struct mly_command *mc) \ +{ \ + int s; \ + \ + s = splcam(); \ + TAILQ_INSERT_HEAD(&mc->mc_sc->mly_ ## name, mc, mc_link); \ + MLYQ_ADD(mc->mc_sc, index); \ + splx(s); \ +} \ +static __inline struct mly_command * \ +mly_dequeue_ ## name (struct mly_softc *sc) \ +{ \ + struct mly_command *mc; \ + int s; \ + \ + s = splcam(); \ + if ((mc = TAILQ_FIRST(&sc->mly_ ## name)) != NULL) { \ + TAILQ_REMOVE(&sc->mly_ ## name, mc, mc_link); \ + MLYQ_REMOVE(sc, index); \ + } \ + splx(s); \ + return(mc); \ +} \ +static __inline void \ +mly_remove_ ## name (struct mly_command *mc) \ +{ \ + int s; \ + \ + s 
= splcam(); \ + TAILQ_REMOVE(&mc->mc_sc->mly_ ## name, mc, mc_link); \ + MLYQ_REMOVE(mc->mc_sc, index); \ + splx(s); \ +} \ +struct hack + +MLYQ_COMMAND_QUEUE(free, MLYQ_FREE); +MLYQ_COMMAND_QUEUE(ready, MLYQ_READY); +MLYQ_COMMAND_QUEUE(busy, MLYQ_BUSY); +MLYQ_COMMAND_QUEUE(complete, MLYQ_COMPLETE); |
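
For completeness, here is a hedged sketch of how a userland management tool might exercise the new control device and the MLYIO_HEALTH ioctl declared in mlyio.h above. The include paths, and whether the mly headers are usable from userland as-is, are assumptions (a real tool would likely build against the kernel source tree, e.g. with -I/sys); the device node name /dev/mly0 follows the make_dev() call added to mly_attach().

```c
#include <sys/ioctl.h>
#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <dev/mly/mlyreg.h>     /* union mly_command_packet, struct mly_health_status */
#include <dev/mly/mlyio.h>      /* MLYIO_HEALTH, struct mly_user_health */

int
main(void)
{
    struct mly_health_status health;
    struct mly_user_health   uh;
    int                      fd;

    if ((fd = open("/dev/mly0", O_RDWR)) == -1)
        err(1, "open /dev/mly0");

    /*
     * Per the driver comment above: if the change counter we pass in does
     * not match the controller's current one, the current status is
     * returned immediately; otherwise the ioctl blocks until new status
     * arrives.  A zeroed buffer therefore normally returns right away.
     */
    memset(&health, 0, sizeof(health));
    uh.ControllerNumber = 0;
    uh.HealthStatusBuffer = &health;

    if (ioctl(fd, MLYIO_HEALTH, &uh) == -1)
        err(1, "MLYIO_HEALTH");

    printf("health status buffer updated\n");
    close(fd);
    return (0);
}
```

A monitoring daemon would normally loop on this call, passing back the change counter from the previous status so that each ioctl blocks until the controller reports something new.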