summaryrefslogtreecommitdiffstats
path: root/sys/dev/stge
diff options
context:
space:
mode:
authoryongari <yongari@FreeBSD.org>2006-07-25 00:37:09 +0000
committeryongari <yongari@FreeBSD.org>2006-07-25 00:37:09 +0000
commite9264082c6e3b21299c17dcc439eed58688d0485 (patch)
treead9ad656aeef597f3f32bdad04421228603029fe /sys/dev/stge
parent5312caa99a0235b6751a7ee6b3ce2f02390a02b1 (diff)
downloadFreeBSD-src-e9264082c6e3b21299c17dcc439eed58688d0485.zip
FreeBSD-src-e9264082c6e3b21299c17dcc439eed58688d0485.tar.gz
Add stge(4), a driver for Sundance/Tamarack TC9021 Gigabit Ethernet
controller ported from NetBSD. It supports the following Gigabit Ethernet adapters. o Antares Microsystems Gigabit Ethernet o ASUS NX1101 Gigabit Ethernet o D-Link DL-4000 Gigabit Ethernet o IC Plus IP1000A Gigabit Ethernet o Sundance ST-2021 Gigabit Ethernet o Sundance ST-2023 Gigabit Ethernet o Sundance TC9021 Gigabit Ethernet o Tamarack TC9021 Gigabit Ethernet The IP1000A Gigabit Ethernet is also found on some motherboards (LOM) from ABIT. Unlike NetBSD stge(4) it does not require promiscuous mode operation to receive packets and it supports all hardware features (TCP/UDP/IP checksum offload, VLAN tag stripping/insertion features and JUMBO frame) and polling(4). Due to lack of hardware, adapters that have TBI transceivers were not tested at all. Special thanks to wpaul who provided a valuable datasheet for the controller and helped to debug jumbo frame related issues. Without his datasheet I would have spent many hours debugging this chip. Tested on: i386, sparc64
Diffstat (limited to 'sys/dev/stge')
-rw-r--r--sys/dev/stge/if_stge.c2698
-rw-r--r--sys/dev/stge/if_stgereg.h697
2 files changed, 3395 insertions, 0 deletions
diff --git a/sys/dev/stge/if_stge.c b/sys/dev/stge/if_stge.c
new file mode 100644
index 0000000..f346ca3
--- /dev/null
+++ b/sys/dev/stge/if_stge.c
@@ -0,0 +1,2698 @@
+/* $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $ */
+
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Device driver for the Sundance Tech. TC9021 10/100/1000
+ * Ethernet controller.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <dev/stge/if_stgereg.h>
+
+#define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
+
+MODULE_DEPEND(stge, pci, 1, 1, 1);
+MODULE_DEPEND(stge, ether, 1, 1, 1);
+MODULE_DEPEND(stge, miibus, 1, 1, 1);
+
+/* "device miibus" required. See GENERIC if you get errors here. */
+#include "miibus_if.h"
+
+/*
+ * Devices supported by this driver.
+ */
+static struct stge_product {
+ uint16_t stge_vendorid;
+ uint16_t stge_deviceid;
+ const char *stge_name;
+} stge_products[] = {
+ { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023,
+ "Sundance ST-1023 Gigabit Ethernet" },
+
+ { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021,
+ "Sundance ST-2021 Gigabit Ethernet" },
+
+ { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021,
+ "Tamarack TC9021 Gigabit Ethernet" },
+
+ { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT,
+ "Tamarack TC9021 Gigabit Ethernet" },
+
+ /*
+ * The Sundance sample boards use the Sundance vendor ID,
+ * but the Tamarack product ID.
+ */
+ { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021,
+ "Sundance TC9021 Gigabit Ethernet" },
+
+ { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT,
+ "Sundance TC9021 Gigabit Ethernet" },
+
+ { VENDOR_DLINK, DEVICEID_DLINK_DL4000,
+ "D-Link DL-4000 Gigabit Ethernet" },
+
+ { VENDOR_ANTARES, DEVICEID_ANTARES_TC9021,
+ "Antares Gigabit Ethernet" }
+};
+
+static int stge_probe(device_t);
+static int stge_attach(device_t);
+static int stge_detach(device_t);
+static void stge_shutdown(device_t);
+static int stge_suspend(device_t);
+static int stge_resume(device_t);
+
+static int stge_encap(struct stge_softc *, struct mbuf **);
+static void stge_start(struct ifnet *);
+static void stge_start_locked(struct ifnet *);
+static void stge_watchdog(struct ifnet *);
+static int stge_ioctl(struct ifnet *, u_long, caddr_t);
+static void stge_init(void *);
+static void stge_init_locked(struct stge_softc *);
+static void stge_vlan_setup(struct stge_softc *);
+static void stge_stop(struct stge_softc *);
+static void stge_start_tx(struct stge_softc *);
+static void stge_start_rx(struct stge_softc *);
+static void stge_stop_tx(struct stge_softc *);
+static void stge_stop_rx(struct stge_softc *);
+
+static void stge_reset(struct stge_softc *, uint32_t);
+static int stge_eeprom_wait(struct stge_softc *);
+static void stge_read_eeprom(struct stge_softc *, int, uint16_t *);
+static void stge_tick(void *);
+static void stge_stats_update(struct stge_softc *);
+static void stge_set_filter(struct stge_softc *);
+static void stge_set_multi(struct stge_softc *);
+
+static void stge_link_task(void *, int);
+static void stge_intr(void *);
+static __inline int stge_tx_error(struct stge_softc *);
+static void stge_txeof(struct stge_softc *);
+static void stge_rxeof(struct stge_softc *);
+static __inline void stge_discard_rxbuf(struct stge_softc *, int);
+static int stge_newbuf(struct stge_softc *, int);
+#ifndef __NO_STRICT_ALIGNMENT
+static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
+#endif
+
+static void stge_mii_sync(struct stge_softc *);
+static void stge_mii_send(struct stge_softc *, uint32_t, int);
+static int stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *);
+static int stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *);
+static int stge_miibus_readreg(device_t, int, int);
+static int stge_miibus_writereg(device_t, int, int, int);
+static void stge_miibus_statchg(device_t);
+static int stge_mediachange(struct ifnet *);
+static void stge_mediastatus(struct ifnet *, struct ifmediareq *);
+
+static void stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+static int stge_dma_alloc(struct stge_softc *);
+static void stge_dma_free(struct stge_softc *);
+static void stge_dma_wait(struct stge_softc *);
+static void stge_init_tx_ring(struct stge_softc *);
+static int stge_init_rx_ring(struct stge_softc *);
+#ifdef DEVICE_POLLING
+static void stge_poll(struct ifnet *, enum poll_cmd, int);
+#endif
+
+static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
+static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
+static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
+
+static device_method_t stge_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, stge_probe),
+ DEVMETHOD(device_attach, stge_attach),
+ DEVMETHOD(device_detach, stge_detach),
+ DEVMETHOD(device_shutdown, stge_shutdown),
+ DEVMETHOD(device_suspend, stge_suspend),
+ DEVMETHOD(device_resume, stge_resume),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, stge_miibus_readreg),
+ DEVMETHOD(miibus_writereg, stge_miibus_writereg),
+ DEVMETHOD(miibus_statchg, stge_miibus_statchg),
+
+ { 0, 0 }
+
+};
+
+static driver_t stge_driver = {
+ "stge",
+ stge_methods,
+ sizeof(struct stge_softc)
+};
+
+static devclass_t stge_devclass;
+
+DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
+DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
+
+static struct resource_spec stge_res_spec_io[] = {
+ { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0, 0 }
+};
+
+static struct resource_spec stge_res_spec_mem[] = {
+ { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0, 0 }
+};
+
+#define MII_SET(x) \
+ CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x))
+#define MII_CLR(x) \
+ CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x))
+
+/*
+ * Sync the PHYs by setting data bit and strobing the clock 32 times.
+ */
+static void
+stge_mii_sync(struct stge_softc *sc)
+{
+ int i;
+
+ MII_SET(PC_MgmtDir | PC_MgmtData);
+
+ for (i = 0; i < 32; i++) {
+ MII_SET(PC_MgmtClk);
+ DELAY(1);
+ MII_CLR(PC_MgmtClk);
+ DELAY(1);
+ }
+}
+
/*
 * Clock a series of bits through the MII, MSB first.
 *
 * `bits' holds the value to shift out, `cnt' the number of bits to
 * send (so the first bit sent is bit (cnt - 1) of `bits').  Assumes
 * the management direction bit is already set for output by the
 * caller (stge_mii_readreg()/stge_mii_writereg()).
 */
static void
stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(PC_MgmtClk);

	/* Walk a one-bit mask down from the MSB position. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			MII_SET(PC_MgmtData);
		else
			MII_CLR(PC_MgmtData);
		DELAY(1);
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		MII_SET(PC_MgmtClk);
	}
}
+
/*
 * Read a PHY register through the MII by bit-banging the PhyCtrl
 * register.  Returns 0 on success with the result in frame->mii_data,
 * or 1 if the PHY did not acknowledge the read.
 */
static int
stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Reset PhyCtrl to the polarity bits saved at attach time. */
	CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(PC_MgmtDir);

	/* Idle bit */
	MII_CLR((PC_MgmtClk | PC_MgmtData));
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/* Check for ack: the PHY drives the data line low to acknowledge. */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData;
	MII_SET(PC_MgmtClk);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(PC_MgmtClk);
			DELAY(1);
			MII_SET(PC_MgmtClk);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(PC_MgmtClk);
		DELAY(1);
		/* NOTE(review): ack is always 0 here (the ack != 0 case
		 * jumped to fail above), so this test never fails. */
		if (!ack) {
			if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(PC_MgmtClk);
		DELAY(1);
	}

fail:
	/* One final clock cycle to leave the bus idle. */
	MII_CLR(PC_MgmtClk);
	DELAY(1);
	MII_SET(PC_MgmtClk);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}
+
/*
 * Write to a PHY register through the MII by bit-banging the PhyCtrl
 * register.  Always returns 0; the write frame provides no ack that
 * this routine checks.
 */
static int
stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = STGE_MII_STARTDELIM;
	frame->mii_opcode = STGE_MII_WRITEOP;
	frame->mii_turnaround = STGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(PC_MgmtDir);

	stge_mii_sync(sc);

	/* Start delimiter, opcode, PHY address, register address,
	 * turnaround and finally the 16 data bits. */
	stge_mii_send(sc, frame->mii_stdelim, 2);
	stge_mii_send(sc, frame->mii_opcode, 2);
	stge_mii_send(sc, frame->mii_phyaddr, 5);
	stge_mii_send(sc, frame->mii_regaddr, 5);
	stge_mii_send(sc, frame->mii_turnaround, 2);
	stge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(PC_MgmtClk);
	DELAY(1);
	MII_CLR(PC_MgmtClk);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(PC_MgmtDir);

	return(0);
}
+
+/*
+ * sc_miibus_readreg: [mii interface function]
+ *
+ * Read a PHY register on the MII of the TC9021.
+ */
+static int
+stge_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct stge_softc *sc;
+ struct stge_mii_frame frame;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ if (reg == STGE_PhyCtrl) {
+ /* XXX allow ip1000phy read STGE_PhyCtrl register. */
+ STGE_MII_LOCK(sc);
+ error = CSR_READ_1(sc, STGE_PhyCtrl);
+ STGE_MII_UNLOCK(sc);
+ return (error);
+ }
+ bzero(&frame, sizeof(frame));
+ frame.mii_phyaddr = phy;
+ frame.mii_regaddr = reg;
+
+ STGE_MII_LOCK(sc);
+ error = stge_mii_readreg(sc, &frame);
+ STGE_MII_UNLOCK(sc);
+
+ if (error != 0) {
+ /* Don't show errors for PHY probe request */
+ if (reg != 1)
+ device_printf(sc->sc_dev, "phy read fail\n");
+ return (0);
+ }
+ return (frame.mii_data);
+}
+
+/*
+ * stge_miibus_writereg: [mii interface function]
+ *
+ * Write a PHY register on the MII of the TC9021.
+ */
+static int
+stge_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct stge_softc *sc;
+ struct stge_mii_frame frame;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ bzero(&frame, sizeof(frame));
+ frame.mii_phyaddr = phy;
+ frame.mii_regaddr = reg;
+ frame.mii_data = val;
+
+ STGE_MII_LOCK(sc);
+ error = stge_mii_writereg(sc, &frame);
+ STGE_MII_UNLOCK(sc);
+
+ if (error != 0)
+ device_printf(sc->sc_dev, "phy write fail\n");
+ return (0);
+}
+
/*
 * stge_miibus_statchg: [mii interface function]
 *
 *	Callback from MII layer when media changes.  Recomputes the
 *	duplex and flow-control bits for STGE_MACCtrl, but defers the
 *	actual register write to stge_link_task() (see comment below).
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sc_miibus);

	STGE_MII_LOCK(sc);
	/* No link selected yet: nothing to program. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
		STGE_MII_UNLOCK(sc);
		return;
	}

	/* Translate the active media's duplex/flow-control flags. */
	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
	/*
	 * We can't access STGE_MACCtrl register in this context due to
	 * the races between MII layer and driver which accesses this
	 * register to program MAC. In order to solve the race, we defer
	 * STGE_MACCtrl programming until we know we are out of MII.
	 */
	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
	STGE_MII_UNLOCK(sc);
}
+
+/*
+ * stge_mediastatus: [ifmedia interface function]
+ *
+ * Get the current interface media status.
+ */
+static void
+stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct stge_softc *sc;
+ struct mii_data *mii;
+
+ sc = ifp->if_softc;
+ mii = device_get_softc(sc->sc_miibus);
+
+ mii_pollstat(mii);
+ ifmr->ifm_status = mii->mii_media_status;
+ ifmr->ifm_active = mii->mii_media_active;
+}
+
+/*
+ * stge_mediachange: [ifmedia interface function]
+ *
+ * Set hardware to newly-selected media.
+ */
+static int
+stge_mediachange(struct ifnet *ifp)
+{
+ struct stge_softc *sc;
+ struct mii_data *mii;
+
+ sc = ifp->if_softc;
+ mii = device_get_softc(sc->sc_miibus);
+ mii_mediachg(mii);
+
+ return (0);
+}
+
+static int
+stge_eeprom_wait(struct stge_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < STGE_TIMEOUT; i++) {
+ DELAY(1000);
+ if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
+ return (0);
+ }
+ return (1);
+}
+
+/*
+ * stge_read_eeprom:
+ *
+ * Read data from the serial EEPROM.
+ */
+static void
+stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
+{
+
+ if (stge_eeprom_wait(sc))
+ device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
+
+ CSR_WRITE_2(sc, STGE_EepromCtrl,
+ EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
+ if (stge_eeprom_wait(sc))
+ device_printf(sc->sc_dev, "EEPROM read timed out\n");
+ *data = CSR_READ_2(sc, STGE_EepromData);
+}
+
+
+static int
+stge_probe(device_t dev)
+{
+ struct stge_product *sp;
+ int i;
+ uint16_t vendor, devid;
+
+ vendor = pci_get_vendor(dev);
+ devid = pci_get_device(dev);
+ sp = stge_products;
+ for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
+ i++, sp++) {
+ if (vendor == sp->stge_vendorid &&
+ devid == sp->stge_deviceid) {
+ device_set_desc(dev, sp->stge_name);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
+static int
+stge_attach(device_t dev)
+{
+ struct stge_softc *sc;
+ struct ifnet *ifp;
+ uint8_t enaddr[ETHER_ADDR_LEN];
+ int error, i;
+ uint16_t cmd;
+ uint32_t val;
+
+ error = 0;
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+
+ mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+ MTX_DEF);
+ mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
+ callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
+ TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
+
+ /*
+ * Map the device.
+ */
+ pci_enable_busmaster(dev);
+ cmd = pci_read_config(dev, PCIR_COMMAND, 2);
+ val = pci_read_config(dev, PCIR_BAR(1), 4);
+ if ((val & 0x01) != 0)
+ sc->sc_spec = stge_res_spec_mem;
+ else {
+ val = pci_read_config(dev, PCIR_BAR(0), 4);
+ if ((val & 0x01) == 0) {
+ device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
+ error = ENXIO;
+ goto fail;
+ }
+ sc->sc_spec = stge_res_spec_io;
+ }
+ error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
+ if (error != 0) {
+ device_printf(dev, "couldn't allocate %s resources\n",
+ sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
+ goto fail;
+ }
+ sc->sc_rev = pci_get_revid(dev);
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
+ sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
+ sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
+
+ /* Pull in device tunables. */
+ sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
+ error = resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "rxint_nframe", &sc->sc_rxint_nframe);
+ if (error == 0) {
+ if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
+ sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
+ device_printf(dev, "rxint_nframe value out of range; "
+ "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
+ sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
+ }
+ }
+
+ sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
+ error = resource_int_value(device_get_name(dev), device_get_unit(dev),
+ "rxint_dmawait", &sc->sc_rxint_dmawait);
+ if (error == 0) {
+ if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
+ sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
+ device_printf(dev, "rxint_dmawait value out of range; "
+ "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
+ sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
+ }
+ }
+
+ if ((error = stge_dma_alloc(sc) != 0))
+ goto fail;
+
+ /*
+ * Determine if we're copper or fiber. It affects how we
+ * reset the card.
+ */
+ if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
+ sc->sc_usefiber = 1;
+ else
+ sc->sc_usefiber = 0;
+
+ /* Load LED configuration from EEPROM. */
+ stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
+
+ /*
+ * Reset the chip to a known state.
+ */
+ STGE_LOCK(sc);
+ stge_reset(sc, STGE_RESET_FULL);
+ STGE_UNLOCK(sc);
+
+ /*
+ * Reading the station address from the EEPROM doesn't seem
+ * to work, at least on my sample boards. Instead, since
+ * the reset sequence does AutoInit, read it from the station
+ * address registers. For Sundance 1023 you can only read it
+ * from EEPROM.
+ */
+ if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
+ uint16_t v;
+
+ v = CSR_READ_2(sc, STGE_StationAddress0);
+ enaddr[0] = v & 0xff;
+ enaddr[1] = v >> 8;
+ v = CSR_READ_2(sc, STGE_StationAddress1);
+ enaddr[2] = v & 0xff;
+ enaddr[3] = v >> 8;
+ v = CSR_READ_2(sc, STGE_StationAddress2);
+ enaddr[4] = v & 0xff;
+ enaddr[5] = v >> 8;
+ sc->sc_stge1023 = 0;
+ } else {
+ uint16_t myaddr[ETHER_ADDR_LEN / 2];
+ for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
+ stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
+ &myaddr[i]);
+ myaddr[i] = le16toh(myaddr[i]);
+ }
+ bcopy(myaddr, enaddr, sizeof(enaddr));
+ sc->sc_stge1023 = 1;
+ }
+
+ ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(sc->sc_dev, "failed to if_alloc()\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ ifp->if_softc = sc;
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = stge_ioctl;
+ ifp->if_start = stge_start;
+ ifp->if_watchdog = stge_watchdog;
+ ifp->if_init = stge_init;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+ IFQ_SET_READY(&ifp->if_snd);
+ /* Revision B3 and earlier chips have checksum bug. */
+ if (sc->sc_rev >= 0x0c) {
+ ifp->if_hwassist = STGE_CSUM_FEATURES;
+ ifp->if_capabilities = IFCAP_HWCSUM;
+ } else {
+ ifp->if_hwassist = 0;
+ ifp->if_capabilities = 0;
+ }
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+ * Read some important bits from the PhyCtrl register.
+ */
+ sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
+ (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
+
+ /* Set up MII bus. */
+ if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange,
+ stge_mediastatus)) != 0) {
+ device_printf(sc->sc_dev, "no PHY found!\n");
+ goto fail;
+ }
+
+ ether_ifattach(ifp, enaddr);
+
+ /* VLAN capability setup */
+ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
+ if (sc->sc_rev >= 0x0c)
+ ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable = ifp->if_capabilities;
+#ifdef DEVICE_POLLING
+ ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+ /*
+ * Tell the upper layer(s) we support long frames.
+ * Must appear after the call to ether_ifattach() because
+ * ether_ifattach() sets ifi_hdrlen to the default value.
+ */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ /*
+ * The manual recommends disabling early transmit, so we
+ * do. It's disabled anyway, if using IP checksumming,
+ * since the entire packet must be in the FIFO in order
+ * for the chip to perform the checksum.
+ */
+ sc->sc_txthresh = 0x0fff;
+
+ /*
+ * Disable MWI if the PCI layer tells us to.
+ */
+ sc->sc_DMACtrl = 0;
+ if ((cmd & PCIM_CMD_MWRICEN) == 0)
+ sc->sc_DMACtrl |= DMAC_MWIDisable;
+
+ /*
+ * Hookup IRQ
+ */
+ error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
+ stge_intr, sc, &sc->sc_ih);
+ if (error != 0) {
+ ether_ifdetach(ifp);
+ device_printf(sc->sc_dev, "couldn't set up IRQ\n");
+ sc->sc_ifp = NULL;
+ goto fail;
+ }
+
+fail:
+ if (error != 0)
+ stge_detach(dev);
+
+ return (error);
+}
+
/*
 * stge_detach:
 *
 *	Detach the interface, also used by stge_attach() to unwind a
 *	partial attach.  Teardown order matters: stop the chip and
 *	drain the callout/task before ether_ifdetach(), delete the
 *	miibus child before freeing DMA, and release bus resources
 *	last.  Always returns 0.
 */
static int
stge_detach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
	if (ifp && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		STGE_LOCK(sc);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		STGE_UNLOCK(sc);
		callout_drain(&sc->sc_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL) {
		device_delete_child(dev, sc->sc_miibus);
		sc->sc_miibus = NULL;
	}
	bus_generic_detach(dev);
	stge_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	/* Tear down the interrupt before releasing the IRQ resource. */
	if (sc->sc_ih) {
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
		sc->sc_ih = NULL;
	}
	bus_release_resources(dev, sc->sc_spec, sc->sc_res);

	mtx_destroy(&sc->sc_mii_mtx);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
+
/* Callback argument for stge_dmamap_cb(): receives the loaded bus address. */
struct stge_dmamap_arg {
	bus_addr_t stge_busaddr;	/* filled in by stge_dmamap_cb() */
};
+
+static void
+stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct stge_dmamap_arg *ctx;
+
+ if (error != 0)
+ return;
+
+ ctx = (struct stge_dmamap_arg *)arg;
+ ctx->stge_busaddr = segs[0].ds_addr;
+}
+
/*
 * stge_dma_alloc:
 *
 *	Create all bus_dma tags, allocate and load the Tx/Rx descriptor
 *	rings, and create per-buffer DMA maps.  Returns 0 or an errno.
 *	On failure partially-created state is left in sc_cdata/sc_rdata;
 *	the caller (stge_attach) unwinds via stge_detach -> stge_dma_free.
 */
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(NULL, /* parent */
	    1, 0,			/* algnmnt, boundary */
	    STGE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
	    STGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers, plus a spare used by stge_newbuf. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}
+
/*
 * stge_dma_free:
 *
 *	Release everything stge_dma_alloc() created, in reverse order:
 *	unload/free/destroy each ring, destroy per-buffer maps and tags,
 *	then the parent tag.  Safe to call on a partially-allocated
 *	softc (every step is guarded by a NULL/zero check).
 */
static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_cdata.stge_tx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_cdata.stge_tx_ring_map &&
		    sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_cdata.stge_tx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_cdata.stge_rx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_cdata.stge_rx_ring_map &&
		    sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_cdata.stge_rx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}
+
+/*
+ * stge_shutdown:
+ *
+ * Make sure the interface is stopped at reboot time.
+ */
+static void
+stge_shutdown(device_t dev)
+{
+ struct stge_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ STGE_LOCK(sc);
+ stge_stop(sc);
+ STGE_UNLOCK(sc);
+}
+
+static int
+stge_suspend(device_t dev)
+{
+ struct stge_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ STGE_LOCK(sc);
+ stge_stop(sc);
+ sc->sc_suspended = 1;
+ STGE_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+stge_resume(device_t dev)
+{
+ struct stge_softc *sc;
+ struct ifnet *ifp;
+
+ sc = device_get_softc(dev);
+
+ STGE_LOCK(sc);
+ ifp = sc->sc_ifp;
+ if (ifp->if_flags & IFF_UP)
+ stge_init_locked(sc);
+
+ sc->sc_suspended = 0;
+ STGE_UNLOCK(sc);
+
+ return (0);
+}
+
+static void
+stge_dma_wait(struct stge_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < STGE_TIMEOUT; i++) {
+ DELAY(2);
+ if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
+ break;
+ }
+
+ if (i == STGE_TIMEOUT)
+ device_printf(sc->sc_dev, "DMA wait timed out\n");
+}
+
+static int
+stge_encap(struct stge_softc *sc, struct mbuf **m_head)
+{
+ struct stge_txdesc *txd;
+ struct stge_tfd *tfd;
+ struct mbuf *m, *n;
+ struct m_tag *mtag;
+ bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
+ int error, i, nsegs, si;
+ uint64_t csum_flags, tfc;
+
+ STGE_LOCK_ASSERT(sc);
+
+ if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
+ return (ENOBUFS);
+
+ m = *m_head;
+ error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
+ txd->tx_dmamap, m, txsegs, &nsegs, 0);
+ if (error == EFBIG) {
+ n = m_defrag(m, M_DONTWAIT);
+ if (n == NULL) {
+ m_freem(m);
+ m = NULL;
+ return (ENOMEM);
+ }
+ m = n;
+ error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
+ txd->tx_dmamap, m, txsegs, &nsegs, 0);
+ if (error != 0) {
+ m_freem(m);
+ m = NULL;
+ return (error);
+ }
+ } else if (error != 0)
+ return (error);
+ if (nsegs == 0) {
+ m_freem(m);
+ m = NULL;
+ return (EIO);
+ }
+
+ csum_flags = 0;
+ if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
+ if (m->m_pkthdr.csum_flags & CSUM_IP)
+ csum_flags |= TFD_IPChecksumEnable;
+ if (m->m_pkthdr.csum_flags & CSUM_TCP)
+ csum_flags |= TFD_TCPChecksumEnable;
+ else if (m->m_pkthdr.csum_flags & CSUM_UDP)
+ csum_flags |= TFD_UDPChecksumEnable;
+ }
+
+ si = sc->sc_cdata.stge_tx_prod;
+ tfd = &sc->sc_rdata.stge_tx_ring[si];
+ for (i = 0; i < nsegs; i++)
+ tfd->tfd_frags[i].frag_word0 =
+ htole64(FRAG_ADDR(txsegs[i].ds_addr) |
+ FRAG_LEN(txsegs[i].ds_len));
+ sc->sc_cdata.stge_tx_cnt++;
+
+ tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
+ TFD_FragCount(nsegs) | csum_flags;
+ if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
+ tfc |= TFD_TxDMAIndicate;
+
+ /* Update producer index. */
+ sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
+
+ /* Check if we have a VLAN tag to insert. */
+ mtag = VLAN_OUTPUT_TAG(sc->sc_ifp, m);
+ if (mtag != NULL)
+ tfc |= TFD_VLANTagInsert | TFD_VID(VLAN_TAG_VALUE(mtag));
+ tfd->tfd_control = htole64(tfc);
+
+ /* Update Tx Queue. */
+ STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
+ STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
+ txd->tx_m = m;
+
+ /* Sync descriptors. */
+ bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
+ sc->sc_cdata.stge_tx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ return (0);
+}
+
+/*
+ * stge_start: [ifnet interface function]
+ *
+ * Start packet transmission on the interface.
+ */
+static void
+stge_start(struct ifnet *ifp)
+{
+ struct stge_softc *sc;
+
+ sc = ifp->if_softc;
+ STGE_LOCK(sc);
+ stge_start_locked(ifp);
+ STGE_UNLOCK(sc);
+}
+
/*
 * stge_start_locked:
 *
 *	Drain the interface send queue into the Tx ring via stge_encap()
 *	until the queue empties or the ring reaches its high-water mark,
 *	then kick the DMA engine once.  Caller holds the softc lock.
 */
static void
stge_start_locked(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	STGE_LOCK_ASSERT(sc);

	/* Nothing to do unless running and not already flow-blocked. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			/* NULL means stge_encap() freed the chain itself. */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
+
+/*
+ * stge_watchdog: [ifnet interface function]
+ *
+ * Watchdog timer handler.
+ */
+static void
+stge_watchdog(struct ifnet *ifp)
+{
+ struct stge_softc *sc;
+
+ sc = ifp->if_softc;
+
+ STGE_LOCK(sc);
+ if_printf(sc->sc_ifp, "device timeout\n");
+ ifp->if_oerrors++;
+ stge_init_locked(sc);
+ STGE_UNLOCK(sc);
+}
+
/*
 * stge_ioctl: [ifnet interface function]
 *
 *	Handle control requests from the operator: MTU changes, interface
 *	flags, multicast membership, media selection, and capability
 *	(polling/checksum/VLAN) toggles.  Anything unrecognized is passed
 *	down to ether_ioctl().
 */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* Reinitialize so the new frame size is programmed. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			STGE_LOCK(sc);
			stge_init_locked(sc);
			STGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		STGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Only reprogram the filter on a PROMISC
				 * transition; avoids a full reinit. */
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		STGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			stge_set_multi(sc);
		STGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* mask holds only the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				/* Register first (may fail), then disable
				 * the chip's interrupts under the lock. */
				error = ether_poll_register(stge_poll, ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				STGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable,
				    sc->sc_IntEnable);
				ifp->if_capenable &= ~IFCAP_POLLING;
				STGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				STGE_LOCK(sc);
				stge_vlan_setup(sc);
				STGE_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
+
/*
 * stge_link_task:
 *
 *	Taskqueue handler run on link-state changes: reprogram the
 *	duplex/flow-control bits of STGE_MACCtrl from sc_MACCtrl, and if
 *	the duplex setting actually changed, reset the Tx/Rx functions
 *	(the MAC must be re-synchronized to the new duplex).
 */
static void
stge_link_task(void *arg, int pending)
{
	struct stge_softc *sc;
	uint32_t v, ac;
	int i;

	sc = (struct stge_softc *)arg;
	STGE_LOCK(sc);
	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		/* Wait for the self-clearing reset bit, up to ~STGE_TIMEOUT
		 * polls of 100us. */
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
	STGE_UNLOCK(sc);
}
+
/*
 * stge_tx_error:
 *
 *	Drain the TxStatus FIFO and handle transmit errors.  Returns -1
 *	when a Tx underrun was seen (caller must fully reinitialize the
 *	chip), 0 otherwise.  Max/late collisions are recovered in place
 *	by re-enabling the Tx MAC.
 */
static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	/* Each read pops one entry; loop until the FIFO is empty. */
	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a more better way to recover
			 * from Tx underrun instead of a full reset.
			 */
			/* Rate-limit the diagnostics after STGE_MAXERR. */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}
+
/*
 * stge_intr:
 *
 *	Interrupt service routine.  Loops reading IntStatusAck until no
 *	enabled source remains, dispatching to the Rx/Tx handlers; a host
 *	interface error or Tx underrun forces a full reinit.  Interrupts
 *	are re-armed on the way out.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	int reinit;
	uint16_t status;

	sc = (struct stge_softc *)arg;
	ifp = sc->sc_ifp;

	STGE_LOCK(sc);

#ifdef DEVICE_POLLING
	/* polling(4) owns the chip; nothing to do here. */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	/* Bail if suspended or the interrupt isn't ours (shared line). */
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		goto done_locked;

	/*
	 * Reading IntStatusAck acknowledges the pending sources;
	 * presumably it also masks further interrupts until IntEnable is
	 * rewritten below -- TODO confirm against the TC9021 datasheet.
	 */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc);
			/* Hit the end of the RFD list; restart Rx DMA. */
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors.*/
		if ((status & IS_TxComplete) != 0) {
			/* Nonzero (underrun) requires a full reinit. */
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0)
		stge_init_locked(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

done_locked:
	STGE_UNLOCK(sc);
}
+
/*
 * stge_txeof:
 *
 *	Reclaim completed Tx descriptors: walk the busy queue from the
 *	consumer index, free each mbuf whose TFD the chip has marked
 *	TFD_TFDDone, and return the slot to the free queue.  Clears the
 *	watchdog when the ring fully drains.  Caller holds the softc lock.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	/* Pick up the chip's TFD_TFDDone writes before inspecting. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		/* At least one slot is free again; unblock the queue. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	/* Ring empty: cancel the transmit watchdog. */
	if (sc->sc_cdata.stge_tx_cnt == 0)
		ifp->if_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
+
+static __inline void
+stge_discard_rxbuf(struct stge_softc *sc, int idx)
+{
+ struct stge_rfd *rfd;
+
+ rfd = &sc->sc_rdata.stge_rx_ring[idx];
+ rfd->rfd_status = 0;
+}
+
#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that TC9021's DMA engine has alignment restrictions in
 * DMA scatter operations. The first DMA segment has no address
 * alignment restrictins but the rest should be aligned on 4(?) bytes
 * boundary. Otherwise it would corrupt random memory. Since we don't
 * know which one is used for the first segment in advance we simply
 * don't align at all.
 * To avoid copying over an entire frame to align, we allocate a new
 * mbuf and copy ethernet header to the new mbuf. The new mbuf is
 * prepended into the existing mbuf chain.
 *
 * Returns the fixed-up chain, or NULL (original chain freed) if mbuf
 * allocation failed.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/*
		 * Room in the cluster: slide the whole frame forward by
		 * ETHER_HDR_LEN bytes (bcopy handles the overlap) so the
		 * payload lands on an aligned boundary.
		 */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/*
		 * No room: split off the Ethernet header into a fresh
		 * mbuf and prepend it, leaving the payload untouched.
		 */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif
+
/*
 * stge_rxeof:
 *
 *	Receive completion handler: walk the RFD ring from the consumer
 *	index, chaining multi-descriptor frames through stge_rxhead /
 *	stge_rxtail, and pass complete frames (with checksum/VLAN info)
 *	up the stack.  Caller holds the softc lock; the lock is dropped
 *	around if_input().
 */
static void
stge_rxeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	/* Pick up the chip's RFD status writes before inspecting. */
	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		/* Honor the per-call budget when polling. */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		/*
		 * NOTE(review): prog is also advanced by the for clause,
		 * so each frame is counted twice and at most
		 * STGE_RX_RING_CNT/2 descriptors are processed per call
		 * -- confirm whether this double increment is intended.
		 */
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it. Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			/* Also drop any partial chain accumulated so far. */
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		/*
		 * The frame length in the RFD covers the whole frame;
		 * subtract what earlier descriptors already contributed.
		 */
		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			/* Oversized frame: drop it. */
			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			/* Jumbo frames skipped the ETHER_ALIGN adjustment
			 * in stge_newbuf(); realign the payload now. */
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
				VLAN_INPUT_TAG(ifp, m, RFD_TCI(status64));

			/* Drop the lock across the stack call. */
			STGE_UNLOCK(sc);
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			STGE_LOCK(sc);

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
+
#ifdef DEVICE_POLLING
/*
 * stge_poll:
 *
 *	polling(4) handler: run the Rx path (bounded by `count') and the
 *	Tx reclaim path; on POLL_AND_CHECK_STATUS also service host
 *	errors and Tx errors the way stge_intr() would.
 */
static void
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;

	sc = ifp->if_softc;
	STGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return;
	}

	/* stge_rxeof() consumes stge_rxcycles as its frame budget. */
	sc->sc_cdata.stge_rxcycles = count;
	stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				stge_init_locked(sc);
			}
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0)
					stge_init_locked(sc);
			}
		}

	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */
+
/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII, refresh the statistics
 *	counters, and opportunistically reclaim Tx descriptors.  Runs
 *	with the softc lock held (callout is lock-bound).
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = (struct stge_softc *)arg;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame. This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	/* Re-arm ourselves for the next second. */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
}
+
/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters and fold them into the ifnet
 *	counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	/*
	 * The discarded reads below are not dead code: presumably the
	 * octet counters are read-to-clear and are read here only to
	 * keep them from saturating -- TODO confirm against the TC9021
	 * datasheet.
	 */
	CSR_READ_4(sc,STGE_OctetRcvOk);

	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
}
+
/*
 * stge_reset:
 *
 *	Perform a soft reset on the TC9021.  `how' selects the scope:
 *	STGE_RESET_TX / STGE_RESET_RX reset one MAC side plus its FIFO;
 *	STGE_RESET_FULL (the default) resets everything.  Afterwards the
 *	LED and PHY configuration derived from sc_led is reprogrammed
 *	(both sequences taken from the Linux IPG driver).
 */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	STGE_LOCK_ASSERT(sc);

	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber. We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	/* Wait for the self-clearing reset-busy bit. */
	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	/*
	 * NOTE(review): mask 0x03 also matches bit 0x01, so LEDModeBit1
	 * is set whenever LEDMode is; 0x02 may have been intended --
	 * verify against the Linux IPG driver.
	 */
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}
+
/*
 * stge_init: [ ifnet interface function ]
 *
 *	Initialize the interface; thin locked wrapper around
 *	stge_init_locked().
 */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc = xsc;

	STGE_LOCK(sc);
	stge_init_locked(sc);
	STGE_UNLOCK(sc);
}
+
/*
 * stge_init_locked:
 *
 *	Bring the interface up: stop any pending I/O, rebuild the
 *	descriptor rings, program the station address, filters, DMA and
 *	interrupt parameters, enable the MAC, and start the MII tick.
 *	Caller holds the softc lock.
 */
static void
stge_init_locked(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks. Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period. It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threhold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register. We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us. When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS.*/
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state of
	 * Rx/Tx MAC wedge the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

 out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}
+
+static void
+stge_vlan_setup(struct stge_softc *sc)
+{
+ struct ifnet *ifp;
+ uint32_t v;
+
+ ifp = sc->sc_ifp;
+ /*
+ * The NIC always copy a VLAN tag regardless of STGE_MACCtrl
+ * MC_AutoVLANuntagging bit.
+ * MC_AutoVLANtagging bit selects which VLAN source to use
+ * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert
+ * bit has priority over MC_AutoVLANtagging bit. So we always
+ * use TFC instead of STGE_VLANTag register.
+ */
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
+ v |= MC_AutoVLANuntagging;
+ else
+ v &= ~MC_AutoVLANuntagging;
+ CSR_WRITE_4(sc, STGE_MACCtrl, v);
+}
+
/*
 * stge_stop:
 *
 *	Stop transmission on the interface: cancel the tick callout,
 *	soft-reset the chip, mask interrupts, halt the MACs and DMA
 *	engines, release every mbuf still held by the rings, and mark
 *	the interface down.  Caller holds the softc lock.
 */
static void
stge_stop(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	STGE_LOCK_ASSERT(sc);
	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->sc_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	ifp->if_timer = 0;
}
+
+static void
+stge_start_tx(struct stge_softc *sc)
+{
+ uint32_t v;
+ int i;
+
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_TxEnabled) != 0)
+ return;
+ v |= MC_TxEnable;
+ CSR_WRITE_4(sc, STGE_MACCtrl, v);
+ CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
+ for (i = STGE_TIMEOUT; i > 0; i--) {
+ DELAY(10);
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_TxEnabled) != 0)
+ break;
+ }
+ if (i == 0)
+ device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
+}
+
+static void
+stge_start_rx(struct stge_softc *sc)
+{
+ uint32_t v;
+ int i;
+
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_RxEnabled) != 0)
+ return;
+ v |= MC_RxEnable;
+ CSR_WRITE_4(sc, STGE_MACCtrl, v);
+ CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
+ for (i = STGE_TIMEOUT; i > 0; i--) {
+ DELAY(10);
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_RxEnabled) != 0)
+ break;
+ }
+ if (i == 0)
+ device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
+}
+
+static void
+stge_stop_tx(struct stge_softc *sc)
+{
+ uint32_t v;
+ int i;
+
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_TxEnabled) == 0)
+ return;
+ v |= MC_TxDisable;
+ CSR_WRITE_4(sc, STGE_MACCtrl, v);
+ for (i = STGE_TIMEOUT; i > 0; i--) {
+ DELAY(10);
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_TxEnabled) == 0)
+ break;
+ }
+ if (i == 0)
+ device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
+}
+
+static void
+stge_stop_rx(struct stge_softc *sc)
+{
+ uint32_t v;
+ int i;
+
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_RxEnabled) == 0)
+ return;
+ v |= MC_RxDisable;
+ CSR_WRITE_4(sc, STGE_MACCtrl, v);
+ for (i = STGE_TIMEOUT; i > 0; i--) {
+ DELAY(10);
+ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
+ if ((v & MC_RxEnabled) == 0)
+ break;
+ }
+ if (i == 0)
+ device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
+}
+
+/*
+ * stge_init_tx_ring:
+ *
+ *	Initialize the transmit descriptor ring: reset the producer/
+ *	consumer indices, rebuild the free/busy descriptor queues, link
+ *	the TFDs into a circular list and mark them all as done (i.e.
+ *	owned by the driver), then sync the ring for the device.
+ */
+static void
+stge_init_tx_ring(struct stge_softc *sc)
+{
+	struct stge_ring_data *rd;
+	struct stge_txdesc *txd;
+	bus_addr_t addr;
+	int i;
+
+	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
+	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
+
+	sc->sc_cdata.stge_tx_prod = 0;
+	sc->sc_cdata.stge_tx_cons = 0;
+	sc->sc_cdata.stge_tx_cnt = 0;
+
+	rd = &sc->sc_rdata;
+	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
+	for (i = 0; i < STGE_TX_RING_CNT; i++) {
+		/* Last descriptor wraps back to the first (circular list). */
+		if (i == (STGE_TX_RING_CNT - 1))
+			addr = STGE_TX_RING_ADDR(sc, 0);
+		else
+			addr = STGE_TX_RING_ADDR(sc, i + 1);
+		rd->stge_tx_ring[i].tfd_next = htole64(addr);
+		/* TFDDone set: descriptor is free for the driver to use. */
+		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
+		txd = &sc->sc_cdata.stge_txdesc[i];
+		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
+	}
+
+	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
+	    sc->sc_cdata.stge_tx_ring_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+}
+
+/*
+ * stge_init_rx_ring:
+ *
+ *	Initialize the receive descriptor ring: allocate an mbuf for each
+ *	RFD, link the RFDs into a circular list, and sync the ring for the
+ *	device.  Returns ENOBUFS if an mbuf allocation fails.
+ */
+static int
+stge_init_rx_ring(struct stge_softc *sc)
+{
+	struct stge_ring_data *rd;
+	bus_addr_t addr;
+	int i;
+
+	sc->sc_cdata.stge_rx_cons = 0;
+	/* Reset the in-progress multi-descriptor frame reassembly chain. */
+	STGE_RXCHAIN_RESET(sc);
+
+	rd = &sc->sc_rdata;
+	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
+	for (i = 0; i < STGE_RX_RING_CNT; i++) {
+		if (stge_newbuf(sc, i) != 0)
+			return (ENOBUFS);
+		/* Last descriptor wraps back to the first (circular list). */
+		if (i == (STGE_RX_RING_CNT - 1))
+			addr = STGE_RX_RING_ADDR(sc, 0);
+		else
+			addr = STGE_RX_RING_ADDR(sc, i + 1);
+		rd->stge_rx_ring[i].rfd_next = htole64(addr);
+		rd->stge_rx_ring[i].rfd_status = 0;
+	}
+
+	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
+	    sc->sc_cdata.stge_rx_ring_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	return (0);
+}
+
+/*
+ * stge_newbuf:
+ *
+ *	Add a receive buffer to the indicated descriptor.  Allocates a
+ *	cluster mbuf, DMA-loads it, and programs the RFD's fragment word.
+ *	Returns ENOBUFS on allocation or DMA-load failure (the old mbuf,
+ *	if any, is left in place in that case).
+ */
+static int
+stge_newbuf(struct stge_softc *sc, int idx)
+{
+	struct stge_rxdesc *rxd;
+	struct stge_rfd *rfd;
+	struct mbuf *m;
+	bus_dma_segment_t segs[1];
+	bus_dmamap_t map;
+	int nsegs;
+
+	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+	if (m == NULL)
+		return (ENOBUFS);
+	m->m_len = m->m_pkthdr.len = MCLBYTES;
+	/*
+	 * The hardware requires 4bytes aligned DMA address when JUMBO
+	 * frame is used.  Otherwise shift the payload by ETHER_ALIGN so
+	 * the IP header lands on a natural boundary.
+	 */
+	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
+		m_adj(m, ETHER_ALIGN);
+
+	/*
+	 * Load the new mbuf into the spare map first so that the slot's
+	 * current mbuf/map stay intact if the load fails.
+	 */
+	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
+	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
+		m_freem(m);
+		return (ENOBUFS);
+	}
+	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+	rxd = &sc->sc_cdata.stge_rxdesc[idx];
+	/* Tear down the mapping of the mbuf being replaced, if any. */
+	if (rxd->rx_m != NULL) {
+		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
+		    BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
+	}
+	/* Swap the slot's map with the (now loaded) spare map. */
+	map = rxd->rx_dmamap;
+	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
+	sc->sc_cdata.stge_rx_sparemap = map;
+	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
+	    BUS_DMASYNC_PREREAD);
+	rxd->rx_m = m;
+
+	/* Hand the buffer to the hardware: address + length in one word. */
+	rfd = &sc->sc_rdata.stge_rx_ring[idx];
+	rfd->rfd_frag.frag_word0 =
+	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
+	rfd->rfd_status = 0;
+
+	return (0);
+}
+
+/*
+ * stge_set_filter:
+ *
+ *	Set up the receive filter: always accept unicast to our station
+ *	address, and reflect the interface's IFF_BROADCAST and
+ *	IFF_PROMISC flags into the ReceiveMode register.
+ */
+static void
+stge_set_filter(struct stge_softc *sc)
+{
+	struct ifnet *ifp;
+	uint16_t mode;
+
+	STGE_LOCK_ASSERT(sc);
+
+	ifp = sc->sc_ifp;
+
+	mode = CSR_READ_2(sc, STGE_ReceiveMode);
+	mode |= RM_ReceiveUnicast;
+	if ((ifp->if_flags & IFF_BROADCAST) != 0)
+		mode |= RM_ReceiveBroadcast;
+	else
+		mode &= ~RM_ReceiveBroadcast;
+	if ((ifp->if_flags & IFF_PROMISC) != 0)
+		mode |= RM_ReceiveAllFrames;
+	else
+		mode &= ~RM_ReceiveAllFrames;
+
+	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
+}
+
+/*
+ * stge_set_multi:
+ *
+ *	Program the 64-bit multicast hash filter from the interface's
+ *	multicast address list.  Promiscuous and allmulti modes bypass
+ *	the hash entirely.
+ */
+static void
+stge_set_multi(struct stge_softc *sc)
+{
+	struct ifnet *ifp;
+	struct ifmultiaddr *ifma;
+	uint32_t crc;
+	uint32_t mchash[2];
+	uint16_t mode;
+	int count;
+
+	STGE_LOCK_ASSERT(sc);
+
+	ifp = sc->sc_ifp;
+
+	mode = CSR_READ_2(sc, STGE_ReceiveMode);
+	/* Promisc/allmulti: accept everything (or all multicast) and bail. */
+	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
+		if ((ifp->if_flags & IFF_PROMISC) != 0)
+			mode |= RM_ReceiveAllFrames;
+		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
+			mode |= RM_ReceiveMulticast;
+		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
+		return;
+	}
+
+	/* clear existing filters. */
+	CSR_WRITE_4(sc, STGE_HashTable0, 0);
+	CSR_WRITE_4(sc, STGE_HashTable1, 0);
+
+	/*
+	 * Set up the multicast address filter by passing all multicast
+	 * addresses through a CRC generator, and then using the low-order
+	 * 6 bits as an index into the 64 bit multicast hash table.  The
+	 * high order bits select the register, while the rest of the bits
+	 * select the bit within the register.
+	 */
+
+	bzero(mchash, sizeof(mchash));
+
+	count = 0;
+	/*
+	 * NOTE(review): lock taken on sc->sc_ifp but released via the
+	 * local alias ifp -- same pointer, but the spelling should match.
+	 */
+	IF_ADDR_LOCK(sc->sc_ifp);
+	TAILQ_FOREACH(ifma, &sc->sc_ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
+		    ifma->ifma_addr), ETHER_ADDR_LEN);
+
+		/* Just want the 6 least significant bits. */
+		crc &= 0x3f;
+
+		/* Set the corresponding bit in the hash table. */
+		mchash[crc >> 5] |= 1 << (crc & 0x1f);
+		count++;
+	}
+	IF_ADDR_UNLOCK(ifp);
+
+	/* Enable hash filtering only if at least one address was hashed. */
+	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
+	if (count > 0)
+		mode |= RM_ReceiveMulticastHash;
+	else
+		mode &= ~RM_ReceiveMulticastHash;
+
+	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
+	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
+	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
+}
+
+/*
+ * sysctl_int_range:
+ *
+ *	Generic integer sysctl handler: accept a new value only if it
+ *	lies within [low, high], otherwise return EINVAL.  arg1 points
+ *	at the int being controlled.
+ */
+static int
+sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
+{
+	int error, value;
+
+	if (!arg1)
+		return (EINVAL);
+	value = *(int *)arg1;
+	error = sysctl_handle_int(oidp, &value, 0, req);
+	/* Read-only access (no new value supplied): nothing to validate. */
+	if (error || !req->newptr)
+		return (error);
+	if (value < low || value > high)
+		return (EINVAL);
+	*(int *)arg1 = value;
+
+	return (0);
+}
+
+/* Sysctl handler: Rx interrupt frame-count moderation (1..255 frames). */
+static int
+sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
+{
+	return (sysctl_int_range(oidp, arg1, arg2, req,
+	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
+}
+
+/* Sysctl handler: Rx interrupt DMA-wait moderation (0..4194 us). */
+static int
+sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
+{
+	return (sysctl_int_range(oidp, arg1, arg2, req,
+	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
+}
diff --git a/sys/dev/stge/if_stgereg.h b/sys/dev/stge/if_stgereg.h
new file mode 100644
index 0000000..1354439
--- /dev/null
+++ b/sys/dev/stge/if_stgereg.h
@@ -0,0 +1,697 @@
+/* $NetBSD: if_stgereg.h,v 1.3 2003/02/10 21:10:07 christos Exp $ */
+
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+/*
+ * Sundance Technology PCI vendor ID
+ */
+#define	VENDOR_SUNDANCETI	0x13f0
+
+/*
+ * Tamarack Microelectronics PCI vendor ID
+ */
+#define	VENDOR_TAMARACK	0x143d
+
+/*
+ * D-Link Systems PCI vendor ID
+ */
+#define	VENDOR_DLINK	0x1186
+
+/*
+ * Antares Microsystems PCI vendor ID
+ */
+#define	VENDOR_ANTARES	0x1754
+
+/*
+ * Sundance Technology device ID
+ * (the Tamarack TC9021 IDs are listed here as well)
+ */
+#define	DEVICEID_SUNDANCETI_ST1023	0x1023
+#define	DEVICEID_SUNDANCETI_ST2021	0x2021
+#define	DEVICEID_TAMARACK_TC9021	0x1021
+#define	DEVICEID_TAMARACK_TC9021_ALT	0x9021
+
+/*
+ * D-Link Systems device ID
+ */
+#define	DEVICEID_DLINK_DL4000	0x4000
+
+/*
+ * Antares Microsystems device ID
+ */
+#define	DEVICEID_ANTARES_TC9021	0x1021
+
+/*
+ * Register description for the Sundance Tech. TC9021 10/100/1000
+ * Ethernet controller.
+ *
+ * Note that while DMA addresses are all in 64-bit fields, only
+ * the lower 40 bits of a DMA address are valid.
+ */
+#if (BUS_SPACE_MAXADDR < 0xFFFFFFFFFF)
+#define	STGE_DMA_MAXADDR	BUS_SPACE_MAXADDR
+#else
+#define	STGE_DMA_MAXADDR	0xFFFFFFFFFF
+#endif
+
+/*
+ * Register access macros.  sc_res[0] is the device's register
+ * resource (memory- or I/O-mapped).
+ */
+#define CSR_WRITE_4(_sc, reg, val)	\
+	bus_write_4((_sc)->sc_res[0], (reg), (val))
+#define CSR_WRITE_2(_sc, reg, val)	\
+	bus_write_2((_sc)->sc_res[0], (reg), (val))
+#define CSR_WRITE_1(_sc, reg, val)	\
+	bus_write_1((_sc)->sc_res[0], (reg), (val))
+
+#define CSR_READ_4(_sc, reg)	\
+	bus_read_4((_sc)->sc_res[0], (reg))
+#define CSR_READ_2(_sc, reg)	\
+	bus_read_2((_sc)->sc_res[0], (reg))
+#define CSR_READ_1(_sc, reg)	\
+	bus_read_1((_sc)->sc_res[0], (reg))
+
+/*
+ * TC9021 buffer fragment descriptor.
+ * The 64-bit word packs the buffer address (low bits) and the
+ * fragment length (bits 48+).
+ */
+struct stge_frag {
+	uint64_t	frag_word0;	/* address, length */
+};
+
+#define	FRAG_ADDR(x)	(((uint64_t)(x)) << 0)
+/*
+ * NOTE(review): this mask covers 36 address bits although the comment
+ * above documents 40-bit DMA addresses -- confirm against the datasheet.
+ */
+#define	FRAG_ADDR_MASK	FRAG_ADDR(0xfffffffffULL)
+#define	FRAG_LEN(x)	(((uint64_t)(x)) << 48)
+#define	FRAG_LEN_MASK	FRAG_LEN(0xffffULL)
+
+/*
+ * TC9021 Transmit Frame Descriptor.  Note the number of fragments
+ * here is arbitrary, but we can't have any more than 15.
+ */
+#define	STGE_NTXFRAGS	15
+struct stge_tfd {
+	uint64_t	tfd_next;	/* next TFD in list */
+	uint64_t	tfd_control;	/* control bits */
+	/* the buffer fragments */
+	struct stge_frag tfd_frags[STGE_NTXFRAGS];
+};
+
+/* tfd_control bit definitions. */
+#define	TFD_FrameId(x)		((x) << 0)
+#define	TFD_FrameId_MAX		0xffff
+#define	TFD_WordAlign(x)	((x) << 16)
+#define	TFD_WordAlign_dword	0	/* align to dword in TxFIFO */
+#define	TFD_WordAlign_word	2	/* align to word in TxFIFO */
+#define	TFD_WordAlign_disable	1	/* disable alignment */
+#define	TFD_TCPChecksumEnable	(1ULL << 18)
+#define	TFD_UDPChecksumEnable	(1ULL << 19)
+#define	TFD_IPChecksumEnable	(1ULL << 20)
+#define	TFD_FcsAppendDisable	(1ULL << 21)
+#define	TFD_TxIndicate		(1ULL << 22)
+#define	TFD_TxDMAIndicate	(1ULL << 23)
+#define	TFD_FragCount(x)	((x) << 24)
+#define	TFD_VLANTagInsert	(1ULL << 28)
+#define	TFD_TFDDone		(1ULL << 31)
+#define	TFD_VID(x)		(((uint64_t)(x)) << 32)
+#define	TFD_CFI			(1ULL << 44)
+#define	TFD_UserPriority(x)	(((uint64_t)(x)) << 45)
+
+/*
+ * TC9021 Receive Frame Descriptor.  Each RFD has a single fragment
+ * in it, and the chip tells us the beginning and end of the frame.
+ */
+struct stge_rfd {
+	uint64_t	rfd_next;	/* next RFD in list */
+	uint64_t	rfd_status;	/* status bits */
+	struct stge_frag rfd_frag;	/* the buffer */
+};
+
+/* Low word of rfd_status */
+#define	RFD_RxStatus(x)		((x) & 0xffffffff)
+#define	RFD_RxDMAFrameLen(x)	((x) & 0xffff)
+#define	RFD_RxFIFOOverrun	0x00010000
+#define	RFD_RxRuntFrame		0x00020000
+#define	RFD_RxAlignmentError	0x00040000
+#define	RFD_RxFCSError		0x00080000
+#define	RFD_RxOversizedFrame	0x00100000
+#define	RFD_RxLengthError	0x00200000
+#define	RFD_VLANDetected	0x00400000
+#define	RFD_TCPDetected		0x00800000
+#define	RFD_TCPError		0x01000000
+#define	RFD_UDPDetected		0x02000000
+#define	RFD_UDPError		0x04000000
+#define	RFD_IPDetected		0x08000000
+#define	RFD_IPError		0x10000000
+#define	RFD_FrameStart		0x20000000
+#define	RFD_FrameEnd		0x40000000
+#define	RFD_RFDDone		0x80000000
+/* High word of rfd_status: received VLAN tag control information. */
+#define	RFD_TCI(x)		((((uint64_t)(x)) >> 32) & 0xffff)
+
+/*
+ * EEPROM offsets (16-bit word addresses).
+ */
+#define	STGE_EEPROM_ConfigParam		0x00
+#define	STGE_EEPROM_AsicCtrl		0x01
+#define	STGE_EEPROM_SubSystemVendorId	0x02
+#define	STGE_EEPROM_SubSystemId		0x03
+#define	STGE_EEPROM_LEDMode		0x06
+/* Factory station (MAC) address, three 16-bit words. */
+#define	STGE_EEPROM_StationAddress0	0x10
+#define	STGE_EEPROM_StationAddress1	0x11
+#define	STGE_EEPROM_StationAddress2	0x12
+
+/*
+ * The TC9021 register space.
+ */
+
+#define	STGE_DMACtrl		0x00
+#define	DMAC_RxDMAComplete	(1U << 3)
+#define	DMAC_RxDMAPollNow	(1U << 4)
+#define	DMAC_TxDMAComplete	(1U << 11)
+#define	DMAC_TxDMAPollNow	(1U << 12)
+#define	DMAC_TxDMAInProg	(1U << 15)
+#define	DMAC_RxEarlyDisable	(1U << 16)
+#define	DMAC_MWIDisable		(1U << 18)
+#define	DMAC_TxWriteBackDisable	(1U << 19)
+#define	DMAC_TxBurstLimit(x)	((x) << 20)
+#define	DMAC_TargetAbort	(1U << 30)
+#define	DMAC_MasterAbort	(1U << 31)
+
+#define	STGE_RxDMAStatus	0x08
+
+#define	STGE_TFDListPtrLo	0x10
+
+#define	STGE_TFDListPtrHi	0x14
+
+#define	STGE_TxDMABurstThresh	0x18	/* 8-bit */
+
+#define	STGE_TxDMAUrgentThresh	0x19	/* 8-bit */
+
+#define	STGE_TxDMAPollPeriod	0x1a	/* 8-bit, 320ns increments */
+
+#define	STGE_RFDListPtrLo	0x1c
+
+#define	STGE_RFDListPtrHi	0x20
+
+#define	STGE_RxDMABurstThresh	0x24	/* 8-bit */
+
+#define	STGE_RxDMAUrgentThresh	0x25	/* 8-bit */
+
+#define	STGE_RxDMAPollPeriod	0x26	/* 8-bit, 320ns increments */
+
+#define	STGE_RxDMAIntCtrl	0x28
+#define	RDIC_RxFrameCount(x)	((x) & 0xff)
+#define	RDIC_PriorityThresh(x)	((x) << 10)
+#define	RDIC_RxDMAWaitTime(x)	((x) << 16)
+/*
+ * Number of receive frames transferred via DMA before a Rx interrupt is issued.
+ */
+#define	STGE_RXINT_NFRAME_DEFAULT	8
+#define	STGE_RXINT_NFRAME_MIN	1
+#define	STGE_RXINT_NFRAME_MAX	255
+/*
+ * Maximum amount of time (in 64ns increments) to wait before issuing a Rx
+ * interrupt if number of frames received is less than STGE_RXINT_NFRAME
+ * (STGE_RXINT_NFRAME_MIN <= STGE_RXINT_NFRAME <= STGE_RXINT_NFRAME_MAX)
+ */
+#define	STGE_RXINT_DMAWAIT_DEFAULT	30	/* 30us */
+#define	STGE_RXINT_DMAWAIT_MIN	0
+#define	STGE_RXINT_DMAWAIT_MAX	4194
+#define	STGE_RXINT_USECS2TICK(x)	(((x) * 1000)/64)
+
+#define	STGE_DebugCtrl		0x2c	/* 16-bit */
+#define	DC_GPIO0Ctrl		(1U << 0)
+#define	DC_GPIO1Ctrl		(1U << 1)
+#define	DC_GPIO0		(1U << 2)
+#define	DC_GPIO1		(1U << 3)
+
+#define	STGE_AsicCtrl		0x30
+#define	AC_ExpRomDisable	(1U << 0)
+#define	AC_ExpRomSize		(1U << 1)
+#define	AC_PhySpeed10		(1U << 4)
+#define	AC_PhySpeed100		(1U << 5)
+#define	AC_PhySpeed1000		(1U << 6)
+#define	AC_PhyMedia		(1U << 7)
+#define	AC_ForcedConfig(x)	((x) << 8)
+#define	AC_ForcedConfig_MASK	AC_ForcedConfig(7)
+#define	AC_D3ResetDisable	(1U << 11)
+#define	AC_SpeedupMode		(1U << 13)
+#define	AC_LEDMode		(1U << 14)
+#define	AC_RstOutPolarity	(1U << 15)
+#define	AC_GlobalReset		(1U << 16)
+#define	AC_RxReset		(1U << 17)
+#define	AC_TxReset		(1U << 18)
+#define	AC_DMA			(1U << 19)
+#define	AC_FIFO			(1U << 20)
+#define	AC_Network		(1U << 21)
+#define	AC_Host			(1U << 22)
+#define	AC_AutoInit		(1U << 23)
+#define	AC_RstOut		(1U << 24)
+#define	AC_InterruptRequest	(1U << 25)
+#define	AC_ResetBusy		(1U << 26)
+#define	AC_LEDSpeed		(1U << 27)
+#define	AC_LEDModeBit1		(1U << 29)
+
+#define	STGE_FIFOCtrl		0x38	/* 16-bit */
+#define	FC_RAMTestMode		(1U << 0)
+#define	FC_Transmitting		(1U << 14)
+#define	FC_Receiving		(1U << 15)
+
+#define	STGE_RxEarlyThresh	0x3a	/* 16-bit */
+
+#define	STGE_FlowOffThresh	0x3c	/* 16-bit */
+
+#define	STGE_FlowOnTresh	0x3e	/* 16-bit */	/* sic: "Tresh" */
+
+#define	STGE_TxStartThresh	0x44	/* 16-bit */
+
+#define	STGE_EepromData		0x48	/* 16-bit */
+
+#define	STGE_EepromCtrl		0x4a	/* 16-bit */
+#define	EC_EepromAddress(x)	((x) & 0xff)
+#define	EC_EepromOpcode(x)	((x) << 8)
+#define	EC_OP_WE		0
+#define	EC_OP_WR		1
+#define	EC_OP_RR		2
+#define	EC_OP_ER		3
+#define	EC_EepromBusy		(1U << 15)
+
+#define	STGE_ExpRomAddr		0x4c
+
+#define	STGE_ExpRomData		0x50	/* 8-bit */
+
+#define	STGE_WakeEvent		0x51	/* 8-bit */
+
+#define	STGE_Countdown		0x54
+#define	CD_Count(x)		((x) & 0xffff)
+#define	CD_CountdownSpeed	(1U << 24)
+#define	CD_CountdownMode	(1U << 25)
+#define	CD_CountdownIntEnabled	(1U << 26)
+
+#define	STGE_IntStatusAck	0x5a	/* 16-bit */
+
+#define	STGE_IntEnable		0x5c	/* 16-bit */
+
+#define	STGE_IntStatus		0x5e	/* 16-bit */
+
+/* Interrupt status/enable bits (shared bit layout). */
+#define	IS_InterruptStatus	(1U << 0)
+#define	IS_HostError		(1U << 1)
+#define	IS_TxComplete		(1U << 2)
+#define	IS_MACControlFrame	(1U << 3)
+#define	IS_RxComplete		(1U << 4)
+#define	IS_RxEarly		(1U << 5)
+#define	IS_InRequested		(1U << 6)
+#define	IS_UpdateStats		(1U << 7)
+#define	IS_LinkEvent		(1U << 8)
+#define	IS_TxDMAComplete	(1U << 9)
+#define	IS_RxDMAComplete	(1U << 10)
+#define	IS_RFDListEnd		(1U << 11)
+#define	IS_RxDMAPriority	(1U << 12)
+
+#define	STGE_TxStatus		0x60
+#define	TS_TxError		(1U << 0)
+#define	TS_LateCollision	(1U << 2)
+#define	TS_MaxCollisions	(1U << 3)
+#define	TS_TxUnderrun		(1U << 4)
+#define	TS_TxIndicateReqd	(1U << 6)
+#define	TS_TxComplete		(1U << 7)
+#define	TS_TxFrameId_get(x)	((x) >> 16)
+
+#define	STGE_MACCtrl		0x6c
+#define	MC_IFSSelect(x)		((x) & 3)
+#define	MC_IFS96bit		0
+#define	MC_IFS1024bit		1
+#define	MC_IFS1792bit		2
+#define	MC_IFS4352bit		3
+
+#define	MC_DuplexSelect		(1U << 5)
+#define	MC_RcvLargeFrames	(1U << 6)
+#define	MC_TxFlowControlEnable	(1U << 7)
+#define	MC_RxFlowControlEnable	(1U << 8)
+#define	MC_RcvFCS		(1U << 9)
+#define	MC_FIFOLoopback		(1U << 10)
+#define	MC_MACLoopback		(1U << 11)
+#define	MC_AutoVLANtagging	(1U << 12)
+#define	MC_AutoVLANuntagging	(1U << 13)
+#define	MC_CollisionDetect	(1U << 16)
+#define	MC_CarrierSense		(1U << 17)
+#define	MC_StatisticsEnable	(1U << 21)
+#define	MC_StatisticsDisable	(1U << 22)
+#define	MC_StatisticsEnabled	(1U << 23)
+#define	MC_TxEnable		(1U << 24)
+#define	MC_TxDisable		(1U << 25)
+#define	MC_TxEnabled		(1U << 26)
+#define	MC_RxEnable		(1U << 27)
+#define	MC_RxDisable		(1U << 28)
+#define	MC_RxEnabled		(1U << 29)
+#define	MC_Paused		(1U << 30)
+/* Mask of the defined (non-reserved) MACCtrl bits. */
+#define	MC_MASK			0x7fe33fa3
+
+#define	STGE_VLANTag		0x70
+
+#define	STGE_PhySet		0x75	/* 8-bit */
+#define	PS_MemLenb9b		(1U << 0)
+#define	PS_MemLen		(1U << 1)
+#define	PS_NonCompdet		(1U << 2)
+
+#define	STGE_PhyCtrl		0x76	/* 8-bit */
+#define	PC_MgmtClk		(1U << 0)
+#define	PC_MgmtData		(1U << 1)
+#define	PC_MgmtDir		(1U << 2)	/* MAC->PHY */
+#define	PC_PhyDuplexPolarity	(1U << 3)
+#define	PC_PhyDuplexStatus	(1U << 4)
+#define	PC_PhyLnkPolarity	(1U << 5)
+#define	PC_LinkSpeed(x)		(((x) >> 6) & 3)
+#define	PC_LinkSpeed_Down	0
+#define	PC_LinkSpeed_10		1
+#define	PC_LinkSpeed_100	2
+#define	PC_LinkSpeed_1000	3
+
+#define	STGE_StationAddress0	0x78	/* 16-bit */
+
+#define	STGE_StationAddress1	0x7a	/* 16-bit */
+
+#define	STGE_StationAddress2	0x7c	/* 16-bit */
+
+#define	STGE_VLANHashTable	0x7e	/* 16-bit */
+
+#define	STGE_VLANId		0x80
+
+#define	STGE_MaxFrameSize	0x86
+
+#define	STGE_ReceiveMode	0x88	/* 16-bit */
+#define	RM_ReceiveUnicast	(1U << 0)
+#define	RM_ReceiveMulticast	(1U << 1)
+#define	RM_ReceiveBroadcast	(1U << 2)
+#define	RM_ReceiveAllFrames	(1U << 3)
+#define	RM_ReceiveMulticastHash	(1U << 4)
+#define	RM_ReceiveIPMulticast	(1U << 5)
+#define	RM_ReceiveVLANMatch	(1U << 8)
+#define	RM_ReceiveVLANHash	(1U << 9)
+
+#define	STGE_HashTable0		0x8c
+
+#define	STGE_HashTable1		0x90
+
+#define	STGE_RMONStatisticsMask	0x98	/* set to disable */
+
+#define	STGE_StatisticsMask	0x9c	/* set to disable */
+
+#define	STGE_RxJumboFrames	0xbc	/* 16-bit */
+
+#define	STGE_TCPCheckSumErrors	0xc0	/* 16-bit */
+
+#define	STGE_IPCheckSumErrors	0xc2	/* 16-bit */
+
+#define	STGE_UDPCheckSumErrors	0xc4	/* 16-bit */
+
+#define	STGE_TxJumboFrames	0xf4	/* 16-bit */
+
+/*
+ * TC9021 statistics.  Available memory and I/O mapped.
+ * Counters not marked 16-bit are presumably 32-bit -- confirm
+ * against the datasheet.
+ */
+
+#define	STGE_OctetRcvOk			0xa8
+
+#define	STGE_McstOctetRcvdOk		0xac
+
+#define	STGE_BcstOctetRcvdOk		0xb0
+
+#define	STGE_FramesRcvdOk		0xb4
+
+#define	STGE_McstFramesRcvdOk		0xb8
+
+#define	STGE_BcstFramesRcvdOk		0xbe	/* 16-bit */
+
+#define	STGE_MacControlFramesRcvd	0xc6	/* 16-bit */
+
+#define	STGE_FrameTooLongErrors		0xc8	/* 16-bit */
+
+#define	STGE_InRangeLengthErrors	0xca	/* 16-bit */
+
+#define	STGE_FramesCheckSeqErrors	0xcc	/* 16-bit */
+
+#define	STGE_FramesLostRxErrors		0xce	/* 16-bit */
+
+#define	STGE_OctetXmtdOk		0xd0
+
+#define	STGE_McstOctetXmtdOk		0xd4
+
+#define	STGE_BcstOctetXmtdOk		0xd8
+
+#define	STGE_FramesXmtdOk		0xdc
+
+#define	STGE_McstFramesXmtdOk		0xe0
+
+#define	STGE_FramesWDeferredXmt		0xe4
+
+#define	STGE_LateCollisions		0xe8
+
+#define	STGE_MultiColFrames		0xec
+
+#define	STGE_SingleColFrames		0xf0
+
+#define	STGE_BcstFramesXmtdOk		0xf6	/* 16-bit */
+
+#define	STGE_CarrierSenseErrors		0xf8	/* 16-bit */
+
+#define	STGE_MacControlFramesXmtd	0xfa	/* 16-bit */
+
+#define	STGE_FramesAbortXSColls		0xfc	/* 16-bit */
+
+#define	STGE_FramesWEXDeferal		0xfe	/* 16-bit */
+
+/*
+ * RMON-compatible statistics.  Only accessible if memory-mapped.
+ */
+
+#define	STGE_EtherStatsCollisions			0x100
+
+#define	STGE_EtherStatsOctetsTransmit			0x104
+
+#define	STGE_EtherStatsPktsTransmit			0x108
+
+#define	STGE_EtherStatsPkts64OctetsTransmit		0x10c
+
+#define	STGE_EtherStatsPkts64to127OctetsTransmit	0x110
+
+#define	STGE_EtherStatsPkts128to255OctetsTransmit	0x114
+
+#define	STGE_EtherStatsPkts256to511OctetsTransmit	0x118
+
+#define	STGE_EtherStatsPkts512to1023OctetsTransmit	0x11c
+
+#define	STGE_EtherStatsPkts1024to1518OctetsTransmit	0x120
+
+#define	STGE_EtherStatsCRCAlignErrors			0x124
+
+#define	STGE_EtherStatsUndersizePkts			0x128
+
+#define	STGE_EtherStatsFragments			0x12c
+
+#define	STGE_EtherStatsJabbers				0x130
+
+#define	STGE_EtherStatsOctets				0x134
+
+#define	STGE_EtherStatsPkts				0x138
+
+#define	STGE_EtherStatsPkts64Octets			0x13c
+
+#define	STGE_EtherStatsPkts65to127Octets		0x140
+
+#define	STGE_EtherStatsPkts128to255Octets		0x144
+
+#define	STGE_EtherStatsPkts256to511Octets		0x148
+
+#define	STGE_EtherStatsPkts512to1023Octets		0x14c
+
+#define	STGE_EtherStatsPkts1024to1518Octets		0x150
+
+/*
+ * Transmit descriptor list size.
+ */
+#define	STGE_TX_RING_CNT	256
+/* Low/high watermarks used for Tx queue flow control decisions. */
+#define	STGE_TX_LOWAT		(STGE_TX_RING_CNT/32)
+#define	STGE_TX_HIWAT		(STGE_TX_RING_CNT - STGE_TX_LOWAT)
+
+/*
+ * Receive descriptor list size.
+ */
+#define	STGE_RX_RING_CNT	256
+
+#define	STGE_MAXTXSEGS		STGE_NTXFRAGS
+
+#define	STGE_JUMBO_FRAMELEN	9022
+#define	STGE_JUMBO_MTU	\
+	(STGE_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)
+
+/* Per-slot Tx software state. */
+struct stge_txdesc {
+	struct mbuf *tx_m;		/* head of our mbuf chain */
+	bus_dmamap_t tx_dmamap;		/* our DMA map */
+	STAILQ_ENTRY(stge_txdesc) tx_q;
+};
+
+STAILQ_HEAD(stge_txdq, stge_txdesc);
+
+/* Per-slot Rx software state. */
+struct stge_rxdesc {
+	struct mbuf *rx_m;
+	bus_dmamap_t rx_dmamap;
+};
+
+/* Split a 64-bit DMA address into the 32-bit register halves. */
+#define	STGE_ADDR_LO(x)		((u_int64_t) (x) & 0xffffffff)
+#define	STGE_ADDR_HI(x)		((u_int64_t) (x) >> 32)
+
+#define	STGE_RING_ALIGN		8
+
+/* DMA tags/maps and software ring bookkeeping. */
+struct stge_chain_data{
+	bus_dma_tag_t		stge_parent_tag;
+	bus_dma_tag_t		stge_tx_tag;
+	struct stge_txdesc	stge_txdesc[STGE_TX_RING_CNT];
+	struct stge_txdq	stge_txfreeq;	/* descriptors free to use */
+	struct stge_txdq	stge_txbusyq;	/* descriptors owned by hw */
+	bus_dma_tag_t		stge_rx_tag;
+	struct stge_rxdesc	stge_rxdesc[STGE_RX_RING_CNT];
+	bus_dma_tag_t		stge_tx_ring_tag;
+	bus_dmamap_t		stge_tx_ring_map;
+	bus_dma_tag_t		stge_rx_ring_tag;
+	bus_dmamap_t		stge_rx_ring_map;
+	bus_dmamap_t		stge_rx_sparemap;	/* for stge_newbuf() */
+
+	int			stge_tx_prod;
+	int			stge_tx_cons;
+	int			stge_tx_cnt;
+	int			stge_rx_cons;
+#ifdef DEVICE_POLLING
+	int			stge_rxcycles;
+#endif
+	/* In-progress Rx frame reassembly chain (see STGE_RXCHAIN_RESET). */
+	int			stge_rxlen;
+	struct mbuf		*stge_rxhead;
+	struct mbuf		*stge_rxtail;
+};
+
+/* Descriptor rings and their bus addresses. */
+struct stge_ring_data {
+	struct stge_tfd		*stge_tx_ring;
+	bus_addr_t		stge_tx_ring_paddr;
+	struct stge_rfd		*stge_rx_ring;
+	bus_addr_t		stge_rx_ring_paddr;
+};
+
+/* Bus address of descriptor i within the Tx/Rx ring. */
+#define	STGE_TX_RING_ADDR(sc, i)	\
+    ((sc)->sc_rdata.stge_tx_ring_paddr + sizeof(struct stge_tfd) * (i))
+#define	STGE_RX_RING_ADDR(sc, i)	\
+    ((sc)->sc_rdata.stge_rx_ring_paddr + sizeof(struct stge_rfd) * (i))
+
+#define	STGE_TX_RING_SZ		\
+    (sizeof(struct stge_tfd) * STGE_TX_RING_CNT)
+#define	STGE_RX_RING_SZ		\
+    (sizeof(struct stge_rfd) * STGE_RX_RING_CNT)
+
+/*
+ * Software state per device.
+ */
+struct stge_softc {
+	struct ifnet 		*sc_ifp;	/* interface info */
+	device_t		sc_dev;
+	device_t		sc_miibus;
+	struct resource		*sc_res[2];	/* [0] registers, [1] irq */
+	struct resource_spec	*sc_spec;
+	void			*sc_ih;		/* interrupt cookie */
+	int			sc_rev;		/* silicon revision */
+
+	struct callout		sc_tick_ch;	/* tick callout */
+
+	struct stge_chain_data	sc_cdata;
+	struct stge_ring_data	sc_rdata;
+	int			sc_if_flags;
+	int			sc_if_framesize;
+	int			sc_txthresh;	/* Tx threshold */
+	uint32_t		sc_usefiber:1;	/* if we're fiber */
+	uint32_t		sc_stge1023:1;	/* are we a 1023 */
+	uint32_t		sc_DMACtrl;	/* prototype DMACtrl reg. */
+	uint32_t		sc_MACCtrl;	/* prototype MacCtrl reg. */
+	uint16_t		sc_IntEnable;	/* prototype IntEnable reg. */
+	uint16_t		sc_led;		/* LED conf. from EEPROM */
+	uint8_t			sc_PhyCtrl;	/* prototype PhyCtrl reg. */
+	int			sc_suspended;
+	int			sc_detach;
+
+	int			sc_rxint_nframe;	/* Rx int. moderation */
+	int			sc_rxint_dmawait;
+	int			sc_nerr;	/* consecutive error count */
+
+	struct task		sc_link_task;
+	struct mtx		sc_mii_mtx;	/* MII mutex */
+	struct mtx		sc_mtx;		/* softc lock */
+};
+
+#define	STGE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
+#define	STGE_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx)
+#define	STGE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+#define	STGE_MII_LOCK(_sc)	mtx_lock(&(_sc)->sc_mii_mtx)
+#define	STGE_MII_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mii_mtx)
+
+/* Errors tolerated before the driver resets the chip (see sc_nerr). */
+#define	STGE_MAXERR		5
+
+/* Reset the in-progress Rx frame reassembly chain. */
+#define	STGE_RXCHAIN_RESET(_sc)	\
+do {	\
+	(_sc)->sc_cdata.stge_rxhead = NULL;	\
+	(_sc)->sc_cdata.stge_rxtail = NULL;	\
+	(_sc)->sc_cdata.stge_rxlen = 0;	\
+} while (/*CONSTCOND*/0)
+
+/* Iterations (x 10us DELAY) for MAC start/stop polling loops. */
+#define	STGE_TIMEOUT		1000
+
+/* Software-assembled MII management frame (bit-banged via PhyCtrl). */
+struct stge_mii_frame {
+	uint8_t	mii_stdelim;
+	uint8_t	mii_opcode;
+	uint8_t	mii_phyaddr;
+	uint8_t	mii_regaddr;
+	uint8_t	mii_turnaround;
+	uint16_t	mii_data;
+};
+
+/*
+ * MII constants
+ */
+#define	STGE_MII_STARTDELIM	0x01
+#define	STGE_MII_READOP		0x02
+#define	STGE_MII_WRITEOP	0x01
+#define	STGE_MII_TURNAROUND	0x02
+
+/* Flags selecting which units stge_reset() should reset. */
+#define	STGE_RESET_NONE		0x00
+#define	STGE_RESET_TX		0x01
+#define	STGE_RESET_RX		0x02
+#define	STGE_RESET_FULL		0x04
OpenPOWER on IntegriCloud