author     marcel <marcel@FreeBSD.org>    2008-06-07 22:58:32 +0000
committer  marcel <marcel@FreeBSD.org>    2008-06-07 22:58:32 +0000
commit     50176cfd31967b8c6e26f64bcd8555187b4a577b
tree       598b565eb8eff8ec4321f409588d85735bc4b385
parent     7cc97ffd512dc3393fb06812a6b553e964bcfcf7
Add support for the Apple Big Mac (BMAC) Ethernet controller,
found on various Apple G3 models.

Submitted by:	Nathan Whitehorn
Diffstat (limited to 'sys/dev/bm')
-rw-r--r--   sys/dev/bm/if_bm.c      1452
-rw-r--r--   sys/dev/bm/if_bmreg.h    176
-rw-r--r--   sys/dev/bm/if_bmvar.h    127
3 files changed, 1755 insertions, 0 deletions
diff --git a/sys/dev/bm/if_bm.c b/sys/dev/bm/if_bm.c
new file mode 100644
index 0000000..332884d
--- /dev/null
+++ b/sys/dev/bm/if_bm.c
@@ -0,0 +1,1452 @@
+/*-
+ * Copyright 2008 Nathan Whitehorn. All rights reserved.
+ * Copyright 2003 by Peter Grehan. All rights reserved.
+ * Copyright (C) 1998, 1999, 2000 Tsubai Masanari. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From:
+ * NetBSD: if_bm.c,v 1.9.2.1 2000/11/01 15:02:49 tv Exp
+ */
+
+/*
+ * BMAC/BMAC+ Macio cell 10/100 ethernet driver
+ * The low-cost, low-feature Apple variant of the Sun HME
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include <machine/pio.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/openfirm.h>
+#include <machine/dbdma.h>
+
+MODULE_DEPEND(bm, ether, 1, 1, 1);
+MODULE_DEPEND(bm, miibus, 1, 1, 1);
+
+/* "controller miibus0" required. See GENERIC if you get errors here. */
+#include "miibus_if.h"
+
+#include "if_bmreg.h"
+#include "if_bmvar.h"
+
+static int bm_probe (device_t);
+static int bm_attach (device_t);
+static int bm_detach (device_t);
+static void bm_shutdown (device_t);
+
+static void bm_start (struct ifnet *);
+static void bm_start_locked (struct ifnet *);
+static int bm_encap (struct bm_softc *sc, struct mbuf **m_head);
+static int bm_ioctl (struct ifnet *, u_long, caddr_t);
+static void bm_init (void *);
+static void bm_init_locked (struct bm_softc *sc);
+static void bm_chip_setup (struct bm_softc *sc);
+static void bm_stop (struct bm_softc *sc);
+static void bm_setladrf (struct bm_softc *sc);
+static void bm_dummypacket (struct bm_softc *sc);
+static void bm_txintr (void *xsc);
+static void bm_rxintr (void *xsc);
+
+static int bm_add_rxbuf (struct bm_softc *sc, int i);
+static int bm_add_rxbuf_dma (struct bm_softc *sc, int i);
+static void bm_enable_interrupts (struct bm_softc *sc);
+static void bm_disable_interrupts (struct bm_softc *sc);
+static void bm_tick (void *xsc);
+static int bm_watchdog (struct bm_softc *sc);
+
+static int bm_ifmedia_upd (struct ifnet *);
+static void bm_ifmedia_sts (struct ifnet *, struct ifmediareq *);
+
+static void bm_miicsr_dwrite (struct bm_softc *, u_int16_t);
+static void bm_mii_writebit (struct bm_softc *, int);
+static int bm_mii_readbit (struct bm_softc *);
+static void bm_mii_sync (struct bm_softc *);
+static void bm_mii_send (struct bm_softc *, u_int32_t, int);
+static int bm_mii_readreg (struct bm_softc *, struct bm_mii_frame *);
+static int bm_mii_writereg (struct bm_softc *, struct bm_mii_frame *);
+static int bm_miibus_readreg (device_t, int, int);
+static int bm_miibus_writereg (device_t, int, int, int);
+static void bm_miibus_statchg (device_t);
+
+static device_method_t bm_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, bm_probe),
+ DEVMETHOD(device_attach, bm_attach),
+ DEVMETHOD(device_detach, bm_detach),
+ DEVMETHOD(device_shutdown, bm_shutdown),
+
+ /* bus interface, for miibus */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, bm_miibus_readreg),
+ DEVMETHOD(miibus_writereg, bm_miibus_writereg),
+ DEVMETHOD(miibus_statchg, bm_miibus_statchg),
+ { 0, 0 }
+};
+
+static driver_t bm_macio_driver = {
+ "bm",
+ bm_methods,
+ sizeof(struct bm_softc)
+};
+
+static devclass_t bm_devclass;
+
+DRIVER_MODULE(bm, macio, bm_macio_driver, bm_devclass, 0, 0);
+DRIVER_MODULE(miibus, bm, miibus_driver, miibus_devclass, 0, 0);
+
+/*
+ * MII internal routines
+ */
+
+/*
+ * Write to the MII csr, introducing a delay to allow valid
+ * MII clock pulses to be formed
+ */
+static void
+bm_miicsr_dwrite(struct bm_softc *sc, u_int16_t val)
+{
+ CSR_WRITE_2(sc, BM_MII_CSR, val);
+ /*
+ * Assume this is a clock toggle and generate a 1us delay
+ * to cover both MII's 160ns high/low minimum and 400ns
+ * cycle minimum.
+ */
+ DELAY(1);
+}
+
+/*
+ * Write a bit to the MII bus.
+ */
+static void
+bm_mii_writebit(struct bm_softc *sc, int bit)
+{
+ u_int16_t regval;
+
+ regval = BM_MII_OENABLE;
+ if (bit)
+ regval |= BM_MII_DATAOUT;
+
+ bm_miicsr_dwrite(sc, regval);
+ bm_miicsr_dwrite(sc, regval | BM_MII_CLK);
+ bm_miicsr_dwrite(sc, regval);
+}
+
+/*
+ * Read a bit from the MII bus.
+ */
+static int
+bm_mii_readbit(struct bm_softc *sc)
+{
+ u_int16_t regval, bitin;
+
+ /* ~BM_MII_OENABLE */
+ regval = 0;
+
+ bm_miicsr_dwrite(sc, regval);
+ bm_miicsr_dwrite(sc, regval | BM_MII_CLK);
+ bm_miicsr_dwrite(sc, regval);
+ bitin = CSR_READ_2(sc, BM_MII_CSR) & BM_MII_DATAIN;
+
+ return (bitin == BM_MII_DATAIN);
+}
+
+/*
+ * Sync the PHYs by setting data bit and strobing the clock 32 times.
+ */
+static void
+bm_mii_sync(struct bm_softc *sc)
+{
+ int i;
+ u_int16_t regval;
+
+ regval = BM_MII_OENABLE | BM_MII_DATAOUT;
+
+ bm_miicsr_dwrite(sc, regval);
+ for (i = 0; i < 32; i++) {
+ bm_miicsr_dwrite(sc, regval | BM_MII_CLK);
+ bm_miicsr_dwrite(sc, regval);
+ }
+}
+
+/*
+ * Clock a series of bits through the MII.
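+ * Bits go out MSB first: bm_mii_send(sc, 0x2, 2), for example, clocks
+ * out a 1 followed by a 0.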
+ */
+static void
+bm_mii_send(struct bm_softc *sc, u_int32_t bits, int cnt)
+{
+ int i;
+
+ for (i = (0x1 << (cnt - 1)); i; i >>= 1)
+ bm_mii_writebit(sc, bits & i);
+}
+
+/*
+ * Read a PHY register through the MII.
+ */
+static int
+bm_mii_readreg(struct bm_softc *sc, struct bm_mii_frame *frame)
+{
+ int i, ack, bit;
+
+ BM_LOCK(sc);
+
+ /*
+ * Set up frame for RX.
+ */
+ frame->mii_stdelim = BM_MII_STARTDELIM;
+ frame->mii_opcode = BM_MII_READOP;
+ frame->mii_turnaround = 0;
+ frame->mii_data = 0;
+
+ /*
+ * Sync the PHYs
+ */
+ bm_mii_sync(sc);
+
+ /*
+ * Send command/address info
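+ * (a standard IEEE 802.3 clause-22 read frame: start delimiter 01,
+ * opcode 10, then the 5-bit PHY and 5-bit register addresses)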
+ */
+ bm_mii_send(sc, frame->mii_stdelim, 2);
+ bm_mii_send(sc, frame->mii_opcode, 2);
+ bm_mii_send(sc, frame->mii_phyaddr, 5);
+ bm_mii_send(sc, frame->mii_regaddr, 5);
+
+ /*
+ * Check for ack.
+ */
+ ack = bm_mii_readbit(sc);
+
+ /*
+ * Now try reading data bits. If the ack failed, we still
+ * need to clock through 16 cycles to keep the PHY(s) in sync.
+ */
+ for (i = 0x8000; i; i >>= 1) {
+ bit = bm_mii_readbit(sc);
+ if (!ack && bit)
+ frame->mii_data |= i;
+ }
+
+ /*
+ * Skip through idle bit-times
+ */
+ bm_mii_writebit(sc, 0);
+ bm_mii_writebit(sc, 0);
+
+ BM_UNLOCK(sc);
+
+ return ((ack) ? 1 : 0);
+}
+
+/*
+ * Write to a PHY register through the MII.
+ */
+static int
+bm_mii_writereg(struct bm_softc *sc, struct bm_mii_frame *frame)
+{
+ BM_LOCK(sc);
+
+ /*
+ * Set up frame for tx
+ */
+ frame->mii_stdelim = BM_MII_STARTDELIM;
+ frame->mii_opcode = BM_MII_WRITEOP;
+ frame->mii_turnaround = BM_MII_TURNAROUND;
+
+ /*
+ * Sync the phy and start the bitbang write sequence
+ */
+ bm_mii_sync(sc);
+
+ bm_mii_send(sc, frame->mii_stdelim, 2);
+ bm_mii_send(sc, frame->mii_opcode, 2);
+ bm_mii_send(sc, frame->mii_phyaddr, 5);
+ bm_mii_send(sc, frame->mii_regaddr, 5);
+ bm_mii_send(sc, frame->mii_turnaround, 2);
+ bm_mii_send(sc, frame->mii_data, 16);
+
+ /*
+ * Idle bit.
+ */
+ bm_mii_writebit(sc, 0);
+
+ BM_UNLOCK(sc);
+
+ return (0);
+}
+
+/*
+ * MII bus i/f
+ */
+static int
+bm_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct bm_softc *sc;
+ struct bm_mii_frame frame;
+
+ sc = device_get_softc(dev);
+ bzero(&frame, sizeof(frame));
+
+ frame.mii_phyaddr = phy;
+ frame.mii_regaddr = reg;
+
+ bm_mii_readreg(sc, &frame);
+
+ return (frame.mii_data);
+}
+
+static int
+bm_miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+ struct bm_softc *sc;
+ struct bm_mii_frame frame;
+
+ sc = device_get_softc(dev);
+ bzero(&frame, sizeof(frame));
+
+ frame.mii_phyaddr = phy;
+ frame.mii_regaddr = reg;
+ frame.mii_data = data;
+
+ bm_mii_writereg(sc, &frame);
+
+ return (0);
+}
+
+static void
+bm_miibus_statchg(device_t dev)
+{
+ struct bm_softc *sc = device_get_softc(dev);
+ uint16_t reg;
+ int new_duplex;
+
+ reg = CSR_READ_2(sc, BM_TX_CONFIG);
+ new_duplex = IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX;
+
+ if (new_duplex != sc->sc_duplex) {
+ /* Turn off TX MAC while we fiddle its settings */
+ reg &= ~BM_ENABLE;
+
+ CSR_WRITE_2(sc, BM_TX_CONFIG, reg);
+ while (CSR_READ_2(sc, BM_TX_CONFIG) & BM_ENABLE)
+ DELAY(10);
+ }
+
+ if (new_duplex && !sc->sc_duplex)
+ reg |= BM_TX_IGNORECOLL | BM_TX_FULLDPX;
+ else if (!new_duplex && sc->sc_duplex)
+ reg &= ~(BM_TX_IGNORECOLL | BM_TX_FULLDPX);
+
+ if (new_duplex != sc->sc_duplex) {
+ /* Turn TX MAC back on */
+ reg |= BM_ENABLE;
+
+ CSR_WRITE_2(sc, BM_TX_CONFIG, reg);
+ sc->sc_duplex = new_duplex;
+ }
+}
+
+/*
+ * ifmedia/mii callbacks
+ */
+static int
+bm_ifmedia_upd(struct ifnet *ifp)
+{
+ struct bm_softc *sc = ifp->if_softc;
+ int error;
+
+ BM_LOCK(sc);
+ error = mii_mediachg(sc->sc_mii);
+ BM_UNLOCK(sc);
+ return (error);
+}
+
+static void
+bm_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifm)
+{
+ struct bm_softc *sc = ifp->if_softc;
+
+ BM_LOCK(sc);
+ mii_pollstat(sc->sc_mii);
+ ifm->ifm_active = sc->sc_mii->mii_media_active;
+ ifm->ifm_status = sc->sc_mii->mii_media_status;
+ BM_UNLOCK(sc);
+}
+
+/*
+ * Macio probe/attach
+ */
+static int
+bm_probe(device_t dev)
+{
+ const char *dname = ofw_bus_get_name(dev);
+ const char *dcompat = ofw_bus_get_compat(dev);
+
+ /*
+ * BMAC+ cells have a name of "ethernet" and
+ * a compatible property of "bmac+"
+ */
+ if (strcmp(dname, "bmac") == 0) {
+ device_set_desc(dev, "Apple BMAC Ethernet Adaptor");
+ } else if (strcmp(dcompat, "bmac+") == 0) {
+ device_set_desc(dev, "Apple BMAC+ Ethernet Adaptor");
+ } else
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+bm_attach(device_t dev)
+{
+ phandle_t node;
+ u_char *eaddr;
+ struct ifnet *ifp;
+ int error, cellid, i;
+ struct bm_txsoft *txs;
+ struct bm_softc *sc = device_get_softc(dev);
+
+ ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
+ ifp->if_softc = sc;
+ sc->sc_dev = dev;
+ sc->sc_duplex = ~IFM_FDX;
+
+ error = 0;
+ mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+ MTX_DEF | MTX_RECURSE);
+ callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
+
+ /* Check for an improved version of Paddington */
+ sc->sc_streaming = 0;
+ cellid = -1;
+ node = ofw_bus_get_node(dev);
+
+ OF_getprop(node, "cell-id", &cellid, sizeof(cellid));
+ if (cellid >= 0xc4)
+ sc->sc_streaming = 1;
+
+ sc->sc_memrid = 0;
+ sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->sc_memrid, RF_ACTIVE);
+ if (sc->sc_memr == NULL) {
+ device_printf(dev, "Could not alloc chip registers!\n");
+ return (ENXIO);
+ }
+
+ sc->sc_btag = rman_get_bustag(sc->sc_memr);
+ sc->sc_bhandle = rman_get_bushandle(sc->sc_memr);
+
+ sc->sc_txdmarid = BM_TXDMA_REGISTERS;
+ sc->sc_rxdmarid = BM_RXDMA_REGISTERS;
+
+ sc->sc_txdmar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->sc_txdmarid, RF_ACTIVE);
+ sc->sc_rxdmar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->sc_rxdmarid, RF_ACTIVE);
+
+ if (sc->sc_txdmar == NULL || sc->sc_rxdmar == NULL) {
+ device_printf(dev, "Could not map DBDMA registers!\n");
+ return (ENXIO);
+ }
+
+ error = dbdma_allocate_channel(sc->sc_txdmar, bus_get_dma_tag(dev),
+ BM_MAX_DMA_COMMANDS, &sc->sc_txdma);
+ error += dbdma_allocate_channel(sc->sc_rxdmar, bus_get_dma_tag(dev),
+ BM_MAX_DMA_COMMANDS, &sc->sc_rxdma);
+
+ if (error) {
+ device_printf(dev,"Could not allocate DBDMA channel!\n");
+ return (ENXIO);
+ }
+
+ /* alloc DMA tags and buffers */
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+ BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
+ NULL, &sc->sc_pdma_tag);
+
+ if (error) {
+ device_printf(dev,"Could not allocate DMA tag!\n");
+ return (ENXIO);
+ }
+
+ error = bus_dma_tag_create(sc->sc_pdma_tag, 1, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES,
+ BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdma_tag);
+
+ if (error) {
+ device_printf(dev,"Could not allocate RX DMA channel!\n");
+ return (ENXIO);
+ }
+
+ error = bus_dma_tag_create(sc->sc_pdma_tag, 1, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * BM_NTXSEGS, BM_NTXSEGS,
+ MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdma_tag);
+
+ if (error) {
+ device_printf(dev,"Could not allocate TX DMA tag!\n");
+ return (ENXIO);
+ }
+
+ /* init transmit descriptors */
+ STAILQ_INIT(&sc->sc_txfreeq);
+ STAILQ_INIT(&sc->sc_txdirtyq);
+
+ /* create TX DMA maps */
+ error = ENOMEM;
+ for (i = 0; i < BM_MAX_TX_PACKETS; i++) {
+ txs = &sc->sc_txsoft[i];
+ txs->txs_mbuf = NULL;
+ error = bus_dmamap_create(sc->sc_tdma_tag, 0, &txs->txs_dmamap);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "unable to create TX DMA map %d, error = %d\n",
+ i, error);
+ }
+ STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
+ }
+
+ /* Create the receive buffer DMA maps. */
+ for (i = 0; i < BM_MAX_RX_PACKETS; i++) {
+ error = bus_dmamap_create(sc->sc_rdma_tag, 0,
+ &sc->sc_rxsoft[i].rxs_dmamap);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "unable to create RX DMA map %d, error = %d\n",
+ i, error);
+ }
+ sc->sc_rxsoft[i].rxs_mbuf = NULL;
+ }
+
+ /* alloc interrupt */
+
+ sc->sc_txdmairqid = BM_TXDMA_INTERRUPT;
+ sc->sc_txdmairq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->sc_txdmairqid, RF_ACTIVE);
+
+ if (sc->sc_txdmairq == NULL) {
+ device_printf(dev,"Could not allocate TX interrupt!\n");
+ return (ENXIO);
+ }
+
+ bus_setup_intr(dev,sc->sc_txdmairq,
+ INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY, NULL, bm_txintr, sc,
+ &sc->sc_txihtx);
+
+ sc->sc_rxdmairqid = BM_RXDMA_INTERRUPT;
+ sc->sc_rxdmairq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->sc_rxdmairqid, RF_ACTIVE);
+
+ if (sc->sc_rxdmairq == NULL) {
+ device_printf(dev,"Could not allocate RX interrupt!\n");
+ return (ENXIO);
+ }
+
+ bus_setup_intr(dev,sc->sc_rxdmairq,
+ INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY, NULL, bm_rxintr, sc,
+ &sc->sc_rxih);
+
+ /*
+ * Get the ethernet address from OpenFirmware
+ */
+ eaddr = sc->sc_enaddr;
+ OF_getprop(node, "local-mac-address", eaddr, ETHER_ADDR_LEN);
+
+ /* reset the adapter */
+ bm_chip_setup(sc);
+
+ /* setup MII */
+ error = mii_phy_probe(dev, &sc->sc_miibus, bm_ifmedia_upd,
+ bm_ifmedia_sts);
+ if (error != 0)
+ device_printf(dev,"PHY probe failed: %d\n", error);
+
+ sc->sc_mii = device_get_softc(sc->sc_miibus);
+
+ if_initname(ifp, device_get_name(sc->sc_dev),
+ device_get_unit(sc->sc_dev));
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_start = bm_start;
+ ifp->if_ioctl = bm_ioctl;
+ ifp->if_init = bm_init;
+ IFQ_SET_MAXLEN(&ifp->if_snd, BM_MAX_TX_PACKETS);
+ ifp->if_snd.ifq_drv_maxlen = BM_MAX_TX_PACKETS;
+ IFQ_SET_READY(&ifp->if_snd);
+
+ /* Attach the interface. */
+ ether_ifattach(ifp, sc->sc_enaddr);
+
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_header);
+ ifp->if_hwassist = 0;
+
+ return (0);
+}
+
+static int
+bm_detach(device_t dev)
+{
+ struct bm_softc *sc = device_get_softc(dev);
+
+ callout_drain(&sc->sc_tick_ch);
+
+ BM_LOCK(sc);
+ bm_stop(sc);
+
+ dbdma_free_channel(sc->sc_txdma);
+ dbdma_free_channel(sc->sc_rxdma);
+
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_txdmarid,
+ sc->sc_txdmar);
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rxdmarid,
+ sc->sc_rxdmar);
+
+ bus_teardown_intr(dev, sc->sc_txdmairq, sc->sc_txihtx);
+ bus_teardown_intr(dev, sc->sc_rxdmairq, sc->sc_rxih);
+ bus_release_resource(dev, SYS_RES_IRQ, sc->sc_txdmairqid,
+ sc->sc_txdmairq);
+ bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rxdmairqid,
+ sc->sc_rxdmairq);
+ BM_UNLOCK(sc);
+
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static void
+bm_shutdown(device_t dev)
+{
+ bm_stop(device_get_softc(dev));
+}
+
+static void
+bm_dummypacket(struct bm_softc *sc)
+{
+ struct mbuf *m;
+ struct ifnet *ifp;
+
+ ifp = sc->sc_ifp;
+
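+ /*
+ * Build a minimal self-addressed frame; bm_init_locked() sends it
+ * once after reset to unwedge the controller (see the comment there).
+ */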
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+
+ if (m == NULL)
+ return;
+
+ bcopy(sc->sc_enaddr,
+ mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
+ bcopy(sc->sc_enaddr,
+ mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
+ mtod(m, struct ether_header *)->ether_type = htons(3);
+ mtod(m, unsigned char *)[14] = 0;
+ mtod(m, unsigned char *)[15] = 0;
+ mtod(m, unsigned char *)[16] = 0xE3;
+ m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
+ IF_ENQUEUE(&ifp->if_snd, m);
+ bm_start(ifp);
+}
+
+static void
+bm_rxintr(void *xsc)
+{
+ struct bm_softc *sc = xsc;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct mbuf *m;
+ int i, prev_stop, new_stop;
+ uint16_t status;
+
+ BM_LOCK(sc);
+
+ status = dbdma_get_chan_status(sc->sc_rxdma);
+ if (status & DBDMA_STATUS_DEAD) {
+ dbdma_reset(sc->sc_rxdma);
+ BM_UNLOCK(sc);
+ return;
+ }
+ if (!(status & DBDMA_STATUS_RUN)) {
+ device_printf(sc->sc_dev,"Bad RX Interrupt!\n");
+ BM_UNLOCK(sc);
+ return;
+ }
+
+ prev_stop = sc->next_rxdma_slot - 1;
+ if (prev_stop < 0)
+ prev_stop = sc->rxdma_loop_slot - 1;
+
+ if (prev_stop < 0) {
+ BM_UNLOCK(sc);
+ return;
+ }
+
+ new_stop = -1;
+ dbdma_sync_commands(sc->sc_rxdma, BUS_DMASYNC_POSTREAD);
+
+ for (i = sc->next_rxdma_slot; i < BM_MAX_RX_PACKETS; i++) {
+ if (i == sc->rxdma_loop_slot)
+ i = 0;
+
+ if (i == prev_stop)
+ break;
+
+ status = dbdma_get_cmd_status(sc->sc_rxdma, i);
+
+ if (status == 0)
+ break;
+
+ m = sc->sc_rxsoft[i].rxs_mbuf;
+
+ if (bm_add_rxbuf(sc, i)) {
+ ifp->if_ierrors++;
+ m = NULL;
+ continue;
+ }
+
+ if (m == NULL)
+ continue;
+
+ ifp->if_ipackets++;
+ m->m_pkthdr.rcvif = ifp;
+ m->m_len -= (dbdma_get_residuals(sc->sc_rxdma, i) + 2);
+ m->m_pkthdr.len = m->m_len;
+
+ /* Send up the stack */
+ BM_UNLOCK(sc);
+ (*ifp->if_input)(ifp, m);
+ BM_LOCK(sc);
+
+ /* Clear all fields on this command */
+ bm_add_rxbuf_dma(sc, i);
+
+ new_stop = i;
+ }
+
+ /* Change the last packet we processed to the ring buffer terminator,
+ * and restore a receive buffer to the old terminator */
+ if (new_stop >= 0) {
+ dbdma_insert_stop(sc->sc_rxdma, new_stop);
+ bm_add_rxbuf_dma(sc, prev_stop);
+ if (i < sc->rxdma_loop_slot)
+ sc->next_rxdma_slot = i;
+ else
+ sc->next_rxdma_slot = 0;
+ }
+ dbdma_sync_commands(sc->sc_rxdma, BUS_DMASYNC_PREWRITE);
+
+ dbdma_wake(sc->sc_rxdma);
+
+ BM_UNLOCK(sc);
+}
+
+static void
+bm_txintr(void *xsc)
+{
+ struct bm_softc *sc = xsc;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct bm_txsoft *txs;
+ int progress = 0;
+
+ BM_LOCK(sc);
+
+ while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
+ if (!dbdma_get_cmd_status(sc->sc_txdma, txs->txs_lastdesc))
+ break;
+
+ STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
+ bus_dmamap_unload(sc->sc_tdma_tag, txs->txs_dmamap);
+
+ if (txs->txs_mbuf != NULL) {
+ m_freem(txs->txs_mbuf);
+ txs->txs_mbuf = NULL;
+ }
+
+ /* Set the first used TXDMA slot to the location of the
+ * STOP/NOP command associated with this packet. */
+
+ sc->first_used_txdma_slot = txs->txs_stopdesc;
+
+ STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
+
+ ifp->if_opackets++;
+ progress = 1;
+ }
+
+ if (progress) {
+ /*
+ * We freed some descriptors, so reset IFF_DRV_OACTIVE
+ * and restart.
+ */
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
+ !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ bm_start_locked(ifp);
+ }
+
+ BM_UNLOCK(sc);
+}
+
+static void
+bm_start(struct ifnet *ifp)
+{
+ struct bm_softc *sc = ifp->if_softc;
+
+ BM_LOCK(sc);
+ bm_start_locked(ifp);
+ BM_UNLOCK(sc);
+}
+
+static void
+bm_start_locked(struct ifnet *ifp)
+{
+ struct bm_softc *sc = ifp->if_softc;
+ struct mbuf *mb_head;
+ int prev_stop;
+ int txqueued = 0;
+
+ /*
+ * We lay out our DBDMA program in the following manner:
+ * OUTPUT_MORE
+ * ...
+ * OUTPUT_LAST (+ Interrupt)
+ * STOP
+ *
+ * To extend the channel, we append a new program,
+ * then replace STOP with NOP and wake the channel.
+ * If we stalled on the STOP already, the program proceeds,
+ * if not it will sail through the NOP.
+ */
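+ /*
+ * For example: after packet A is queued the program reads
+ * [OUTPUT_MORE][OUTPUT_LAST][STOP]; queueing packet B appends its own
+ * commands and STOP, and the dbdma_insert_nop() below rewrites A's
+ * STOP slot so the channel runs straight through it.
+ */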
+
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
+
+ if (mb_head == NULL)
+ break;
+
+ prev_stop = sc->next_txdma_slot - 1;
+
+ if (bm_encap(sc, &mb_head)) {
+ /* Put the packet back and stop */
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
+ break;
+ }
+
+ dbdma_insert_nop(sc->sc_txdma, prev_stop);
+
+ txqueued = 1;
+
+ BPF_MTAP(ifp, mb_head);
+ }
+
+ dbdma_sync_commands(sc->sc_txdma, BUS_DMASYNC_PREWRITE);
+
+ if (txqueued) {
+ dbdma_wake(sc->sc_txdma);
+ sc->sc_wdog_timer = 5;
+ }
+}
+
+static int
+bm_encap(struct bm_softc *sc, struct mbuf **m_head)
+{
+ bus_dma_segment_t segs[BM_NTXSEGS];
+ struct bm_txsoft *txs;
+ struct mbuf *m;
+ int nsegs = BM_NTXSEGS;
+ int error = 0;
+ uint8_t branch_type;
+ int i;
+
+ /* Limit the command size to the number of free DBDMA slots */
+
+ if (sc->next_txdma_slot >= sc->first_used_txdma_slot)
+ nsegs = BM_MAX_DMA_COMMANDS - 2 - sc->next_txdma_slot +
+ sc->first_used_txdma_slot; /* -2 for branch and indexing */
+ else
+ nsegs = sc->first_used_txdma_slot - sc->next_txdma_slot;
+
+ /* Remove one slot for the STOP/NOP terminator */
+ nsegs--;
+
+ if (nsegs > BM_NTXSEGS)
+ nsegs = BM_NTXSEGS;
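+ /*
+ * Worked example: with 256 slots, next_txdma_slot == 250 and
+ * first_used_txdma_slot == 10, 256 - 2 - 250 + 10 = 14 slots are
+ * free; reserving one for the STOP leaves 13 segments, under the
+ * BM_NTXSEGS cap of 16.
+ */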
+
+ /* Get a work queue entry. */
+ if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
+ /* Ran out of descriptors. */
+ return (ENOBUFS);
+ }
+
+ error = bus_dmamap_load_mbuf_sg(sc->sc_tdma_tag, txs->txs_dmamap,
+ *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (error == EFBIG) {
+ m = m_collapse(*m_head, M_DONTWAIT, nsegs);
+ if (m == NULL) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ *m_head = m;
+
+ error = bus_dmamap_load_mbuf_sg(sc->sc_tdma_tag,
+ txs->txs_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (error);
+ }
+ } else if (error != 0)
+ return (error);
+
+ if (nsegs == 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (EIO);
+ }
+
+ txs->txs_ndescs = nsegs;
+ txs->txs_firstdesc = sc->next_txdma_slot;
+
+ for (i = 0; i < nsegs; i++) {
+ /* Loop back to the beginning if this is our last slot */
+ if (sc->next_txdma_slot == (BM_MAX_DMA_COMMANDS - 1))
+ branch_type = DBDMA_ALWAYS;
+ else
+ branch_type = DBDMA_NEVER;
+
+ if (i+1 == nsegs)
+ txs->txs_lastdesc = sc->next_txdma_slot;
+
+ dbdma_insert_command(sc->sc_txdma, sc->next_txdma_slot++,
+ (i + 1 < nsegs) ? DBDMA_OUTPUT_MORE : DBDMA_OUTPUT_LAST,
+ 0, segs[i].ds_addr, segs[i].ds_len,
+ (i + 1 < nsegs) ? DBDMA_NEVER : DBDMA_ALWAYS,
+ branch_type, DBDMA_NEVER, 0);
+
+ if (branch_type == DBDMA_ALWAYS)
+ sc->next_txdma_slot = 0;
+ }
+
+ /* We have a corner case where the STOP command is the last slot,
+ * but you can't branch in STOP commands. So add a NOP branch here
+ * and the STOP in slot 0. */
+
+ if (sc->next_txdma_slot == (BM_MAX_DMA_COMMANDS - 1)) {
+ dbdma_insert_branch(sc->sc_txdma, sc->next_txdma_slot, 0);
+ sc->next_txdma_slot = 0;
+ }
+
+ txs->txs_stopdesc = sc->next_txdma_slot;
+ dbdma_insert_stop(sc->sc_txdma, sc->next_txdma_slot++);
+
+ STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
+ STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
+ txs->txs_mbuf = *m_head;
+
+ return (0);
+}
+
+static int
+bm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct bm_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int error;
+
+ error = 0;
+
+ switch(cmd) {
+ case SIOCSIFFLAGS:
+ BM_LOCK(sc);
+ if ((ifp->if_flags & IFF_UP) != 0) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
+ ((ifp->if_flags ^ sc->sc_ifpflags) &
+ (IFF_ALLMULTI | IFF_PROMISC)) != 0)
+ bm_setladrf(sc);
+ else
+ bm_init_locked(sc);
+ } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ bm_stop(sc);
+ sc->sc_ifpflags = ifp->if_flags;
+ BM_UNLOCK(sc);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ BM_LOCK(sc);
+ bm_setladrf(sc);
+ BM_UNLOCK(sc);
+ break;
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
+ break;
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (error);
+}
+
+static void
+bm_setladrf(struct bm_softc *sc)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ifmultiaddr *inm;
+ uint16_t hash[4];
+ uint16_t reg;
+ uint32_t crc;
+
+ reg = BM_CRC_ENABLE | BM_REJECT_OWN_PKTS;
+
+ /* Turn off RX MAC while we fiddle its settings */
+ CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
+ while (CSR_READ_2(sc, BM_RX_CONFIG) & BM_ENABLE)
+ DELAY(10);
+
+ if ((ifp->if_flags & IFF_PROMISC) != 0) {
+ reg |= BM_PROMISC;
+
+ CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
+
+ DELAY(15);
+
+ reg = CSR_READ_2(sc, BM_RX_CONFIG);
+ reg |= BM_ENABLE;
+ CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
+ return;
+ }
+
+ if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
+ hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
+ } else {
+ /* Clear the hash table. */
+ memset(hash, 0, sizeof(hash));
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
+ if (inm->ifma_addr->sa_family != AF_LINK)
+ continue;
+ crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
+ inm->ifma_addr), ETHER_ADDR_LEN);
+
+ /* We just want the 6 most significant bits */
+ crc >>= 26;
+
+ /* Set the corresponding bit in the filter. */
+ hash[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+
+ /* Write out new hash table */
+ CSR_WRITE_2(sc, BM_HASHTAB0, hash[0]);
+ CSR_WRITE_2(sc, BM_HASHTAB1, hash[1]);
+ CSR_WRITE_2(sc, BM_HASHTAB2, hash[2]);
+ CSR_WRITE_2(sc, BM_HASHTAB3, hash[3]);
+
+ /* And turn the RX MAC back on, this time with the hash bit set */
+ reg |= BM_HASH_FILTER_ENABLE;
+ CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
+
+ while (!(CSR_READ_2(sc, BM_RX_CONFIG) & BM_HASH_FILTER_ENABLE))
+ DELAY(10);
+
+ reg = CSR_READ_2(sc, BM_RX_CONFIG);
+ reg |= BM_ENABLE;
+ CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
+}
+
+static void
+bm_init(void *xsc)
+{
+ struct bm_softc *sc = xsc;
+
+ BM_LOCK(sc);
+ bm_init_locked(sc);
+ BM_UNLOCK(sc);
+}
+
+static void
+bm_chip_setup(struct bm_softc *sc)
+{
+ uint16_t reg;
+ uint16_t *eaddr_sect;
+ char hrow_path[128];
+ ihandle_t hrow_ih;
+
+ eaddr_sect = (uint16_t *)(sc->sc_enaddr);
+
+ /* Enable BMAC cell */
+ OF_package_to_path(OF_parent(ofw_bus_get_node(sc->sc_dev)),
+ hrow_path, sizeof(hrow_path));
+ hrow_ih = OF_open(hrow_path);
+ if (hrow_ih == -1) {
+ device_printf(sc->sc_dev,
+ "Enabling BMAC cell failed! Hoping it's already active.\n");
+ } else {
+ OF_call_method("enable-enet", hrow_ih, 0, 0);
+ OF_close(hrow_ih);
+ }
+
+ /* Reset chip */
+ CSR_WRITE_2(sc, BM_RX_RESET, 0x0000);
+ CSR_WRITE_2(sc, BM_TX_RESET, 0x0001);
+ do {
+ reg = CSR_READ_2(sc, BM_TX_RESET);
+ } while (reg & 0x0001);
+
+ /* Some random junk. OS X uses the system time. We use
+ * the low 16 bits of the MAC address. */
+ CSR_WRITE_2(sc, BM_TX_RANDSEED, eaddr_sect[2]);
+
+ /* Enable transmit */
+ reg = CSR_READ_2(sc, BM_TX_IFC);
+ reg |= BM_ENABLE;
+ CSR_WRITE_2(sc, BM_TX_IFC, reg);
+
+ CSR_READ_2(sc, BM_TX_PEAKCNT);
+}
+
+static void
+bm_stop(struct bm_softc *sc)
+{
+ struct bm_txsoft *txs;
+ uint16_t reg;
+
+ /* Disable TX and RX MACs */
+ reg = CSR_READ_2(sc, BM_TX_CONFIG);
+ reg &= ~BM_ENABLE;
+ CSR_WRITE_2(sc, BM_TX_CONFIG, reg);
+
+ reg = CSR_READ_2(sc, BM_RX_CONFIG);
+ reg &= ~BM_ENABLE;
+ CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
+
+ DELAY(100);
+
+ /* Stop DMA engine */
+ dbdma_stop(sc->sc_rxdma);
+ dbdma_stop(sc->sc_txdma);
+ sc->next_rxdma_slot = 0;
+ sc->rxdma_loop_slot = 0;
+
+ /* Disable interrupts */
+ bm_disable_interrupts(sc);
+
+ /* Don't worry about pending transmits anymore */
+ while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
+ STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
+ if (txs->txs_ndescs != 0) {
+ bus_dmamap_sync(sc->sc_tdma_tag, txs->txs_dmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_tdma_tag, txs->txs_dmamap);
+ if (txs->txs_mbuf != NULL) {
+ m_freem(txs->txs_mbuf);
+ txs->txs_mbuf = NULL;
+ }
+ }
+ STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
+ }
+
+ /* And we're down */
+ sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_wdog_timer = 0;
+}
+
+static void
+bm_init_locked(struct bm_softc *sc)
+{
+ uint16_t reg;
+ uint16_t *eaddr_sect;
+ struct bm_rxsoft *rxs;
+ int i;
+
+ eaddr_sect = (uint16_t *)(sc->sc_enaddr);
+
+ /* Zero RX slot info and stop DMA */
+ dbdma_stop(sc->sc_rxdma);
+ dbdma_stop(sc->sc_txdma);
+ sc->next_rxdma_slot = 0;
+ sc->rxdma_loop_slot = 0;
+
+ /* Initialize TX/RX DBDMA programs */
+ dbdma_insert_stop(sc->sc_rxdma, 0);
+ dbdma_insert_stop(sc->sc_txdma, 0);
+ dbdma_set_current_cmd(sc->sc_rxdma, 0);
+ dbdma_set_current_cmd(sc->sc_txdma, 0);
+
+ sc->next_rxdma_slot = 0;
+ sc->next_txdma_slot = 1;
+ sc->first_used_txdma_slot = 0;
+
+ for (i = 0; i < BM_MAX_RX_PACKETS; i++) {
+ rxs = &sc->sc_rxsoft[i];
+ rxs->dbdma_slot = i;
+
+ if (rxs->rxs_mbuf == NULL) {
+ bm_add_rxbuf(sc, i);
+
+ if (rxs->rxs_mbuf == NULL) {
+ /* If we can't add any more, mark the problem */
+ rxs->dbdma_slot = -1;
+ break;
+ }
+ }
+
+ if (i > 0)
+ bm_add_rxbuf_dma(sc, i);
+ }
+
+ /*
+ * Now terminate the RX ring buffer, and follow with the loop to
+ * the beginning.
+ */
+ dbdma_insert_stop(sc->sc_rxdma, i - 1);
+ dbdma_insert_branch(sc->sc_rxdma, i, 0);
+ sc->rxdma_loop_slot = i;
+
+ /* Now add in the first element of the RX DMA chain */
+ bm_add_rxbuf_dma(sc, 0);
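+ /*
+ * The RX program now reads: slots 0 through (loop - 2) are INPUT_LAST
+ * commands, slot (loop - 1) holds the STOP that bm_rxintr() walks
+ * around the ring, and the loop slot branches back to slot 0.
+ */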
+
+ dbdma_sync_commands(sc->sc_rxdma, BUS_DMASYNC_PREWRITE);
+ dbdma_sync_commands(sc->sc_txdma, BUS_DMASYNC_PREWRITE);
+
+ /* Zero collision counters */
+ CSR_WRITE_2(sc, BM_TX_NCCNT, 0);
+ CSR_WRITE_2(sc, BM_TX_FCCNT, 0);
+ CSR_WRITE_2(sc, BM_TX_EXCNT, 0);
+ CSR_WRITE_2(sc, BM_TX_LTCNT, 0);
+
+ /* Zero receive counters */
+ CSR_WRITE_2(sc, BM_RX_FRCNT, 0);
+ CSR_WRITE_2(sc, BM_RX_LECNT, 0);
+ CSR_WRITE_2(sc, BM_RX_AECNT, 0);
+ CSR_WRITE_2(sc, BM_RX_FECNT, 0);
+ CSR_WRITE_2(sc, BM_RXCV, 0);
+
+ /* Prime transmit */
+ CSR_WRITE_2(sc, BM_TX_THRESH, 0xff);
+
+ CSR_WRITE_2(sc, BM_TXFIFO_CSR, 0);
+ CSR_WRITE_2(sc, BM_TXFIFO_CSR, 0x0001);
+
+ /* Prime receive */
+ CSR_WRITE_2(sc, BM_RXFIFO_CSR, 0);
+ CSR_WRITE_2(sc, BM_RXFIFO_CSR, 0x0001);
+
+ /* Clear status reg */
+ CSR_READ_2(sc, BM_STATUS);
+
+ /* Zero hash filters */
+ CSR_WRITE_2(sc, BM_HASHTAB0, 0);
+ CSR_WRITE_2(sc, BM_HASHTAB1, 0);
+ CSR_WRITE_2(sc, BM_HASHTAB2, 0);
+ CSR_WRITE_2(sc, BM_HASHTAB3, 0);
+
+ /* Write MAC address to chip */
+ CSR_WRITE_2(sc, BM_MACADDR0, eaddr_sect[0]);
+ CSR_WRITE_2(sc, BM_MACADDR1, eaddr_sect[1]);
+ CSR_WRITE_2(sc, BM_MACADDR2, eaddr_sect[2]);
+
+ /* Final receive engine setup */
+ reg = BM_CRC_ENABLE | BM_REJECT_OWN_PKTS | BM_HASH_FILTER_ENABLE;
+ CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
+
+ /* Now turn it all on! */
+ dbdma_reset(sc->sc_rxdma);
+ dbdma_reset(sc->sc_txdma);
+
+ /* Enable RX and TX MACs. Setting the address filter has
+ * the side effect of enabling the RX MAC. */
+ bm_setladrf(sc);
+
+ reg = CSR_READ_2(sc, BM_TX_CONFIG);
+ reg |= BM_ENABLE;
+ CSR_WRITE_2(sc, BM_TX_CONFIG, reg);
+
+ /*
+ * Enable interrupts, unwedge the controller with a dummy packet,
+ * and nudge the DMA queue.
+ */
+ bm_enable_interrupts(sc);
+ bm_dummypacket(sc);
+ dbdma_wake(sc->sc_rxdma); /* Nudge RXDMA */
+
+ sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ sc->sc_ifpflags = sc->sc_ifp->if_flags;
+
+ /* Resync PHY and MAC states */
+ sc->sc_mii = device_get_softc(sc->sc_miibus);
+ sc->sc_duplex = ~IFM_FDX;
+ mii_mediachg(sc->sc_mii);
+
+ /* Start the one second timer. */
+ sc->sc_wdog_timer = 0;
+ callout_reset(&sc->sc_tick_ch, hz, bm_tick, sc);
+}
+
+static void
+bm_tick(void *arg)
+{
+ struct bm_softc *sc = arg;
+
+ /* Read error counters */
+ sc->sc_ifp->if_collisions += CSR_READ_2(sc, BM_TX_NCCNT) +
+ CSR_READ_2(sc, BM_TX_FCCNT) + CSR_READ_2(sc, BM_TX_EXCNT) +
+ CSR_READ_2(sc, BM_TX_LTCNT);
+
+ sc->sc_ifp->if_ierrors += CSR_READ_2(sc, BM_RX_LECNT) +
+ CSR_READ_2(sc, BM_RX_AECNT) + CSR_READ_2(sc, BM_RX_FECNT);
+
+ /* Zero collision counters */
+ CSR_WRITE_2(sc, BM_TX_NCCNT, 0);
+ CSR_WRITE_2(sc, BM_TX_FCCNT, 0);
+ CSR_WRITE_2(sc, BM_TX_EXCNT, 0);
+ CSR_WRITE_2(sc, BM_TX_LTCNT, 0);
+
+ /* Zero receive counters */
+ CSR_WRITE_2(sc, BM_RX_FRCNT, 0);
+ CSR_WRITE_2(sc, BM_RX_LECNT, 0);
+ CSR_WRITE_2(sc, BM_RX_AECNT, 0);
+ CSR_WRITE_2(sc, BM_RX_FECNT, 0);
+ CSR_WRITE_2(sc, BM_RXCV, 0);
+
+ /* Check for link changes and run watchdog */
+ mii_tick(sc->sc_mii);
+ bm_miibus_statchg(sc->sc_dev);
+
+ if (bm_watchdog(sc) == EJUSTRETURN)
+ return;
+
+ callout_reset(&sc->sc_tick_ch, hz, bm_tick, sc);
+}
+
+static int
+bm_watchdog(struct bm_softc *sc)
+{
+ if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
+ return (0);
+
+ device_printf(sc->sc_dev, "device timeout\n");
+
+ bm_init_locked(sc);
+ return (EJUSTRETURN);
+}
+
+static int
+bm_add_rxbuf(struct bm_softc *sc, int idx)
+{
+ struct bm_rxsoft *rxs = &sc->sc_rxsoft[idx];
+ struct mbuf *m;
+ bus_dma_segment_t segs[1];
+ int error, nsegs;
+
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ return (ENOBUFS);
+ m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+
+ if (rxs->rxs_mbuf != NULL) {
+ bus_dmamap_sync(sc->sc_rdma_tag, rxs->rxs_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_rdma_tag, rxs->rxs_dmamap);
+ }
+
+ error = bus_dmamap_load_mbuf_sg(sc->sc_rdma_tag, rxs->rxs_dmamap, m,
+ segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "cannot load RS DMA map %d, error = %d\n", idx, error);
+ m_freem(m);
+ return (error);
+ }
+ /* If nsegs is wrong then the stack is corrupt. */
+ KASSERT(nsegs == 1,
+ ("%s: too many DMA segments (%d)", __func__, nsegs));
+ rxs->rxs_mbuf = m;
+ rxs->segment = segs[0];
+
+ bus_dmamap_sync(sc->sc_rdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);
+
+ return (0);
+}
+
+static int
+bm_add_rxbuf_dma(struct bm_softc *sc, int idx)
+{
+ struct bm_rxsoft *rxs = &sc->sc_rxsoft[idx];
+
+ dbdma_insert_command(sc->sc_rxdma, idx, DBDMA_INPUT_LAST, 0,
+ rxs->segment.ds_addr, rxs->segment.ds_len, DBDMA_ALWAYS,
+ DBDMA_NEVER, DBDMA_NEVER, 0);
+
+ return (0);
+}
+
+static void
+bm_enable_interrupts(struct bm_softc *sc)
+{
+ CSR_WRITE_2(sc, BM_INTR_DISABLE,
+ (sc->sc_streaming) ? BM_INTR_NONE : BM_INTR_NORMAL);
+}
+
+static void
+bm_disable_interrupts(struct bm_softc *sc)
+{
+ CSR_WRITE_2(sc, BM_INTR_DISABLE, BM_INTR_NONE);
+}
diff --git a/sys/dev/bm/if_bmreg.h b/sys/dev/bm/if_bmreg.h
new file mode 100644
index 0000000..5507cdd
--- /dev/null
+++ b/sys/dev/bm/if_bmreg.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 1991-1998 by Open Software Foundation, Inc.
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright 2003 by Peter Grehan. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * BMAC resource indices
+ */
+
+#define BM_MAIN_REGISTERS 0
+#define BM_TXDMA_REGISTERS 1
+#define BM_RXDMA_REGISTERS 2
+
+#define BM_MAIN_INTERRUPT 0
+#define BM_TXDMA_INTERRUPT 1
+#define BM_RXDMA_INTERRUPT 2
+
+/*
+ * BMAC/BMAC+ register offsets
+ */
+
+#define BM_TX_IFC 0x0000 /* interface control */
+#define BM_TXFIFO_CSR 0x0100 /* TX FIFO control/status */
+#define BM_TX_THRESH 0x0110 /* TX threshold */
+#define BM_RXFIFO_CSR 0x0120 /* receive FIFO control/status */
+#define BM_MEMADD 0x0130 /* unused */
+#define BM_MEMDATA_HI 0x0140 /* unused */
+#define BM_MEMDATA_LO 0x0150 /* unused */
+#define BM_XCVR 0x0160 /* transceiver control register */
+#define BM_CHIPID 0x0170 /* chip ID */
+#define BM_MII_CSR 0x0180 /* MII control register */
+#define BM_SROM_CSR 0x0190 /* unused, OFW provides enet addr */
+#define BM_TX_PTR 0x01A0 /* unused */
+#define BM_RX_PTR 0x01B0 /* unused */
+#define BM_STATUS 0x01C0 /* status register */
+#define BM_INTR_DISABLE 0x0200 /* interrupt control register */
+#define BM_TX_RESET 0x0420 /* TX reset */
+#define BM_TX_CONFIG 0x0430 /* TX config */
+#define BM_IPG1 0x0440 /* inter-packet gap hi */
+#define BM_IPG2 0x0450 /* inter-packet gap lo */
+#define BM_TX_ALIMIT 0x0460 /* TX attempt limit */
+#define BM_TX_STIME 0x0470 /* TX slot time */
+#define BM_TX_PASIZE 0x0480 /* TX preamble size */
+#define BM_TX_PAPAT 0x0490 /* TX preamble pattern */
+#define BM_TX_SFD 0x04A0 /* TX start-frame delimiter */
+#define BM_JAMSIZE 0x04B0 /* collision jam size */
+#define BM_TX_MAXLEN 0x04C0 /* max TX packet length */
+#define BM_TX_MINLEN 0x04D0 /* min TX packet length */
+#define BM_TX_PEAKCNT 0x04E0 /* TX peak attempts count */
+#define BM_TX_DCNT 0x04F0 /* TX defer timer */
+#define BM_TX_NCCNT 0x0500 /* TX normal collision cnt */
+#define BM_TX_FCCNT 0x0510 /* TX first collision cnt */
+#define BM_TX_EXCNT 0x0520 /* TX excess collision cnt */
+#define BM_TX_LTCNT 0x0530 /* TX late collision cnt */
+#define BM_TX_RANDSEED 0x0540 /* TX random seed */
+#define BM_TXSM 0x0550 /* TX state machine */
+#define BM_RX_RESET 0x0620 /* RX reset */
+#define BM_RX_CONFIG 0x0630 /* RX config */
+#define BM_RX_MAXLEN 0x0640 /* max RX packet length */
+#define BM_RX_MINLEN 0x0650 /* min RX packet length */
+#define BM_MACADDR2 0x0660 /* MAC address */
+#define BM_MACADDR1 0x0670
+#define BM_MACADDR0 0x0680
+#define BM_RX_FRCNT 0x0690 /* RX frame count */
+#define BM_RX_LECNT 0x06A0 /* RX too-long frame count */
+#define BM_RX_AECNT 0x06B0 /* RX misaligned frame count */
+#define BM_RX_FECNT 0x06C0 /* RX CRC error count */
+#define BM_RXSM 0x06D0 /* RX state machine */
+#define BM_RXCV 0x06E0 /* RX code violations */
+#define BM_HASHTAB3 0x0700 /* Address hash table */
+#define BM_HASHTAB2 0x0710
+#define BM_HASHTAB1 0x0720
+#define BM_HASHTAB0 0x0730
+#define BM_AFILTER2 0x0740 /* Address filter */
+#define BM_AFILTER1 0x0750
+#define BM_AFILTER0 0x0760
+#define BM_AFILTER_MASK 0x0770
+
+/*
+ * MII control register bits
+ */
+#define BM_MII_CLK 0x0001 /* MDIO clock */
+#define BM_MII_DATAOUT 0x0002 /* MDIO data out */
+#define BM_MII_OENABLE 0x0004 /* MDIO output enable */
+#define BM_MII_DATAIN 0x0008 /* MDIO data in */
+
+/*
+ * MII constants
+ */
+#define BM_MII_STARTDELIM 0x01
+#define BM_MII_READOP 0x02
+#define BM_MII_WRITEOP 0x01
+#define BM_MII_TURNAROUND 0x02
+
+/*
+ * Various flags
+ */
+
+#define BM_ENABLE 0x0001
+
+#define BM_CRC_ENABLE 0x0100
+#define BM_HASH_FILTER_ENABLE 0x0200
+#define BM_REJECT_OWN_PKTS 0x0800
+#define BM_PROMISC 0x0040
+
+#define BM_TX_FULLDPX 0x0200
+#define BM_TX_IGNORECOLL 0x0040
+
+#define BM_INTR_PKT_RX 0x0001
+#define BM_INTR_PKT_TX 0x0100
+#define BM_INTR_TX_UNDERRUN 0x0200
+
+#define BM_INTR_NORMAL ~(BM_INTR_PKT_TX | BM_INTR_TX_UNDERRUN)
+#define BM_INTR_NONE 0xffff
+
+/*
+ * register space access macros
+ */
+#define CSR_WRITE_4(sc, reg, val) \
+ bus_space_write_4(sc->sc_btag, sc->sc_bhandle, reg, val)
+#define CSR_WRITE_2(sc, reg, val) \
+ bus_space_write_2(sc->sc_btag, sc->sc_bhandle, reg, val)
+#define CSR_WRITE_1(sc, reg, val) \
+ bus_space_write_1(sc->sc_btag, sc->sc_bhandle, reg, val)
+
+#define CSR_READ_4(sc, reg) \
+ bus_space_read_4(sc->sc_btag, sc->sc_bhandle, reg)
+#define CSR_READ_2(sc, reg) \
+ bus_space_read_2(sc->sc_btag, sc->sc_bhandle, reg)
+#define CSR_READ_1(sc, reg) \
+ bus_space_read_1(sc->sc_btag, sc->sc_bhandle, reg)
+
diff --git a/sys/dev/bm/if_bmvar.h b/sys/dev/bm/if_bmvar.h
new file mode 100644
index 0000000..c256f30
--- /dev/null
+++ b/sys/dev/bm/if_bmvar.h
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (c) 2008 Nathan Whitehorn
+ * Copyright (c) 2003 Peter Grehan
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Number of transmit/receive DBDMA descriptors.
+ * XXX allow override with a tunable?
+ */
+#define BM_MAX_DMA_COMMANDS 256
+#define BM_NTXSEGS 16
+
+#define BM_MAX_TX_PACKETS 100
+#define BM_MAX_RX_PACKETS 100
+
+/*
+ * Mutex macros
+ */
+#define BM_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define BM_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+
+/*
+ * software state for transmit job mbufs (may be elements of mbuf chains)
+ */
+
+struct bm_txsoft {
+ struct mbuf *txs_mbuf; /* head of our mbuf chain */
+ bus_dmamap_t txs_dmamap; /* our DMA map */
+ int txs_firstdesc; /* first descriptor in packet */
+ int txs_lastdesc; /* last descriptor in packet */
+ int txs_stopdesc; /* the location of the closing STOP */
+
+ int txs_ndescs; /* number of descriptors */
+ STAILQ_ENTRY(bm_txsoft) txs_q;
+};
+
+STAILQ_HEAD(bm_txsq, bm_txsoft);
+
+/*
+ * software state for receive jobs
+ */
+struct bm_rxsoft {
+ struct mbuf *rxs_mbuf; /* head of our mbuf chain */
+ bus_dmamap_t rxs_dmamap; /* our DMA map */
+
+ int dbdma_slot;
+ bus_dma_segment_t segment;
+};
+
+
+struct bm_softc {
+ struct ifnet *sc_ifp;
+ struct mtx sc_mtx;
+ u_char sc_enaddr[ETHER_ADDR_LEN];
+
+ int sc_streaming;
+ int sc_ifpflags;
+ int sc_duplex;
+ int sc_wdog_timer;
+
+ struct callout sc_tick_ch;
+
+ device_t sc_dev; /* back ptr to dev */
+ struct resource *sc_memr; /* macio bus mem resource */
+ int sc_memrid;
+ bus_space_handle_t sc_bhandle;
+ bus_space_tag_t sc_btag;
+ device_t sc_miibus;
+
+ struct mii_data *sc_mii;
+
+ struct resource *sc_txdmar, *sc_rxdmar;
+ int sc_txdmarid, sc_rxdmarid;
+
+ struct resource *sc_txdmairq, *sc_rxdmairq;
+ void *sc_txihtx, *sc_rxih;
+ int sc_txdmairqid, sc_rxdmairqid;
+
+ bus_dma_tag_t sc_pdma_tag;
+
+ bus_dma_tag_t sc_tdma_tag;
+ struct bm_txsoft sc_txsoft[BM_MAX_TX_PACKETS];
+ int first_used_txdma_slot, next_txdma_slot;
+
+ struct bm_txsq sc_txfreeq;
+ struct bm_txsq sc_txdirtyq;
+
+ bus_dma_tag_t sc_rdma_tag;
+ struct bm_rxsoft sc_rxsoft[BM_MAX_RX_PACKETS];
+ int next_rxdma_slot, rxdma_loop_slot;
+
+ dbdma_channel_t *sc_txdma, *sc_rxdma;
+};
+
+struct bm_mii_frame {
+ u_int8_t mii_stdelim;
+ u_int8_t mii_opcode;
+ u_int8_t mii_phyaddr;
+ u_int8_t mii_regaddr;
+ u_int8_t mii_turnaround;
+ u_int16_t mii_data;
+};
+