path: root/sys
author    tmm <tmm@FreeBSD.org>    2002-02-27 17:35:48 +0000
committer    tmm <tmm@FreeBSD.org>    2002-02-27 17:35:48 +0000
commit    70f0ff084e8a92b505913a9ba475d0f9d60c417c (patch)
tree    e629554123ea086d4ec596639bfa5cdcfb4519c1 /sys
parent    3a862cdbbdc92cd8b2163f5c49d3ecc25564d920 (diff)
Add a driver for the Sun HME PCI/SBus Ethernet adapter, which is onboard
in most machines of the Sun Ultra series. This is a port of the NetBSD driver, which I enhanced to make use of the gather functionality and the configurable RX buffer offset to avoid copying received/sent packets (instead, packets are now DMAd directly from mbuf chains and into mbuf clusters).
Diffstat (limited to 'sys')
-rw-r--r--    sys/dev/hme/if_hme.c         1512
-rw-r--r--    sys/dev/hme/if_hme_pci.c      196
-rw-r--r--    sys/dev/hme/if_hme_sbus.c     276
-rw-r--r--    sys/dev/hme/if_hmereg.h       319
-rw-r--r--    sys/dev/hme/if_hmevar.h       152
5 files changed, 2455 insertions, 0 deletions
diff --git a/sys/dev/hme/if_hme.c b/sys/dev/hme/if_hme.c
new file mode 100644
index 0000000..bf99ec3
--- /dev/null
+++ b/sys/dev/hme/if_hme.c
@@ -0,0 +1,1512 @@
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * Copyright (c) 2001 Thomas Moestl <tmm@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Paul Kranenburg.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * HME Ethernet module driver.
+ *
+ * The HME is, e.g., part of the PCIO PCI multi-function device.
+ * It supports TX gathering and TX and RX checksum offloading.
+ * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
+ * for this offset: mbuf clusters usually start on 2^11 boundaries, and
+ * skipping 2 bytes leaves the header following the ethernet header aligned
+ * on a natural boundary, which ensures minimal wastage in the most common
+ * case.
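+ * As an illustrative example (assuming the usual 2^11-aligned cluster): with
+ * a cluster at 0x800, received data starts at 0x802, the 14-byte ethernet
+ * header ends at 0x810, and the following (e.g. IP) header thus starts on a
+ * 16-byte boundary.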
+ *
+ * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
+ * maximum packet size (this is not verified). Buffers starting on odd
+ * boundaries must be mapped so that the burst can start on a natural boundary.
+ *
+ * Checksumming is not yet supported.
+ */
+
+#define HMEDEBUG
+#define KTR_HME KTR_CT2 /* XXX */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <machine/bus.h>
+
+#include <hme/if_hmereg.h>
+#include <hme/if_hmevar.h>
+
+static void hme_start(struct ifnet *);
+static void hme_stop(struct hme_softc *);
+static int hme_ioctl(struct ifnet *, u_long, caddr_t);
+static void hme_tick(void *);
+static void hme_watchdog(struct ifnet *);
+#if 0
+static void hme_shutdown(void *);
+#endif
+static void hme_init(void *);
+static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
+static int hme_meminit(struct hme_softc *);
+static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
+ u_int32_t, u_int32_t);
+static void hme_mifinit(struct hme_softc *);
+static void hme_reset(struct hme_softc *);
+static void hme_setladrf(struct hme_softc *, int);
+
+static int hme_mediachange(struct ifnet *);
+static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
+
+static int hme_load_mbuf(struct hme_softc *, struct mbuf *);
+static void hme_read(struct hme_softc *, int, int);
+static void hme_eint(struct hme_softc *, u_int);
+static void hme_rint(struct hme_softc *);
+static void hme_tint(struct hme_softc *);
+
+static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
+static void hme_rxdma_callback(void *, bus_dma_segment_t *, int, int);
+static void hme_txdma_callback(void *, bus_dma_segment_t *, int, int);
+
+devclass_t hme_devclass;
+
+static int hme_nerr;
+
+DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
+MODULE_DEPEND(hme, miibus, 1, 1, 1);
+
+#define HME_SPC_READ_4(spc, sc, offs) \
+ bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
+ (sc)->sc_ ## spc ## o + (offs))
+#define HME_SPC_WRITE_4(spc, sc, offs, v) \
+ bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
+ (sc)->sc_ ## spc ## o + (offs), (v))
+
+#define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs))
+#define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v))
+#define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs))
+#define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v))
+#define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs))
+#define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v))
+#define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs))
+#define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v))
+#define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs))
+#define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v))
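+
+/*
+ * For illustration: HME_SEB_READ_4(sc, HME_SEBI_STAT) expands (modulo
+ * parentheses) to bus_space_read_4(sc->sc_sebt, sc->sc_sebh,
+ * sc->sc_sebo + HME_SEBI_STAT), so each register bank needs only the
+ * tag/handle/offset triple that the bus front-end fills in.
+ */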
+
+#define HME_MAXERR 5
+#define HME_WHINE(dev, ...) do { \
+ if (hme_nerr++ < HME_MAXERR) \
+ device_printf(dev, __VA_ARGS__); \
+ if (hme_nerr == HME_MAXERR) { \
+ device_printf(dev, "too many errors; not reporting any " \
+ "more\n"); \
+ } \
+} while(0)
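+/*
+ * Example use (hypothetical message): HME_WHINE(sc->sc_dev, "rx error %d\n",
+ * err) prints via device_printf() until HME_MAXERR messages have been
+ * emitted, then stays quiet.
+ */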
+
+int
+hme_config(struct hme_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct mii_softc *child;
+ bus_size_t size;
+ int error, rdesc, tdesc, i;
+
+ /*
+ * HME common initialization.
+ *
+ * hme_softc fields that must be initialized by the front-end:
+ *
+ * the dma bus tag:
+ * sc_dmatag
+ *
+ * the bus handles, tags and offsets (split for SBus compatibility):
+ * sc_seb{t,h,o} (Shared Ethernet Block registers)
+ * sc_erx{t,h,o} (Receiver Unit registers)
+ * sc_etx{t,h,o} (Transmitter Unit registers)
+ * sc_mac{t,h,o} (MAC registers)
+ * sc_mif{t,h,o} (Management Interface registers)
+ *
+ * the maximum bus burst size:
+ * sc_burst
+ *
+ */
+
+ /* Make sure the chip is stopped. */
+ hme_stop(sc);
+
+ /*
+ * Allocate DMA-capable memory.
+ * Buffer descriptors must be aligned on a 2048 byte boundary;
+ * take this into account when calculating the size. Note that
+ * the maximum number of descriptors (256) occupies 2048 bytes,
+ * so we allocate that much regardless of HME_N*DESC.
+ */
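+ /*
+ * A sketch of the arithmetic (assuming the 8-byte HME_XD_SIZE descriptor
+ * format): 256 descriptors * 8 bytes = 2048 bytes per ring; one TX and
+ * one RX ring, each padded to a 2048 byte boundary, add up to 4096.
+ */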
+ size = 4096;
+
+ error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
+ BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
+ if (error)
+ return (error);
+
+ error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
+ 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, &sc->sc_cdmatag);
+ if (error)
+ goto fail_ptag;
+
+ error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
+ HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
+ &sc->sc_rdmatag);
+ if (error)
+ goto fail_ctag;
+
+ error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
+ BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
+ HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
+ &sc->sc_tdmatag);
+ if (error)
+ goto fail_rtag;
+
+ /* Allocate control/TX DMA buffer */
+ error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
+ 0, &sc->sc_cdmamap);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
+ goto fail_ttag;
+ }
+
+ /* Load the buffer */
+ sc->sc_rb.rb_dmabase = 0;
+ if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
+ sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
+ sc->sc_rb.rb_dmabase == 0) {
+ device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
+ error);
+ goto fail_free;
+ }
+ CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
+ sc->sc_rb.rb_dmabase);
+
+ /*
+ * Prepare the RX descriptors. rdesc serves as a marker for the last
+ * processed descriptor and may be used later on.
+ */
+ for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
+ error = bus_dmamap_create(sc->sc_rdmatag, 0,
+ &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
+ if (error != 0)
+ goto fail_rxdesc;
+ }
+ error = bus_dmamap_create(sc->sc_rdmatag, 0,
+ &sc->sc_rb.rb_spare_dmamap);
+ if (error != 0)
+ goto fail_rxdesc;
+ /* Same for the TX descs. */
+ for (tdesc = 0; tdesc < HME_NTXDESC; tdesc++) {
+ error = bus_dmamap_create(sc->sc_tdmatag, 0,
+ &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
+ if (error != 0)
+ goto fail_txdesc;
+ }
+ bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ device_printf(sc->sc_dev, "Ethernet address:");
+ for (i = 0; i < 6; i++)
+ printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
+ printf("\n");
+
+ /* Initialize ifnet structure. */
+ ifp->if_softc = sc;
+ ifp->if_unit = device_get_unit(sc->sc_dev);
+ ifp->if_name = "hme";
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_start = hme_start;
+ ifp->if_ioctl = hme_ioctl;
+ ifp->if_init = hme_init;
+ ifp->if_output = ether_output;
+ ifp->if_watchdog = hme_watchdog;
+ ifp->if_snd.ifq_maxlen = HME_NTXDESC;
+
+ hme_mifinit(sc);
+
+ if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
+ hme_mediastatus)) != 0) {
+ device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
+ goto fail_rxdesc;
+ }
+ sc->sc_mii = device_get_softc(sc->sc_miibus);
+
+ /*
+ * Walk along the list of attached MII devices and
+ * establish an `MII instance' to `phy number'
+ * mapping. We'll use this mapping in media change
+ * requests to determine which phy to use to program
+ * the MIF configuration register.
+ */
+ for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
+ child = LIST_NEXT(child, mii_list)) {
+ /*
+ * Note: we support just two PHYs: the built-in
+ * internal device and an external on the MII
+ * connector.
+ */
+ if (child->mii_phy > 1 || child->mii_inst > 1) {
+ device_printf(sc->sc_dev, "cannot accomodate "
+ "MII device %s at phy %d, instance %d\n",
+ device_get_name(child->mii_dev),
+ child->mii_phy, child->mii_inst);
+ continue;
+ }
+
+ sc->sc_phys[child->mii_inst] = child->mii_phy;
+ }
+
+ /* Attach the interface. */
+ ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
+
+ callout_init(&sc->sc_tick_ch, 0);
+ return (0);
+
+fail_txdesc:
+ for (i = 0; i < tdesc; i++) {
+ bus_dmamap_destroy(sc->sc_tdmatag,
+ sc->sc_rb.rb_txdesc[i].htx_dmamap);
+ }
+ bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
+fail_rxdesc:
+ for (i = 0; i < rdesc; i++) {
+ bus_dmamap_destroy(sc->sc_rdmatag,
+ sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
+ }
+ bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
+fail_free:
+ bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
+fail_ttag:
+ bus_dma_tag_destroy(sc->sc_tdmatag);
+fail_rtag:
+ bus_dma_tag_destroy(sc->sc_rdmatag);
+fail_ctag:
+ bus_dma_tag_destroy(sc->sc_cdmatag);
+fail_ptag:
+ bus_dma_tag_destroy(sc->sc_pdmatag);
+ return (error);
+}
+
+static void
+hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct hme_softc *sc = (struct hme_softc *)xsc;
+
+ if (error != 0)
+ return;
+ KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
+ sc->sc_rb.rb_dmabase = segs[0].ds_addr;
+}
+
+static void
+hme_tick(void *arg)
+{
+ struct hme_softc *sc = arg;
+ int s;
+
+ s = splnet();
+ mii_tick(sc->sc_mii);
+ splx(s);
+
+ callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
+}
+
+static void
+hme_reset(struct hme_softc *sc)
+{
+ int s;
+
+ s = splnet();
+ hme_init(sc);
+ splx(s);
+}
+
+static void
+hme_stop(struct hme_softc *sc)
+{
+ u_int32_t v;
+ int n;
+
+ callout_stop(&sc->sc_tick_ch);
+
+ /* Reset transmitter and receiver */
+ HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
+ HME_SEB_RESET_ERX);
+
+ for (n = 0; n < 20; n++) {
+ v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
+ if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
+ return;
+ DELAY(20);
+ }
+
+ device_printf(sc->sc_dev, "hme_stop: reset failed\n");
+}
+
+static void
+hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ bus_addr_t *a = xsc;
+
+ /* XXX: A cluster should not contain more than one segment, correct? */
+ if (error != 0 || nsegs != 1)
+ return;
+ *a = segs[0].ds_addr;
+}
+
+static int
+hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
+{
+ struct hme_rxdesc *rd;
+ struct mbuf *m;
+ bus_addr_t ba;
+ bus_size_t len, offs;
+ bus_dmamap_t map;
+ int a, unmap;
+ char *b;
+
+ rd = &sc->sc_rb.rb_rxdesc[ri];
+ unmap = rd->hrx_m != NULL;
+ if (unmap && keepold)
+ return (0);
+ if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
+ return (ENOBUFS);
+ m_clget(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0)
+ goto fail_mcl;
+ len = m->m_ext.ext_size;
+ b = mtod(m, char *);
+ /*
+ * Required alignment boundary. At least 16 is needed, but since
+ * the mapping must be done in a way that a burst can start on a
+ * natural boundary we might need to extend this.
+ */
+ a = max(0x10, sc->sc_burst);
+ /*
+ * Make sure the buffer is suitably aligned: we need an offset of
+ * 2 modulo a. XXX: this ensures at least 16 byte alignment of the
+ * header adjacent to the ethernet header, which should be sufficient
+ * in all cases. Nevertheless, this second-guesses ALIGN().
+ */
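+ /*
+ * Worked example (illustrative addresses): for a = 16 and a cluster at
+ * b = 0x800, ((b - 2) & 15) == 14, so offs = (16 - 14) % 16 = 2; the
+ * mapping below then starts at b + offs - 2 = 0x800 and the packet data
+ * lands at 0x802.
+ */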
+ offs = (a - (((uintptr_t)b - 2) & (a - 1))) % a;
+ len -= offs;
+ /* Align the buffer on the boundary for mapping. */
+ b += offs - 2;
+ ba = 0;
+ if (bus_dmamap_load(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
+ b, len + 2, hme_rxdma_callback, &ba, 0) != 0 || ba == 0)
+ goto fail_mcl;
+ if (unmap) {
+ bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
+ }
+ map = rd->hrx_dmamap;
+ rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
+ sc->sc_rb.rb_spare_dmamap = map;
+ rd->hrx_offs = offs;
+ rd->hrx_len = len - sc->sc_burst;
+ bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
+ HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
+ /* Lazily leave at least one burst size of grace space. */
+ HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
+ HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ, rd->hrx_len)));
+ rd->hrx_m = m;
+ return (0);
+
+fail_mcl:
+ m_freem(m);
+ return (ENOBUFS);
+}
+
+static int
+hme_meminit(struct hme_softc *sc)
+{
+ struct hme_ring *hr = &sc->sc_rb;
+ struct hme_txdesc *td;
+ bus_addr_t dma;
+ caddr_t p;
+ unsigned int i;
+ int error;
+
+ p = hr->rb_membase;
+ dma = hr->rb_dmabase;
+
+ /*
+ * Allocate transmit descriptors
+ */
+ hr->rb_txd = p;
+ hr->rb_txddma = dma;
+ p += HME_NTXDESC * HME_XD_SIZE;
+ dma += HME_NTXDESC * HME_XD_SIZE;
+ /* We have reserved descriptor space until the next 2048 byte boundary. */
+ dma = (bus_addr_t)roundup((u_long)dma, 2048);
+ p = (caddr_t)roundup((u_long)p, 2048);
+
+ /*
+ * Allocate receive descriptors
+ */
+ hr->rb_rxd = p;
+ hr->rb_rxddma = dma;
+ p += HME_NRXDESC * HME_XD_SIZE;
+ dma += HME_NRXDESC * HME_XD_SIZE;
+ /* Again move forward to the next 2048 byte boundary. */
+ dma = (bus_addr_t)roundup((u_long)dma, 2048);
+ p = (caddr_t)roundup((u_long)p, 2048);
+
+ /*
+ * Initialize transmit buffer descriptors
+ */
+ for (i = 0; i < HME_NTXDESC; i++) {
+ td = &sc->sc_rb.rb_txdesc[i];
+ HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
+ HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
+ if (td->htx_m != NULL) {
+ m_freem(td->htx_m);
+ td->htx_m = NULL;
+ }
+ if ((td->htx_flags & HTXF_MAPPED) != 0)
+ bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
+ }
+
+ /*
+ * Initialize receive buffer descriptors
+ */
+ for (i = 0; i < HME_NRXDESC; i++) {
+ error = hme_add_rxbuf(sc, i, 1);
+ if (error != 0)
+ return (error);
+ }
+
+ hr->rb_tdhead = hr->rb_tdtail = 0;
+ hr->rb_td_nbusy = 0;
+ hr->rb_rdtail = 0;
+ CTR2(KTR_HME, "gem_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
+ hr->rb_txddma);
+ CTR2(KTR_HME, "gem_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
+ hr->rb_rxddma);
+ CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
+ *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
+ CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
+ *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
+ return (0);
+}
+
+static int
+hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
+ u_int32_t clr, u_int32_t set)
+{
+ int i = 0;
+
+ val &= ~clr;
+ val |= set;
+ HME_MAC_WRITE_4(sc, reg, val);
+ if (clr == 0 && set == 0)
+ return (1); /* just write, no bits to wait for */
+ do {
+ DELAY(100);
+ i++;
+ val = HME_MAC_READ_4(sc, reg);
+ if (i > 40) {
+ /* After 3.5ms, we should have been done. */
+ device_printf(sc->sc_dev, "timeout while writing to "
+ "MAC configuration register\n");
+ return (0);
+ }
+ } while ((val & clr) != 0 && (val & set) != set);
+ return (1);
+}
+
+/*
+ * Initialization of interface; set up initialization block
+ * and transmit/receive descriptor rings.
+ */
+static void
+hme_init(void *xsc)
+{
+ struct hme_softc *sc = (struct hme_softc *)xsc;
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ u_int8_t *ea;
+ u_int32_t v;
+
+ /*
+ * Initialization sequence. The numbered steps below correspond
+ * to the sequence outlined in section 6.3.5.1 in the Ethernet
+ * Channel Engine manual (part of the PCIO manual).
+ * See also the STP2002-STQ document from Sun Microsystems.
+ */
+
+ /* step 1 & 2. Reset the Ethernet Channel */
+ hme_stop(sc);
+
+ /* Re-initialize the MIF */
+ hme_mifinit(sc);
+
+ /* Call MI reset function if any */
+ if (sc->sc_hwreset)
+ (*sc->sc_hwreset)(sc);
+
+#if 0
+ /* Mask all MIF interrupts, just in case */
+ HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
+#endif
+
+ /* step 3. Setup data structures in host memory */
+ if (hme_meminit(sc) != 0) {
+ device_printf(sc->sc_dev, "out of buffers; init aborted.");
+ return;
+ }
+
+ /* step 4. TX MAC registers & counters */
+ HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
+ HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
+ HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
+ HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
+ HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);
+
+ /* Load station MAC address */
+ ea = sc->sc_arpcom.ac_enaddr;
+ HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
+ HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
+ HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
+
+ /*
+ * Init seed for backoff
+ * (source suggested by manual: low 10 bits of MAC address)
+ */
+ v = ((ea[4] << 8) | ea[5]) & 0x3fff;
+ HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
+
+
+ /* Note: Accepting power-on default for other MAC registers here. */
+
+ /* step 5. RX MAC registers & counters */
+ hme_setladrf(sc, 0);
+
+ /* step 6 & 7. Program Descriptor Ring Base Addresses */
+ HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
+ /* Transmit Descriptor ring size: in increments of 16 */
+ HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
+
+ HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
+ HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);
+
+ /* step 8. Global Configuration & Interrupt Mask */
+ HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
+ ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
+ HME_SEB_STAT_HOSTTOTX |
+ HME_SEB_STAT_RXTOHOST |
+ HME_SEB_STAT_TXALL |
+ HME_SEB_STAT_TXPERR |
+ HME_SEB_STAT_RCNTEXP |
+ HME_SEB_STAT_ALL_ERRORS ));
+
+ switch (sc->sc_burst) {
+ default:
+ v = 0;
+ break;
+ case 16:
+ v = HME_SEB_CFG_BURST16;
+ break;
+ case 32:
+ v = HME_SEB_CFG_BURST32;
+ break;
+ case 64:
+ v = HME_SEB_CFG_BURST64;
+ break;
+ }
+ HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
+
+ /* step 9. ETX Configuration: use mostly default values */
+
+ /* Enable DMA */
+ v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
+ v |= HME_ETX_CFG_DMAENABLE;
+ HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
+
+ /* step 10. ERX Configuration */
+ v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
+
+ /* Encode Receive Descriptor ring size: four possible values */
+ v &= ~HME_ERX_CFG_RINGSIZEMSK;
+ switch (HME_NRXDESC) {
+ case 32:
+ v |= HME_ERX_CFG_RINGSIZE32;
+ break;
+ case 64:
+ v |= HME_ERX_CFG_RINGSIZE64;
+ break;
+ case 128:
+ v |= HME_ERX_CFG_RINGSIZE128;
+ break;
+ case 256:
+ v |= HME_ERX_CFG_RINGSIZE256;
+ break;
+ default:
+ printf("hme: invalid Receive Descriptor ring size\n");
+ break;
+ }
+
+ /* Enable DMA, fix RX first byte offset to 2. */
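+ /*
+ * Note: this first byte offset of 2 matches the buffer offset that
+ * hme_add_rxbuf() arranges for.
+ */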
+ v &= ~HME_ERX_CFG_FBO_MASK;
+ v |= HME_ERX_CFG_DMAENABLE | (2 << HME_ERX_CFG_FBO_SHIFT);
+ CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
+ HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
+
+ /* step 11. XIF Configuration */
+ v = HME_MAC_READ_4(sc, HME_MACI_XIF);
+ v |= HME_MAC_XIF_OE;
+ /* If an external transceiver is connected, enable its MII drivers */
+ if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
+ v |= HME_MAC_XIF_MIIENABLE;
+ CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
+ HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
+
+ /* step 12. RX_MAC Configuration Register */
+ v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
+ v |= HME_MAC_RXCFG_ENABLE;
+ v &= ~(HME_MAC_RXCFG_DCRCS);
+ CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
+ HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
+
+ /* step 13. TX_MAC Configuration Register */
+ v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
+ v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
+ CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
+ HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
+
+ /* step 14. Issue Transmit Pending command */
+
+ /* Call MI initialization function if any */
+ if (sc->sc_hwinit)
+ (*sc->sc_hwinit)(sc);
+
+#ifdef HMEDEBUG
+ /* Debug: double-check. */
+ CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
+ "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
+ HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
+ HME_ERX_READ_4(sc, HME_ERXI_RING),
+ HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
+ CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
+ HME_SEB_READ_4(sc, HME_SEBI_IMASK),
+ HME_ERX_READ_4(sc, HME_ERXI_CFG),
+ HME_ETX_READ_4(sc, HME_ETXI_CFG));
+ CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
+ HME_MAC_READ_4(sc, HME_MACI_RXCFG),
+ HME_MAC_READ_4(sc, HME_MACI_TXCFG));
+#endif
+
+ /* Start the one second timer. */
+ callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
+
+ ifp->if_flags |= IFF_RUNNING;
+ ifp->if_flags &= ~IFF_OACTIVE;
+ ifp->if_timer = 0;
+ hme_start(ifp);
+}
+
+struct hme_txdma_arg {
+ struct hme_softc *hta_sc;
+ struct mbuf *hta_m;
+ int hta_err;
+ int hta_flags;
+ int hta_offs;
+ int hta_pad;
+};
+
+/* Values for hta_flags */
+#define HTAF_SOP 1 /* Start of packet (first mbuf in chain) */
+#define HTAF_EOP 2 /* End of packet (last mbuf in chain) */
+
+static void
+hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct hme_txdma_arg *ta = xsc;
+ struct hme_txdesc *td;
+ bus_addr_t addr;
+ bus_size_t sz;
+ caddr_t txd;
+ u_int32_t flags;
+ int i, *tdhead, pci;
+
+ ta->hta_err = error;
+ if (error != 0)
+ return;
+
+ tdhead = &ta->hta_sc->sc_rb.rb_tdhead;
+ pci = ta->hta_sc->sc_pci;
+ txd = ta->hta_sc->sc_rb.rb_txd;
+ for (i = 0; i < nsegs; i++) {
+ if (ta->hta_sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
+ ta->hta_err = -1;
+ return;
+ }
+ td = &ta->hta_sc->sc_rb.rb_txdesc[*tdhead];
+ addr = segs[i].ds_addr;
+ sz = segs[i].ds_len;
+ if (i == 0) {
+ /* Adjust the offsets. */
+ addr += ta->hta_offs;
+ sz -= ta->hta_offs;
+ } else {
+ /*
+ * Do not touch this otherwise; it is set by
+ * hme_load_mbuf.
+ */
+ td->htx_flags = 0;
+ }
+ if (i == nsegs - 1) {
+ /* Subtract the pad. */
+ if (sz < ta->hta_pad) {
+ /*
+ * Ooops. This should not have happened; it
+ * means that we got a zero-size segment or
+ * segment sizes were unnatural.
+ */
+ device_printf(ta->hta_sc->sc_dev,
+ "hme_txdma_callback: alignment glitch\n");
+ ta->hta_err = EINVAL;
+ return;
+ }
+ sz -= ta->hta_pad;
+ /* If sz is 0 now, this does not matter. */
+ }
+ /* Fill the ring entry. */
+ flags = HME_XD_ENCODE_TSIZE(sz);
+ if ((ta->hta_flags & HTAF_SOP) != 0 && i == 0)
+ flags |= HME_XD_SOP;
+ if ((ta->hta_flags & HTAF_EOP) != 0 && i == nsegs - 1) {
+ flags |= HME_XD_EOP;
+ td->htx_m = ta->hta_m;
+ } else
+ td->htx_m = NULL;
+ CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
+ "flags %#x, addr %#x", i + 1, nsegs, *tdhead, (u_int)flags,
+ (u_int)addr);
+ HME_XD_SETFLAGS(pci, txd, *tdhead, flags);
+ HME_XD_SETADDR(pci, txd, *tdhead, addr);
+
+ ta->hta_sc->sc_rb.rb_td_nbusy++;
+ *tdhead = ((*tdhead) + 1) % HME_NTXDESC;
+ }
+}
+
+/*
+ * Routine to DMA-map an mbuf chain, set up the descriptor rings accordingly,
+ * and start the transmission.
+ * Returns 0 on success, -1 if there were not enough free descriptors to map
+ * the packet, or an errno otherwise.
+ */
+static int
+hme_load_mbuf(struct hme_softc *sc, struct mbuf *m0)
+{
+ struct hme_txdma_arg cba;
+ struct mbuf *m = m0, *n;
+ struct hme_txdesc *td;
+ char *start;
+ int error, len, si, ri, totlen, sum;
+ u_int32_t flags;
+
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("gem_dmamap_load_mbuf: no packet header");
+ totlen = m->m_pkthdr.len;
+ sum = 0;
+ si = sc->sc_rb.rb_tdhead;
+ cba.hta_sc = sc;
+ cba.hta_err = 0;
+ cba.hta_flags = HTAF_SOP;
+ cba.hta_m = m0;
+ for (; m != NULL && sum < totlen; m = n) {
+ len = m->m_len;
+ n = m->m_next;
+ if (len == 0)
+ continue;
+ sum += len;
+ td = &sc->sc_rb.rb_txdesc[sc->sc_rb.rb_tdhead];
+ td->htx_flags = HTXF_MAPPED;
+ if (n == NULL || sum >= totlen)
+ cba.hta_flags |= HTAF_EOP;
+ /*
+ * This is slightly evil: we must map the buffer in a way that
+ * allows DMA transfers to start on a natural burst boundary.
+ * This is done by rounding down the mapping address and
+ * recording the required offset for the callback. The rounding
+ * cannot cross a page boundary because the burst size is a
+ * small power of two.
+ */
+ cba.hta_offs = (sc->sc_burst -
+ (mtod(m, uintptr_t) & (sc->sc_burst - 1))) % sc->sc_burst;
+ start = mtod(m, char *) - cba.hta_offs;
+ len += cba.hta_offs;
+ /*
+ * Similarly, the end of the mapping should be on a natural
+ * burst boundary. XXX: Let's hope that any segment ends
+ * generated by the busdma code are also on such boundaries.
+ */
+ cba.hta_pad = (sc->sc_burst - (((uintptr_t)start + len) &
+ (sc->sc_burst - 1))) % sc->sc_burst;
+ len += cba.hta_pad;
+ /* Most of the work is done in the callback. */
+ if ((error = bus_dmamap_load(sc->sc_tdmatag, td->htx_dmamap,
+ start, len, hme_txdma_callback, &cba, 0)) != 0 ||
+ cba.hta_err != 0)
+ goto fail;
+ bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
+ BUS_DMASYNC_PREWRITE);
+
+ cba.hta_flags = 0;
+ }
+ /* Turn descriptor ownership over to the hme, back to front. */
+ ri = sc->sc_rb.rb_tdhead;
+ CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
+ ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
+ do {
+ ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
+ flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
+ HME_XD_OWN;
+ CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
+ ri, si, flags);
+ HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
+ } while (ri != si);
+
+ bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Start the transmission. */
+ HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
+ return (0);
+fail:
+ for (; si != sc->sc_rb.rb_tdhead; si = (si + 1) % HME_NTXDESC) {
+ td = &sc->sc_rb.rb_txdesc[si];
+ if ((td->htx_flags & HTXF_MAPPED) != 0)
+ bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
+ td->htx_flags = 0;
+ td->htx_m = NULL;
+ sc->sc_rb.rb_td_nbusy--;
+ HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, si, 0);
+ }
+ sc->sc_rb.rb_tdhead = si;
+ error = cba.hta_err != 0 ? cba.hta_err : error;
+ if (error != -1)
+ device_printf(sc->sc_dev, "could not load mbuf: %d\n", error);
+ return (error);
+}
+
+/*
+ * Pass a packet to the higher levels.
+ */
+static void
+hme_read(struct hme_softc *sc, int ix, int len)
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ether_header *eh;
+ struct mbuf *m;
+ int offs;
+
+ if (len <= sizeof(struct ether_header) ||
+ len > ETHERMTU + sizeof(struct ether_header)) {
+#ifdef HMEDEBUG
+ HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
+ len);
+#endif
+ goto drop;
+ }
+
+ m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
+ offs = sc->sc_rb.rb_rxdesc[ix].hrx_offs;
+ CTR2(KTR_HME, "hme_read: offs %d, len %d", offs, len);
+
+ if (hme_add_rxbuf(sc, ix, 0) != 0) {
+ /*
+ * hme_add_rxbuf will leave the old buffer in the ring until
+ * it is sure that a new buffer can be mapped. If it cannot,
+ * drop the packet, but leave the interface up.
+ */
+ goto drop;
+ }
+
+ ifp->if_ipackets++;
+
+ /* Changed the rings; sync. */
+ bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = len + offs;
+ m_adj(m, offs);
+ eh = mtod(m, struct ether_header *);
+ m_adj(m, sizeof(struct ether_header));
+ /* Pass the packet up. */
+ ether_input(ifp, eh, m);
+ return;
+
+drop:
+ ifp->if_ierrors++;
+ /*
+ * Dropped a packet, reinitialize the descriptor and turn the
+ * ownership back to the hardware.
+ */
+ HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
+ HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ,
+ sc->sc_rb.rb_rxdesc[ix].hrx_len)));
+ bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static void
+hme_start(struct ifnet *ifp)
+{
+ struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
+ struct mbuf *m;
+ int error, enq = 0;
+
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
+ return;
+
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+
+ error = hme_load_mbuf(sc, m);
+ if (error != 0) {
+ ifp->if_flags |= IFF_OACTIVE;
+ IF_PREPEND(&ifp->if_snd, m);
+ } else
+ enq = 1;
+ }
+
+ if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC)
+ ifp->if_flags |= IFF_OACTIVE;
+ /* Set watchdog timer if a packet was queued */
+ if (enq)
+ ifp->if_timer = 5;
+}
+
+/*
+ * Transmit interrupt.
+ */
+static void
+hme_tint(struct hme_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct hme_txdesc *td;
+ unsigned int ri, txflags;
+
+ /*
+ * Unload collision counters
+ */
+ ifp->if_collisions +=
+ HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
+ HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
+ HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
+ HME_MAC_READ_4(sc, HME_MACI_LTCNT);
+
+ /*
+ * then clear the hardware counters.
+ */
+ HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
+ HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
+ HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
+ HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
+
+ /* Fetch current position in the transmit ring */
+ for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
+ if (sc->sc_rb.rb_td_nbusy <= 0) {
+ CTR0(KTR_HME, "hme_tint: not busy!");
+ break;
+ }
+
+ txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
+ CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
+
+ if ((txflags & HME_XD_OWN) != 0)
+ break;
+
+ td = &sc->sc_rb.rb_txdesc[ri];
+ CTR1(KTR_HME, "hme_tint: not owned, dflags %#x", td->htx_flags);
+ if ((td->htx_flags & HTXF_MAPPED) != 0) {
+ bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
+ }
+ td->htx_flags = 0;
+ --sc->sc_rb.rb_td_nbusy;
+ ifp->if_flags &= ~IFF_OACTIVE;
+
+ /* Complete packet transmitted? */
+ if ((txflags & HME_XD_EOP) == 0)
+ continue;
+
+ ifp->if_opackets++;
+ m_freem(td->htx_m);
+ td->htx_m = NULL;
+ }
+ /* Turn off watchdog */
+ if (sc->sc_rb.rb_td_nbusy == 0)
+ ifp->if_timer = 0;
+
+ /* Update ring */
+ sc->sc_rb.rb_tdtail = ri;
+
+ hme_start(ifp);
+
+ if (sc->sc_rb.rb_td_nbusy == 0)
+ ifp->if_timer = 0;
+}
+
+/*
+ * Receive interrupt.
+ */
+static void
+hme_rint(struct hme_softc *sc)
+{
+ caddr_t xdr = sc->sc_rb.rb_rxd;
+ unsigned int ri, len;
+ u_int32_t flags;
+
+ /*
+ * Process all buffers with valid data.
+ */
+ for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
+ flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
+ CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
+ if ((flags & HME_XD_OWN) != 0)
+ break;
+
+ if ((flags & HME_XD_OFL) != 0) {
+ device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
+ "flags=0x%x\n", ri, flags);
+ } else {
+ len = HME_XD_DECODE_RSIZE(flags);
+ hme_read(sc, ri, len);
+ }
+ }
+
+ sc->sc_rb.rb_rdtail = ri;
+}
+
+static void
+hme_eint(struct hme_softc *sc, u_int status)
+{
+
+ if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
+ device_printf(sc->sc_dev, "XXXlink status changed\n");
+ return;
+ }
+
+ HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
+}
+
+void
+hme_intr(void *v)
+{
+ struct hme_softc *sc = (struct hme_softc *)v;
+ u_int32_t status;
+
+ status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
+ CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
+
+ if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
+ hme_eint(sc, status);
+
+ if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
+ hme_tint(sc);
+
+ if ((status & HME_SEB_STAT_RXTOHOST) != 0)
+ hme_rint(sc);
+}
+
+
+static void
+hme_watchdog(struct ifnet *ifp)
+{
+ struct hme_softc *sc = ifp->if_softc;
+#ifdef HMEDEBUG
+ u_int32_t status;
+
+ status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
+ CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
+#endif
+ device_printf(sc->sc_dev, "device timeout\n");
+ ++ifp->if_oerrors;
+
+ hme_reset(sc);
+}
+
+/*
+ * Initialize the MII Management Interface
+ */
+static void
+hme_mifinit(struct hme_softc *sc)
+{
+ u_int32_t v;
+
+ /* Configure the MIF in frame mode */
+ v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
+ v &= ~HME_MIF_CFG_BBMODE;
+ HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
+}
+
+/*
+ * MII interface
+ */
+int
+hme_mii_readreg(device_t dev, int phy, int reg)
+{
+ struct hme_softc *sc = device_get_softc(dev);
+ int n;
+ u_int32_t v;
+
+ /* Select the desired PHY in the MIF configuration register */
+ v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
+ /* Clear PHY select bit */
+ v &= ~HME_MIF_CFG_PHY;
+ if (phy == HME_PHYAD_EXTERNAL)
+ /* Set PHY select bit to get at external device */
+ v |= HME_MIF_CFG_PHY;
+ HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
+
+ /* Construct the frame command */
+ v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
+ HME_MIF_FO_TAMSB |
+ (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
+ (phy << HME_MIF_FO_PHYAD_SHIFT) |
+ (reg << HME_MIF_FO_REGAD_SHIFT);
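+ /*
+ * E.g., reading the BMSR (reg 1) of PHY 0 builds a standard IEEE 802.3
+ * clause 22 frame: ST=01, OP=10 (read), PHYAD=00000, REGAD=00001.
+ */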
+
+ HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
+ for (n = 0; n < 100; n++) {
+ DELAY(1);
+ v = HME_MIF_READ_4(sc, HME_MIFI_FO);
+ if (v & HME_MIF_FO_TALSB)
+ return (v & HME_MIF_FO_DATA);
+ }
+
+ device_printf(sc->sc_dev, "mii_read timeout\n");
+ return (0);
+}
+
+int
+hme_mii_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct hme_softc *sc = device_get_softc(dev);
+ int n;
+ u_int32_t v;
+
+ /* Select the desired PHY in the MIF configuration register */
+ v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
+ /* Clear PHY select bit */
+ v &= ~HME_MIF_CFG_PHY;
+ if (phy == HME_PHYAD_EXTERNAL)
+ /* Set PHY select bit to get at external device */
+ v |= HME_MIF_CFG_PHY;
+ HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
+
+ /* Construct the frame command */
+ v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
+ HME_MIF_FO_TAMSB |
+ (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
+ (phy << HME_MIF_FO_PHYAD_SHIFT) |
+ (reg << HME_MIF_FO_REGAD_SHIFT) |
+ (val & HME_MIF_FO_DATA);
+
+ HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
+ for (n = 0; n < 100; n++) {
+ DELAY(1);
+ v = HME_MIF_READ_4(sc, HME_MIFI_FO);
+ if (v & HME_MIF_FO_TALSB)
+ return (1);
+ }
+
+ device_printf(sc->sc_dev, "mii_write timeout\n");
+ return (0);
+}
+
+void
+hme_mii_statchg(device_t dev)
+{
+ struct hme_softc *sc = device_get_softc(dev);
+ int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
+ int phy = sc->sc_phys[instance];
+ u_int32_t v;
+
+#ifdef HMEDEBUG
+ if (sc->sc_debug)
+ printf("hme_mii_statchg: status change: phy = %d\n", phy);
+#endif
+
+ /* Select the current PHY in the MIF configuration register */
+ v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
+ v &= ~HME_MIF_CFG_PHY;
+ if (phy == HME_PHYAD_EXTERNAL)
+ v |= HME_MIF_CFG_PHY;
+ HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
+
+ /* Set the MAC Full Duplex bit appropriately */
+ v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
+ if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
+ return;
+ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
+ v |= HME_MAC_TXCFG_FULLDPLX;
+ else
+ v &= ~HME_MAC_TXCFG_FULLDPLX;
+ HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
+ if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
+ return;
+
+ /* If an external transceiver is selected, enable its MII drivers */
+ v = HME_MAC_READ_4(sc, HME_MACI_XIF);
+ v &= ~HME_MAC_XIF_MIIENABLE;
+ if (phy == HME_PHYAD_EXTERNAL)
+ v |= HME_MAC_XIF_MIIENABLE;
+ HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
+}
+
+static int
+hme_mediachange(struct ifnet *ifp)
+{
+ struct hme_softc *sc = ifp->if_softc;
+
+ return (mii_mediachg(sc->sc_mii));
+}
+
+static void
+hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct hme_softc *sc = ifp->if_softc;
+
+ if ((ifp->if_flags & IFF_UP) == 0)
+ return;
+
+ mii_pollstat(sc->sc_mii);
+ ifmr->ifm_active = sc->sc_mii->mii_media_active;
+ ifmr->ifm_status = sc->sc_mii->mii_media_status;
+}
+
+/*
+ * Process an ioctl request.
+ */
+static int
+hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct hme_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int s, error = 0;
+
+ s = splnet();
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ case SIOCGIFADDR:
+ case SIOCSIFMTU:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ case SIOCSIFFLAGS:
+ if ((ifp->if_flags & IFF_UP) == 0 &&
+ (ifp->if_flags & IFF_RUNNING) != 0) {
+ /*
+ * If interface is marked down and it is running, then
+ * stop it.
+ */
+ hme_stop(sc);
+ ifp->if_flags &= ~IFF_RUNNING;
+ } else if ((ifp->if_flags & IFF_UP) != 0 &&
+ (ifp->if_flags & IFF_RUNNING) == 0) {
+ /*
+ * If interface is marked up and it is stopped, then
+ * start it.
+ */
+ hme_init(sc);
+ } else if ((ifp->if_flags & IFF_UP) != 0) {
+ /*
+ * Reset the interface to pick up changes in any other
+ * flags that affect hardware registers.
+ */
+ /*hme_stop(sc);*/
+ hme_init(sc);
+ }
+#ifdef HMEDEBUG
+ sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
+#endif
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ hme_setladrf(sc, 1);
+ error = 0;
+ break;
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
+ break;
+ default:
+ error = ENOTTY;
+ break;
+ }
+
+ splx(s);
+ return (error);
+}
+
+#if 0
+static void
+hme_shutdown(void *arg)
+{
+
+ hme_stop((struct hme_softc *)arg);
+}
+#endif
+
+/*
+ * Set up the logical address filter.
+ */
+static void
+hme_setladrf(struct hme_softc *sc, int reenable)
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ifmultiaddr *inm;
+ struct sockaddr_dl *sdl;
+ u_char *cp;
+ u_int32_t crc;
+ u_int32_t hash[4];
+ u_int32_t macc;
+ int len;
+
+ /* Clear hash table */
+ hash[3] = hash[2] = hash[1] = hash[0] = 0;
+
+ /* Get current RX configuration */
+ macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
+
+ /*
+ * Disable the receiver while changing its state as the documentation
+ * mandates.
+ * We then must wait until the bit clears in the register. This should
+ * take at most 3.5ms.
+ */
+ if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
+ return;
+ /* Disable the hash filter before writing to the filter registers. */
+ if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
+ HME_MAC_RXCFG_HENABLE, 0))
+ return;
+
+ if (reenable)
+ macc |= HME_MAC_RXCFG_ENABLE;
+ else
+ macc &= ~HME_MAC_RXCFG_ENABLE;
+
+ if ((ifp->if_flags & IFF_PROMISC) != 0) {
+ /* Turn on promiscuous mode; turn off the hash filter */
+ macc |= HME_MAC_RXCFG_PMISC;
+ macc &= ~HME_MAC_RXCFG_HENABLE;
+ ifp->if_flags |= IFF_ALLMULTI;
+ goto chipit;
+ }
+
+ /* Turn off promiscuous mode; turn on the hash filter */
+ macc &= ~HME_MAC_RXCFG_PMISC;
+ macc |= HME_MAC_RXCFG_HENABLE;
+
+ /*
+ * Set up multicast address filter by passing all multicast addresses
+ * through a crc generator, and then using the high order 6 bits as an
+ * index into the 64 bit logical address filter. The high order bits
+ * select the word, while the rest of the bits select the bit within
+ * the word.
+ */
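+ /*
+ * For instance (illustrative value): a CRC whose top 6 bits are 101101
+ * (45) sets bit 45 & 0xf = 13 in word 45 >> 4 = 2 of the filter.
+ */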
+
+ TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
+ if (inm->ifma_addr->sa_family != AF_LINK)
+ continue;
+ sdl = (struct sockaddr_dl *)inm->ifma_addr;
+ cp = LLADDR(sdl);
+ crc = 0xffffffff;
+ for (len = sdl->sdl_alen; --len >= 0;) {
+ int octet = *cp++;
+ int i;
+
+#define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */
+ for (i = 0; i < 8; i++) {
+ if ((crc & 1) ^ (octet & 1)) {
+ crc >>= 1;
+ crc ^= MC_POLY_LE;
+ } else {
+ crc >>= 1;
+ }
+ octet >>= 1;
+ }
+ }
+ /* Just want the 6 most significant bits. */
+ crc >>= 26;
+
+ /* Set the corresponding bit in the filter. */
+ hash[crc >> 4] |= 1 << (crc & 0xf);
+ }
+
+ ifp->if_flags &= ~IFF_ALLMULTI;
+
+chipit:
+ /* Now load the hash table into the chip */
+ HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
+ HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
+ HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
+ HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
+ hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
+ macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
+}
diff --git a/sys/dev/hme/if_hme_pci.c b/sys/dev/hme/if_hme_pci.c
new file mode 100644
index 0000000..6128608
--- /dev/null
+++ b/sys/dev/hme/if_hme_pci.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2000 Matthew R. Green
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: NetBSD: if_hme_pci.c,v 1.4 2001/08/27 22:18:49 augustss Exp
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * PCI front-end device driver for the HME ethernet device.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+#include <machine/ofw_machdep.h>
+#include <machine/resource.h>
+
+#include <sys/rman.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <mii/mii.h>
+#include <mii/miivar.h>
+
+#include <pci/pcivar.h>
+#include <pci/pcireg.h>
+
+#include <hme/if_hmereg.h>
+#include <hme/if_hmevar.h>
+
+#include "miibus_if.h"
+
+struct hme_pci_softc {
+ struct hme_softc hsc_hme; /* HME device */
+ struct resource *hsc_sres;
+ int hsc_srid;
+ struct resource *hsc_ires;
+ int hsc_irid;
+ bus_space_tag_t hsc_memt;
+ bus_space_handle_t hsc_memh;
+ void *hsc_ih;
+};
+
+static int hme_pci_probe(device_t);
+static int hme_pci_attach(device_t);
+
+static device_method_t hme_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, hme_pci_probe),
+ DEVMETHOD(device_attach, hme_pci_attach),
+
+ /* bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, hme_mii_readreg),
+ DEVMETHOD(miibus_writereg, hme_mii_writereg),
+ DEVMETHOD(miibus_statchg, hme_mii_statchg),
+
+ { 0, 0 }
+};
+
+static driver_t hme_pci_driver = {
+ "hme",
+ hme_pci_methods,
+ sizeof(struct hme_pci_softc)
+};
+
+DRIVER_MODULE(if_hme, pci, hme_pci_driver, hme_devclass, 0, 0);
+
+int
+hme_pci_probe(device_t dev)
+{
+
+ if (pci_get_vendor(dev) == 0x108e &&
+ pci_get_device(dev) == 0x1001) {
+ device_set_desc(dev, "Sun HME 10/100 Ethernet");
+ return (0);
+ }
+ return (ENXIO);
+}
+
+int
+hme_pci_attach(device_t dev)
+{
+ struct hme_pci_softc *hsc = device_get_softc(dev);
+ struct hme_softc *sc = &hsc->hsc_hme;
+ u_int16_t csr;
+ int error;
+
+ /*
+ * Enable I/O and memory-space accesses. This is kind of gross, but
+ * the HME comes up with neither I/O space nor memory space enabled.
+ */
+ csr = pci_read_config(dev, PCIR_COMMAND, 2);
+ csr |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, csr, 2);
+
+ sc->sc_pci = 1; /* XXXXX should all be done in bus_dma. */
+ sc->sc_dev = dev;
+
+ /*
+ * Map five register banks:
+ *
+ * bank 0: HME SEB registers: +0x0000
+ * bank 1: HME ETX registers: +0x2000
+ * bank 2: HME ERX registers: +0x4000
+ * bank 3: HME MAC registers: +0x6000
+ * bank 4: HME MIF registers: +0x7000
+ *
+ */
+ hsc->hsc_srid = PCI_HME_BASEADDR;
+ hsc->hsc_sres = bus_alloc_resource(dev, SYS_RES_MEMORY, &hsc->hsc_srid,
+ 0, ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_sres == NULL) {
+ device_printf(dev, "could not map device registers\n");
+ return (ENXIO);
+ }
+ hsc->hsc_irid = 0;
+ hsc->hsc_ires = bus_alloc_resource(dev, SYS_RES_IRQ, &hsc->hsc_irid, 0,
+ ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_ires == NULL) {
+ device_printf(dev, "could not allocate interrupt\n");
+ error = ENXIO;
+ goto fail_sres;
+ }
+ sc->sc_sebt = sc->sc_etxt = sc->sc_erxt = sc->sc_mact = sc->sc_mift =
+ rman_get_bustag(hsc->hsc_sres);
+ sc->sc_sebh = sc->sc_etxh = sc->sc_erxh = sc->sc_mach = sc->sc_mifh =
+ rman_get_bushandle(hsc->hsc_sres);
+ sc->sc_sebo = 0;
+ sc->sc_etxo = 0x2000;
+ sc->sc_erxo = 0x4000;
+ sc->sc_maco = 0x6000;
+ sc->sc_mifo = 0x7000;
+
+ OF_getetheraddr(dev, sc->sc_arpcom.ac_enaddr);
+
+ sc->sc_burst = 64; /* XXX */
+
+ /*
+ * call the main configure
+ */
+ if ((error = hme_config(sc)) != 0) {
+ device_printf(dev, "could not be configured\n");
+ goto fail_ires;
+ }
+
+ if ((error = bus_setup_intr(dev, hsc->hsc_ires, INTR_TYPE_NET, hme_intr,
+ sc, &hsc->hsc_ih)) != 0) {
+ device_printf(dev, "couldn't establish interrupt\n");
+ goto fail_ires;
+ }
+ return (0);
+
+fail_ires:
+ bus_release_resource(dev, SYS_RES_IRQ, hsc->hsc_irid, hsc->hsc_ires);
+fail_sres:
+ bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_srid, hsc->hsc_sres);
+ return (ENXIO);
+}
diff --git a/sys/dev/hme/if_hme_sbus.c b/sys/dev/hme/if_hme_sbus.c
new file mode 100644
index 0000000..1e6d82c
--- /dev/null
+++ b/sys/dev/hme/if_hme_sbus.c
@@ -0,0 +1,276 @@
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Paul Kranenburg.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: NetBSD: if_hme_sbus.c,v 1.9 2001/11/13 06:58:17 lukem Exp
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * SBus front-end device driver for the HME ethernet device.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+#include <machine/ofw_machdep.h>
+#include <machine/resource.h>
+
+#include <sys/rman.h>
+
+#include <ofw/openfirm.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <mii/mii.h>
+#include <mii/miivar.h>
+
+#include <sparc64/sbus/sbusvar.h>
+
+#include <hme/if_hmereg.h>
+#include <hme/if_hmevar.h>
+
+#include "miibus_if.h"
+
+struct hme_sbus_softc {
+ struct hme_softc hsc_hme; /* HME device */
+ struct resource *hsc_seb_res;
+ int hsc_seb_rid;
+ struct resource *hsc_etx_res;
+ int hsc_etx_rid;
+ struct resource *hsc_erx_res;
+ int hsc_erx_rid;
+ struct resource *hsc_mac_res;
+ int hsc_mac_rid;
+ struct resource *hsc_mif_res;
+ int hsc_mif_rid;
+ struct resource *hsc_ires;
+ int hsc_irid;
+ void *hsc_ih;
+};
+
+static int hme_sbus_probe(device_t);
+static int hme_sbus_attach(device_t);
+
+static device_method_t hme_sbus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, hme_sbus_probe),
+ DEVMETHOD(device_attach, hme_sbus_attach),
+
+ /* bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, hme_mii_readreg),
+ DEVMETHOD(miibus_writereg, hme_mii_writereg),
+ DEVMETHOD(miibus_statchg, hme_mii_statchg),
+
+ { 0, 0 }
+};
+
+static driver_t hme_sbus_driver = {
+ "hme",
+ hme_sbus_methods,
+ sizeof(struct hme_sbus_softc)
+};
+
+DRIVER_MODULE(if_hme, sbus, hme_sbus_driver, hme_devclass, 0, 0);
+
+static int
+hme_sbus_probe(device_t dev)
+{
+ char *name;
+
+ name = sbus_get_name(dev);
+ if (strcmp(name, "SUNW,qfe") == 0 ||
+ strcmp(name, "SUNW,hme") == 0) {
+ device_set_desc(dev, "Sun HME 10/100 Ethernet");
+ return (0);
+ }
+ return (ENXIO);
+}
+
+static int
+hme_sbus_attach(device_t dev)
+{
+ struct hme_sbus_softc *hsc = device_get_softc(dev);
+ struct hme_softc *sc = &hsc->hsc_hme;
+ u_int32_t burst;
+ u_long start, count;
+ int error;
+
+ /*
+ * Map five register banks:
+ *
+ * bank 0: HME SEB registers
+ * bank 1: HME ETX registers
+ * bank 2: HME ERX registers
+ * bank 3: HME MAC registers
+ * bank 4: HME MIF registers
+ *
+ */
+ sc->sc_sebo = sc->sc_etxo = sc->sc_erxo = sc->sc_maco = sc->sc_mifo = 0;
+ hsc->hsc_seb_rid = 0;
+ hsc->hsc_seb_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &hsc->hsc_seb_rid, 0, ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_seb_res == NULL) {
+ device_printf(dev, "cannot map SEB registers\n");
+ return (ENXIO);
+ }
+ sc->sc_sebt = rman_get_bustag(hsc->hsc_seb_res);
+ sc->sc_sebh = rman_get_bushandle(hsc->hsc_seb_res);
+
+ hsc->hsc_etx_rid = 1;
+ hsc->hsc_etx_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &hsc->hsc_etx_rid, 0, ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_etx_res == NULL) {
+ device_printf(dev, "cannot map ETX registers\n");
+ goto fail_seb_res;
+ }
+ sc->sc_etxt = rman_get_bustag(hsc->hsc_etx_res);
+ sc->sc_etxh = rman_get_bushandle(hsc->hsc_etx_res);
+
+ hsc->hsc_erx_rid = 2;
+ hsc->hsc_erx_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &hsc->hsc_erx_rid, 0, ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_erx_res == NULL) {
+ device_printf(dev, "cannot map ERX registers\n");
+ goto fail_etx_res;
+ }
+ sc->sc_erxt = rman_get_bustag(hsc->hsc_erx_res);
+ sc->sc_erxh = rman_get_bushandle(hsc->hsc_erx_res);
+
+ hsc->hsc_mac_rid = 3;
+ hsc->hsc_mac_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &hsc->hsc_mac_rid, 0, ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_mac_res == NULL) {
+ device_printf(dev, "cannot map MAC registers\n");
+ goto fail_erx_res;
+ }
+ sc->sc_mact = rman_get_bustag(hsc->hsc_mac_res);
+ sc->sc_mach = rman_get_bushandle(hsc->hsc_mac_res);
+
+ /*
+ * At least on some HMEs, the MIF registers seem to be inside the MAC
+ * range, so try to kluge around it.
+ */
+ hsc->hsc_mif_rid = 4;
+ hsc->hsc_mif_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &hsc->hsc_mif_rid, 0, ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_mif_res == NULL) {
+ if (bus_get_resource(dev, SYS_RES_MEMORY, hsc->hsc_mif_rid,
+ &start, &count) != 0) {
+ device_printf(dev, "cannot get MIF registers\n");
+ goto fail_mac_res;
+ }
+ if (start < rman_get_start(hsc->hsc_mac_res) ||
+ start + count - 1 > rman_get_end(hsc->hsc_mac_res)) {
+ device_printf(dev, "cannot move MIF registers to MAC "
+ "bank\n");
+ goto fail_mac_res;
+ }
+ sc->sc_mift = sc->sc_mact;
+ sc->sc_mifh = sc->sc_mach;
+ sc->sc_mifo = sc->sc_maco + start -
+ rman_get_start(hsc->hsc_mac_res);
+ } else {
+ sc->sc_mift = rman_get_bustag(hsc->hsc_mif_res);
+ sc->sc_mifh = rman_get_bushandle(hsc->hsc_mif_res);
+ }
+
+ hsc->hsc_irid = 0;
+ hsc->hsc_ires = bus_alloc_resource(dev, SYS_RES_IRQ, &hsc->hsc_irid, 0,
+ ~0, 1, RF_ACTIVE);
+ if (hsc->hsc_ires == NULL) {
+ device_printf(dev, "could not allocate interrupt\n");
+ error = ENXIO;
+ goto fail_mif_res;
+ }
+
+
+ OF_getetheraddr(dev, sc->sc_arpcom.ac_enaddr);
+
+ burst = sbus_get_burstsz(dev);
+ /* Translate into plain numerical format */
+ sc->sc_burst = (burst & SBUS_BURST_32) ? 32 :
+ (burst & SBUS_BURST_16) ? 16 : 0;
+
+ sc->sc_pci = 0; /* XXX: should all be done in bus_dma. */
+ sc->sc_dev = dev;
+
+ if ((error = hme_config(sc)) != 0) {
+ device_printf(dev, "could not be configured\n");
+ goto fail_ires;
+ }
+
+
+ if ((error = bus_setup_intr(dev, hsc->hsc_ires, INTR_TYPE_NET, hme_intr,
+ sc, &hsc->hsc_ih)) != 0) {
+ device_printf(dev, "couldn't establish interrupt\n");
+ goto fail_ires;
+ }
+ return (0);
+
+fail_ires:
+ bus_release_resource(dev, SYS_RES_IRQ, hsc->hsc_irid, hsc->hsc_ires);
+fail_mif_res:
+ if (hsc->hsc_mif_res != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_mif_rid,
+ hsc->hsc_mif_res);
+ }
+fail_mac_res:
+ bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_mac_rid,
+ hsc->hsc_mac_res);
+fail_erx_res:
+ bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_erx_rid,
+ hsc->hsc_erx_res);
+fail_etx_res:
+ bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_etx_rid,
+ hsc->hsc_etx_res);
+fail_seb_res:
+ bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_seb_rid,
+ hsc->hsc_seb_res);
+ return (ENXIO);
+}
diff --git a/sys/dev/hme/if_hmereg.h b/sys/dev/hme/if_hmereg.h
new file mode 100644
index 0000000..178871d
--- /dev/null
+++ b/sys/dev/hme/if_hmereg.h
@@ -0,0 +1,319 @@
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Paul Kranenburg.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: NetBSD: hmereg.h,v 1.7 2001/04/30 03:47:34 lukem Exp
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * HME Shared Ethernet Block register offsets
+ */
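+/*
+ * Register offsets in this file are byte offsets of 32-bit registers,
+ * hence the (n*4) notation.
+ */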
+#define HME_SEBI_RESET (0*4)
+#define HME_SEBI_CFG (1*4)
+#define HME_SEBI_STAT (64*4)
+#define HME_SEBI_IMASK (65*4)
+
+/* HME SEB bits. */
+#define HME_SEB_RESET_ETX 0x00000001 /* reset external transmitter */
+#define HME_SEB_RESET_ERX 0x00000002 /* reset external receiver */
+
+#define HME_SEB_CFG_BURSTMASK 0x00000003 /* covers all burst bits */
+#define HME_SEB_CFG_BURST16 0x00000000 /* 16 byte bursts */
+#define HME_SEB_CFG_BURST32 0x00000001 /* 32 byte bursts */
+#define HME_SEB_CFG_BURST64 0x00000002 /* 64 byte bursts */
+#define HME_SEB_CFG_64BIT 0x00000004 /* ? */
+#define HME_SEB_CFG_PARITY 0x00000008 /* ? */
+
+#define HME_SEB_STAT_GOTFRAME 0x00000001 /* frame received */
+#define HME_SEB_STAT_RCNTEXP 0x00000002 /* rx frame count expired */
+#define HME_SEB_STAT_ACNTEXP 0x00000004 /* align error count expired */
+#define HME_SEB_STAT_CCNTEXP 0x00000008 /* crc error count expired */
+#define HME_SEB_STAT_LCNTEXP 0x00000010 /* length error count expired */
+#define HME_SEB_STAT_RFIFOVF 0x00000020 /* rx fifo overflow */
+#define HME_SEB_STAT_CVCNTEXP 0x00000040 /* code violation counter exp */
+#define HME_SEB_STAT_STSTERR 0x00000080 /* xif sqe test failed */
+#define HME_SEB_STAT_SENTFRAME 0x00000100 /* frame sent */
+#define HME_SEB_STAT_TFIFO_UND 0x00000200 /* tx fifo underrun */
+#define HME_SEB_STAT_MAXPKTERR 0x00000400 /* max-packet size error */
+#define HME_SEB_STAT_NCNTEXP 0x00000800 /* normal collision count exp */
+#define HME_SEB_STAT_ECNTEXP 0x00001000 /* excess collision count exp */
+#define HME_SEB_STAT_LCCNTEXP 0x00002000 /* late collision count exp */
+#define HME_SEB_STAT_FCNTEXP 0x00004000 /* first collision count exp */
+#define HME_SEB_STAT_DTIMEXP 0x00008000 /* defer timer expired */
+#define HME_SEB_STAT_RXTOHOST 0x00010000 /* pkt moved from rx fifo->memory */
+#define HME_SEB_STAT_NORXD 0x00020000 /* out of receive descriptors */
+#define HME_SEB_STAT_RXERR 0x00040000 /* rx dma error */
+#define HME_SEB_STAT_RXLATERR 0x00080000 /* late error during rx dma */
+#define HME_SEB_STAT_RXPERR 0x00100000 /* parity error during rx dma */
+#define HME_SEB_STAT_RXTERR 0x00200000 /* tag error during rx dma */
+#define HME_SEB_STAT_EOPERR 0x00400000 /* tx descriptor did not set EOP */
+#define HME_SEB_STAT_MIFIRQ 0x00800000 /* mif needs attention */
+#define HME_SEB_STAT_HOSTTOTX 0x01000000 /* pkt moved from memory->tx fifo */
+#define HME_SEB_STAT_TXALL 0x02000000 /* all pkts in fifo transmitted */
+#define HME_SEB_STAT_TXEACK 0x04000000 /* error during tx dma */
+#define HME_SEB_STAT_TXLERR 0x08000000 /* late error during tx dma */
+#define HME_SEB_STAT_TXPERR 0x10000000 /* parity error during tx dma */
+#define HME_SEB_STAT_TXTERR	0x20000000	/* tag error during tx dma */
+#define HME_SEB_STAT_SLVERR 0x40000000 /* pio access error */
+#define HME_SEB_STAT_SLVPERR 0x80000000 /* pio access parity error */
+#define HME_SEB_STAT_BITS "\177\020" \
+ "b\0GOTFRAME\0b\1RCNTEXP\0b\2ACNTEXP\0" \
+ "b\3CCNTEXP\0b\4LCNTEXP\0b\5RFIFOVF\0" \
+ "b\6CVCNTEXP\0b\7STSTERR\0b\10SENTFRAME\0" \
+ "b\11TFIFO_UND\0b\12MAXPKTERR\0b\13NCNTEXP\0" \
+ "b\14ECNTEXP\0b\15LCCNTEXP\0b\16FCNTEXP\0" \
+ "b\17DTIMEXP\0b\20RXTOHOST\0b\21NORXD\0" \
+ "b\22RXERR\0b\23RXLATERR\0b\24RXPERR\0" \
+ "b\25RXTERR\0b\26EOPERR\0b\27MIFIRQ\0" \
+	"b\30HOSTTOTX\0b\31TXALL\0b\32TXEACK\0" \
+ "b\33TXLERR\0b\34TXPERR\0b\35TXTERR\0" \
+ "b\36SLVERR\0b\37SLVPERR\0\0"
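+/*
+ * The string above encodes the status bits in a "%b"-style bitmask format
+ * for debugging printouts.
+ */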
+
+#define HME_SEB_STAT_ALL_ERRORS \
+ (HME_SEB_STAT_SLVPERR | HME_SEB_STAT_SLVERR | HME_SEB_STAT_TXTERR |\
+ HME_SEB_STAT_TXPERR | HME_SEB_STAT_TXLERR | HME_SEB_STAT_TXEACK |\
+ HME_SEB_STAT_EOPERR | HME_SEB_STAT_RXTERR | HME_SEB_STAT_RXPERR |\
+ HME_SEB_STAT_RXLATERR | HME_SEB_STAT_RXERR | HME_SEB_STAT_NORXD |\
+ HME_SEB_STAT_DTIMEXP | HME_SEB_STAT_FCNTEXP | HME_SEB_STAT_LCCNTEXP |\
+ HME_SEB_STAT_ECNTEXP | HME_SEB_STAT_NCNTEXP | HME_SEB_STAT_MAXPKTERR|\
+ HME_SEB_STAT_TFIFO_UND| HME_SEB_STAT_STSTERR | HME_SEB_STAT_CVCNTEXP |\
+ HME_SEB_STAT_RFIFOVF | HME_SEB_STAT_LCNTEXP | HME_SEB_STAT_CCNTEXP |\
+ HME_SEB_STAT_ACNTEXP)
+
+#define HME_SEB_STAT_VLAN_ERRORS \
+ (HME_SEB_STAT_SLVPERR | HME_SEB_STAT_SLVERR | HME_SEB_STAT_TXTERR |\
+ HME_SEB_STAT_TXPERR | HME_SEB_STAT_TXLERR | HME_SEB_STAT_TXEACK |\
+ HME_SEB_STAT_EOPERR | HME_SEB_STAT_RXTERR | HME_SEB_STAT_RXPERR |\
+ HME_SEB_STAT_RXLATERR | HME_SEB_STAT_RXERR | HME_SEB_STAT_NORXD |\
+ HME_SEB_STAT_DTIMEXP | HME_SEB_STAT_FCNTEXP | HME_SEB_STAT_LCCNTEXP |\
+ HME_SEB_STAT_ECNTEXP | HME_SEB_STAT_NCNTEXP | \
+ HME_SEB_STAT_TFIFO_UND| HME_SEB_STAT_STSTERR | HME_SEB_STAT_CVCNTEXP |\
+ HME_SEB_STAT_RFIFOVF | HME_SEB_STAT_LCNTEXP | HME_SEB_STAT_CCNTEXP |\
+ HME_SEB_STAT_ACNTEXP)
+
+/*
+ * HME Transmitter register offsets
+ */
+#define HME_ETXI_PENDING (0*4) /* Pending/wakeup */
+#define HME_ETXI_CFG (1*4)
+#define HME_ETXI_RING (2*4) /* Descriptor Ring pointer */
+#define HME_ETXI_BBASE (3*4) /* Buffer base address (ro) */
+#define HME_ETXI_BDISP (4*4) /* Buffer displacement (ro) */
+#define HME_ETXI_FIFO_WPTR (5*4) /* FIFO write pointer */
+#define HME_ETXI_FIFO_SWPTR (6*4) /* FIFO shadow write pointer */
+#define HME_ETXI_FIFO_RPTR (7*4) /* FIFO read pointer */
+#define HME_ETXI_FIFO_SRPTR (8*4) /* FIFO shadow read pointer */
+#define HME_ETXI_FIFO_PKTCNT (9*4) /* FIFO packet counter */
+#define HME_ETXI_STATEMACHINE (10*4) /* State machine */
+#define HME_ETXI_RSIZE (11*4) /* Ring size */
+#define HME_ETXI_BPTR (12*4) /* Buffer pointer */
+
+/* TXI_PENDING bits */
+#define HME_ETX_TP_DMAWAKEUP 0x00000001 /* Start tx (rw, auto-clear) */
+
+/* TXI_CFG bits */
+#define HME_ETX_CFG_DMAENABLE 0x00000001 /* Enable TX dma */
+#define HME_ETX_CFG_FIFOTHRESH 0x000003fe /* TX fifo threshold */
+#define HME_ETX_CFG_IRQDAFTER 0x00000400 /* Intr after tx-fifo empty */
+#define HME_ETX_CFG_IRQDBEFORE 0x00000000 /* Intr before tx-fifo empty */
+
+/*
+ * HME Receiver register offsets
+ */
+#define HME_ERXI_CFG (0*4)
+#define HME_ERXI_RING (1*4) /* Descriptor Ring pointer */
+#define HME_ERXI_BPTR (2*4) /* Data Buffer pointer (ro) */
+#define HME_ERXI_FIFO_WPTR (3*4) /* FIFO write pointer */
+#define HME_ERXI_FIFO_SWPTR (4*4) /* FIFO shadow write pointer */
+#define HME_ERXI_FIFO_RPTR (5*4) /* FIFO read pointer */
+#define HME_ERXI_FIFO_SRPTR (6*4) /* FIFO shadow read pointer */
+#define HME_ERXI_STATEMACHINE (7*4) /* State machine */
+
+/* RXI_CFG bits */
+#define HME_ERX_CFG_DMAENABLE 0x00000001 /* Enable RX dma */
+#define HME_ERX_CFG_FBO_MASK 0x00000038 /* RX first byte offset */
+#define HME_ERX_CFG_FBO_SHIFT 0x00000003
+#define HME_ERX_CFG_RINGSIZE32 0x00000000 /* Descriptor ring size: 32 */
+#define HME_ERX_CFG_RINGSIZE64 0x00000200 /* Descriptor ring size: 64 */
+#define HME_ERX_CFG_RINGSIZE128 0x00000400 /* Descriptor ring size: 128 */
+#define HME_ERX_CFG_RINGSIZE256 0x00000600 /* Descriptor ring size: 256 */
+#define	HME_ERX_CFG_RINGSIZEMSK	0x00000600	/* Descriptor ring size mask */
+#define HME_ERX_CFG_CSUMSTART 0x007f0000 /* cksum offset */
+
+/*
+ * HME MAC-core register offsets
+ */
+#define HME_MACI_XIF (0*4)
+#define HME_MACI_TXSWRST (130*4) /* TX reset */
+#define HME_MACI_TXCFG (131*4) /* TX config */
+#define HME_MACI_JSIZE (139*4) /* TX jam size */
+#define HME_MACI_TXSIZE (140*4) /* TX max size */
+#define HME_MACI_NCCNT (144*4) /* TX normal collision cnt */
+#define HME_MACI_FCCNT (145*4) /* TX first collision cnt */
+#define HME_MACI_EXCNT (146*4) /* TX excess collision cnt */
+#define HME_MACI_LTCNT (147*4) /* TX late collision cnt */
+#define	HME_MACI_RANDSEED	(148*4)		/* Random number seed */
+#define HME_MACI_RXSWRST (194*4) /* RX reset */
+#define HME_MACI_RXCFG (195*4) /* RX config */
+#define HME_MACI_RXSIZE (196*4) /* RX max size */
+#define HME_MACI_MACADDR2 (198*4) /* MAC address */
+#define HME_MACI_MACADDR1 (199*4)
+#define HME_MACI_MACADDR0 (200*4)
+#define HME_MACI_HASHTAB3 (208*4) /* Address hash table */
+#define HME_MACI_HASHTAB2 (209*4)
+#define HME_MACI_HASHTAB1 (210*4)
+#define HME_MACI_HASHTAB0 (211*4)
+#define HME_MACI_AFILTER2 (212*4) /* Address filter */
+#define HME_MACI_AFILTER1 (213*4)
+#define HME_MACI_AFILTER0 (214*4)
+#define HME_MACI_AFILTER_MASK (215*4)
+
+/* XIF config register. */
+#define HME_MAC_XIF_OE 0x00000001 /* Output driver enable */
+#define HME_MAC_XIF_XLBACK 0x00000002 /* Loopback-mode XIF enable */
+#define HME_MAC_XIF_MLBACK 0x00000004 /* Loopback-mode MII enable */
+#define HME_MAC_XIF_MIIENABLE 0x00000008 /* MII receive buffer enable */
+#define HME_MAC_XIF_SQENABLE 0x00000010 /* SQE test enable */
+#define HME_MAC_XIF_SQETWIN 0x000003e0 /* SQE time window */
+#define HME_MAC_XIF_LANCE 0x00000010 /* Lance mode enable */
+#define HME_MAC_XIF_LIPG0 0x000003e0 /* Lance mode IPG0 */
+
+/* Transmit config register. */
+#define HME_MAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */
+#define HME_MAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */
+#define HME_MAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */
+#define HME_MAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */
+#define HME_MAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */
+#define HME_MAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */
+#define HME_MAC_TXCFG_DGIVEUP 0x00000400 /* Don't give up on transmits */
+
+/* Receive config register. */
+#define HME_MAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
+#define HME_MAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
+#define	HME_MAC_RXCFG_PMISC	0x00000040	/* Enable promiscuous mode */
+#define HME_MAC_RXCFG_DERR 0x00000080 /* Disable error checking */
+#define HME_MAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
+#define HME_MAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */
+#define HME_MAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */
+#define HME_MAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */
+#define HME_MAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */
+
+/*
+ * HME MIF register offsets
+ */
+#define HME_MIFI_BB_CLK (0*4) /* bit-bang clock */
+#define HME_MIFI_BB_DATA (1*4) /* bit-bang data */
+#define HME_MIFI_BB_OE (2*4) /* bit-bang output enable */
+#define HME_MIFI_FO (3*4) /* frame output */
+#define	HME_MIFI_CFG	(4*4)	/* Configuration */
+#define HME_MIFI_IMASK (5*4) /* Interrupt mask for status change */
+#define HME_MIFI_STAT (6*4) /* Status (ro, auto-clear) */
+#define HME_MIFI_SM (7*4) /* State machine (ro) */
+
+/* MIF Configuration register */
+#define HME_MIF_CFG_PHY 0x00000001 /* PHY select */
+#define HME_MIF_CFG_PE 0x00000002 /* Poll enable */
+#define HME_MIF_CFG_BBMODE 0x00000004 /* Bit-bang mode */
+#define	HME_MIF_CFG_PRADDR	0x000000f8	/* Poll register address */
+#define HME_MIF_CFG_MDI0 0x00000100 /* MDI_0 (ro) */
+#define HME_MIF_CFG_MDI1 0x00000200 /* MDI_1 (ro) */
+#define	HME_MIF_CFG_PPADDR	0x00007c00	/* Poll phy address */
+
+/* MIF Frame/Output register */
+#define HME_MIF_FO_ST 0xc0000000 /* Start of frame */
+#define HME_MIF_FO_ST_SHIFT 30 /* */
+#define HME_MIF_FO_OPC 0x30000000 /* Opcode */
+#define HME_MIF_FO_OPC_SHIFT 28 /* */
+#define HME_MIF_FO_PHYAD 0x0f800000 /* PHY Address */
+#define HME_MIF_FO_PHYAD_SHIFT 23 /* */
+#define HME_MIF_FO_REGAD 0x007c0000 /* Register Address */
+#define HME_MIF_FO_REGAD_SHIFT 18 /* */
+#define HME_MIF_FO_TAMSB 0x00020000 /* Turn-around MSB */
+#define HME_MIF_FO_TALSB 0x00010000 /* Turn-around LSB */
+#define HME_MIF_FO_DATA 0x0000ffff /* data to read or write */
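+/*
+ * Sketch of an MII read frame composed from the fields above (ST=01 and
+ * read opcode=10, per the MII management frame format):
+ *	(1 << HME_MIF_FO_ST_SHIFT) | (2 << HME_MIF_FO_OPC_SHIFT) |
+ *	    ((phy) << HME_MIF_FO_PHYAD_SHIFT) |
+ *	    ((reg) << HME_MIF_FO_REGAD_SHIFT) | HME_MIF_FO_TAMSB
+ */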
+
+/* Wired HME PHY addresses */
+#define HME_PHYAD_INTERNAL 1
+#define HME_PHYAD_EXTERNAL 0
+
+/*
+ * Buffer Descriptors.
+ */
+#ifdef notdef
+struct hme_xd {
+ volatile u_int32_t xd_flags;
+ volatile u_int32_t xd_addr; /* Buffer address (DMA) */
+};
+#endif
+#define HME_XD_SIZE 8
+#define HME_XD_FLAGS(base, index) ((base) + ((index) * HME_XD_SIZE) + 0)
+#define HME_XD_ADDR(base, index) ((base) + ((index) * HME_XD_SIZE) + 4)
+#define HME_XD_GETFLAGS(p, b, i) \
+ ((p) ? le32toh(*((u_int32_t *)HME_XD_FLAGS(b,i))) : \
+ (*((u_int32_t *)HME_XD_FLAGS(b,i))))
+#define HME_XD_SETFLAGS(p, b, i, f) do { \
+ *((u_int32_t *)HME_XD_FLAGS(b,i)) = ((p) ? htole32(f) : (f)); \
+} while(0)
+#define HME_XD_SETADDR(p, b, i, a) do { \
+ *((u_int32_t *)HME_XD_ADDR(b,i)) = ((p) ? htole32(a) : (a)); \
+} while(0)
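+/*
+ * In the accessors above, "p" is nonzero when the descriptors are
+ * little-endian (PCI), "b" is the CPU address of the descriptor area
+ * and "i" is the descriptor index.
+ */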
+
+/* Descriptor flag values */
+#define HME_XD_OWN 0x80000000 /* ownership: 1=hw, 0=sw */
+#define HME_XD_SOP 0x40000000 /* start of packet marker (tx) */
+#define HME_XD_OFL 0x40000000 /* buffer overflow (rx) */
+#define HME_XD_EOP 0x20000000 /* end of packet marker (tx) */
+#define HME_XD_TXCKSUM 0x10000000 /* checksum enable (tx) */
+#define HME_XD_RXLENMSK 0x3fff0000 /* packet length mask (rx) */
+#define HME_XD_RXLENSHIFT 16
+#define HME_XD_TXLENMSK 0x00003fff /* packet length mask (tx) */
+#define HME_XD_RXCKSUM 0x0000ffff /* packet checksum (rx) */
+
+/* Macros to encode/decode the receive buffer size from the flags field */
+#define HME_XD_ENCODE_RSIZE(sz) \
+ (((sz) << HME_XD_RXLENSHIFT) & HME_XD_RXLENMSK)
+#define HME_XD_DECODE_RSIZE(flags) \
+ (((flags) & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT)
+
+/* Provide encode/decode macros for the transmit buffers for symmetry */
+#define HME_XD_ENCODE_TSIZE(sz) \
+ (((sz) << 0) & HME_XD_TXLENMSK)
+#define HME_XD_DECODE_TSIZE(flags) \
+ (((flags) & HME_XD_TXLENMSK) >> 0)
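+
+/*
+ * Sketch: a single-fragment TX descriptor could be set up as
+ *	HME_XD_SETADDR(pci, base, i, dmaaddr);
+ *	HME_XD_SETFLAGS(pci, base, i,
+ *	    HME_XD_OWN | HME_XD_SOP | HME_XD_EOP | HME_XD_ENCODE_TSIZE(len));
+ * where HME_XD_OWN passes ownership of the descriptor to the hardware.
+ */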
+
+#define PCI_HME_BASEADDR 0x10
diff --git a/sys/dev/hme/if_hmevar.h b/sys/dev/hme/if_hmevar.h
new file mode 100644
index 0000000..13bf90c
--- /dev/null
+++ b/sys/dev/hme/if_hmevar.h
@@ -0,0 +1,152 @@
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Paul Kranenburg.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: NetBSD: hmevar.h,v 1.5 2000/06/25 01:10:04 eeh Exp
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/callout.h>
+
+/*
+ * Number of receive and transmit descriptors. For each receive descriptor,
+ * an mbuf cluster is allocated and set up to receive a packet, and a dma map
+ * is created. Therefore, this number should not be too high, to avoid
+ * wasting memory.
+ * TX descriptors have a lower static cost (a dma map is allocated, which
+ * could cause bounce buffers to be reserved; other than that, the only required
+ * memory is sizeof(struct hme_txdesc)).
+ * Both must be a multiple of 16, and <= 128.
+ */
+#define HME_NRXDESC 32
+#define HME_NTXDESC 64
+
+/* Maximum size of a mapped RX buffer. */
+#define HME_BUFSZ 1600
+
+/*
+ * RX DMA descriptor. The descriptors are preallocated; the dma map is
+ * reused.
+ */
+struct hme_rxdesc {
+ struct mbuf *hrx_m;
+ bus_dmamap_t hrx_dmamap;
+ int hrx_offs;
+ bus_size_t hrx_len;
+};
+
+struct hme_txdesc {
+ struct mbuf *htx_m;
+ bus_dmamap_t htx_dmamap;
+ int htx_flags;
+};
+
+/* Value for htx_flags */
+#define HTXF_MAPPED 1
+
+struct hme_ring {
+ /* Ring Descriptors */
+ caddr_t rb_membase; /* Packet buffer: CPU address */
+ bus_addr_t rb_dmabase; /* Packet buffer: DMA address */
+ caddr_t rb_txd; /* Transmit descriptors */
+ bus_addr_t rb_txddma; /* DMA address of same */
+ caddr_t rb_rxd; /* Receive descriptors */
+ bus_addr_t rb_rxddma; /* DMA address of same */
+
+ /* Ring Descriptor state */
+ int rb_tdhead, rb_tdtail;
+ int rb_rdtail;
+ int rb_td_nbusy;
+
+ /* Descriptors */
+ struct hme_rxdesc rb_rxdesc[HME_NRXDESC];
+ struct hme_txdesc rb_txdesc[HME_NTXDESC];
+
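+	/* Spare DMA map, swapped with an RX descriptor's map on reload. */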
+ bus_dmamap_t rb_spare_dmamap;
+};
+
+struct hme_softc {
+ struct arpcom sc_arpcom;
+ struct ifmedia sc_ifmedia;
+ device_t sc_dev;
+ device_t sc_miibus;
+ struct mii_data *sc_mii; /* MII media control */
+ struct callout sc_tick_ch; /* tick callout */
+
+ /* The following bus handles are to be provided by the bus front-end */
+ bus_dma_tag_t sc_pdmatag; /* bus dma parent tag */
+ bus_dma_tag_t sc_cdmatag; /* control bus dma tag */
+ bus_dmamap_t sc_cdmamap; /* control bus dma handle */
+ bus_dma_tag_t sc_rdmatag; /* RX bus dma tag */
+	bus_dma_tag_t	sc_tdmatag;	/* TX bus dma tag */
+ bus_space_handle_t sc_sebh; /* HME Global registers */
+ bus_space_handle_t sc_erxh; /* HME ERX registers */
+ bus_space_handle_t sc_etxh; /* HME ETX registers */
+ bus_space_handle_t sc_mach; /* HME MAC registers */
+ bus_space_handle_t sc_mifh; /* HME MIF registers */
+ bus_space_tag_t sc_sebt; /* HME Global registers */
+ bus_space_tag_t sc_erxt; /* HME ERX registers */
+ bus_space_tag_t sc_etxt; /* HME ETX registers */
+ bus_space_tag_t sc_mact; /* HME MAC registers */
+ bus_space_tag_t sc_mift; /* HME MIF registers */
+ bus_addr_t sc_sebo; /* HME Global registers */
+ bus_addr_t sc_erxo; /* HME ERX registers */
+ bus_addr_t sc_etxo; /* HME ETX registers */
+ bus_addr_t sc_maco; /* HME MAC registers */
+ bus_addr_t sc_mifo; /* HME MIF registers */
+ int sc_burst; /* DVMA burst size in effect */
+ int sc_phys[2]; /* MII instance -> PHY map */
+
+ int sc_pci; /* XXXXX -- PCI buses are LE. */
+
+ /* Ring descriptor */
+ struct hme_ring sc_rb;
+
+ int sc_debug;
+
+ /* Special hardware hooks */
+ void (*sc_hwreset)(struct hme_softc *);
+ void (*sc_hwinit)(struct hme_softc *);
+};
+
+extern devclass_t hme_devclass;
+
+int hme_config(struct hme_softc *);
+void hme_intr(void *);
+
+/* MII methods & callbacks */
+int hme_mii_readreg(device_t, int, int);
+int hme_mii_writereg(device_t, int, int, int);
+void hme_mii_statchg(device_t);