diff options
Diffstat (limited to 'sys/dev/gem')
-rw-r--r-- | sys/dev/gem/if_gem.c | 1936 | ||||
-rw-r--r-- | sys/dev/gem/if_gem_pci.c | 256 | ||||
-rw-r--r-- | sys/dev/gem/if_gemreg.h | 539 | ||||
-rw-r--r-- | sys/dev/gem/if_gemvar.h | 238 |
4 files changed, 2969 insertions, 0 deletions
diff --git a/sys/dev/gem/if_gem.c b/sys/dev/gem/if_gem.c new file mode 100644 index 0000000..5809e7b --- /dev/null +++ b/sys/dev/gem/if_gem.c @@ -0,0 +1,1936 @@ +/*- + * Copyright (C) 2001 Eduardo Horvath. + * Copyright (c) 2001-2003 Thomas Moestl + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * Driver for Sun GEM ethernet controllers. 
+ */ + +#if 0 +#define GEM_DEBUG +#endif + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> +#include <sys/callout.h> +#include <sys/endian.h> +#include <sys/mbuf.h> +#include <sys/malloc.h> +#include <sys/kernel.h> +#include <sys/socket.h> +#include <sys/sockio.h> + +#include <net/bpf.h> +#include <net/ethernet.h> +#include <net/if.h> +#include <net/if_arp.h> +#include <net/if_dl.h> +#include <net/if_media.h> + +#include <machine/bus.h> + +#include <dev/mii/mii.h> +#include <dev/mii/miivar.h> + +#include <dev/gem/if_gemreg.h> +#include <dev/gem/if_gemvar.h> + +#define TRIES 10000 + +static void gem_start(struct ifnet *); +static void gem_stop(struct ifnet *, int); +static int gem_ioctl(struct ifnet *, u_long, caddr_t); +static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int); +static void gem_rxdma_callback(void *, bus_dma_segment_t *, int, + bus_size_t, int); +static void gem_txdma_callback(void *, bus_dma_segment_t *, int, + bus_size_t, int); +static void gem_tick(void *); +static void gem_watchdog(struct ifnet *); +static void gem_init(void *); +static void gem_init_regs(struct gem_softc *sc); +static int gem_ringsize(int sz); +static int gem_meminit(struct gem_softc *); +static int gem_load_txmbuf(struct gem_softc *, struct mbuf *); +static void gem_mifinit(struct gem_softc *); +static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, + u_int32_t clr, u_int32_t set); +static int gem_reset_rx(struct gem_softc *); +static int gem_reset_tx(struct gem_softc *); +static int gem_disable_rx(struct gem_softc *); +static int gem_disable_tx(struct gem_softc *); +static void gem_rxdrain(struct gem_softc *); +static int gem_add_rxbuf(struct gem_softc *, int); +static void gem_setladrf(struct gem_softc *); + +struct mbuf *gem_get(struct gem_softc *, int, int); +static void gem_eint(struct gem_softc *, u_int); +static void gem_rint(struct gem_softc *); +#if 0 +static void gem_rint_timeout(void *); +#endif +static void gem_tint(struct 
gem_softc *); +#ifdef notyet +static void gem_power(int, void *); +#endif + +devclass_t gem_devclass; +DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); +MODULE_DEPEND(gem, miibus, 1, 1, 1); + +#ifdef GEM_DEBUG +#include <sys/ktr.h> +#define KTR_GEM KTR_CT2 +#endif + +#define GEM_NSEGS GEM_NTXDESC + +/* + * gem_attach: + * + * Attach a Gem interface to the system. + */ +int +gem_attach(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct mii_softc *child; + int i, error; + u_int32_t v; + + /* Make sure the chip is stopped. */ + ifp->if_softc = sc; + gem_reset(sc); + + error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS, + BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); + if (error) + return (error); + + error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, + 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, + &sc->sc_rdmatag); + if (error) + goto fail_ptag; + + error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, + BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); + if (error) + goto fail_rtag; + + error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + sizeof(struct gem_control_data), 1, + sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW, + busdma_lock_mutex, &Giant, &sc->sc_cdmatag); + if (error) + goto fail_ttag; + + /* + * Allocate the control data structures, and create and load the + * DMA map for it. 
+ */ + if ((error = bus_dmamem_alloc(sc->sc_cdmatag, + (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) { + device_printf(sc->sc_dev, "unable to allocate control data," + " error = %d\n", error); + goto fail_ctag; + } + + sc->sc_cddma = 0; + if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, + sc->sc_control_data, sizeof(struct gem_control_data), + gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { + device_printf(sc->sc_dev, "unable to load control data DMA " + "map, error = %d\n", error); + goto fail_cmem; + } + + /* + * Initialize the transmit job descriptors. + */ + STAILQ_INIT(&sc->sc_txfreeq); + STAILQ_INIT(&sc->sc_txdirtyq); + + /* + * Create the transmit buffer DMA maps. + */ + error = ENOMEM; + for (i = 0; i < GEM_TXQUEUELEN; i++) { + struct gem_txsoft *txs; + + txs = &sc->sc_txsoft[i]; + txs->txs_mbuf = NULL; + txs->txs_ndescs = 0; + if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, + &txs->txs_dmamap)) != 0) { + device_printf(sc->sc_dev, "unable to create tx DMA map " + "%d, error = %d\n", i, error); + goto fail_txd; + } + STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); + } + + /* + * Create the receive buffer DMA maps. + */ + for (i = 0; i < GEM_NRXDESC; i++) { + if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, + &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { + device_printf(sc->sc_dev, "unable to create rx DMA map " + "%d, error = %d\n", i, error); + goto fail_rxd; + } + sc->sc_rxsoft[i].rxs_mbuf = NULL; + } + + + gem_mifinit(sc); + + if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange, + gem_mediastatus)) != 0) { + device_printf(sc->sc_dev, "phy probe failed: %d\n", error); + goto fail_rxd; + } + sc->sc_mii = device_get_softc(sc->sc_miibus); + + /* + * From this point forward, the attachment cannot fail. A failure + * before this point releases all resources that may have been + * allocated. + */ + + /* Announce ourselves. 
*/ + device_printf(sc->sc_dev, "Ethernet address:"); + for (i = 0; i < 6; i++) + printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]); + + /* Get RX FIFO size */ + sc->sc_rxfifosize = 64 * + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE); + printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024); + + /* Get TX FIFO size */ + v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE); + printf(", %uKB TX fifo\n", v / 16); + + /* Initialize ifnet structure. */ + ifp->if_softc = sc; + if_initname(ifp, device_get_name(sc->sc_dev), + device_get_unit(sc->sc_dev)); + ifp->if_mtu = ETHERMTU; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_start = gem_start; + ifp->if_ioctl = gem_ioctl; + ifp->if_watchdog = gem_watchdog; + ifp->if_init = gem_init; + ifp->if_output = ether_output; + ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN; + /* + * Walk along the list of attached MII devices and + * establish an `MII instance' to `phy number' + * mapping. We'll use this mapping in media change + * requests to determine which phy to use to program + * the MIF configuration register. + */ + for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; + child = LIST_NEXT(child, mii_list)) { + /* + * Note: we support just two PHYs: the built-in + * internal device and an external on the MII + * connector. + */ + if (child->mii_phy > 1 || child->mii_inst > 1) { + device_printf(sc->sc_dev, "cannot accomodate " + "MII device %s at phy %d, instance %d\n", + device_get_name(child->mii_dev), + child->mii_phy, child->mii_inst); + continue; + } + + sc->sc_phys[child->mii_inst] = child->mii_phy; + } + + /* + * Now select and activate the PHY we will use. + * + * The order of preference is External (MDI1), + * Internal (MDI0), Serial Link (no MII). 
+ */ + if (sc->sc_phys[1]) { +#ifdef GEM_DEBUG + printf("using external phy\n"); +#endif + sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; + } else { +#ifdef GEM_DEBUG + printf("using internal phy\n"); +#endif + sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; + } + bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG, + sc->sc_mif_config); + /* Attach the interface. */ + ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr); + +#if notyet + /* + * Add a suspend hook to make sure we come back up after a + * resume. + */ + sc->sc_powerhook = powerhook_establish(gem_power, sc); + if (sc->sc_powerhook == NULL) + device_printf(sc->sc_dev, "WARNING: unable to establish power " + "hook\n"); +#endif + + callout_init(&sc->sc_tick_ch, 0); + callout_init(&sc->sc_rx_ch, 0); + return (0); + + /* + * Free any resources we've allocated during the failed attach + * attempt. Do this in reverse order and fall through. + */ +fail_rxd: + for (i = 0; i < GEM_NRXDESC; i++) { + if (sc->sc_rxsoft[i].rxs_dmamap != NULL) + bus_dmamap_destroy(sc->sc_rdmatag, + sc->sc_rxsoft[i].rxs_dmamap); + } +fail_txd: + for (i = 0; i < GEM_TXQUEUELEN; i++) { + if (sc->sc_txsoft[i].txs_dmamap != NULL) + bus_dmamap_destroy(sc->sc_tdmatag, + sc->sc_txsoft[i].txs_dmamap); + } + bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); +fail_cmem: + bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, + sc->sc_cddmamap); +fail_ctag: + bus_dma_tag_destroy(sc->sc_cdmatag); +fail_ttag: + bus_dma_tag_destroy(sc->sc_tdmatag); +fail_rtag: + bus_dma_tag_destroy(sc->sc_rdmatag); +fail_ptag: + bus_dma_tag_destroy(sc->sc_pdmatag); + return (error); +} + +void +gem_detach(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + int i; + + ether_ifdetach(ifp); + gem_stop(ifp, 1); + device_delete_child(sc->sc_dev, sc->sc_miibus); + + for (i = 0; i < GEM_NRXDESC; i++) { + if (sc->sc_rxsoft[i].rxs_dmamap != NULL) + bus_dmamap_destroy(sc->sc_rdmatag, + sc->sc_rxsoft[i].rxs_dmamap); + } + for (i = 0; i < GEM_TXQUEUELEN; 
i++) { + if (sc->sc_txsoft[i].txs_dmamap != NULL) + bus_dmamap_destroy(sc->sc_tdmatag, + sc->sc_txsoft[i].txs_dmamap); + } + GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); + GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); + bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, + sc->sc_cddmamap); + bus_dma_tag_destroy(sc->sc_cdmatag); + bus_dma_tag_destroy(sc->sc_tdmatag); + bus_dma_tag_destroy(sc->sc_rdmatag); + bus_dma_tag_destroy(sc->sc_pdmatag); +} + +void +gem_suspend(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + + gem_stop(ifp, 0); +} + +void +gem_resume(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + + if (ifp->if_flags & IFF_UP) + gem_init(ifp); +} + +static void +gem_cddma_callback(xsc, segs, nsegs, error) + void *xsc; + bus_dma_segment_t *segs; + int nsegs; + int error; +{ + struct gem_softc *sc = (struct gem_softc *)xsc; + + if (error != 0) + return; + if (nsegs != 1) { + /* can't happen... */ + panic("gem_cddma_callback: bad control buffer segment count"); + } + sc->sc_cddma = segs[0].ds_addr; +} + +static void +gem_rxdma_callback(xsc, segs, nsegs, totsz, error) + void *xsc; + bus_dma_segment_t *segs; + int nsegs; + bus_size_t totsz; + int error; +{ + struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc; + + if (error != 0) + return; + KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count")); + rxs->rxs_paddr = segs[0].ds_addr; +} + +static void +gem_txdma_callback(xsc, segs, nsegs, totsz, error) + void *xsc; + bus_dma_segment_t *segs; + int nsegs; + bus_size_t totsz; + int error; +{ + struct gem_txdma *txd = (struct gem_txdma *)xsc; + struct gem_softc *sc = txd->txd_sc; + struct gem_txsoft *txs = txd->txd_txs; + bus_size_t len = 0; + uint64_t flags = 0; + int seg, nexttx; + + if (error != 0) + return; + /* + * Ensure we have enough descriptors free to describe + * the packet. 
Note, we always reserve one descriptor + * at the end of the ring as a termination point, to + * prevent wrap-around. + */ + if (nsegs > sc->sc_txfree - 1) { + txs->txs_ndescs = -1; + return; + } + txs->txs_ndescs = nsegs; + + nexttx = txs->txs_firstdesc; + /* + * Initialize the transmit descriptors. + */ + for (seg = 0; seg < nsegs; + seg++, nexttx = GEM_NEXTTX(nexttx)) { +#ifdef GEM_DEBUG + CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len " + "%lx, addr %#lx (%#lx)", seg, nexttx, + segs[seg].ds_len, segs[seg].ds_addr, + GEM_DMA_WRITE(sc, segs[seg].ds_addr)); +#endif + + if (segs[seg].ds_len == 0) + continue; + sc->sc_txdescs[nexttx].gd_addr = + GEM_DMA_WRITE(sc, segs[seg].ds_addr); + KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE, + ("gem_txdma_callback: segment size too large!")); + flags = segs[seg].ds_len & GEM_TD_BUFSIZE; + if (len == 0) { +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, " + "tx %d", seg, nexttx); +#endif + flags |= GEM_TD_START_OF_PACKET; + if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { + sc->sc_txwin = 0; + flags |= GEM_TD_INTERRUPT_ME; + } + } + if (len + segs[seg].ds_len == totsz) { +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, " + "tx %d", seg, nexttx); +#endif + flags |= GEM_TD_END_OF_PACKET; + } + sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags); + txs->txs_lastdesc = nexttx; + len += segs[seg].ds_len; + } + KASSERT((flags & GEM_TD_END_OF_PACKET) != 0, + ("gem_txdma_callback: missed end of packet!")); +} + +static void +gem_tick(arg) + void *arg; +{ + struct gem_softc *sc = arg; + int s; + + s = splnet(); + mii_tick(sc->sc_mii); + splx(s); + + callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); +} + +static int +gem_bitwait(sc, r, clr, set) + struct gem_softc *sc; + bus_addr_t r; + u_int32_t clr; + u_int32_t set; +{ + int i; + u_int32_t reg; + + for (i = TRIES; i--; DELAY(100)) { + reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r); + if ((r & clr) == 0 && (r & set) == set) + return 
(1); + } + return (0); +} + +void +gem_reset(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + int s; + + s = splnet(); +#ifdef GEM_DEBUG + CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev)); +#endif + gem_reset_rx(sc); + gem_reset_tx(sc); + + /* Do a full reset */ + bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); + if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) + device_printf(sc->sc_dev, "cannot reset device\n"); + splx(s); +} + + +/* + * gem_rxdrain: + * + * Drain the receive queue. + */ +static void +gem_rxdrain(sc) + struct gem_softc *sc; +{ + struct gem_rxsoft *rxs; + int i; + + for (i = 0; i < GEM_NRXDESC; i++) { + rxs = &sc->sc_rxsoft[i]; + if (rxs->rxs_mbuf != NULL) { + bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); + m_freem(rxs->rxs_mbuf); + rxs->rxs_mbuf = NULL; + } + } +} + +/* + * Reset the whole thing. + */ +static void +gem_stop(ifp, disable) + struct ifnet *ifp; + int disable; +{ + struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; + struct gem_txsoft *txs; + +#ifdef GEM_DEBUG + CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev)); +#endif + + callout_stop(&sc->sc_tick_ch); + + /* XXX - Should we reset these instead? */ + gem_disable_tx(sc); + gem_disable_rx(sc); + + /* + * Release any queued transmit buffers. + */ + while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { + STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); + if (txs->txs_ndescs != 0) { + bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); + if (txs->txs_mbuf != NULL) { + m_freem(txs->txs_mbuf); + txs->txs_mbuf = NULL; + } + } + STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); + } + + if (disable) + gem_rxdrain(sc); + + /* + * Mark the interface down and cancel the watchdog timer. 
+ */ + ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); + ifp->if_timer = 0; +} + +/* + * Reset the receiver + */ +int +gem_reset_rx(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + + /* + * Resetting while DMA is in progress can cause a bus hang, so we + * disable DMA first. + */ + gem_disable_rx(sc); + bus_space_write_4(t, h, GEM_RX_CONFIG, 0); + /* Wait till it finishes */ + if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0)) + device_printf(sc->sc_dev, "cannot disable read dma\n"); + + /* Wait 5ms extra. */ + DELAY(5000); + + /* Finally, reset the ERX */ + bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX); + /* Wait till it finishes */ + if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { + device_printf(sc->sc_dev, "cannot reset receiver\n"); + return (1); + } + return (0); +} + + +/* + * Reset the transmitter + */ +static int +gem_reset_tx(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + int i; + + /* + * Resetting while DMA is in progress can cause a bus hang, so we + * disable DMA first. + */ + gem_disable_tx(sc); + bus_space_write_4(t, h, GEM_TX_CONFIG, 0); + /* Wait till it finishes */ + if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0)) + device_printf(sc->sc_dev, "cannot disable read dma\n"); + + /* Wait 5ms extra. */ + DELAY(5000); + + /* Finally, reset the ETX */ + bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX); + /* Wait till it finishes */ + for (i = TRIES; i--; DELAY(100)) + if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0) + break; + if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { + device_printf(sc->sc_dev, "cannot reset receiver\n"); + return (1); + } + return (0); +} + +/* + * disable receiver. 
+ */ +static int +gem_disable_rx(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + u_int32_t cfg; + + /* Flip the enable bit */ + cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); + cfg &= ~GEM_MAC_RX_ENABLE; + bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); + + /* Wait for it to finish */ + return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); +} + +/* + * disable transmitter. + */ +static int +gem_disable_tx(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + u_int32_t cfg; + + /* Flip the enable bit */ + cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); + cfg &= ~GEM_MAC_TX_ENABLE; + bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); + + /* Wait for it to finish */ + return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); +} + +/* + * Initialize interface. + */ +static int +gem_meminit(sc) + struct gem_softc *sc; +{ + struct gem_rxsoft *rxs; + int i, error; + + /* + * Initialize the transmit descriptor ring. + */ + memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); + for (i = 0; i < GEM_NTXDESC; i++) { + sc->sc_txdescs[i].gd_flags = 0; + sc->sc_txdescs[i].gd_addr = 0; + } + sc->sc_txfree = GEM_MAXTXFREE; + sc->sc_txnext = 0; + sc->sc_txwin = 0; + + /* + * Initialize the receive descriptor and receive job + * descriptor rings. + */ + for (i = 0; i < GEM_NRXDESC; i++) { + rxs = &sc->sc_rxsoft[i]; + if (rxs->rxs_mbuf == NULL) { + if ((error = gem_add_rxbuf(sc, i)) != 0) { + device_printf(sc->sc_dev, "unable to " + "allocate or map rx buffer %d, error = " + "%d\n", i, error); + /* + * XXX Should attempt to run with fewer receive + * XXX buffers instead of just failing. 
+ */ + gem_rxdrain(sc); + return (1); + } + } else + GEM_INIT_RXDESC(sc, i); + } + sc->sc_rxptr = 0; + GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); + GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); + + return (0); +} + +static int +gem_ringsize(sz) + int sz; +{ + int v = 0; + + switch (sz) { + case 32: + v = GEM_RING_SZ_32; + break; + case 64: + v = GEM_RING_SZ_64; + break; + case 128: + v = GEM_RING_SZ_128; + break; + case 256: + v = GEM_RING_SZ_256; + break; + case 512: + v = GEM_RING_SZ_512; + break; + case 1024: + v = GEM_RING_SZ_1024; + break; + case 2048: + v = GEM_RING_SZ_2048; + break; + case 4096: + v = GEM_RING_SZ_4096; + break; + case 8192: + v = GEM_RING_SZ_8192; + break; + default: + printf("gem: invalid Receive Descriptor ring size\n"); + break; + } + return (v); +} + +/* + * Initialization of interface; set up initialization block + * and transmit/receive descriptor rings. + */ +static void +gem_init(xsc) + void *xsc; +{ + struct gem_softc *sc = (struct gem_softc *)xsc; + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + int s; + u_int32_t v; + + s = splnet(); + +#ifdef GEM_DEBUG + CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev)); +#endif + /* + * Initialization sequence. The numbered steps below correspond + * to the sequence outlined in section 6.3.5.1 in the Ethernet + * Channel Engine manual (part of the PCIO manual). + * See also the STP2002-STQ document from Sun Microsystems. + */ + + /* step 1 & 2. Reset the Ethernet Channel */ + gem_stop(&sc->sc_arpcom.ac_if, 0); + gem_reset(sc); +#ifdef GEM_DEBUG + CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev)); +#endif + + /* Re-initialize the MIF */ + gem_mifinit(sc); + + /* step 3. Setup data structures in host memory */ + gem_meminit(sc); + + /* step 4. TX MAC registers & counters */ + gem_init_regs(sc); + /* XXX: VLAN code from NetBSD temporarily removed. 
*/ + bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, + (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16)); + + /* step 5. RX MAC registers & counters */ + gem_setladrf(sc); + + /* step 6 & 7. Program Descriptor Ring Base Addresses */ + /* NOTE: we use only 32-bit DMA addresses here. */ + bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); + bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); + + bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); + bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); +#ifdef GEM_DEBUG + CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", + GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); +#endif + + /* step 8. Global Configuration & Interrupt Mask */ + bus_space_write_4(t, h, GEM_INTMASK, + ~(GEM_INTR_TX_INTME| + GEM_INTR_TX_EMPTY| + GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| + GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| + GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| + GEM_INTR_BERR)); + bus_space_write_4(t, h, GEM_MAC_RX_MASK, + GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT); + bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ + bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ + + /* step 9. ETX Configuration: use mostly default values */ + + /* Enable DMA */ + v = gem_ringsize(GEM_NTXDESC /*XXX*/); + bus_space_write_4(t, h, GEM_TX_CONFIG, + v|GEM_TX_CONFIG_TXDMA_EN| + ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); + + /* step 10. ERX Configuration */ + + /* Encode Receive Descriptor ring size: four possible values */ + v = gem_ringsize(GEM_NRXDESC /*XXX*/); + + /* Enable DMA */ + bus_space_write_4(t, h, GEM_RX_CONFIG, + v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)| + (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN| + (0<<GEM_RX_CONFIG_CXM_START_SHFT)); + /* + * The following value is for an OFF Threshold of about 3/4 full + * and an ON Threshold of 1/4 full. 
+ */ + bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, + (3 * sc->sc_rxfifosize / 256) | + ( (sc->sc_rxfifosize / 256) << 12)); + bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6); + + /* step 11. Configure Media */ + mii_mediachg(sc->sc_mii); + + /* step 12. RX_MAC Configuration Register */ + v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); + v |= GEM_MAC_RX_ENABLE; + bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); + + /* step 14. Issue Transmit Pending command */ + + /* step 15. Give the reciever a swift kick */ + bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); + + /* Start the one second timer. */ + callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); + + ifp->if_flags |= IFF_RUNNING; + ifp->if_flags &= ~IFF_OACTIVE; + ifp->if_timer = 0; + sc->sc_ifflags = ifp->if_flags; + splx(s); +} + +static int +gem_load_txmbuf(sc, m0) + struct gem_softc *sc; + struct mbuf *m0; +{ + struct gem_txdma txd; + struct gem_txsoft *txs; + int error; + + /* Get a work queue entry. */ + if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { + /* Ran out of descriptors. */ + return (-1); + } + txd.txd_sc = sc; + txd.txd_txs = txs; + txs->txs_mbuf = m0; + txs->txs_firstdesc = sc->sc_txnext; + error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0, + gem_txdma_callback, &txd, BUS_DMA_NOWAIT); + if (error != 0) + goto fail; + if (txs->txs_ndescs == -1) { + error = -1; + goto fail; + } + + /* Sync the DMA map. 
*/ + bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, + BUS_DMASYNC_PREWRITE); + +#ifdef GEM_DEBUG + CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, " + "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc, + txs->txs_ndescs); +#endif + STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); + STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); + + sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); + sc->sc_txfree -= txs->txs_ndescs; + return (0); + +fail: +#ifdef GEM_DEBUG + CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error); +#endif + bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); + return (error); +} + +static void +gem_init_regs(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + const u_char *laddr = sc->sc_arpcom.ac_enaddr; + u_int32_t v; + + /* These regs are not cleared on reset */ + if (!sc->sc_inited) { + + /* Wooo. Magic values. */ + bus_space_write_4(t, h, GEM_MAC_IPG0, 0); + bus_space_write_4(t, h, GEM_MAC_IPG1, 8); + bus_space_write_4(t, h, GEM_MAC_IPG2, 4); + + bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); + /* Max frame and max burst size */ + bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, + ETHER_MAX_LEN | (0x2000<<16)); + + bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); + bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); + bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); + /* Dunno.... 
*/ + bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); + bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, + ((laddr[5]<<8)|laddr[4])&0x3ff); + + /* Secondary MAC addr set to 0:0:0:0:0:0 */ + bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); + + /* MAC control addr set to 01:80:c2:00:00:01 */ + bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); + bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); + bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); + + /* MAC filter addr set to 0:0:0:0:0:0 */ + bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); + + bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); + bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); + + sc->sc_inited = 1; + } + + /* Counters need to be zeroed */ + bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); + bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); + bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); + bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); + + /* Un-pause stuff */ +#if 0 + bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); +#else + bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0); +#endif + + /* + * Set the station address. + */ + bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); + bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); + bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); + + /* + * Enable MII outputs. Enable GMII if there is a gigabit PHY. 
+ */ + sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG); + v = GEM_MAC_XIF_TX_MII_ENA; + if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { + v |= GEM_MAC_XIF_FDPLX_LED; + if (sc->sc_flags & GEM_GIGABIT) + v |= GEM_MAC_XIF_GMII_MODE; + } + bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v); +} + +static void +gem_start(ifp) + struct ifnet *ifp; +{ + struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; + struct mbuf *m0 = NULL; + int firsttx, ntx = 0, ofree, txmfail; + + if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) + return; + + /* + * Remember the previous number of free descriptors and + * the first descriptor we'll use. + */ + ofree = sc->sc_txfree; + firsttx = sc->sc_txnext; + +#ifdef GEM_DEBUG + CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d", + device_get_name(sc->sc_dev), ofree, firsttx); +#endif + + /* + * Loop through the send queue, setting up transmit descriptors + * until we drain the queue, or use up all available transmit + * descriptors. + */ + txmfail = 0; + do { + /* + * Grab a packet off the queue. + */ + IF_DEQUEUE(&ifp->if_snd, m0); + if (m0 == NULL) + break; + + txmfail = gem_load_txmbuf(sc, m0); + if (txmfail > 0) { + /* Drop the mbuf and complain. */ + printf("gem_start: error %d while loading mbuf dma " + "map\n", txmfail); + continue; + } + /* Not enough descriptors. */ + if (txmfail == -1) { + if (sc->sc_txfree == GEM_MAXTXFREE) + panic("gem_start: mbuf chain too long!"); + IF_PREPEND(&ifp->if_snd, m0); + break; + } + + ntx++; + /* Kick the transmitter. */ +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "%s: gem_start: kicking tx %d", + device_get_name(sc->sc_dev), sc->sc_txnext); +#endif + bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, + sc->sc_txnext); + + if (ifp->if_bpf != NULL) + bpf_mtap(ifp->if_bpf, m0); + } while (1); + + if (txmfail == -1 || sc->sc_txfree == 0) { + /* No more slots left; notify upper layer. 
*/ + ifp->if_flags |= IFF_OACTIVE; + } + + if (ntx > 0) { + GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); + +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", + device_get_name(sc->sc_dev), firsttx); +#endif + + /* Set a watchdog timer in case the chip flakes out. */ + ifp->if_timer = 5; +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "%s: gem_start: watchdog %d", + device_get_name(sc->sc_dev), ifp->if_timer); +#endif + } +} + +/* + * Transmit interrupt. + */ +static void +gem_tint(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mac = sc->sc_h; + struct gem_txsoft *txs; + int txlast; + int progress = 0; + + +#ifdef GEM_DEBUG + CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev)); +#endif + + /* + * Unload collision counters + */ + ifp->if_collisions += + bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + + bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) + + bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + + bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); + + /* + * then clear the hardware counters. + */ + bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); + bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); + bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); + bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); + + /* + * Go through our Tx list and free mbufs for those + * frames that have been transmitted. 
+ */ + GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); + while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { + +#ifdef GEM_DEBUG + if (ifp->if_flags & IFF_DEBUG) { + int i; + printf(" txsoft %p transmit chain:\n", txs); + for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { + printf("descriptor %d: ", i); + printf("gd_flags: 0x%016llx\t", (long long) + GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); + printf("gd_addr: 0x%016llx\n", (long long) + GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); + if (i == txs->txs_lastdesc) + break; + } + } +#endif + + /* + * In theory, we could harveast some descriptors before + * the ring is empty, but that's a bit complicated. + * + * GEM_TX_COMPLETION points to the last descriptor + * processed +1. + */ + txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION); +#ifdef GEM_DEBUG + CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, " + "txs->txs_lastdesc = %d, txlast = %d", + txs->txs_firstdesc, txs->txs_lastdesc, txlast); +#endif + if (txs->txs_firstdesc <= txs->txs_lastdesc) { + if ((txlast >= txs->txs_firstdesc) && + (txlast <= txs->txs_lastdesc)) + break; + } else { + /* Ick -- this command wraps */ + if ((txlast >= txs->txs_firstdesc) || + (txlast <= txs->txs_lastdesc)) + break; + } + +#ifdef GEM_DEBUG + CTR0(KTR_GEM, "gem_tint: releasing a desc"); +#endif + STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); + + sc->sc_txfree += txs->txs_ndescs; + + bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); + if (txs->txs_mbuf != NULL) { + m_freem(txs->txs_mbuf); + txs->txs_mbuf = NULL; + } + + STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); + + ifp->if_opackets++; + progress = 1; + } + +#ifdef GEM_DEBUG + CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x " + "GEM_TX_DATA_PTR %llx " + "GEM_TX_COMPLETION %x", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE), + ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h, + GEM_TX_DATA_PTR_HI) << 32) | + 
bus_space_read_4(sc->sc_bustag, sc->sc_h, + GEM_TX_DATA_PTR_LO), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)); +#endif + + if (progress) { + if (sc->sc_txfree == GEM_NTXDESC - 1) + sc->sc_txwin = 0; + + /* Freed some descriptors, so reset IFF_OACTIVE and restart. */ + ifp->if_flags &= ~IFF_OACTIVE; + gem_start(ifp); + + if (STAILQ_EMPTY(&sc->sc_txdirtyq)) + ifp->if_timer = 0; + } + +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "%s: gem_tint: watchdog %d", + device_get_name(sc->sc_dev), ifp->if_timer); +#endif +} + +#if 0 +static void +gem_rint_timeout(arg) + void *arg; +{ + + gem_rint((struct gem_softc *)arg); +} +#endif + +/* + * Receive interrupt. + */ +static void +gem_rint(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + struct gem_rxsoft *rxs; + struct mbuf *m; + u_int64_t rxstat; + u_int32_t rxcomp; + int i, len, progress = 0; + + callout_stop(&sc->sc_rx_ch); +#ifdef GEM_DEBUG + CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev)); +#endif + + /* + * Read the completion register once. This limits + * how long the following loop can execute. + */ + rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION); + +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d", + sc->sc_rxptr, rxcomp); +#endif + GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); + for (i = sc->sc_rxptr; i != rxcomp; + i = GEM_NEXTRX(i)) { + rxs = &sc->sc_rxsoft[i]; + + rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); + + if (rxstat & GEM_RD_OWN) { +#if 0 /* XXX: In case of emergency, re-enable this. */ + /* + * The descriptor is still marked as owned, although + * it is supposed to have completed. This has been + * observed on some machines. Just exiting here + * might leave the packet sitting around until another + * one arrives to trigger a new interrupt, which is + * generally undesirable, so set up a timeout. 
+ */ + callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, + gem_rint_timeout, sc); +#endif + break; + } + + progress++; + ifp->if_ipackets++; + + if (rxstat & GEM_RD_BAD_CRC) { + ifp->if_ierrors++; + device_printf(sc->sc_dev, "receive error: CRC error\n"); + GEM_INIT_RXDESC(sc, i); + continue; + } + +#ifdef GEM_DEBUG + if (ifp->if_flags & IFF_DEBUG) { + printf(" rxsoft %p descriptor %d: ", rxs, i); + printf("gd_flags: 0x%016llx\t", (long long) + GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); + printf("gd_addr: 0x%016llx\n", (long long) + GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); + } +#endif + + /* + * No errors; receive the packet. Note the Gem + * includes the CRC with every packet. + */ + len = GEM_RD_BUFLEN(rxstat); + + /* + * Allocate a new mbuf cluster. If that fails, we are + * out of memory, and must drop the packet and recycle + * the buffer that's already attached to this descriptor. + */ + m = rxs->rxs_mbuf; + if (gem_add_rxbuf(sc, i) != 0) { + ifp->if_ierrors++; + GEM_INIT_RXDESC(sc, i); + continue; + } + m->m_data += 2; /* We're already off by two */ + + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; + + /* Pass it on. */ + (*ifp->if_input)(ifp, m); + } + + if (progress) { + GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); + /* Update the receive pointer. */ + if (i == sc->sc_rxptr) { + device_printf(sc->sc_dev, "rint: ring wrap\n"); + } + sc->sc_rxptr = i; + bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i)); + } + +#ifdef GEM_DEBUG + CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d", + sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); +#endif +} + + +/* + * gem_add_rxbuf: + * + * Add a receive buffer to the indicated descriptor. 
+ */ +static int +gem_add_rxbuf(sc, idx) + struct gem_softc *sc; + int idx; +{ + struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; + struct mbuf *m; + int error; + + m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (m == NULL) + return (ENOBUFS); + m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; + +#ifdef GEM_DEBUG + /* bzero the packet to check dma */ + memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); +#endif + + if (rxs->rxs_mbuf != NULL) { + bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); + } + + rxs->rxs_mbuf = m; + + error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap, + m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT); + if (error != 0 || rxs->rxs_paddr == 0) { + device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " + "%d\n", idx, error); + panic("gem_add_rxbuf"); /* XXX */ + } + + bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); + + GEM_INIT_RXDESC(sc, idx); + + return (0); +} + + +static void +gem_eint(sc, status) + struct gem_softc *sc; + u_int status; +{ + + if ((status & GEM_INTR_MIF) != 0) { + device_printf(sc->sc_dev, "XXXlink status changed\n"); + return; + } + + device_printf(sc->sc_dev, "status=%x\n", status); +} + + +void +gem_intr(v) + void *v; +{ + struct gem_softc *sc = (struct gem_softc *)v; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t seb = sc->sc_h; + u_int32_t status; + + status = bus_space_read_4(t, seb, GEM_STATUS); +#ifdef GEM_DEBUG + CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", + device_get_name(sc->sc_dev), (status>>19), + (u_int)status); +#endif + + if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) + gem_eint(sc, status); + + if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) + gem_tint(sc); + + if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) + gem_rint(sc); + + /* We should eventually do more than just print out error stats. 
*/ + if (status & GEM_INTR_TX_MAC) { + int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); + if (txstat & ~GEM_MAC_TX_XMIT_DONE) + device_printf(sc->sc_dev, "MAC tx fault, status %x\n", + txstat); + if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) + gem_init(sc); + } + if (status & GEM_INTR_RX_MAC) { + int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); + if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) + device_printf(sc->sc_dev, "MAC rx fault, status %x\n", + rxstat); + if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0) + gem_init(sc); + } +} + + +static void +gem_watchdog(ifp) + struct ifnet *ifp; +{ + struct gem_softc *sc = ifp->if_softc; + +#ifdef GEM_DEBUG + CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " + "GEM_MAC_RX_CONFIG %x", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)); + CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " + "GEM_MAC_TX_CONFIG %x", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG)); +#endif + + device_printf(sc->sc_dev, "device timeout\n"); + ++ifp->if_oerrors; + + /* Try to get more packets going. 
*/ + gem_start(ifp); +} + +/* + * Initialize the MII Management Interface + */ +static void +gem_mifinit(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mif = sc->sc_h; + + /* Configure the MIF in frame mode */ + sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); + sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; + bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); +} + +/* + * MII interface + * + * The GEM MII interface supports at least three different operating modes: + * + * Bitbang mode is implemented using data, clock and output enable registers. + * + * Frame mode is implemented by loading a complete frame into the frame + * register and polling the valid bit for completion. + * + * Polling mode uses the frame register but completion is indicated by + * an interrupt. + * + */ +int +gem_mii_readreg(dev, phy, reg) + device_t dev; + int phy, reg; +{ + struct gem_softc *sc = device_get_softc(dev); + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mif = sc->sc_h; + int n; + u_int32_t v; + +#ifdef GEM_DEBUG_PHY + printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); +#endif + +#if 0 + /* Select the desired PHY in the MIF configuration register */ + v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); + /* Clear PHY select bit */ + v &= ~GEM_MIF_CONFIG_PHY_SEL; + if (phy == GEM_PHYAD_EXTERNAL) + /* Set PHY select bit to get at external device */ + v |= GEM_MIF_CONFIG_PHY_SEL; + bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); +#endif + + /* Construct the frame command */ + v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | + GEM_MIF_FRAME_READ; + + bus_space_write_4(t, mif, GEM_MIF_FRAME, v); + for (n = 0; n < 100; n++) { + DELAY(1); + v = bus_space_read_4(t, mif, GEM_MIF_FRAME); + if (v & GEM_MIF_FRAME_TA0) + return (v & GEM_MIF_FRAME_DATA); + } + + device_printf(sc->sc_dev, "mii_read timeout\n"); + return (0); +} + +int +gem_mii_writereg(dev, phy, reg, val) + device_t dev; + int phy, reg, val; +{ 
+ struct gem_softc *sc = device_get_softc(dev); + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mif = sc->sc_h; + int n; + u_int32_t v; + +#ifdef GEM_DEBUG_PHY + printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); +#endif + +#if 0 + /* Select the desired PHY in the MIF configuration register */ + v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); + /* Clear PHY select bit */ + v &= ~GEM_MIF_CONFIG_PHY_SEL; + if (phy == GEM_PHYAD_EXTERNAL) + /* Set PHY select bit to get at external device */ + v |= GEM_MIF_CONFIG_PHY_SEL; + bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); +#endif + /* Construct the frame command */ + v = GEM_MIF_FRAME_WRITE | + (phy << GEM_MIF_PHY_SHIFT) | + (reg << GEM_MIF_REG_SHIFT) | + (val & GEM_MIF_FRAME_DATA); + + bus_space_write_4(t, mif, GEM_MIF_FRAME, v); + for (n = 0; n < 100; n++) { + DELAY(1); + v = bus_space_read_4(t, mif, GEM_MIF_FRAME); + if (v & GEM_MIF_FRAME_TA0) + return (1); + } + + device_printf(sc->sc_dev, "mii_write timeout\n"); + return (0); +} + +void +gem_mii_statchg(dev) + device_t dev; +{ + struct gem_softc *sc = device_get_softc(dev); +#ifdef GEM_DEBUG + int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); +#endif + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mac = sc->sc_h; + u_int32_t v; + +#ifdef GEM_DEBUG + if (sc->sc_debug) + printf("gem_mii_statchg: status change: phy = %d\n", + sc->sc_phys[instance]); +#endif + + /* Set tx full duplex options */ + bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); + DELAY(10000); /* reg must be cleared and delay before changing. 
*/ + v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| + GEM_MAC_TX_ENABLE; + if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) { + v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; + } + bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); + + /* XIF Configuration */ + /* We should really calculate all this rather than rely on defaults */ + v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG); + v = GEM_MAC_XIF_LINK_LED; + v |= GEM_MAC_XIF_TX_MII_ENA; + + /* If an external transceiver is connected, enable its MII drivers */ + sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); + if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { + /* External MII needs echo disable if half duplex. */ + if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) + /* turn on full duplex LED */ + v |= GEM_MAC_XIF_FDPLX_LED; + else + /* half duplex -- disable echo */ + v |= GEM_MAC_XIF_ECHO_DISABL; + + if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T) + v |= GEM_MAC_XIF_GMII_MODE; + else + v &= ~GEM_MAC_XIF_GMII_MODE; + } else { + /* Internal MII needs buf enable */ + v |= GEM_MAC_XIF_MII_BUF_ENA; + } + bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); +} + +int +gem_mediachange(ifp) + struct ifnet *ifp; +{ + struct gem_softc *sc = ifp->if_softc; + + /* XXX Add support for serial media. */ + + return (mii_mediachg(sc->sc_mii)); +} + +void +gem_mediastatus(ifp, ifmr) + struct ifnet *ifp; + struct ifmediareq *ifmr; +{ + struct gem_softc *sc = ifp->if_softc; + + if ((ifp->if_flags & IFF_UP) == 0) + return; + + mii_pollstat(sc->sc_mii); + ifmr->ifm_active = sc->sc_mii->mii_media_active; + ifmr->ifm_status = sc->sc_mii->mii_media_status; +} + +/* + * Process an ioctl request. 
+ */ +static int +gem_ioctl(ifp, cmd, data) + struct ifnet *ifp; + u_long cmd; + caddr_t data; +{ + struct gem_softc *sc = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *)data; + int s, error = 0; + + switch (cmd) { + case SIOCSIFADDR: + case SIOCGIFADDR: + case SIOCSIFMTU: + error = ether_ioctl(ifp, cmd, data); + break; + case SIOCSIFFLAGS: + if (ifp->if_flags & IFF_UP) { + if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC) + gem_setladrf(sc); + else + gem_init(sc); + } else { + if (ifp->if_flags & IFF_RUNNING) + gem_stop(ifp, 0); + } + sc->sc_ifflags = ifp->if_flags; + error = 0; + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + gem_setladrf(sc); + error = 0; + break; + case SIOCGIFMEDIA: + case SIOCSIFMEDIA: + error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); + break; + default: + error = ENOTTY; + break; + } + + /* Try to get things going again */ + if (ifp->if_flags & IFF_UP) + gem_start(ifp); + splx(s); + return (error); +} + +/* + * Set up the logical address filter. + */ +static void +gem_setladrf(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct ifmultiaddr *inm; + struct sockaddr_dl *sdl; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + u_char *cp; + u_int32_t crc; + u_int32_t hash[16]; + u_int32_t v; + int len; + int i; + + /* Get current RX configuration */ + v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); + + /* + * Turn off promiscuous mode, promiscuous group mode (all multicast), + * and hash filter. Depending on the case, the right bit will be + * enabled. 
+ */ + v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| + GEM_MAC_RX_PROMISC_GRP); + + if ((ifp->if_flags & IFF_PROMISC) != 0) { + /* Turn on promiscuous mode */ + v |= GEM_MAC_RX_PROMISCUOUS; + goto chipit; + } + if ((ifp->if_flags & IFF_ALLMULTI) != 0) { + hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; + ifp->if_flags |= IFF_ALLMULTI; + v |= GEM_MAC_RX_PROMISC_GRP; + goto chipit; + } + + /* + * Set up multicast address filter by passing all multicast addresses + * through a crc generator, and then using the high order 8 bits as an + * index into the 256 bit logical address filter. The high order 4 + * bits selects the word, while the other 4 bits select the bit within + * the word (where bit 0 is the MSB). + */ + + /* Clear hash table */ + memset(hash, 0, sizeof(hash)); + + TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) { + if (inm->ifma_addr->sa_family != AF_LINK) + continue; + sdl = (struct sockaddr_dl *)inm->ifma_addr; + cp = LLADDR(sdl); + crc = 0xffffffff; + for (len = sdl->sdl_alen; --len >= 0;) { + int octet = *cp++; + int i; + +#define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */ + for (i = 0; i < 8; i++) { + if ((crc & 1) ^ (octet & 1)) { + crc >>= 1; + crc ^= MC_POLY_LE; + } else { + crc >>= 1; + } + octet >>= 1; + } + } + /* Just want the 8 most significant bits. */ + crc >>= 24; + + /* Set the corresponding bit in the filter. */ + hash[crc >> 4] |= 1 << (15 - (crc & 15)); + } + + v |= GEM_MAC_RX_HASH_FILTER; + ifp->if_flags &= ~IFF_ALLMULTI; + + /* Now load the hash table into the chip (if we are using it) */ + for (i = 0; i < 16; i++) { + bus_space_write_4(t, h, + GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), + hash[i]); + } + +chipit: + bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); +} diff --git a/sys/dev/gem/if_gem_pci.c b/sys/dev/gem/if_gem_pci.c new file mode 100644 index 0000000..92fc1d3 --- /dev/null +++ b/sys/dev/gem/if_gem_pci.c @@ -0,0 +1,256 @@ +/* + * Copyright (C) 2001 Eduardo Horvath. 
+ * All rights reserved. + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: NetBSD: if_gem_pci.c,v 1.7 2001/10/18 15:09:15 thorpej Exp + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * PCI bindings for Sun GEM ethernet controllers. 
+ */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> +#include <sys/malloc.h> +#include <sys/kernel.h> +#include <sys/resource.h> +#include <sys/socket.h> + +#include <machine/endian.h> + +#include <net/ethernet.h> +#include <net/if.h> +#include <net/if_arp.h> +#include <net/if_dl.h> +#include <net/if_media.h> + +#include <machine/bus.h> +#include <machine/resource.h> +#include <dev/ofw/openfirm.h> +#include <machine/ofw_machdep.h> + +#include <sys/rman.h> + +#include <dev/mii/mii.h> +#include <dev/mii/miivar.h> + +#include <dev/gem/if_gemreg.h> +#include <dev/gem/if_gemvar.h> + +#include <dev/pci/pcivar.h> +#include <dev/pci/pcireg.h> + +#include "miibus_if.h" + +struct gem_pci_softc { + struct gem_softc gsc_gem; /* GEM device */ + struct resource *gsc_sres; + int gsc_srid; + struct resource *gsc_ires; + int gsc_irid; + void *gsc_ih; +}; + +static int gem_pci_probe(device_t); +static int gem_pci_attach(device_t); +static int gem_pci_detach(device_t); +static int gem_pci_suspend(device_t); +static int gem_pci_resume(device_t); + +static device_method_t gem_pci_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, gem_pci_probe), + DEVMETHOD(device_attach, gem_pci_attach), + DEVMETHOD(device_detach, gem_pci_detach), + DEVMETHOD(device_suspend, gem_pci_suspend), + DEVMETHOD(device_resume, gem_pci_resume), + /* Use the suspend handler here, it is all that is required. 
*/ + DEVMETHOD(device_shutdown, gem_pci_suspend), + + /* bus interface */ + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_driver_added, bus_generic_driver_added), + + /* MII interface */ + DEVMETHOD(miibus_readreg, gem_mii_readreg), + DEVMETHOD(miibus_writereg, gem_mii_writereg), + DEVMETHOD(miibus_statchg, gem_mii_statchg), + + { 0, 0 } +}; + +static driver_t gem_pci_driver = { + "gem", + gem_pci_methods, + sizeof(struct gem_pci_softc) +}; + + +DRIVER_MODULE(gem, pci, gem_pci_driver, gem_devclass, 0, 0); +MODULE_DEPEND(gem, pci, 1, 1, 1); +MODULE_DEPEND(gem, ether, 1, 1, 1); + +struct gem_pci_dev { + u_int32_t gpd_devid; + int gpd_variant; + char *gpd_desc; +} gem_pci_devlist[] = { + { 0x1101108e, GEM_SUN_GEM, "Sun ERI 10/100 Ethernet Adaptor" }, + { 0x2bad108e, GEM_SUN_GEM, "Sun GEM Gigabit Ethernet Adaptor" }, + { 0x0021106b, GEM_APPLE_GMAC, "Apple GMAC Ethernet Adaptor" }, + { 0x0024106b, GEM_APPLE_GMAC, "Apple GMAC2 Ethernet Adaptor" }, + { 0, 0, NULL } +}; + +/* + * Attach routines need to be split out to different bus-specific files. + */ +static int +gem_pci_probe(dev) + device_t dev; +{ + int i; + u_int32_t devid; + struct gem_pci_softc *gsc; + + devid = pci_get_devid(dev); + for (i = 0; gem_pci_devlist[i].gpd_desc != NULL; i++) { + if (devid == gem_pci_devlist[i].gpd_devid) { + device_set_desc(dev, gem_pci_devlist[i].gpd_desc); + gsc = device_get_softc(dev); + gsc->gsc_gem.sc_variant = + gem_pci_devlist[i].gpd_variant; + return (0); + } + } + + return (ENXIO); +} + +static int +gem_pci_attach(dev) + device_t dev; +{ + struct gem_pci_softc *gsc = device_get_softc(dev); + struct gem_softc *sc = &gsc->gsc_gem; + + pci_enable_busmaster(dev); + + /* + * Some Sun GEMs/ERIs do have their intpin register bogusly set to 0, + * although it should be 1. correct that. 
+ */ + if (pci_get_intpin(dev) == 0) + pci_set_intpin(dev, 1); + + sc->sc_dev = dev; + sc->sc_pci = 1; /* XXX */ + + gsc->gsc_srid = PCI_GEM_BASEADDR; + gsc->gsc_sres = bus_alloc_resource(dev, SYS_RES_MEMORY, &gsc->gsc_srid, + 0, ~0, 1, RF_ACTIVE); + if (gsc->gsc_sres == NULL) { + device_printf(dev, "failed to allocate bus space resource\n"); + return (ENXIO); + } + + gsc->gsc_irid = 0; + gsc->gsc_ires = bus_alloc_resource(dev, SYS_RES_IRQ, &gsc->gsc_irid, 0, + ~0, 1, RF_SHAREABLE | RF_ACTIVE); + if (gsc->gsc_ires == NULL) { + device_printf(dev, "failed to allocate interrupt resource\n"); + goto fail_sres; + } + + sc->sc_bustag = rman_get_bustag(gsc->gsc_sres); + sc->sc_h = rman_get_bushandle(gsc->gsc_sres); + + /* All platform that this driver is used on must provide this. */ + OF_getetheraddr(dev, sc->sc_arpcom.ac_enaddr); + + /* + * call the main configure + */ + if (gem_attach(sc) != 0) { + device_printf(dev, "could not be configured\n"); + goto fail_ires; + } + + if (bus_setup_intr(dev, gsc->gsc_ires, INTR_TYPE_NET, gem_intr, sc, + &gsc->gsc_ih) != 0) { + device_printf(dev, "failed to set up interrupt\n"); + gem_detach(sc); + goto fail_ires; + } + return (0); + +fail_ires: + bus_release_resource(dev, SYS_RES_IRQ, gsc->gsc_irid, gsc->gsc_ires); +fail_sres: + bus_release_resource(dev, SYS_RES_MEMORY, gsc->gsc_srid, gsc->gsc_sres); + return (ENXIO); +} + +static int +gem_pci_detach(dev) + device_t dev; +{ + struct gem_pci_softc *gsc = device_get_softc(dev); + struct gem_softc *sc = &gsc->gsc_gem; + + gem_detach(sc); + + bus_teardown_intr(dev, gsc->gsc_ires, gsc->gsc_ih); + bus_release_resource(dev, SYS_RES_IRQ, gsc->gsc_irid, gsc->gsc_ires); + bus_release_resource(dev, SYS_RES_MEMORY, gsc->gsc_srid, gsc->gsc_sres); + return (0); +} + +static int +gem_pci_suspend(dev) + device_t dev; +{ + struct gem_pci_softc *gsc = device_get_softc(dev); + struct gem_softc *sc = &gsc->gsc_gem; + + gem_suspend(sc); + return (0); +} + +static int +gem_pci_resume(dev) + device_t 
dev; +{ + struct gem_pci_softc *gsc = device_get_softc(dev); + struct gem_softc *sc = &gsc->gsc_gem; + + gem_resume(sc); + return (0); +} diff --git a/sys/dev/gem/if_gemreg.h b/sys/dev/gem/if_gemreg.h new file mode 100644 index 0000000..700c732 --- /dev/null +++ b/sys/dev/gem/if_gemreg.h @@ -0,0 +1,539 @@ +/* + * Copyright (C) 2001 Eduardo Horvath. + * All rights reserved. + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: NetBSD: gemreg.h,v 1.15 2002/05/11 00:36:02 matt Exp + * + * $FreeBSD$ + */ + +#ifndef _IF_GEMREG_H +#define _IF_GEMREG_H + +/* Register definitions for Sun GEM gigabit ethernet */ + +#define GEM_SEB_STATE 0x0000 /* SEB state reg, R/O */ +#define GEM_CONFIG 0x0004 /* config reg */ +#define GEM_STATUS 0x000c /* status reg */ +/* Note: Reading the status reg clears bits 0-6 */ +#define GEM_INTMASK 0x0010 +#define GEM_INTACK 0x0014 /* Interrupt acknowledge, W/O */ +#define GEM_STATUS_ALIAS 0x001c +/* This is the same as the GEM_STATUS reg but reading it does not clear bits. */ +#define GEM_ERROR_STATUS 0x1000 /* PCI error status R/C */ +#define GEM_ERROR_MASK 0x1004 +#define GEM_BIF_CONFIG 0x1008 /* BIF config reg */ +#define GEM_BIF_DIAG 0x100c +#define GEM_RESET 0x1010 /* Software reset register */ + + +/* Bits in GEM_SEB register */ +#define GEM_SEB_ARB 0x000000002 /* Arbitration status */ +#define GEM_SEB_RXWON 0x000000004 + + +/* Bits in GEM_CONFIG register */ +#define GEM_CONFIG_BURST_64 0x000000000 /* 0->infininte, 1->64KB */ +#define GEM_CONFIG_BURST_INF 0x000000001 /* 0->infininte, 1->64KB */ +#define GEM_CONFIG_TXDMA_LIMIT 0x00000003e +#define GEM_CONFIG_RXDMA_LIMIT 0x0000007c0 + +#define GEM_CONFIG_TXDMA_LIMIT_SHIFT 1 +#define GEM_CONFIG_RXDMA_LIMIT_SHIFT 6 + + +/* Top part of GEM_STATUS has TX completion information */ +#define GEM_STATUS_TX_COMPL 0xfff800000 /* TX completion reg. */ + + +/* Interrupt bits, for both the GEM_STATUS and GEM_INTMASK regs. 
*/ +#define GEM_INTR_TX_INTME 0x000000001 /* Frame w/INTME bit set sent */ +#define GEM_INTR_TX_EMPTY 0x000000002 /* TX ring empty */ +#define GEM_INTR_TX_DONE 0x000000004 /* TX complete */ +#define GEM_INTR_RX_DONE 0x000000010 /* Got a packet */ +#define GEM_INTR_RX_NOBUF 0x000000020 +#define GEM_INTR_RX_TAG_ERR 0x000000040 +#define GEM_INTR_PCS 0x000002000 +#define GEM_INTR_TX_MAC 0x000004000 +#define GEM_INTR_RX_MAC 0x000008000 +#define GEM_INTR_MAC_CONTROL 0x000010000 /* MAC control interrupt */ +#define GEM_INTR_MIF 0x000020000 +#define GEM_INTR_BERR 0x000040000 /* Bus error interrupt */ +#define GEM_INTR_BITS "\177\020" \ + "b\0INTME\0b\1TXEMPTY\0b\2TXDONE\0" \ + "b\4RXDONE\0b\5RXNOBUF\0b\6RX_TAG_ERR\0" \ + "b\15PCS\0b\16TXMAC\0b\17RXMAC\0" \ + "b\20MAC_CONTROL\0b\21MIF\0b\22BERR\0\0" \ + + + +/* GEM_ERROR_STATUS and GEM_ERROR_MASK PCI error bits */ +#define GEM_ERROR_STAT_BADACK 0x000000001 /* No ACK64# */ +#define GEM_ERROR_STAT_DTRTO 0x000000002 /* Delayed xaction timeout */ +#define GEM_ERROR_STAT_OTHERS 0x000000004 + + +/* GEM_BIF_CONFIG register bits */ +#define GEM_BIF_CONFIG_SLOWCLK 0x000000001 /* Parity error timing */ +#define GEM_BIF_CONFIG_HOST_64 0x000000002 /* 64-bit host */ +#define GEM_BIF_CONFIG_B64D_DIS 0x000000004 /* no 64-bit data cycle */ +#define GEM_BIF_CONFIG_M66EN 0x000000008 + + +/* GEM_RESET register bits -- TX and RX self clear when complete. 
*/ +#define GEM_RESET_TX 0x000000001 /* Reset TX half */ +#define GEM_RESET_RX 0x000000002 /* Reset RX half */ +#define GEM_RESET_RSTOUT 0x000000004 /* Force PCI RSTOUT# */ + + +/* GEM TX DMA registers */ +#define GEM_TX_KICK 0x2000 /* Write last valid desc + 1 */ +#define GEM_TX_CONFIG 0x2004 +#define GEM_TX_RING_PTR_LO 0x2008 +#define GEM_TX_RING_PTR_HI 0x200c + +#define GEM_TX_FIFO_WR_PTR 0x2014 /* FIFO write pointer */ +#define GEM_TX_FIFO_SDWR_PTR 0x2018 /* FIFO shadow write pointer */ +#define GEM_TX_FIFO_RD_PTR 0x201c /* FIFO read pointer */ +#define GEM_TX_FIFO_SDRD_PTR 0x2020 /* FIFO shadow read pointer */ +#define GEM_TX_FIFO_PKT_CNT 0x2024 /* FIFO packet counter */ + +#define GEM_TX_STATE_MACHINE 0x2028 /* ETX state machine reg */ +#define GEM_TX_DATA_PTR_LO 0x2030 +#define GEM_TX_DATA_PTR_HI 0x2034 + +#define GEM_TX_COMPLETION 0x2100 +#define GEM_TX_FIFO_ADDRESS 0x2104 +#define GEM_TX_FIFO_TAG 0x2108 +#define GEM_TX_FIFO_DATA_LO 0x210c +#define GEM_TX_FIFO_DATA_HI_T1 0x2110 +#define GEM_TX_FIFO_DATA_HI_T0 0x2114 +#define GEM_TX_FIFO_SIZE 0x2118 +#define GEM_TX_DEBUG 0x3028 + + +/* GEM_TX_CONFIG register bits. */ +#define GEM_TX_CONFIG_TXDMA_EN 0x00000001 /* TX DMA enable */ +#define GEM_TX_CONFIG_TXRING_SZ 0x0000001e /* TX ring size */ +#define GEM_TX_CONFIG_TXFIFO_TH 0x001ffc00 /* TX fifo threshold */ +#define GEM_TX_CONFIG_PACED 0x00200000 /* TX_all_int modifier */ + +#define GEM_RING_SZ_32 (0<<1) /* 32 descriptors */ +#define GEM_RING_SZ_64 (1<<1) +#define GEM_RING_SZ_128 (2<<1) +#define GEM_RING_SZ_256 (3<<1) +#define GEM_RING_SZ_512 (4<<1) +#define GEM_RING_SZ_1024 (5<<1) +#define GEM_RING_SZ_2048 (6<<1) +#define GEM_RING_SZ_4096 (7<<1) +#define GEM_RING_SZ_8192 (8<<1) + + +/* GEM_TX_COMPLETION register bits */ +#define GEM_TX_COMPLETION_MASK 0x00001fff /* # of last descriptor */ + + +/* GEM RX DMA registers */ +#define GEM_RX_CONFIG 0x4000 +#define GEM_RX_RING_PTR_LO 0x4004 /* 64-bits unaligned GAK! 
*/
+#define	GEM_RX_RING_PTR_HI	0x4008	/* 64-bits unaligned GAK! */
+
+#define	GEM_RX_FIFO_WR_PTR	0x400c	/* FIFO write pointer */
+#define	GEM_RX_FIFO_SDWR_PTR	0x4010	/* FIFO shadow write pointer */
+#define	GEM_RX_FIFO_RD_PTR	0x4014	/* FIFO read pointer */
+#define	GEM_RX_FIFO_PKT_CNT	0x4018	/* FIFO packet counter */
+
+#define	GEM_RX_STATE_MACHINE	0x401c	/* ERX state machine reg */
+#define	GEM_RX_PAUSE_THRESH	0x4020
+
+#define	GEM_RX_DATA_PTR_LO	0x4024	/* ERX data pointer low */
+#define	GEM_RX_DATA_PTR_HI	0x4028	/* Damn thing is unaligned */
+
+#define	GEM_RX_KICK		0x4100	/* Write last valid desc + 1 */
+#define	GEM_RX_COMPLETION	0x4104	/* First pending desc */
+#define	GEM_RX_BLANKING		0x4108	/* Interrupt blanking reg */
+
+#define	GEM_RX_FIFO_ADDRESS	0x410c
+#define	GEM_RX_FIFO_TAG		0x4110
+#define	GEM_RX_FIFO_DATA_LO	0x4114
+#define	GEM_RX_FIFO_DATA_HI_T1	0x4118
+#define	GEM_RX_FIFO_DATA_HI_T0	0x411c
+#define	GEM_RX_FIFO_SIZE	0x4120
+
+
+/* GEM_RX_CONFIG register bits. */
+#define	GEM_RX_CONFIG_RXDMA_EN	0x00000001	/* RX DMA enable */
+#define	GEM_RX_CONFIG_RXRING_SZ	0x0000001e	/* RX ring size */
+#define	GEM_RX_CONFIG_BATCH_DIS	0x00000020	/* desc batching disable */
+#define	GEM_RX_CONFIG_FBOFF	0x00001c00	/* first byte offset */
+#define	GEM_RX_CONFIG_CXM_START	0x000fe000	/* checksum start offset */
+#define	GEM_RX_CONFIG_FIFO_THRS	0x07000000	/* fifo threshold size */
+
+#define	GEM_THRSH_64	0
+#define	GEM_THRSH_128	1
+#define	GEM_THRSH_256	2
+#define	GEM_THRSH_512	3
+#define	GEM_THRSH_1024	4
+#define	GEM_THRSH_2048	5
+
+#define	GEM_RX_CONFIG_FIFO_THRS_SHIFT	24
+#define	GEM_RX_CONFIG_FBOFF_SHFT	10
+#define	GEM_RX_CONFIG_CXM_START_SHFT	13
+
+
+/* GEM_RX_PAUSE_THRESH register bits -- sizes in multiples of 64 bytes */
+#define	GEM_RX_PTH_XOFF_THRESH	0x000001ff
+#define	GEM_RX_PTH_XON_THRESH	0x001ff000
+
+
+/* GEM_RX_BLANKING register bits */
+#define	GEM_RX_BLANKING_PACKETS	0x000001ff	/* Delay intr for x packets */
+#define	GEM_RX_BLANKING_TIME	0x000ff000	/* Delay intr for x ticks */
+#define	GEM_RX_BLANKING_TIME_SHIFT 12
+/* One tick is 2048 PCI clocks, or 16us at 66MHz */
+
+
+/* GEM_MAC registers */
+#define	GEM_MAC_TXRESET		0x6000	/* Store 1, cleared when done */
+#define	GEM_MAC_RXRESET		0x6004	/* ditto */
+#define	GEM_MAC_SEND_PAUSE_CMD	0x6008
+#define	GEM_MAC_TX_STATUS	0x6010
+#define	GEM_MAC_RX_STATUS	0x6014
+#define	GEM_MAC_CONTROL_STATUS	0x6018	/* MAC control status reg */
+#define	GEM_MAC_TX_MASK		0x6020	/* TX MAC mask register */
+#define	GEM_MAC_RX_MASK		0x6024
+#define	GEM_MAC_CONTROL_MASK	0x6028
+#define	GEM_MAC_TX_CONFIG	0x6030
+#define	GEM_MAC_RX_CONFIG	0x6034
+#define	GEM_MAC_CONTROL_CONFIG	0x6038
+#define	GEM_MAC_XIF_CONFIG	0x603c
+#define	GEM_MAC_IPG0		0x6040	/* inter packet gap 0 */
+#define	GEM_MAC_IPG1		0x6044	/* inter packet gap 1 */
+#define	GEM_MAC_IPG2		0x6048	/* inter packet gap 2 */
+#define	GEM_MAC_SLOT_TIME	0x604c
+#define	GEM_MAC_MAC_MIN_FRAME	0x6050
+#define	GEM_MAC_MAC_MAX_FRAME	0x6054
+#define	GEM_MAC_PREAMBLE_LEN	0x6058
+#define	GEM_MAC_JAM_SIZE	0x605c
+#define	GEM_MAC_ATTEMPT_LIMIT	0x6060
+#define	GEM_MAC_CONTROL_TYPE	0x6064
+
+#define	GEM_MAC_ADDR0		0x6080	/* Normal MAC address 0 */
+#define	GEM_MAC_ADDR1		0x6084
+#define	GEM_MAC_ADDR2		0x6088
+#define	GEM_MAC_ADDR3		0x608c	/* Alternate MAC address 0 */
+#define	GEM_MAC_ADDR4		0x6090
+#define	GEM_MAC_ADDR5		0x6094
+#define	GEM_MAC_ADDR6		0x6098	/* Control MAC address 0 */
+#define	GEM_MAC_ADDR7		0x609c
+#define	GEM_MAC_ADDR8		0x60a0
+
+#define	GEM_MAC_ADDR_FILTER0	0x60a4
+#define	GEM_MAC_ADDR_FILTER1	0x60a8
+#define	GEM_MAC_ADDR_FILTER2	0x60ac
+#define	GEM_MAC_ADR_FLT_MASK1_2	0x60b0	/* Address filter mask 1,2 */
+#define	GEM_MAC_ADR_FLT_MASK0	0x60b4	/* Address filter mask 0 reg */
+
+#define	GEM_MAC_HASH0		0x60c0	/* Hash table 0 */
+#define	GEM_MAC_HASH1		0x60c4
+#define	GEM_MAC_HASH2		0x60c8
+#define	GEM_MAC_HASH3		0x60cc
+#define	GEM_MAC_HASH4		0x60d0
+#define	GEM_MAC_HASH5		0x60d4
+#define	GEM_MAC_HASH6		0x60d8
+#define	GEM_MAC_HASH7		0x60dc
+#define	GEM_MAC_HASH8		0x60e0
+#define	GEM_MAC_HASH9		0x60e4
+#define	GEM_MAC_HASH10		0x60e8
+#define	GEM_MAC_HASH11		0x60ec
+#define	GEM_MAC_HASH12		0x60f0
+#define	GEM_MAC_HASH13		0x60f4
+#define	GEM_MAC_HASH14		0x60f8
+#define	GEM_MAC_HASH15		0x60fc
+
+#define	GEM_MAC_NORM_COLL_CNT	0x6100	/* Normal collision counter */
+#define	GEM_MAC_FIRST_COLL_CNT	0x6104	/* 1st successful collision cntr */
+#define	GEM_MAC_EXCESS_COLL_CNT	0x6108	/* Excess collision counter */
+#define	GEM_MAC_LATE_COLL_CNT	0x610c	/* Late collision counter */
+#define	GEM_MAC_DEFER_TMR_CNT	0x6110	/* defer timer counter */
+#define	GEM_MAC_PEAK_ATTEMPTS	0x6114
+#define	GEM_MAC_RX_FRAME_COUNT	0x6118
+#define	GEM_MAC_RX_LEN_ERR_CNT	0x611c
+#define	GEM_MAC_RX_ALIGN_ERR	0x6120
+#define	GEM_MAC_RX_CRC_ERR_CNT	0x6124
+#define	GEM_MAC_RX_CODE_VIOL	0x6128
+#define	GEM_MAC_RANDOM_SEED	0x6130
+#define	GEM_MAC_MAC_STATE	0x6134	/* MAC state machine reg */
+
+
+/* GEM_MAC_SEND_PAUSE_CMD register bits */
+#define	GEM_MAC_PAUSE_CMD_TIME	0x0000ffff
+#define	GEM_MAC_PAUSE_CMD_SEND	0x00010000
+
+
+/* GEM_MAC_TX_STATUS and _MASK register bits */
+#define	GEM_MAC_TX_XMIT_DONE	0x00000001
+#define	GEM_MAC_TX_UNDERRUN	0x00000002
+#define	GEM_MAC_TX_PKT_TOO_LONG	0x00000004
+#define	GEM_MAC_TX_NCC_EXP	0x00000008	/* Normal collision cnt exp */
+#define	GEM_MAC_TX_ECC_EXP	0x00000010
+#define	GEM_MAC_TX_LCC_EXP	0x00000020
+#define	GEM_MAC_TX_FCC_EXP	0x00000040
+#define	GEM_MAC_TX_DEFER_EXP	0x00000080
+#define	GEM_MAC_TX_PEAK_EXP	0x00000100
+
+
+/* GEM_MAC_RX_STATUS and _MASK register bits */
+#define	GEM_MAC_RX_DONE		0x00000001
+#define	GEM_MAC_RX_OVERFLOW	0x00000002
+#define	GEM_MAC_RX_FRAME_CNT	0x00000004
+#define	GEM_MAC_RX_ALIGN_EXP	0x00000008
+#define	GEM_MAC_RX_CRC_EXP	0x00000010
+#define	GEM_MAC_RX_LEN_EXP	0x00000020
+#define	GEM_MAC_RX_CVI_EXP	0x00000040	/* Code violation */
+
+
+/* GEM_MAC_CONTROL_STATUS and GEM_MAC_CONTROL_MASK register bits */
+#define	GEM_MAC_PAUSED		0x00000001	/* Pause received */
+#define	GEM_MAC_PAUSE		0x00000002	/* enter pause state */
+#define	GEM_MAC_RESUME		0x00000004	/* exit pause state */
+#define	GEM_MAC_PAUSE_TIME	0xffff0000
+
+/* GEM_MAC_XIF_CONFIG register bits */
+#define	GEM_MAC_XIF_TX_MII_ENA	0x00000001	/* Enable XIF output drivers */
+#define	GEM_MAC_XIF_MII_LOOPBK	0x00000002	/* Enable MII loopback mode */
+#define	GEM_MAC_XIF_ECHO_DISABL	0x00000004	/* Disable echo */
+#define	GEM_MAC_XIF_GMII_MODE	0x00000008	/* Select GMII/MII mode */
+#define	GEM_MAC_XIF_MII_BUF_ENA	0x00000010	/* Enable MII recv buffers */
+#define	GEM_MAC_XIF_LINK_LED	0x00000020	/* force link LED active */
+#define	GEM_MAC_XIF_FDPLX_LED	0x00000040	/* force FDPLX LED active */
+
+/* GEM_MAC_TX_CONFIG register bits */
+#define	GEM_MAC_TX_ENABLE	0x00000001	/* TX enable */
+#define	GEM_MAC_TX_IGN_CARRIER	0x00000002	/* Ignore carrier sense */
+#define	GEM_MAC_TX_IGN_COLLIS	0x00000004	/* ignore collisions */
+#define	GEM_MAC_TX_ENA_IPG0	0x00000008	/* extend Rx-to-TX IPG */
+#define	GEM_MAC_TX_NGU		0x00000010	/* Never give up */
+#define	GEM_MAC_TX_NGU_LIMIT	0x00000020	/* Never give up limit */
+#define	GEM_MAC_TX_NO_BACKOFF	0x00000040
+#define	GEM_MAC_TX_SLOWDOWN	0x00000080
+#define	GEM_MAC_TX_NO_FCS	0x00000100	/* no FCS will be generated */
+#define	GEM_MAC_TX_CARR_EXTEND	0x00000200	/* Ena TX Carrier Extension */
+/* Carrier Extension is required for half duplex Gbps operation */
+
+
+/* GEM_MAC_RX_CONFIG register bits */
+#define	GEM_MAC_RX_ENABLE	0x00000001	/* RX enable */
+#define	GEM_MAC_RX_STRIP_PAD	0x00000002	/* strip pad bytes */
+#define	GEM_MAC_RX_STRIP_CRC	0x00000004
+#define	GEM_MAC_RX_PROMISCUOUS	0x00000008	/* promiscuous mode */
+#define	GEM_MAC_RX_PROMISC_GRP	0x00000010	/* promiscuous group mode */
+#define	GEM_MAC_RX_HASH_FILTER	0x00000020	/* enable hash filter */
+#define	GEM_MAC_RX_ADDR_FILTER	0x00000040	/* enable address filter */
+#define	GEM_MAC_RX_ERRCHK_DIS	0x00000080	/* disable error checking */
+#define	GEM_MAC_RX_CARR_EXTEND	0x00000100	/* Ena RX Carrier 
Extension */
+/*
+ * Carrier Extension enables reception of packet bursts generated by
+ * senders with carrier extension enabled.
+ */
+
+
+/* GEM_MAC_CONTROL_CONFIG bits */
+#define	GEM_MAC_CC_TX_PAUSE	0x00000001	/* send pause enabled */
+#define	GEM_MAC_CC_RX_PAUSE	0x00000002	/* receive pause enabled */
+#define	GEM_MAC_CC_PASS_PAUSE	0x00000004	/* pass pause up */
+
+
+/* GEM MIF registers */
+/* Bit bang registers use low bit only */
+#define	GEM_MIF_BB_CLOCK	0x6200	/* bit bang clock */
+#define	GEM_MIF_BB_DATA		0x6204	/* bit bang data */
+#define	GEM_MIF_BB_OUTPUT_ENAB	0x6208
+#define	GEM_MIF_FRAME		0x620c	/* MIF frame - ctl and data */
+#define	GEM_MIF_CONFIG		0x6210
+#define	GEM_MIF_INTERRUPT_MASK	0x6214
+#define	GEM_MIF_BASIC_STATUS	0x6218
+#define	GEM_MIF_STATE_MACHINE	0x621c
+
+
+/* GEM_MIF_FRAME bits */
+#define	GEM_MIF_FRAME_DATA	0x0000ffff
+#define	GEM_MIF_FRAME_TA0	0x00010000	/* TA bit, 1 for completion */
+#define	GEM_MIF_FRAME_TA1	0x00020000	/* TA bits */
+#define	GEM_MIF_FRAME_REG_ADDR	0x007c0000
+#define	GEM_MIF_FRAME_PHY_ADDR	0x0f800000	/* phy address, should be 0 */
+#define	GEM_MIF_FRAME_OP	0x30000000	/* operation - write/read */
+#define	GEM_MIF_FRAME_START	0xc0000000	/* START bits */
+
+#define	GEM_MIF_FRAME_READ	0x60020000
+#define	GEM_MIF_FRAME_WRITE	0x50020000
+
+#define	GEM_MIF_REG_SHIFT	18
+#define	GEM_MIF_PHY_SHIFT	23
+
+
+/* GEM_MIF_CONFIG register bits */
+#define	GEM_MIF_CONFIG_PHY_SEL	0x00000001	/* PHY select */
+#define	GEM_MIF_CONFIG_POLL_ENA	0x00000002	/* poll enable */
+#define	GEM_MIF_CONFIG_BB_ENA	0x00000004	/* bit bang enable */
+#define	GEM_MIF_CONFIG_REG_ADR	0x000000f8	/* poll register address */
+#define	GEM_MIF_CONFIG_MDI0	0x00000100	/* MDIO_0 Data/MDIO_0 attached */
+#define	GEM_MIF_CONFIG_MDI1	0x00000200	/* MDIO_1 Data/MDIO_1 attached */
+#define	GEM_MIF_CONFIG_PHY_ADR	0x00007c00	/* poll PHY address */
+/* MDI0 is onboard transceiver, MDI1 is external, PHYAD for both is 0 */
+
+
+/* GEM_MIF_BASIC_STATUS and GEM_MIF_INTERRUPT_MASK bits */
+#define	GEM_MIF_STATUS		0x0000ffff
+#define	GEM_MIF_BASIC		0xffff0000
+/*
+ * The Basic part is the last value read in the POLL field of the config
+ * register.
+ *
+ * The status part indicates the bits that have changed.
+ */
+
+
+/* The GEM PCS/Serial link register. */
+#define	GEM_MII_CONTROL		0x9000
+#define	GEM_MII_STATUS		0x9004
+#define	GEM_MII_ANAR		0x9008	/* MII advertisement reg */
+#define	GEM_MII_ANLPAR		0x900c	/* LP ability reg */
+#define	GEM_MII_CONFIG		0x9010
+#define	GEM_MII_STATE_MACHINE	0x9014
+#define	GEM_MII_INTERRUP_STATUS	0x9018	/* [sic] interrupt status */
+#define	GEM_MII_DATAPATH_MODE	0x9050
+#define	GEM_MII_SLINK_CONTROL	0x9054	/* Serial link control */
+#define	GEM_MII_OUTPUT_SELECT	0x9058
+#define	GEM_MII_SLINK_STATUS	0x905c	/* serial link status */
+
+
+/* GEM_MII_CONTROL bits */
+/*
+ * DO NOT TOUCH THIS REGISTER ON ERI -- IT HARD HANGS.
+ */
+#define	GEM_MII_CONTROL_RESET	0x00008000
+#define	GEM_MII_CONTROL_LOOPBK	0x00004000	/* 10-bit i/f loopback */
+#define	GEM_MII_CONTROL_1000M	0x00002000	/* speed select, always 0 */
+#define	GEM_MII_CONTROL_AUTONEG	0x00001000	/* auto negotiation enabled */
+#define	GEM_MII_CONTROL_POWERDN	0x00000800
+#define	GEM_MII_CONTROL_ISOLATE	0x00000400	/* isolate phy from mii */
+#define	GEM_MII_CONTROL_RAN	0x00000200	/* restart auto negotiation */
+#define	GEM_MII_CONTROL_FDUPLEX	0x00000100	/* full duplex, always 0 */
+#define	GEM_MII_CONTROL_COL_TST	0x00000080	/* collision test */
+
+
+/* GEM_MII_STATUS reg */
+#define	GEM_MII_STATUS_GB_FDX	0x00000400	/* can perform GBit FDX */
+#define	GEM_MII_STATUS_GB_HDX	0x00000200	/* can perform GBit HDX */
+#define	GEM_MII_STATUS_ANEG_CPT	0x00000020	/* auto negotiate complete */
+#define	GEM_MII_STATUS_REM_FLT	0x00000010	/* remote fault detected */
+#define	GEM_MII_STATUS_ACFG	0x00000008	/* can auto negotiate */
+#define	GEM_MII_STATUS_LINK_STS	0x00000004	/* link status */
+#define	GEM_MII_STATUS_JABBER	0x00000002	/* jabber condition detected */
+#define	GEM_MII_STATUS_EXTCAP	0x00000001	/* extended register capability */
+
+
+/* GEM_MII_ANAR and GEM_MII_ANLPAR reg bits */
+#define	GEM_MII_ANEG_NP		0x00008000	/* next page bit */
+#define	GEM_MII_ANEG_ACK	0x00004000	/* ack reception of */
+						/* Link Partner Capability */
+#define	GEM_MII_ANEG_RF		0x00003000	/* advertise remote fault cap */
+#define	GEM_MII_ANEG_ASYM_PAUSE	0x00000100	/* asymmetric pause */
+#define	GEM_MII_ANEG_SYM_PAUSE	0x00000080	/* symmetric pause */
+#define	GEM_MII_ANEG_HLF_DUPLX	0x00000040
+#define	GEM_MII_ANEG_FUL_DUPLX	0x00000020
+
+
+/* GEM_MII_CONFIG reg */
+#define	GEM_MII_CONFIG_TIMER	0x0000001c	/* link monitor timer values */
+#define	GEM_MII_CONFIG_ENABLE	0x00000001	/* Enable PCS */
+
+
+/* GEM_MII_DATAPATH_MODE reg */
+#define	GEM_MII_DATAPATH_SERIAL	0x00000001	/* Serial link */
+#define	GEM_MII_DATAPATH_SERDES	0x00000002	/* Use PCS via 10-bit interface */
+#define	GEM_MII_DATAPATH_MII	0x00000004	/* Use MII, not PCS */
+#define	GEM_MII_DATAPATH_MIIOUT	0x00000008	/* enable serial output on GMII */
+
+
+/* GEM_MII_SLINK_CONTROL reg */
+#define	GEM_MII_SLINK_LOOPBACK	0x00000001	/* enable loopback at sl */
+#define	GEM_MII_SLINK_EN_SYNC_D	0x00000002	/* enable sync detection */
+#define	GEM_MII_SLINK_LOCK_REF	0x00000004	/* lock reference clock */
+#define	GEM_MII_SLINK_EMPHASIS	0x00000008	/* enable emphasis */
+#define	GEM_MII_SLINK_SELFTEST	0x000001c0
+#define	GEM_MII_SLINK_POWER_OFF	0x00000200	/* Power down serial link */
+
+
+/* GEM_MII_SLINK_STATUS reg */
+#define	GEM_MII_SLINK_TEST	0x00000000	/* undergoing test */
+#define	GEM_MII_SLINK_LOCKED	0x00000001	/* waiting 500us lockrefn */
+#define	GEM_MII_SLINK_COMMA	0x00000002	/* waiting for comma detect */
+#define	GEM_MII_SLINK_SYNC	0x00000003	/* recv data synchronized */
+
+
+/* Wired GEM PHY addresses */
+#define	GEM_PHYAD_INTERNAL	1
+#define	GEM_PHYAD_EXTERNAL	0
+
+/*
+ * GEM descriptor table structures. 
+ */
+struct gem_desc {
+	uint64_t	gd_flags;	/* ownership/status/size bits (GEM_TD_*/GEM_RD_*) */
+	uint64_t	gd_addr;	/* DMA address of the buffer */
+};
+
+/* Transmit flags */
+#define	GEM_TD_BUFSIZE		0x0000000000007fffLL
+#define	GEM_TD_CXSUM_START	0x00000000001f8000LL	/* Cxsum start offset */
+#define	GEM_TD_CXSUM_STUFF	0x000000001fe00000LL	/* Cxsum stuff offset */
+#define	GEM_TD_CXSUM_ENABLE	0x0000000020000000LL	/* Cxsum generation enable */
+#define	GEM_TD_END_OF_PACKET	0x0000000040000000LL
+#define	GEM_TD_START_OF_PACKET	0x0000000080000000LL
+#define	GEM_TD_INTERRUPT_ME	0x0000000100000000LL	/* Interrupt me now */
+#define	GEM_TD_NO_CRC		0x0000000200000000LL	/* do not insert crc */
+/*
+ * Only need to set GEM_TD_CXSUM_ENABLE, GEM_TD_CXSUM_STUFF,
+ * GEM_TD_CXSUM_START, and GEM_TD_INTERRUPT_ME in 1st descriptor of a group.
+ */
+
+/* Receive flags */
+#define	GEM_RD_CHECKSUM		0x000000000000ffffLL
+#define	GEM_RD_BUFSIZE		0x000000007fff0000LL
+#define	GEM_RD_OWN		0x0000000080000000LL	/* 1 - owned by h/w */
+#define	GEM_RD_HASHVAL		0x0ffff00000000000LL
+#define	GEM_RD_HASH_PASS	0x1000000000000000LL	/* passed hash filter */
+#define	GEM_RD_ALTERNATE_MAC	0x2000000000000000LL	/* Alternate MAC adrs */
+#define	GEM_RD_BAD_CRC		0x4000000000000000LL
+
+#define	GEM_RD_BUFSHIFT		16
+#define	GEM_RD_BUFLEN(x)	(((x)&GEM_RD_BUFSIZE)>>GEM_RD_BUFSHIFT)
+
+/* PCI support */
+#define	PCI_GEM_BASEADDR	0x10
+
+#endif
diff --git a/sys/dev/gem/if_gemvar.h b/sys/dev/gem/if_gemvar.h
new file mode 100644
index 0000000..d93728e
--- /dev/null
+++ b/sys/dev/gem/if_gemvar.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2001 Eduardo Horvath.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. 
Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: NetBSD: gemvar.h,v 1.8 2002/05/15 02:36:12 matt Exp
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	_IF_GEMVAR_H
+#define	_IF_GEMVAR_H
+
+
+#include <sys/queue.h>
+#include <sys/callout.h>
+
+/*
+ * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
+ */
+
+/*
+ * Transmit descriptor list size.  This is arbitrary, but allocate
+ * enough descriptors for 64 pending transmissions and 16 segments
+ * per packet.  This limit is not actually enforced (packets with more segments
+ * can be sent, depending on the busdma backend); it is however used as an
+ * estimate for the tx window size.
+ */
+#define	GEM_NTXSEGS		16
+
+#define	GEM_TXQUEUELEN		64
+#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
+#define	GEM_MAXTXFREE		(GEM_NTXDESC - 1)
+#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
+#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)
+
+/*
+ * Receive descriptor list size.  We have one Rx buffer per incoming
+ * packet, so this logic is a little simpler.
+ */
+#define	GEM_NRXDESC		128
+#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
+#define	GEM_PREVRX(x)		((x - 1) & GEM_NRXDESC_MASK)
+#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)
+
+/*
+ * How many ticks to wait before retrying on a RX descriptor that is still
+ * owned by the hardware.
+ */
+#define	GEM_RXOWN_TICKS		(hz / 50)
+
+/*
+ * Control structures are DMA'd to the GEM chip.  We allocate them in
+ * a single clump that maps to a single DMA segment to make several things
+ * easier.
+ */
+struct gem_control_data {
+	/*
+	 * The transmit descriptors.
+	 */
+	struct gem_desc gcd_txdescs[GEM_NTXDESC];
+
+	/*
+	 * The receive descriptors.
+	 */
+	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
+};
+
+#define	GEM_CDOFF(x)	offsetof(struct gem_control_data, x)
+#define	GEM_CDTXOFF(x)	GEM_CDOFF(gcd_txdescs[(x)])
+#define	GEM_CDRXOFF(x)	GEM_CDOFF(gcd_rxdescs[(x)])
+
+/*
+ * Software state for transmit job mbufs (may be elements of mbuf chains).
+ */
+struct gem_txsoft {
+	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
+	bus_dmamap_t txs_dmamap;	/* our DMA map */
+	int txs_firstdesc;		/* first descriptor in packet */
+	int txs_lastdesc;		/* last descriptor in packet */
+	int txs_ndescs;			/* number of descriptors */
+	STAILQ_ENTRY(gem_txsoft) txs_q;
+};
+
+STAILQ_HEAD(gem_txsq, gem_txsoft);
+
+/* Argument structure for busdma callback */
+struct gem_txdma {
+	struct gem_softc *txd_sc;
+	struct gem_txsoft *txd_txs;
+};
+
+/*
+ * Software state for receive jobs.
+ */
+struct gem_rxsoft {
+	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
+	bus_dmamap_t rxs_dmamap;	/* our DMA map */
+	bus_addr_t rxs_paddr;		/* physical address of the segment */
+};
+
+/*
+ * Software state per device. 
+ */ +struct gem_softc { + struct arpcom sc_arpcom; /* arp common data */ + device_t sc_miibus; + struct mii_data *sc_mii; /* MII media control */ + device_t sc_dev; /* generic device information */ + struct callout sc_tick_ch; /* tick callout */ + struct callout sc_rx_ch; /* delayed rx callout */ + + /* The following bus handles are to be provided by the bus front-end */ + bus_space_tag_t sc_bustag; /* bus tag */ + bus_dma_tag_t sc_pdmatag; /* parent bus dma tag */ + bus_dma_tag_t sc_rdmatag; /* RX bus dma tag */ + bus_dma_tag_t sc_tdmatag; /* TX bus dma tag */ + bus_dma_tag_t sc_cdmatag; /* control data bus dma tag */ + bus_dmamap_t sc_dmamap; /* bus dma handle */ + bus_space_handle_t sc_h; /* bus space handle for all regs */ + + int sc_phys[2]; /* MII instance -> PHY map */ + + int sc_mif_config; /* Selected MII reg setting */ + + int sc_pci; /* XXXXX -- PCI buses are LE. */ + u_int sc_variant; /* which GEM are we dealing with? */ +#define GEM_UNKNOWN 0 /* don't know */ +#define GEM_SUN_GEM 1 /* Sun GEM variant */ +#define GEM_APPLE_GMAC 2 /* Apple GMAC variant */ + + u_int sc_flags; /* */ +#define GEM_GIGABIT 0x0001 /* has a gigabit PHY */ + + /* + * Ring buffer DMA stuff. + */ + bus_dma_segment_t sc_cdseg; /* control data memory */ + int sc_cdnseg; /* number of segments */ + bus_dmamap_t sc_cddmamap; /* control data DMA map */ + bus_addr_t sc_cddma; + + /* + * Software state for transmit and receive descriptors. + */ + struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN]; + struct gem_rxsoft sc_rxsoft[GEM_NRXDESC]; + + /* + * Control data structures. 
+ */ + struct gem_control_data *sc_control_data; +#define sc_txdescs sc_control_data->gcd_txdescs +#define sc_rxdescs sc_control_data->gcd_rxdescs + + int sc_txfree; /* number of free Tx descriptors */ + int sc_txnext; /* next ready Tx descriptor */ + int sc_txwin; /* Tx descriptors since last Tx int */ + + struct gem_txsq sc_txfreeq; /* free Tx descsofts */ + struct gem_txsq sc_txdirtyq; /* dirty Tx descsofts */ + + int sc_rxptr; /* next ready RX descriptor/descsoft */ + int sc_rxfifosize; /* Rx FIFO size (bytes) */ + + /* ========== */ + int sc_inited; + int sc_debug; + int sc_ifflags; +}; + +#define GEM_DMA_READ(sc, v) (((sc)->sc_pci) ? le64toh(v) : be64toh(v)) +#define GEM_DMA_WRITE(sc, v) (((sc)->sc_pci) ? htole64(v) : htobe64(v)) + +#define GEM_CDTXADDR(sc, x) ((sc)->sc_cddma + GEM_CDTXOFF((x))) +#define GEM_CDRXADDR(sc, x) ((sc)->sc_cddma + GEM_CDRXOFF((x))) + +#define GEM_CDSYNC(sc, ops) \ + bus_dmamap_sync((sc)->sc_cdmatag, (sc)->sc_cddmamap, (ops)); \ + +#define GEM_INIT_RXDESC(sc, x) \ +do { \ + struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)]; \ + struct gem_desc *__rxd = &sc->sc_rxdescs[(x)]; \ + struct mbuf *__m = __rxs->rxs_mbuf; \ + \ + __m->m_data = __m->m_ext.ext_buf; \ + __rxd->gd_addr = \ + GEM_DMA_WRITE((sc), __rxs->rxs_paddr); \ + __rxd->gd_flags = \ + GEM_DMA_WRITE((sc), \ + (((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT) \ + & GEM_RD_BUFSIZE) | GEM_RD_OWN); \ +} while (0) + +#ifdef _KERNEL +extern devclass_t gem_devclass; + +int gem_attach(struct gem_softc *); +void gem_detach(struct gem_softc *); +void gem_suspend(struct gem_softc *); +void gem_resume(struct gem_softc *); +void gem_intr(void *); + +int gem_mediachange(struct ifnet *); +void gem_mediastatus(struct ifnet *, struct ifmediareq *); + +void gem_reset(struct gem_softc *); + +/* MII methods & callbacks */ +int gem_mii_readreg(device_t, int, int); +int gem_mii_writereg(device_t, int, int, int); +void gem_mii_statchg(device_t); + +#endif /* _KERNEL */ + + +#endif |