author		yongari <yongari@FreeBSD.org>	2008-05-19 01:39:59 +0000
committer	yongari <yongari@FreeBSD.org>	2008-05-19 01:39:59 +0000
commit		af582dfcb76c08f67172823da6f436419c5b39f8 (patch)
tree		ef7065d7462d8c119b9890c31825c45404343b0e /sys/dev/age
parent		cdaaf3f428ed7bc910ea474db5412daec1415fbc (diff)
Add age(4), a driver for the Attansic/Atheros L1 gigabit ethernet
controller.

The L1 has several threshold/timer registers which seem to require
carefully tuned parameters for best performance. The datasheet for the
L1 is not available to open source driver writers, so age(4) focuses on
stability and correctness of basic Tx/Rx operation. At the moment the
performance of age(4) is far from optimal, which in turn means some
registers are mis-programmed or incorrectly configured. Currently
age(4) supports all known hardware assistance, including:
 - MSI support.
 - TCP Segmentation Offload.
 - Hardware VLAN tag insertion/stripping.
 - TCP/UDP checksum offload.
 - Interrupt moderation.
 - Hardware statistics counters.
 - Jumbo frame support.
 - WOL support.

The L1 gigabit ethernet controller is mainly found on ASUS
motherboards. Note that there seem to be other variants of the
hardware, known as the L2 (Fast Ethernet), as well as newer gigabit
ethernet controllers (AR81xx) from Atheros. These are not supported by
age(4) and require a separate driver.

Big thanks to all the people who reported feedback or tested patches.

Tested by:	kevlo, bsam,
		Francois Ranchin < fyr AT fyrou DOT net >
		Thomas Nystroem < thn AT saeab DOT se >
		Roman Pogosyan < asternetadmin AT gmail DOT com >
		Derek Tattersal < dlt AT mebtel DOT net >
		Oliver Seitz < karlkiste AT yahoo DOT com >
Diffstat (limited to 'sys/dev/age')
-rw-r--r--	sys/dev/age/if_age.c	3356
-rw-r--r--	sys/dev/age/if_agereg.h	 656
-rw-r--r--	sys/dev/age/if_agevar.h	 272
3 files changed, 4284 insertions, 0 deletions
diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c
new file mode 100644
index 0000000..96ca3a8
--- /dev/null
+++ b/sys/dev/age/if_age.c
@@ -0,0 +1,3356 @@
+/*-
+ * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/rman.h>
+#include <sys/module.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <machine/bus.h>
+#include <machine/in_cksum.h>
+
+#include <dev/age/if_agereg.h>
+#include <dev/age/if_agevar.h>
+
+/* "device miibus" required. See GENERIC if you get errors here. */
+#include "miibus_if.h"
+
+#ifndef IFCAP_VLAN_HWTSO
+#define IFCAP_VLAN_HWTSO 0
+#endif
+#define AGE_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
+
+MODULE_DEPEND(age, pci, 1, 1, 1);
+MODULE_DEPEND(age, ether, 1, 1, 1);
+MODULE_DEPEND(age, miibus, 1, 1, 1);
+
+/* Tunables. */
+static int msi_disable = 0;
+static int msix_disable = 0;
+TUNABLE_INT("hw.age.msi_disable", &msi_disable);
+TUNABLE_INT("hw.age.msix_disable", &msix_disable);
+
+/*
+ * Devices supported by this driver.
+ */
+static struct age_dev {
+ uint16_t age_vendorid;
+ uint16_t age_deviceid;
+ const char *age_name;
+} age_devs[] = {
+ { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
+ "Attansic Technology Corp, L1 Gigabit Ethernet" },
+};
+
+static int age_miibus_readreg(device_t, int, int);
+static int age_miibus_writereg(device_t, int, int, int);
+static void age_miibus_statchg(device_t);
+static void age_mediastatus(struct ifnet *, struct ifmediareq *);
+static int age_mediachange(struct ifnet *);
+static int age_read_vpd_word(struct age_softc *, uint32_t, uint32_t,
+ uint32_t *);
+static int age_probe(device_t);
+static void age_get_macaddr(struct age_softc *);
+static void age_phy_reset(struct age_softc *);
+static int age_attach(device_t);
+static int age_detach(device_t);
+static void age_sysctl_node(struct age_softc *);
+static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+static int age_check_boundary(struct age_softc *);
+static int age_dma_alloc(struct age_softc *);
+static void age_dma_free(struct age_softc *);
+static int age_shutdown(device_t);
+static void age_setwol(struct age_softc *);
+static int age_suspend(device_t);
+static int age_resume(device_t);
+static int age_encap(struct age_softc *, struct mbuf **);
+static void age_tx_task(void *, int);
+static void age_start(struct ifnet *);
+static void age_watchdog(struct age_softc *);
+static int age_ioctl(struct ifnet *, u_long, caddr_t);
+static void age_mac_config(struct age_softc *);
+static void age_link_task(void *, int);
+static void age_stats_update(struct age_softc *);
+static int age_intr(void *);
+static void age_int_task(void *, int);
+static void age_txintr(struct age_softc *, int);
+static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
+static int age_rxintr(struct age_softc *, int, int);
+static void age_tick(void *);
+static void age_reset(struct age_softc *);
+static void age_init(void *);
+static void age_init_locked(struct age_softc *);
+static void age_stop(struct age_softc *);
+static void age_stop_txmac(struct age_softc *);
+static void age_stop_rxmac(struct age_softc *);
+static void age_init_tx_ring(struct age_softc *);
+static int age_init_rx_ring(struct age_softc *);
+static void age_init_rr_ring(struct age_softc *);
+static void age_init_cmb_block(struct age_softc *);
+static void age_init_smb_block(struct age_softc *);
+static int age_newbuf(struct age_softc *, struct age_rxdesc *);
+static void age_rxvlan(struct age_softc *);
+static void age_rxfilter(struct age_softc *);
+static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
+static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
+static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
+static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);
+
+static device_method_t age_methods[] = {
+ /* Device interface. */
+ DEVMETHOD(device_probe, age_probe),
+ DEVMETHOD(device_attach, age_attach),
+ DEVMETHOD(device_detach, age_detach),
+ DEVMETHOD(device_shutdown, age_shutdown),
+ DEVMETHOD(device_suspend, age_suspend),
+ DEVMETHOD(device_resume, age_resume),
+
+ /* MII interface. */
+ DEVMETHOD(miibus_readreg, age_miibus_readreg),
+ DEVMETHOD(miibus_writereg, age_miibus_writereg),
+ DEVMETHOD(miibus_statchg, age_miibus_statchg),
+
+ { NULL, NULL }
+};
+
+static driver_t age_driver = {
+ "age",
+ age_methods,
+ sizeof(struct age_softc)
+};
+
+static devclass_t age_devclass;
+
+DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0);
+DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0);
+
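+/* Bus resource tables; bus_alloc_resources(9) stops at the -1 sentinel. */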
+static struct resource_spec age_res_spec_mem[] = {
+ { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
+ { -1, 0, 0 }
+};
+
+static struct resource_spec age_irq_spec_legacy[] = {
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ { -1, 0, 0 }
+};
+
+static struct resource_spec age_irq_spec_msi[] = {
+ { SYS_RES_IRQ, 1, RF_ACTIVE },
+ { -1, 0, 0 }
+};
+
+static struct resource_spec age_irq_spec_msix[] = {
+ { SYS_RES_IRQ, 1, RF_ACTIVE },
+ { -1, 0, 0 }
+};
+
+/*
+ * Read a PHY register on the MII of the L1.
+ */
+static int
+age_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct age_softc *sc;
+ uint32_t v;
+ int i;
+
+ sc = device_get_softc(dev);
+ if (phy != sc->age_phyaddr)
+ return (0);
+
+ CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
+ MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
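+	/* Poll until the controller clears the EXECUTE/BUSY bits. */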
+ for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
+ DELAY(1);
+ v = CSR_READ_4(sc, AGE_MDIO);
+ if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
+ break;
+ }
+
+ if (i == 0) {
+ device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
+ return (0);
+ }
+
+ return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
+}
+
+/*
+ * Write a PHY register on the MII of the L1.
+ */
+static int
+age_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct age_softc *sc;
+ uint32_t v;
+ int i;
+
+ sc = device_get_softc(dev);
+ if (phy != sc->age_phyaddr)
+ return (0);
+
+ CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
+ (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
+ MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
+ for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
+ DELAY(1);
+ v = CSR_READ_4(sc, AGE_MDIO);
+ if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
+ break;
+ }
+
+ if (i == 0)
+ device_printf(sc->age_dev, "phy write timeout : %d\n", reg);
+
+ return (0);
+}
+
+/*
+ * Callback from MII layer when media changes.
+ */
+static void
+age_miibus_statchg(device_t dev)
+{
+ struct age_softc *sc;
+
+ sc = device_get_softc(dev);
+ taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);
+}
+
+/*
+ * Get the current interface media status.
+ */
+static void
+age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct age_softc *sc;
+ struct mii_data *mii;
+
+ sc = ifp->if_softc;
+ AGE_LOCK(sc);
+ mii = device_get_softc(sc->age_miibus);
+
+ mii_pollstat(mii);
+ AGE_UNLOCK(sc);
+ ifmr->ifm_status = mii->mii_media_status;
+ ifmr->ifm_active = mii->mii_media_active;
+}
+
+/*
+ * Set hardware to newly-selected media.
+ */
+static int
+age_mediachange(struct ifnet *ifp)
+{
+ struct age_softc *sc;
+ struct mii_data *mii;
+ struct mii_softc *miisc;
+ int error;
+
+ sc = ifp->if_softc;
+ AGE_LOCK(sc);
+ mii = device_get_softc(sc->age_miibus);
+ if (mii->mii_instance != 0) {
+ LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
+ mii_phy_reset(miisc);
+ }
+ error = mii_mediachg(mii);
+ AGE_UNLOCK(sc);
+
+ return (error);
+}
+
+static int
+age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset,
+ uint32_t *word)
+{
+ int i;
+
+ pci_write_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, offset, 2);
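+	/* The hardware sets bit 15 once the VPD data register is valid. */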
+ for (i = AGE_TIMEOUT; i > 0; i--) {
+ DELAY(10);
+ if ((pci_read_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, 2) &
+ 0x8000) == 0x8000)
+ break;
+ }
+ if (i == 0) {
+ device_printf(sc->age_dev, "VPD read timeout!\n");
+ *word = 0;
+ return (ETIMEDOUT);
+ }
+
+ *word = pci_read_config(sc->age_dev, vpdc + PCIR_VPD_DATA, 4);
+ return (0);
+}
+
+static int
+age_probe(device_t dev)
+{
+ struct age_dev *sp;
+ int i;
+ uint16_t vendor, devid;
+
+ vendor = pci_get_vendor(dev);
+ devid = pci_get_device(dev);
+ sp = age_devs;
+ for (i = 0; i < sizeof(age_devs) / sizeof(age_devs[0]);
+ i++, sp++) {
+ if (vendor == sp->age_vendorid &&
+ devid == sp->age_deviceid) {
+ device_set_desc(dev, sp->age_name);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
+static void
+age_get_macaddr(struct age_softc *sc)
+{
+ uint32_t ea[2], off, reg, word;
+ int vpd_error, match, vpdc;
+
+ reg = CSR_READ_4(sc, AGE_SPI_CTRL);
+ if ((reg & SPI_VPD_ENB) != 0) {
+ /* Get VPD stored in TWSI EEPROM. */
+ reg &= ~SPI_VPD_ENB;
+ CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
+ }
+
+ vpd_error = 0;
+ ea[0] = ea[1] = 0;
+ if ((vpd_error = pci_find_extcap(sc->age_dev, PCIY_VPD, &vpdc)) == 0) {
+ /*
+		 * The PCI VPD capability exists, but it seems that it's
+		 * not in the standard form as stated in the PCI VPD
+		 * specification, so the driver cannot use
+		 * pci_get_vpd_readonly(9) with the keyword 'NA'.
+		 * Search the VPD data starting at address 0x0100; the data
+		 * should be used as initializers to set the AGE_PAR0 and
+		 * AGE_PAR1 registers, as well as other PCI configuration
+		 * registers.
+ */
+ word = 0;
+ match = 0;
+ reg = 0;
+ for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END;
+ off += sizeof(uint32_t)) {
+ vpd_error = age_read_vpd_word(sc, vpdc, off, &word);
+ if (vpd_error != 0)
+ break;
+ if (match != 0) {
+ switch (reg) {
+ case AGE_PAR0:
+ ea[0] = word;
+ break;
+ case AGE_PAR1:
+ ea[1] = word;
+ break;
+ default:
+ break;
+ }
+ match = 0;
+ } else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) {
+ match = 1;
+ reg = word >> 16;
+ } else
+ break;
+ }
+ if (off >= AGE_VPD_REG_CONF_END)
+ vpd_error = ENOENT;
+ if (vpd_error == 0) {
+ /*
+			 * Don't blindly trust the ethernet address
+			 * obtained from VPD. Check whether the ethernet
+			 * address is a valid one; otherwise fall back to
+			 * reading the PAR registers.
+ */
+ ea[1] &= 0xFFFF;
+ if ((ea[0] == 0 && ea[1] == 0) ||
+ (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
+ if (1 || bootverbose)
+ device_printf(sc->age_dev,
+ "invalid ethernet address "
+ "returned from VPD.\n");
+ vpd_error = EINVAL;
+ }
+ }
+ if (vpd_error != 0 && (1 || bootverbose))
+ device_printf(sc->age_dev, "VPD access failure!\n");
+ } else {
+ if (1 || bootverbose)
+ device_printf(sc->age_dev,
+ "PCI VPD capability not found!\n");
+ }
+
+ /*
+	 * It seems that the L1 also provides a way to extract the
+	 * ethernet address via the SPI flash interface. Because SPI
+	 * flash memory devices from different vendors vary in their
+	 * instruction codes for the read ID instruction, it's very hard
+	 * to get the instruction codes without detailed information on
+	 * the flash memory device used on the ethernet controller. To
+	 * simplify the code, just read the AGE_PAR0/AGE_PAR1 registers
+	 * to get the ethernet address, which is supposed to be set by
+	 * hardware during power-on reset.
+ */
+ if (vpd_error != 0) {
+ /*
+		 * VPD is mapped to SPI flash memory, or the BIOS set it.
+ */
+ ea[0] = CSR_READ_4(sc, AGE_PAR0);
+ ea[1] = CSR_READ_4(sc, AGE_PAR1);
+ }
+
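+	/* The station address spans PAR0 (32 bits) and PAR1's low 16 bits. */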
+ ea[1] &= 0xFFFF;
+ if ((ea[0] == 0 && ea[1] == 0) ||
+ (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
+ device_printf(sc->age_dev,
+ "generating fake ethernet address.\n");
+ ea[0] = arc4random();
+ /* Set OUI to ASUSTek COMPUTER INC. */
+ sc->age_eaddr[0] = 0x00;
+ sc->age_eaddr[1] = 0x1B;
+ sc->age_eaddr[2] = 0xFC;
+ sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
+ sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
+ sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
+ } else {
+ sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
+ sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
+ sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
+ sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
+ sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
+ sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
+ }
+}
+
+static void
+age_phy_reset(struct age_softc *sc)
+{
+
+ /* Reset PHY. */
+ CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
+ pause("agephy", hz / 1000);
+ CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
+ pause("agephy", hz / 1000);
+}
+
+static int
+age_attach(device_t dev)
+{
+ struct age_softc *sc;
+ struct ifnet *ifp;
+ uint16_t burst;
+ int error, i, msic, msixc, pmc;
+
+ error = 0;
+ sc = device_get_softc(dev);
+ sc->age_dev = dev;
+
+ mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+ MTX_DEF);
+ callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
+ TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
+ TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);
+
+ /* Map the device. */
+ pci_enable_busmaster(dev);
+ sc->age_res_spec = age_res_spec_mem;
+ sc->age_irq_spec = age_irq_spec_legacy;
+ error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
+ if (error != 0) {
+ device_printf(dev, "cannot allocate memory resources.\n");
+ goto fail;
+ }
+
+ /* Set PHY address. */
+ sc->age_phyaddr = AGE_PHY_ADDR;
+
+ /* Reset PHY. */
+ age_phy_reset(sc);
+
+ /* Reset the ethernet controller. */
+ age_reset(sc);
+
+ /* Get PCI and chip id/revision. */
+ sc->age_rev = pci_get_revid(dev);
+ sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
+ MASTER_CHIP_REV_SHIFT;
+ if (1 || bootverbose) {
+ device_printf(dev, "PCI device revision : 0x%04x\n", sc->age_rev);
+ device_printf(dev, "Chip id/revision : 0x%04x\n",
+ sc->age_chip_rev);
+ }
+
+ /*
+ * XXX
+	 * Uninitialized hardware returns an invalid chip id/revision
+	 * as well as 0xFFFFFFFF for the Tx/Rx fifo length. It seems that
+	 * an unplugged cable puts the hardware into automatic power
+	 * down mode, which in turn returns an invalid chip revision.
+ */
+ if (sc->age_chip_rev == 0xFFFF) {
+ device_printf(dev,"invalid chip revision : 0x%04x -- "
+ "not initialized?\n", sc->age_chip_rev);
+ error = ENXIO;
+ goto fail;
+ }
+
+ device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
+ CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
+ CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
+
+ /* Allocate IRQ resources. */
+ msixc = pci_msix_count(dev);
+ msic = pci_msi_count(dev);
+ if (1 || bootverbose) {
+ device_printf(dev, "MSIX count : %d\n", msixc);
+ device_printf(dev, "MSI count : %d\n", msic);
+ }
+
+ /* Prefer MSIX over MSI. */
+ if (msix_disable == 0 || msi_disable == 0) {
+ if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
+ pci_alloc_msix(dev, &msixc) == 0) {
+			if (msixc == AGE_MSIX_MESSAGES) {
+ device_printf(dev, "Using %d MSIX messages.\n",
+ msixc);
+ sc->age_flags |= AGE_FLAG_MSIX;
+ sc->age_irq_spec = age_irq_spec_msix;
+ } else
+ pci_release_msi(dev);
+ }
+ if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
+ msic == AGE_MSI_MESSAGES &&
+ pci_alloc_msi(dev, &msic) == 0) {
+ if (msic == AGE_MSI_MESSAGES) {
+ device_printf(dev, "Using %d MSI messages.\n",
+ msic);
+ sc->age_flags |= AGE_FLAG_MSI;
+ sc->age_irq_spec = age_irq_spec_msi;
+ } else
+ pci_release_msi(dev);
+ }
+ }
+
+ error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
+ if (error != 0) {
+ device_printf(dev, "cannot allocate IRQ resources.\n");
+ goto fail;
+ }
+
+ /* Get DMA parameters from PCIe device control register. */
+ if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
+ sc->age_flags |= AGE_FLAG_PCIE;
+ burst = pci_read_config(dev, i + 0x08, 2);
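+		/*
+		 * PCIe device control register: max read request size in
+		 * bits 14:12, max payload size in bits 7:5, each encoding
+		 * 128 << n bytes.
+		 */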
+ /* Max read request size. */
+ sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
+ DMA_CFG_RD_BURST_SHIFT;
+ /* Max payload size. */
+ sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
+ DMA_CFG_WR_BURST_SHIFT;
+ if (1 || bootverbose) {
+ device_printf(dev, "Read request size : %d bytes.\n",
+ 128 << ((burst >> 12) & 0x07));
+ device_printf(dev, "TLP payload size : %d bytes.\n",
+ 128 << ((burst >> 5) & 0x07));
+ }
+ } else {
+ sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
+ sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
+ }
+
+ /* Create device sysctl node. */
+ age_sysctl_node(sc);
+
+	if ((error = age_dma_alloc(sc)) != 0)
+ goto fail;
+
+ /* Load station address. */
+ age_get_macaddr(sc);
+
+ ifp = sc->age_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "cannot allocate ifnet structure.\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ ifp->if_softc = sc;
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = age_ioctl;
+ ifp->if_start = age_start;
+ ifp->if_init = age_init;
+ ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+ IFQ_SET_READY(&ifp->if_snd);
+ ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
+ ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO;
+ if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
+ sc->age_flags |= AGE_FLAG_PMCAP;
+ ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
+ }
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /* Set up MII bus. */
+ if ((error = mii_phy_probe(dev, &sc->age_miibus, age_mediachange,
+ age_mediastatus)) != 0) {
+ device_printf(dev, "no PHY found!\n");
+ goto fail;
+ }
+
+ ether_ifattach(ifp, sc->age_eaddr);
+
+ /* VLAN capability setup. */
+ ifp->if_capabilities |= IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /* Tell the upper layer(s) we support long frames. */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ /* Create local taskq. */
+ TASK_INIT(&sc->age_tx_task, 1, age_tx_task, ifp);
+ sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &sc->age_tq);
+ if (sc->age_tq == NULL) {
+ device_printf(dev, "could not create taskqueue.\n");
+ ether_ifdetach(ifp);
+ error = ENXIO;
+ goto fail;
+ }
+ taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
+ device_get_nameunit(sc->age_dev));
+
+ if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
+ msic = AGE_MSIX_MESSAGES;
+ else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
+ msic = AGE_MSI_MESSAGES;
+ else
+ msic = 1;
+ for (i = 0; i < msic; i++) {
+ error = bus_setup_intr(dev, sc->age_irq[i],
+ INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
+ &sc->age_intrhand[i]);
+ if (error != 0)
+ break;
+ }
+ if (error != 0) {
+ device_printf(dev, "could not set up interrupt handler.\n");
+ taskqueue_free(sc->age_tq);
+ sc->age_tq = NULL;
+ ether_ifdetach(ifp);
+ goto fail;
+ }
+
+fail:
+ if (error != 0)
+ age_detach(dev);
+
+ return (error);
+}
+
+static int
+age_detach(device_t dev)
+{
+ struct age_softc *sc;
+ struct ifnet *ifp;
+ int i, msic;
+
+ sc = device_get_softc(dev);
+
+ ifp = sc->age_ifp;
+ if (device_is_attached(dev)) {
+ AGE_LOCK(sc);
+ sc->age_flags |= AGE_FLAG_DETACH;
+ age_stop(sc);
+ AGE_UNLOCK(sc);
+ callout_drain(&sc->age_tick_ch);
+ taskqueue_drain(sc->age_tq, &sc->age_int_task);
+ taskqueue_drain(sc->age_tq, &sc->age_tx_task);
+ taskqueue_drain(taskqueue_swi, &sc->age_link_task);
+ ether_ifdetach(ifp);
+ }
+
+ if (sc->age_tq != NULL) {
+ taskqueue_drain(sc->age_tq, &sc->age_int_task);
+ taskqueue_free(sc->age_tq);
+ sc->age_tq = NULL;
+ }
+
+ if (sc->age_miibus != NULL) {
+ device_delete_child(dev, sc->age_miibus);
+ sc->age_miibus = NULL;
+ }
+ bus_generic_detach(dev);
+ age_dma_free(sc);
+
+ if (ifp != NULL) {
+ if_free(ifp);
+ sc->age_ifp = NULL;
+ }
+
+ if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
+ msic = AGE_MSIX_MESSAGES;
+ else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
+ msic = AGE_MSI_MESSAGES;
+ else
+ msic = 1;
+ for (i = 0; i < msic; i++) {
+ if (sc->age_intrhand[i] != NULL) {
+ bus_teardown_intr(dev, sc->age_irq[i],
+ sc->age_intrhand[i]);
+ sc->age_intrhand[i] = NULL;
+ }
+ }
+
+ bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
+ if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
+ pci_release_msi(dev);
+ bus_release_resources(dev, sc->age_res_spec, sc->age_res);
+ mtx_destroy(&sc->age_mtx);
+
+ return (0);
+}
+
+static void
+age_sysctl_node(struct age_softc *sc)
+{
+ int error;
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
+ "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,
+ "I", "Statistics");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
+ "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
+ sysctl_hw_age_int_mod, "I", "age interrupt moderation");
+
+ /* Pull in device tunables. */
+ sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
+ error = resource_int_value(device_get_name(sc->age_dev),
+ device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
+ if (error == 0) {
+ if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
+ sc->age_int_mod > AGE_IM_TIMER_MAX) {
+ device_printf(sc->age_dev,
+ "int_mod value out of range; using default: %d\n",
+ AGE_IM_TIMER_DEFAULT);
+ sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
+ }
+ }
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
+ "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit,
+ 0, sysctl_hw_age_proc_limit, "I",
+ "max number of Rx events to process");
+
+ /* Pull in device tunables. */
+ sc->age_process_limit = AGE_PROC_DEFAULT;
+ error = resource_int_value(device_get_name(sc->age_dev),
+ device_get_unit(sc->age_dev), "process_limit",
+ &sc->age_process_limit);
+ if (error == 0) {
+ if (sc->age_process_limit < AGE_PROC_MIN ||
+ sc->age_process_limit > AGE_PROC_MAX) {
+ device_printf(sc->age_dev,
+ "process_limit value out of range; "
+ "using default: %d\n", AGE_PROC_DEFAULT);
+ sc->age_process_limit = AGE_PROC_DEFAULT;
+ }
+ }
+}
+
+struct age_dmamap_arg {
+ bus_addr_t age_busaddr;
+};
+
+static void
+age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct age_dmamap_arg *ctx;
+
+ if (error != 0)
+ return;
+
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+ ctx = (struct age_dmamap_arg *)arg;
+ ctx->age_busaddr = segs[0].ds_addr;
+}
+
+/*
+ * The Attansic L1 controller has a single register to specify the high
+ * address part of DMA blocks. So all descriptor structures and DMA
+ * memory blocks should share the same high address in the given 4GB
+ * address space (i.e. crossing a 4GB boundary is not allowed).
+ */
+static int
+age_check_boundary(struct age_softc *sc)
+{
+ bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
+ bus_addr_t cmb_block_end, smb_block_end;
+
+ /* Tx/Rx descriptor queue should reside within 4GB boundary. */
+ tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
+ rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
+ rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
+ cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
+ smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;
+
+ if ((AGE_ADDR_HI(tx_ring_end) !=
+ AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
+ (AGE_ADDR_HI(rx_ring_end) !=
+ AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
+ (AGE_ADDR_HI(rr_ring_end) !=
+ AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
+ (AGE_ADDR_HI(cmb_block_end) !=
+ AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
+ (AGE_ADDR_HI(smb_block_end) !=
+ AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
+ return (EFBIG);
+
+ if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
+ (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
+ (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
+ (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
+ return (EFBIG);
+
+ return (0);
+}
+
+static int
+age_dma_alloc(struct age_softc *sc)
+{
+ struct age_txdesc *txd;
+ struct age_rxdesc *rxd;
+ bus_addr_t lowaddr;
+ struct age_dmamap_arg ctx;
+ int error, i;
+
+ lowaddr = BUS_SPACE_MAXADDR;
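+	/*
+	 * Start with full 64bit DMA addressing; if the rings turn out to
+	 * cross a 4GB boundary, fall back to 32bit addressing below.
+	 */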
+
+again:
+ /* Create parent ring/DMA block tag. */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->age_dev), /* parent */
+ 1, 0, /* alignment, boundary */
+ lowaddr, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_parent_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create parent DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create tag for Tx ring. */
+ error = bus_dma_tag_create(
+ sc->age_cdata.age_parent_tag, /* parent */
+ AGE_TX_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ AGE_TX_RING_SZ, /* maxsize */
+ 1, /* nsegments */
+ AGE_TX_RING_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_tx_ring_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create Tx ring DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create tag for Rx ring. */
+ error = bus_dma_tag_create(
+ sc->age_cdata.age_parent_tag, /* parent */
+ AGE_RX_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ AGE_RX_RING_SZ, /* maxsize */
+ 1, /* nsegments */
+ AGE_RX_RING_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_rx_ring_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create Rx ring DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create tag for Rx return ring. */
+ error = bus_dma_tag_create(
+ sc->age_cdata.age_parent_tag, /* parent */
+ AGE_RR_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ AGE_RR_RING_SZ, /* maxsize */
+ 1, /* nsegments */
+ AGE_RR_RING_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_rr_ring_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create Rx return ring DMA tag.\n");
+ goto fail;
+ }
+
+	/* Create tag for the coalescing message block. */
+ error = bus_dma_tag_create(
+ sc->age_cdata.age_parent_tag, /* parent */
+ AGE_CMB_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ AGE_CMB_BLOCK_SZ, /* maxsize */
+ 1, /* nsegments */
+ AGE_CMB_BLOCK_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_cmb_block_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create CMB DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create tag for statistics message block. */
+ error = bus_dma_tag_create(
+ sc->age_cdata.age_parent_tag, /* parent */
+ AGE_SMB_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ AGE_SMB_BLOCK_SZ, /* maxsize */
+ 1, /* nsegments */
+ AGE_SMB_BLOCK_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_smb_block_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create SMB DMA tag.\n");
+ goto fail;
+ }
+
+ /* Allocate DMA'able memory and load the DMA map. */
+ error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
+ (void **)&sc->age_rdata.age_tx_ring,
+ BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &sc->age_cdata.age_tx_ring_map);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not allocate DMA'able memory for Tx ring.\n");
+ goto fail;
+ }
+ ctx.age_busaddr = 0;
+ error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
+ sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
+ AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
+ if (error != 0 || ctx.age_busaddr == 0) {
+ device_printf(sc->age_dev,
+ "could not load DMA'able memory for Tx ring.\n");
+ goto fail;
+ }
+ sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;
+ /* Rx ring */
+ error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
+ (void **)&sc->age_rdata.age_rx_ring,
+ BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &sc->age_cdata.age_rx_ring_map);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not allocate DMA'able memory for Rx ring.\n");
+ goto fail;
+ }
+ ctx.age_busaddr = 0;
+ error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
+ sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
+ AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
+ if (error != 0 || ctx.age_busaddr == 0) {
+ device_printf(sc->age_dev,
+ "could not load DMA'able memory for Rx ring.\n");
+ goto fail;
+ }
+ sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;
+ /* Rx return ring */
+ error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
+ (void **)&sc->age_rdata.age_rr_ring,
+ BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &sc->age_cdata.age_rr_ring_map);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not allocate DMA'able memory for Rx return ring.\n");
+ goto fail;
+ }
+ ctx.age_busaddr = 0;
+ error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
+ sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
+ AGE_RR_RING_SZ, age_dmamap_cb,
+ &ctx, 0);
+ if (error != 0 || ctx.age_busaddr == 0) {
+ device_printf(sc->age_dev,
+ "could not load DMA'able memory for Rx return ring.\n");
+ goto fail;
+ }
+ sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;
+ /* CMB block */
+ error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
+ (void **)&sc->age_rdata.age_cmb_block,
+ BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &sc->age_cdata.age_cmb_block_map);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not allocate DMA'able memory for CMB block.\n");
+ goto fail;
+ }
+ ctx.age_busaddr = 0;
+ error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
+ sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
+ AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
+ if (error != 0 || ctx.age_busaddr == 0) {
+ device_printf(sc->age_dev,
+ "could not load DMA'able memory for CMB block.\n");
+ goto fail;
+ }
+ sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;
+ /* SMB block */
+ error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
+ (void **)&sc->age_rdata.age_smb_block,
+ BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &sc->age_cdata.age_smb_block_map);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not allocate DMA'able memory for SMB block.\n");
+ goto fail;
+ }
+ ctx.age_busaddr = 0;
+ error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
+ sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
+ AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
+ if (error != 0 || ctx.age_busaddr == 0) {
+ device_printf(sc->age_dev,
+ "could not load DMA'able memory for SMB block.\n");
+ goto fail;
+ }
+ sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;
+
+ /*
+	 * All ring buffers and DMA blocks should share the same
+	 * high address part of the 64bit DMA address space.
+ */
+ if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
+ (error = age_check_boundary(sc)) != 0) {
+ device_printf(sc->age_dev, "4GB boundary crossed, "
+ "switching to 32bit DMA addressing mode.\n");
+ age_dma_free(sc);
+ /* Limit DMA address space to 32bit and try again. */
+ lowaddr = BUS_SPACE_MAXADDR_32BIT;
+ goto again;
+ }
+
+ /*
+	 * Create the Tx/Rx buffer parent tag.
+	 * The L1 supports full 64bit DMA addressing for Tx/Rx buffers,
+	 * so they need a separate parent DMA tag.
+ */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->age_dev), /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_buffer_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create parent buffer DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create tag for Tx buffers. */
+ error = bus_dma_tag_create(
+ sc->age_cdata.age_buffer_tag, /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ AGE_TSO_MAXSIZE, /* maxsize */
+ AGE_MAXTXSEGS, /* nsegments */
+ AGE_TSO_MAXSEGSIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_tx_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev, "could not create Tx DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create tag for Rx buffers. */
+ error = bus_dma_tag_create(
+ sc->age_cdata.age_buffer_tag, /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, /* maxsize */
+ 1, /* nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->age_cdata.age_rx_tag);
+ if (error != 0) {
+ device_printf(sc->age_dev, "could not create Rx DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create DMA maps for Tx buffers. */
+ for (i = 0; i < AGE_TX_RING_CNT; i++) {
+ txd = &sc->age_cdata.age_txdesc[i];
+ txd->tx_m = NULL;
+ txd->tx_dmamap = NULL;
+ error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
+ &txd->tx_dmamap);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create Tx dmamap.\n");
+ goto fail;
+ }
+ }
+ /* Create DMA maps for Rx buffers. */
+ if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
+ &sc->age_cdata.age_rx_sparemap)) != 0) {
+ device_printf(sc->age_dev,
+ "could not create spare Rx dmamap.\n");
+ goto fail;
+ }
+ for (i = 0; i < AGE_RX_RING_CNT; i++) {
+ rxd = &sc->age_cdata.age_rxdesc[i];
+ rxd->rx_m = NULL;
+ rxd->rx_dmamap = NULL;
+ error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
+ &rxd->rx_dmamap);
+ if (error != 0) {
+ device_printf(sc->age_dev,
+ "could not create Rx dmamap.\n");
+ goto fail;
+ }
+ }
+
+fail:
+ return (error);
+}
+
+static void
+age_dma_free(struct age_softc *sc)
+{
+ struct age_txdesc *txd;
+ struct age_rxdesc *rxd;
+ int i;
+
+ /* Tx buffers */
+ if (sc->age_cdata.age_tx_tag != NULL) {
+ for (i = 0; i < AGE_TX_RING_CNT; i++) {
+ txd = &sc->age_cdata.age_txdesc[i];
+ if (txd->tx_dmamap != NULL) {
+ bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
+ txd->tx_dmamap);
+ txd->tx_dmamap = NULL;
+ }
+ }
+ bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
+ sc->age_cdata.age_tx_tag = NULL;
+ }
+ /* Rx buffers */
+ if (sc->age_cdata.age_rx_tag != NULL) {
+ for (i = 0; i < AGE_RX_RING_CNT; i++) {
+ rxd = &sc->age_cdata.age_rxdesc[i];
+ if (rxd->rx_dmamap != NULL) {
+ bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
+ rxd->rx_dmamap);
+ rxd->rx_dmamap = NULL;
+ }
+ }
+ if (sc->age_cdata.age_rx_sparemap != NULL) {
+ bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
+ sc->age_cdata.age_rx_sparemap);
+ sc->age_cdata.age_rx_sparemap = NULL;
+ }
+ bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
+ sc->age_cdata.age_rx_tag = NULL;
+ }
+ /* Tx ring. */
+ if (sc->age_cdata.age_tx_ring_tag != NULL) {
+ if (sc->age_cdata.age_tx_ring_map != NULL)
+ bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
+ sc->age_cdata.age_tx_ring_map);
+ if (sc->age_cdata.age_tx_ring_map != NULL &&
+ sc->age_rdata.age_tx_ring != NULL)
+ bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
+ sc->age_rdata.age_tx_ring,
+ sc->age_cdata.age_tx_ring_map);
+ sc->age_rdata.age_tx_ring = NULL;
+ sc->age_cdata.age_tx_ring_map = NULL;
+ bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
+ sc->age_cdata.age_tx_ring_tag = NULL;
+ }
+ /* Rx ring. */
+ if (sc->age_cdata.age_rx_ring_tag != NULL) {
+ if (sc->age_cdata.age_rx_ring_map != NULL)
+ bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
+ sc->age_cdata.age_rx_ring_map);
+ if (sc->age_cdata.age_rx_ring_map != NULL &&
+ sc->age_rdata.age_rx_ring != NULL)
+ bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
+ sc->age_rdata.age_rx_ring,
+ sc->age_cdata.age_rx_ring_map);
+ sc->age_rdata.age_rx_ring = NULL;
+ sc->age_cdata.age_rx_ring_map = NULL;
+ bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
+ sc->age_cdata.age_rx_ring_tag = NULL;
+ }
+ /* Rx return ring. */
+ if (sc->age_cdata.age_rr_ring_tag != NULL) {
+ if (sc->age_cdata.age_rr_ring_map != NULL)
+ bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
+ sc->age_cdata.age_rr_ring_map);
+ if (sc->age_cdata.age_rr_ring_map != NULL &&
+ sc->age_rdata.age_rr_ring != NULL)
+ bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
+ sc->age_rdata.age_rr_ring,
+ sc->age_cdata.age_rr_ring_map);
+ sc->age_rdata.age_rr_ring = NULL;
+ sc->age_cdata.age_rr_ring_map = NULL;
+ bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
+ sc->age_cdata.age_rr_ring_tag = NULL;
+ }
+ /* CMB block */
+ if (sc->age_cdata.age_cmb_block_tag != NULL) {
+ if (sc->age_cdata.age_cmb_block_map != NULL)
+ bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
+ sc->age_cdata.age_cmb_block_map);
+ if (sc->age_cdata.age_cmb_block_map != NULL &&
+ sc->age_rdata.age_cmb_block != NULL)
+ bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
+ sc->age_rdata.age_cmb_block,
+ sc->age_cdata.age_cmb_block_map);
+ sc->age_rdata.age_cmb_block = NULL;
+ sc->age_cdata.age_cmb_block_map = NULL;
+ bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
+ sc->age_cdata.age_cmb_block_tag = NULL;
+ }
+ /* SMB block */
+ if (sc->age_cdata.age_smb_block_tag != NULL) {
+ if (sc->age_cdata.age_smb_block_map != NULL)
+ bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
+ sc->age_cdata.age_smb_block_map);
+ if (sc->age_cdata.age_smb_block_map != NULL &&
+ sc->age_rdata.age_smb_block != NULL)
+ bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
+ sc->age_rdata.age_smb_block,
+ sc->age_cdata.age_smb_block_map);
+ sc->age_rdata.age_smb_block = NULL;
+ sc->age_cdata.age_smb_block_map = NULL;
+ bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
+ sc->age_cdata.age_smb_block_tag = NULL;
+ }
+
+ if (sc->age_cdata.age_buffer_tag != NULL) {
+ bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
+ sc->age_cdata.age_buffer_tag = NULL;
+ }
+ if (sc->age_cdata.age_parent_tag != NULL) {
+ bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
+ sc->age_cdata.age_parent_tag = NULL;
+ }
+}
+
+/*
+ * Make sure the interface is stopped at reboot time.
+ */
+static int
+age_shutdown(device_t dev)
+{
+
+ return (age_suspend(dev));
+}
+
+static void
+age_setwol(struct age_softc *sc)
+{
+ struct ifnet *ifp;
+ struct mii_data *mii;
+ uint32_t reg, pmcs;
+ uint16_t pmstat;
+ int aneg, i, pmc;
+
+ AGE_LOCK_ASSERT(sc);
+
+ if (pci_find_extcap(sc->age_dev, PCIY_PMG, &pmc) == 0) {
+ CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
+ /*
+		 * No PME capability, PHY power down.
+		 * XXX
+		 * For an unknown reason, powering down the PHY resulted
+		 * in unexpected results such as inaccessibility of the
+		 * hardware on a freshly rebooted system. Disable
+		 * powering down the PHY until I get more information on
+		 * Attansic/Atheros PHY hardware.
+ */
+#ifdef notyet
+ age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
+ MII_BMCR, BMCR_PDOWN);
+#endif
+ return;
+ }
+
+ ifp = sc->age_ifp;
+ if ((ifp->if_capenable & IFCAP_WOL) != 0) {
+ /*
+		 * Note, this driver resets the link speed to 10/100Mbps
+		 * with auto-negotiation, but we don't know whether that
+		 * operation will succeed, as we have no control after
+		 * powering off. If the renegotiation fails, WOL may not
+		 * work. Running at 1Gbps draws more power than the 375mA
+		 * at 3.3V specified in the PCI specification, and that
+		 * would result in completely shutting down power to the
+		 * ethernet controller.
+		 *
+		 * TODO
+		 * Save the currently negotiated media speed/duplex/
+		 * flow-control to the softc and restore the same link
+		 * again after resuming. PHY handling such as powering
+		 * down or resetting to 100Mbps may be better handled in
+		 * the suspend method of the phy driver.
+ */
+ mii = device_get_softc(sc->age_miibus);
+ mii_pollstat(mii);
+ aneg = 0;
+ if ((mii->mii_media_status & IFM_AVALID) != 0) {
+			switch (IFM_SUBTYPE(mii->mii_media_active)) {
+			case IFM_10_T:
+			case IFM_100_TX:
+				goto got_link;
+			case IFM_1000_T:
+				aneg++;
+				/* FALLTHROUGH */
+			default:
+				break;
+ }
+ }
+ age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
+ MII_100T2CR, 0);
+ age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
+ MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
+ ANAR_10 | ANAR_CSMA);
+ age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
+ MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
+ DELAY(1000);
+ if (aneg != 0) {
+			/* Poll link state until age(4) gets a 10/100 link. */
+ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
+ mii_pollstat(mii);
+ if ((mii->mii_media_status & IFM_AVALID) != 0) {
+ switch (IFM_SUBTYPE(
+ mii->mii_media_active)) {
+ case IFM_10_T:
+ case IFM_100_TX:
+ age_mac_config(sc);
+ goto got_link;
+ default:
+ break;
+ }
+ }
+ AGE_UNLOCK(sc);
+ pause("agelnk", hz);
+ AGE_LOCK(sc);
+ }
+			if (i == MII_ANEGTICKS_GIGE)
+				device_printf(sc->age_dev,
+				    "establishing a link failed, "
+				    "WOL may not work!\n");
+ }
+ /*
+		 * No link; force the MAC into a 100Mbps, full-duplex link.
+		 * This is the last resort and may or may not work.
+ */
+ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
+ mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
+ age_mac_config(sc);
+ }
+
+got_link:
+ pmcs = 0;
+ if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
+ pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
+ CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
+ reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
+ if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
+ reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
+ if ((ifp->if_capenable & IFCAP_WOL) != 0) {
+ reg |= MAC_CFG_RX_ENB;
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
+ }
+
+ /* Request PME. */
+ pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
+ pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
+ if ((ifp->if_capenable & IFCAP_WOL) != 0)
+ pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+ pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+#ifdef notyet
+ /* See above for powering down PHY issues. */
+ if ((ifp->if_capenable & IFCAP_WOL) == 0) {
+ /* No WOL, PHY power down. */
+ age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
+ MII_BMCR, BMCR_PDOWN);
+ }
+#endif
+}
+
+static int
+age_suspend(device_t dev)
+{
+ struct age_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ AGE_LOCK(sc);
+ age_stop(sc);
+ age_setwol(sc);
+ AGE_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+age_resume(device_t dev)
+{
+ struct age_softc *sc;
+ struct ifnet *ifp;
+ uint16_t cmd;
+
+ sc = device_get_softc(dev);
+
+ AGE_LOCK(sc);
+ /*
+	 * Clear the INTx emulation disable bit, which some hardware
+	 * sets during the resume event. From Linux.
+ */
+ cmd = pci_read_config(sc->age_dev, PCIR_COMMAND, 2);
+ if ((cmd & 0x0400) != 0) {
+ cmd &= ~0x0400;
+ pci_write_config(sc->age_dev, PCIR_COMMAND, cmd, 2);
+ }
+ ifp = sc->age_ifp;
+ if ((ifp->if_flags & IFF_UP) != 0)
+ age_init_locked(sc);
+
+ AGE_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+age_encap(struct age_softc *sc, struct mbuf **m_head)
+{
+ struct age_txdesc *txd, *txd_last;
+ struct tx_desc *desc;
+ struct mbuf *m;
+ struct ip *ip;
+ struct tcphdr *tcp;
+ bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
+ bus_dmamap_t map;
+ uint32_t cflags, ip_off, poff, vtag;
+ int error, i, nsegs, prod, si;
+
+ AGE_LOCK_ASSERT(sc);
+
+ M_ASSERTPKTHDR((*m_head));
+
+ m = *m_head;
+ ip = NULL;
+ tcp = NULL;
+ cflags = vtag = 0;
+ ip_off = poff = 0;
+ if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) {
+ /*
+		 * L1 requires the offset of the TCP/UDP payload in its Tx
+		 * descriptor to perform hardware Tx checksum offload.
+		 * Additionally, TSO requires the IP/TCP header size and
+		 * modification of the IP/TCP header in order to make the
+		 * TSO engine work. This kind of operation takes many CPU
+		 * cycles on FreeBSD, so a fast host CPU is needed for
+		 * smooth TSO performance.
+ */
+ struct ether_header *eh;
+
+ if (M_WRITABLE(m) == 0) {
+ /* Get a writable copy. */
+ m = m_dup(*m_head, M_DONTWAIT);
+ /* Release original mbufs. */
+ m_freem(*m_head);
+ if (m == NULL) {
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ *m_head = m;
+ }
+ ip_off = sizeof(struct ether_header);
+ m = m_pullup(m, ip_off);
+ if (m == NULL) {
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ eh = mtod(m, struct ether_header *);
+ /*
+ * Check if hardware VLAN insertion is off.
+ * Additional check for LLC/SNAP frame?
+ */
+ if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
+ ip_off = sizeof(struct ether_vlan_header);
+ m = m_pullup(m, ip_off);
+ if (m == NULL) {
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ }
+ m = m_pullup(m, ip_off + sizeof(struct ip));
+ if (m == NULL) {
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ ip = (struct ip *)(mtod(m, char *) + ip_off);
+ poff = ip_off + (ip->ip_hl << 2);
+ if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
+ m = m_pullup(m, poff + sizeof(struct tcphdr));
+ if (m == NULL) {
+ *m_head = NULL;
+ return (ENOBUFS);
+ }
+ tcp = (struct tcphdr *)(mtod(m, char *) + poff);
+ /*
+			 * L1 requires the IP/TCP header size and offset as
+			 * well as the TCP pseudo checksum, which complicates
+			 * TSO configuration. I guess this comes from
+			 * adherence to the Microsoft NDIS Large Send
+			 * specification, which requires insertion of the
+			 * pseudo checksum by the upper stack. The pseudo
+			 * checksum that NDIS refers to doesn't include the
+			 * TCP payload length, so age(4) should recompute
+			 * the pseudo checksum here. Hopefully this isn't
+			 * much of a burden on modern CPUs.
+			 * Reset the IP checksum and recompute the TCP pseudo
+			 * checksum as the NDIS specification says.
+ */
+ ip->ip_sum = 0;
+ if (poff + (tcp->th_off << 2) == m->m_pkthdr.len)
+ tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr,
+ htons((tcp->th_off << 2) + IPPROTO_TCP));
+ else
+ tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+ }
+ *m_head = m;
+ }
+
+ si = prod = sc->age_cdata.age_tx_prod;
+ txd = &sc->age_cdata.age_txdesc[prod];
+ txd_last = txd;
+ map = txd->tx_dmamap;
+
+ error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
+ *m_head, txsegs, &nsegs, 0);
+ if (error == EFBIG) {
+ m = m_collapse(*m_head, M_DONTWAIT, AGE_MAXTXSEGS);
+ if (m == NULL) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (ENOMEM);
+ }
+ *m_head = m;
+ error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
+ *m_head, txsegs, &nsegs, 0);
+ if (error != 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (error);
+ }
+ } else if (error != 0)
+ return (error);
+ if (nsegs == 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (EIO);
+ }
+
+ /* Check descriptor overrun. */
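+	/* Keep two descriptors in reserve so the ring never fills up. */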
+ if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
+ bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
+ return (ENOBUFS);
+ }
+
+ m = *m_head;
+ /* Configure Tx IP/TCP/UDP checksum offload. */
+ if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
+ cflags |= AGE_TD_CSUM;
+ if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
+ cflags |= AGE_TD_TCPCSUM;
+ if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
+ cflags |= AGE_TD_UDPCSUM;
+ /* Set checksum start offset. */
+ cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
+ /* Set checksum insertion position of TCP/UDP. */
+ cflags |= ((poff + m->m_pkthdr.csum_data) <<
+ AGE_TD_CSUM_XSUMOFFSET_SHIFT);
+ }
+
+ /* Configure TSO. */
+ if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
+ if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
+ /* Not TSO but IP/TCP checksum offload. */
+ cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
+ /* Clear TSO in order not to set AGE_TD_TSO_HDR. */
+ m->m_pkthdr.csum_flags &= ~CSUM_TSO;
+ } else {
+ /* Request TSO and set MSS. */
+ cflags |= AGE_TD_TSO_IPV4;
+ cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
+ cflags |= ((uint32_t)m->m_pkthdr.tso_segsz <<
+ AGE_TD_TSO_MSS_SHIFT);
+ }
+ /* Set IP/TCP header size. */
+ cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT;
+ cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT;
+ }
+
+ /* Configure VLAN hardware tag insertion. */
+ if ((m->m_flags & M_VLANTAG) != 0) {
+ vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
+ vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
+ cflags |= AGE_TD_INSERT_VLAN_TAG;
+ }
+
+ desc = NULL;
+ for (i = 0; i < nsegs; i++) {
+ desc = &sc->age_rdata.age_tx_ring[prod];
+ desc->addr = htole64(txsegs[i].ds_addr);
+ desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
+ desc->flags = htole32(cflags);
+ sc->age_cdata.age_tx_cnt++;
+ AGE_DESC_INC(prod, AGE_TX_RING_CNT);
+ }
+ /* Update producer index. */
+ sc->age_cdata.age_tx_prod = prod;
+
+ /* Set EOP on the last descriptor. */
+ prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
+ desc = &sc->age_rdata.age_tx_ring[prod];
+ desc->flags |= htole32(AGE_TD_EOP);
+
+ /* Lastly set TSO header and modify IP/TCP header for TSO operation. */
+ if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
+ desc = &sc->age_rdata.age_tx_ring[si];
+ desc->flags |= htole32(AGE_TD_TSO_HDR);
+ }
+
+ /* Swap dmamap of the first and the last. */
+ txd = &sc->age_cdata.age_txdesc[prod];
+ map = txd_last->tx_dmamap;
+ txd_last->tx_dmamap = txd->tx_dmamap;
+ txd->tx_dmamap = map;
+ txd->tx_m = m;
+
+ /* Sync descriptors. */
+ bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
+ sc->age_cdata.age_tx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ return (0);
+}
+
+static void
+age_tx_task(void *arg, int pending)
+{
+ struct ifnet *ifp;
+
+ ifp = (struct ifnet *)arg;
+ age_start(ifp);
+}
+
+static void
+age_start(struct ifnet *ifp)
+{
+ struct age_softc *sc;
+ struct mbuf *m_head;
+ int enq;
+
+ sc = ifp->if_softc;
+
+ AGE_LOCK(sc);
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0) {
+ AGE_UNLOCK(sc);
+ return;
+ }
+
+ for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+ if (m_head == NULL)
+ break;
+ /*
+ * Pack the data into the transmit ring. If we
+ * don't have room, set the OACTIVE flag and wait
+ * for the NIC to drain the ring.
+ */
+ if (age_encap(sc, &m_head)) {
+ if (m_head == NULL)
+ break;
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
+
+ enq++;
+ /*
+ * If there's a BPF listener, bounce a copy of this frame
+ * to him.
+ */
+ ETHER_BPF_MTAP(ifp, m_head);
+ }
+
+ if (enq > 0) {
+ /* Update mbox. */
+ AGE_COMMIT_MBOX(sc);
+ /* Set a timeout in case the chip goes out to lunch. */
+ sc->age_watchdog_timer = AGE_TX_TIMEOUT;
+ }
+
+ AGE_UNLOCK(sc);
+}
+
+static void
+age_watchdog(struct age_softc *sc)
+{
+ struct ifnet *ifp;
+
+ AGE_LOCK_ASSERT(sc);
+
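+	/* Done if the watchdog is disarmed (0) or still counting down. */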
+ if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer)
+ return;
+
+ ifp = sc->age_ifp;
+ if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
+ if_printf(sc->age_ifp, "watchdog timeout (missed link)\n");
+ ifp->if_oerrors++;
+ age_init_locked(sc);
+ return;
+ }
+ if (sc->age_cdata.age_tx_cnt == 0) {
+ if_printf(sc->age_ifp,
+ "watchdog timeout (missed Tx interrupts) -- recovering\n");
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
+ return;
+ }
+ if_printf(sc->age_ifp, "watchdog timeout\n");
+ ifp->if_oerrors++;
+ age_init_locked(sc);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
+}
+
+static int
+age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct age_softc *sc;
+ struct ifreq *ifr;
+ struct mii_data *mii;
+ uint32_t reg;
+ int error, mask;
+
+ sc = ifp->if_softc;
+ ifr = (struct ifreq *)data;
+ error = 0;
+ switch (cmd) {
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU)
+ error = EINVAL;
+ else if (ifp->if_mtu != ifr->ifr_mtu) {
+ AGE_LOCK(sc);
+ ifp->if_mtu = ifr->ifr_mtu;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ age_init_locked(sc);
+ AGE_UNLOCK(sc);
+ }
+ break;
+ case SIOCSIFFLAGS:
+ AGE_LOCK(sc);
+ if ((ifp->if_flags & IFF_UP) != 0) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ if (((ifp->if_flags ^ sc->age_if_flags)
+ & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
+ age_rxfilter(sc);
+ } else {
+ if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
+ age_init_locked(sc);
+ }
+ } else {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ age_stop(sc);
+ }
+ sc->age_if_flags = ifp->if_flags;
+ AGE_UNLOCK(sc);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ AGE_LOCK(sc);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ age_rxfilter(sc);
+ AGE_UNLOCK(sc);
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ mii = device_get_softc(sc->age_miibus);
+ error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+ break;
+ case SIOCSIFCAP:
+ AGE_LOCK(sc);
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ if ((mask & IFCAP_TXCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+ ifp->if_hwassist |= AGE_CSUM_FEATURES;
+ else
+ ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
+ }
+ if ((mask & IFCAP_RXCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ reg &= ~MAC_CFG_RXCSUM_ENB;
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ reg |= MAC_CFG_RXCSUM_ENB;
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
+ }
+ if ((mask & IFCAP_TSO4) != 0 &&
+ (ifp->if_capabilities & IFCAP_TSO4) != 0) {
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if ((ifp->if_capenable & IFCAP_TSO4) != 0)
+ ifp->if_hwassist |= CSUM_TSO;
+ else
+ ifp->if_hwassist &= ~CSUM_TSO;
+ }
+
+ if ((mask & IFCAP_WOL_MCAST) != 0 &&
+ (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
+ ifp->if_capenable ^= IFCAP_WOL_MCAST;
+ if ((mask & IFCAP_WOL_MAGIC) != 0 &&
+ (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
+ ifp->if_capenable ^= IFCAP_WOL_MAGIC;
+
+ if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ age_rxvlan(sc);
+ }
+ if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
+ ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
+ if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
+ (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ /*
+ * VLAN hardware tagging is required to do checksum
+ * offload or TSO on VLAN interface. Checksum offload
+ * on VLAN interface also requires hardware assistance
+ * of parent interface.
+ */
+ if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
+ ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
+ ifp->if_capenable &=
+ ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
+ AGE_UNLOCK(sc);
+ VLAN_CAPABILITIES(ifp);
+ break;
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (error);
+}
+
+static void
+age_mac_config(struct age_softc *sc)
+{
+ struct mii_data *mii;
+ uint32_t reg;
+
+ AGE_LOCK_ASSERT(sc);
+
+ mii = device_get_softc(sc->age_miibus);
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ reg &= ~MAC_CFG_FULL_DUPLEX;
+ reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
+ reg &= ~MAC_CFG_SPEED_MASK;
+ /* Reprogram MAC with resolved speed/duplex. */
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_10_T:
+ case IFM_100_TX:
+ reg |= MAC_CFG_SPEED_10_100;
+ break;
+ case IFM_1000_T:
+ reg |= MAC_CFG_SPEED_1000;
+ break;
+ }
+ if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
+ reg |= MAC_CFG_FULL_DUPLEX;
+#ifdef notyet
+ if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
+ reg |= MAC_CFG_TX_FC;
+ if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
+ reg |= MAC_CFG_RX_FC;
+#endif
+ }
+
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
+}
+
+static void
+age_link_task(void *arg, int pending)
+{
+ struct age_softc *sc;
+ struct mii_data *mii;
+ struct ifnet *ifp;
+ uint32_t reg;
+
+ sc = (struct age_softc *)arg;
+
+ AGE_LOCK(sc);
+ mii = device_get_softc(sc->age_miibus);
+ ifp = sc->age_ifp;
+ if (mii == NULL || ifp == NULL ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ AGE_UNLOCK(sc);
+ return;
+ }
+
+ sc->age_flags &= ~AGE_FLAG_LINK;
+ if ((mii->mii_media_status & IFM_AVALID) != 0) {
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_10_T:
+ case IFM_100_TX:
+ case IFM_1000_T:
+ sc->age_flags |= AGE_FLAG_LINK;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Stop Rx/Tx MACs. */
+ age_stop_rxmac(sc);
+ age_stop_txmac(sc);
+
+ /* Program MACs with resolved speed/duplex/flow-control. */
+ if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
+ age_mac_config(sc);
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ /* Restart DMA engine and Tx/Rx MAC. */
+ CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
+ DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
+ reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
+ }
+
+ AGE_UNLOCK(sc);
+}
+
+static void
+age_stats_update(struct age_softc *sc)
+{
+ struct age_stats *stat;
+ struct smb *smb;
+ struct ifnet *ifp;
+
+ AGE_LOCK_ASSERT(sc);
+
+ stat = &sc->age_stat;
+
+ bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
+ sc->age_cdata.age_smb_block_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ smb = sc->age_rdata.age_smb_block;
+ if (smb->updated == 0)
+ return;
+
+ ifp = sc->age_ifp;
+ /* Rx stats. */
+ stat->rx_frames += smb->rx_frames;
+ stat->rx_bcast_frames += smb->rx_bcast_frames;
+ stat->rx_mcast_frames += smb->rx_mcast_frames;
+ stat->rx_pause_frames += smb->rx_pause_frames;
+ stat->rx_control_frames += smb->rx_control_frames;
+ stat->rx_crcerrs += smb->rx_crcerrs;
+ stat->rx_lenerrs += smb->rx_lenerrs;
+ stat->rx_bytes += smb->rx_bytes;
+ stat->rx_runts += smb->rx_runts;
+ stat->rx_fragments += smb->rx_fragments;
+ stat->rx_pkts_64 += smb->rx_pkts_64;
+ stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
+ stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
+ stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
+ stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
+ stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
+ stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
+ stat->rx_pkts_truncated += smb->rx_pkts_truncated;
+ stat->rx_fifo_oflows += smb->rx_fifo_oflows;
+ stat->rx_desc_oflows += smb->rx_desc_oflows;
+ stat->rx_alignerrs += smb->rx_alignerrs;
+ stat->rx_bcast_bytes += smb->rx_bcast_bytes;
+ stat->rx_mcast_bytes += smb->rx_mcast_bytes;
+ stat->rx_pkts_filtered += smb->rx_pkts_filtered;
+
+ /* Tx stats. */
+ stat->tx_frames += smb->tx_frames;
+ stat->tx_bcast_frames += smb->tx_bcast_frames;
+ stat->tx_mcast_frames += smb->tx_mcast_frames;
+ stat->tx_pause_frames += smb->tx_pause_frames;
+ stat->tx_excess_defer += smb->tx_excess_defer;
+ stat->tx_control_frames += smb->tx_control_frames;
+ stat->tx_deferred += smb->tx_deferred;
+ stat->tx_bytes += smb->tx_bytes;
+ stat->tx_pkts_64 += smb->tx_pkts_64;
+ stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
+ stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
+ stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
+ stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
+ stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
+ stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
+ stat->tx_single_colls += smb->tx_single_colls;
+ stat->tx_multi_colls += smb->tx_multi_colls;
+ stat->tx_late_colls += smb->tx_late_colls;
+ stat->tx_excess_colls += smb->tx_excess_colls;
+ stat->tx_underrun += smb->tx_underrun;
+ stat->tx_desc_underrun += smb->tx_desc_underrun;
+ stat->tx_lenerrs += smb->tx_lenerrs;
+ stat->tx_pkts_truncated += smb->tx_pkts_truncated;
+ stat->tx_bcast_bytes += smb->tx_bcast_bytes;
+ stat->tx_mcast_bytes += smb->tx_mcast_bytes;
+
+ /* Update counters in ifnet. */
+ ifp->if_opackets += smb->tx_frames;
+
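+	/*
+	 * Each frame aborted for excessive collisions exhausted the
+	 * HDPX_CFG_RETRY_DEFAULT retry limit, so it is counted as
+	 * that many collisions in the aggregate below.
+	 */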
+ ifp->if_collisions += smb->tx_single_colls +
+ smb->tx_multi_colls + smb->tx_late_colls +
+ smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
+
+ ifp->if_oerrors += smb->tx_excess_colls +
+ smb->tx_late_colls + smb->tx_underrun +
+ smb->tx_pkts_truncated;
+
+ ifp->if_ipackets += smb->rx_frames;
+
+ ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
+ smb->rx_runts + smb->rx_pkts_truncated +
+ smb->rx_fifo_oflows + smb->rx_desc_oflows +
+ smb->rx_alignerrs;
+
+ /* Update done, clear. */
+ smb->updated = 0;
+
+ bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
+ sc->age_cdata.age_smb_block_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
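+/*
+ * age_intr() runs as an interrupt filter: it merely acknowledges the
+ * interrupt, masks further interrupts with INTR_DIS_INT and defers the
+ * actual processing to age_int_task() on the driver taskqueue, where
+ * the softc mutex can safely be taken.
+ */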
+static int
+age_intr(void *arg)
+{
+ struct age_softc *sc;
+ uint32_t status;
+
+ sc = (struct age_softc *)arg;
+
+ status = CSR_READ_4(sc, AGE_INTR_STATUS);
+ if (status == 0 || (status & AGE_INTRS) == 0)
+ return (FILTER_STRAY);
+ /* Disable interrupts. */
+ CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
+ taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
+
+ return (FILTER_HANDLED);
+}
+
+static void
+age_int_task(void *arg, int pending)
+{
+ struct age_softc *sc;
+ struct ifnet *ifp;
+ struct cmb *cmb;
+ uint32_t status;
+
+ sc = (struct age_softc *)arg;
+
+ AGE_LOCK(sc);
+
+ bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
+ sc->age_cdata.age_cmb_block_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ cmb = sc->age_rdata.age_cmb_block;
+ status = le32toh(cmb->intr_status);
+ if (sc->age_morework != 0)
+ status |= INTR_CMB_RX;
+ if ((status & AGE_INTRS) == 0)
+ goto done;
+
+ sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
+ TPD_CONS_SHIFT;
+ sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
+ RRD_PROD_SHIFT;
+ /* Let hardware know CMB was served. */
+ cmb->intr_status = 0;
+ bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
+ sc->age_cdata.age_cmb_block_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+#if 0
+ printf("INTR: 0x%08x\n", status);
+ status &= ~INTR_DIS_DMA;
+ CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
+#endif
+ ifp = sc->age_ifp;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ if ((status & INTR_CMB_RX) != 0)
+ sc->age_morework = age_rxintr(sc, sc->age_rr_prod,
+ sc->age_process_limit);
+ if ((status & INTR_CMB_TX) != 0)
+ age_txintr(sc, sc->age_tpd_cons);
+ if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
+ if ((status & INTR_DMA_RD_TO_RST) != 0)
+ device_printf(sc->age_dev,
+ "DMA read error! -- resetting\n");
+ if ((status & INTR_DMA_WR_TO_RST) != 0)
+ device_printf(sc->age_dev,
+ "DMA write error! -- resetting\n");
+ age_init_locked(sc);
+ }
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
+ if ((status & INTR_SMB) != 0)
+ age_stats_update(sc);
+ }
+
+	/* Check whether CMB was updated while serving Tx/Rx/SMB handlers. */
+ bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
+ sc->age_cdata.age_cmb_block_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ status = le32toh(cmb->intr_status);
+ if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) {
+ taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
+ AGE_UNLOCK(sc);
+ return;
+ }
+
+done:
+ /* Re-enable interrupts. */
+ CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
+ AGE_UNLOCK(sc);
+}
+
+static void
+age_txintr(struct age_softc *sc, int tpd_cons)
+{
+ struct ifnet *ifp;
+ struct age_txdesc *txd;
+ int cons, prog;
+
+ AGE_LOCK_ASSERT(sc);
+
+ ifp = sc->age_ifp;
+
+ bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
+ sc->age_cdata.age_tx_ring_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /*
+ * Go through our Tx list and free mbufs for those
+ * frames which have been transmitted.
+ */
+ cons = sc->age_cdata.age_tx_cons;
+ for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
+ if (sc->age_cdata.age_tx_cnt <= 0)
+ break;
+ prog++;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ sc->age_cdata.age_tx_cnt--;
+ txd = &sc->age_cdata.age_txdesc[cons];
+ /*
+		 * Clear Tx descriptors; this is not required but helps
+		 * debugging in case of Tx issues.
+ */
+ txd->tx_desc->addr = 0;
+ txd->tx_desc->len = 0;
+ txd->tx_desc->flags = 0;
+
+ if (txd->tx_m == NULL)
+ continue;
+ /* Reclaim transmitted mbufs. */
+ bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap);
+ m_freem(txd->tx_m);
+ txd->tx_m = NULL;
+ }
+
+ if (prog > 0) {
+ sc->age_cdata.age_tx_cons = cons;
+
+ /*
+ * Unarm watchdog timer only when there are no pending
+ * Tx descriptors in queue.
+ */
+ if (sc->age_cdata.age_tx_cnt == 0)
+ sc->age_watchdog_timer = 0;
+ bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
+ sc->age_cdata.age_tx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
+}
+
+/* Receive a frame. */
+static void
+age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
+{
+ struct age_rxdesc *rxd;
+ struct rx_desc *desc;
+ struct ifnet *ifp;
+ struct mbuf *mp, *m;
+ uint32_t status, index, vtag;
+ int count, nsegs, pktlen;
+ int rx_cons;
+
+ AGE_LOCK_ASSERT(sc);
+
+ ifp = sc->age_ifp;
+ status = le32toh(rxrd->flags);
+ index = le32toh(rxrd->index);
+ rx_cons = AGE_RX_CONS(index);
+ nsegs = AGE_RX_NSEGS(index);
+
+ sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
+ if ((status & AGE_RRD_ERROR) != 0 &&
+ (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
+ AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
+		/*
+		 * We want to pass the following frames to the upper
+		 * layer regardless of the error status of the Rx
+		 * return ring:
+		 *
+		 * o IP/TCP/UDP checksum is bad.
+		 * o frame length and protocol specific length do
+		 *   not match.
+		 *
+		 * Such frames are not covered by the error mask above,
+		 * so only frames with the listed hard errors are
+		 * dropped here.
+		 */
+ sc->age_cdata.age_rx_cons += nsegs;
+ sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
+ return;
+ }
+
+ pktlen = 0;
+ for (count = 0; count < nsegs; count++,
+ AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
+ rxd = &sc->age_cdata.age_rxdesc[rx_cons];
+ mp = rxd->rx_m;
+ desc = rxd->rx_desc;
+ /* Add a new receive buffer to the ring. */
+ if (age_newbuf(sc, rxd) != 0) {
+ ifp->if_iqdrops++;
+ /* Reuse Rx buffers. */
+ if (sc->age_cdata.age_rxhead != NULL) {
+ m_freem(sc->age_cdata.age_rxhead);
+ AGE_RXCHAIN_RESET(sc);
+ }
+ break;
+ }
+
+ /* The length of the first mbuf is computed last. */
+ if (count != 0) {
+ mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
+ pktlen += mp->m_len;
+ }
+
+ /* Chain received mbufs. */
+ if (sc->age_cdata.age_rxhead == NULL) {
+ sc->age_cdata.age_rxhead = mp;
+ sc->age_cdata.age_rxtail = mp;
+ } else {
+ mp->m_flags &= ~M_PKTHDR;
+ sc->age_cdata.age_rxprev_tail =
+ sc->age_cdata.age_rxtail;
+ sc->age_cdata.age_rxtail->m_next = mp;
+ sc->age_cdata.age_rxtail = mp;
+ }
+
+ if (count == nsegs - 1) {
+ /*
+			 * It seems that the L1 controller provides no
+			 * way to strip the CRC bytes in hardware.
+ */
+ sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
+ if (nsegs > 1) {
+ /* Remove the CRC bytes in chained mbufs. */
+ pktlen -= ETHER_CRC_LEN;
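+				/*
+				 * If the last mbuf carries nothing but
+				 * (part of) the CRC, drop it entirely
+				 * and trim the remaining CRC bytes from
+				 * the previous mbuf instead.
+				 */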
+ if (mp->m_len <= ETHER_CRC_LEN) {
+ sc->age_cdata.age_rxtail =
+ sc->age_cdata.age_rxprev_tail;
+ sc->age_cdata.age_rxtail->m_len -=
+ (ETHER_CRC_LEN - mp->m_len);
+ sc->age_cdata.age_rxtail->m_next = NULL;
+ m_freem(mp);
+ } else {
+ mp->m_len -= ETHER_CRC_LEN;
+ }
+ }
+
+ m = sc->age_cdata.age_rxhead;
+ m->m_flags |= M_PKTHDR;
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = sc->age_cdata.age_rxlen;
+ /* Set the first mbuf length. */
+ m->m_len = sc->age_cdata.age_rxlen - pktlen;
+
+ /*
+ * Set checksum information.
+			 * It seems that the L1 controller can compute a
+			 * partial checksum. The partial checksum value
+			 * can be used to accelerate checksum computation
+			 * for fragmented TCP/UDP packets. The upper
+			 * network stack already takes advantage of the
+			 * partial checksum value in the IP reassembly
+			 * stage. But I'm not sure of the correctness of
+			 * the partial hardware checksum assistance due
+			 * to the lack of a data sheet. If it is proven
+			 * to work on L1 I'll enable it.
+ */
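+			/*
+			 * Note the full offload done here reports the
+			 * checksum as already verified (CSUM_DATA_VALID |
+			 * CSUM_PSEUDO_HDR with csum_data set to 0xffff);
+			 * a partial-checksum path would instead pass
+			 * CSUM_DATA_VALID alone and store the 16-bit
+			 * hardware sum in csum_data.
+			 */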
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
+ (status & AGE_RRD_IPV4) != 0) {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ if ((status & AGE_RRD_IPCSUM_NOK) == 0)
+ m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
+ (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ /*
+			 * Don't mark the checksum bad for TCP/UDP
+			 * frames, as fragmented frames may always have
+			 * the bad-checksum bit set in the descriptor
+			 * status.
+ */
+ }
+
+ /* Check for VLAN tagged frames. */
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (status & AGE_RRD_VLAN) != 0) {
+ vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
+ m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag);
+ m->m_flags |= M_VLANTAG;
+ }
+
+ /* Pass it on. */
+ AGE_UNLOCK(sc);
+ (*ifp->if_input)(ifp, m);
+ AGE_LOCK(sc);
+
+ /* Reset mbuf chains. */
+ AGE_RXCHAIN_RESET(sc);
+ }
+ }
+
+ if (count != nsegs) {
+ sc->age_cdata.age_rx_cons += nsegs;
+ sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
+ } else
+ sc->age_cdata.age_rx_cons = rx_cons;
+}
+
+static int
+age_rxintr(struct age_softc *sc, int rr_prod, int count)
+{
+ struct rx_rdesc *rxrd;
+ int rr_cons, nsegs, pktlen, prog;
+
+ AGE_LOCK_ASSERT(sc);
+
+ rr_cons = sc->age_cdata.age_rr_cons;
+ if (rr_cons == rr_prod)
+ return (0);
+
+ bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
+ sc->age_cdata.age_rr_ring_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
+ sc->age_cdata.age_rx_ring_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ for (prog = 0; rr_cons != rr_prod; prog++) {
+		if (count-- <= 0)
+ break;
+ rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
+ nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
+ if (nsegs == 0)
+ break;
+ /*
+ * Check number of segments against received bytes.
+		 * A non-matching value would indicate that the
+		 * hardware is still updating the Rx return descriptors.
+ * I'm not sure whether this check is really needed.
+ */
+ pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
+ if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
+ (MCLBYTES - ETHER_ALIGN)))
+ break;
+
+ /* Received a frame. */
+ age_rxeof(sc, rxrd);
+ /* Clear return ring. */
+ rxrd->index = 0;
+ AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
+ }
+
+ if (prog > 0) {
+ /* Update the consumer index. */
+ sc->age_cdata.age_rr_cons = rr_cons;
+
+ /* Sync descriptors. */
+ bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
+ sc->age_cdata.age_rx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
+ sc->age_cdata.age_rr_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+		/* Notify hardware of the availability of new Rx buffers. */
+ AGE_COMMIT_MBOX(sc);
+ }
+
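+	/* A non-zero return (EAGAIN) makes age_int_task() reschedule itself. */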
+ return (count > 0 ? 0 : EAGAIN);
+}
+
+static void
+age_tick(void *arg)
+{
+ struct age_softc *sc;
+ struct mii_data *mii;
+
+ sc = (struct age_softc *)arg;
+
+ AGE_LOCK_ASSERT(sc);
+
+ mii = device_get_softc(sc->age_miibus);
+ mii_tick(mii);
+ age_watchdog(sc);
+ callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
+}
+
+static void
+age_reset(struct age_softc *sc)
+{
+ uint32_t reg;
+ int i;
+
+ CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
+ for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
+ DELAY(1);
+ if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0)
+ break;
+ }
+ if (i == 0)
+ device_printf(sc->age_dev, "master reset timeout!\n");
+
+ for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
+ if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
+ break;
+ DELAY(10);
+ }
+
+ if (i == 0)
+ device_printf(sc->age_dev, "reset timeout(0x%08x)!\n", reg);
+ /* Initialize PCIe module. From Linux. */
+ CSR_WRITE_4(sc, 0x12FC, 0x6500);
+ CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
+}
+
+static void
+age_init(void *xsc)
+{
+ struct age_softc *sc;
+
+ sc = (struct age_softc *)xsc;
+ AGE_LOCK(sc);
+ age_init_locked(sc);
+ AGE_UNLOCK(sc);
+}
+
+static void
+age_init_locked(struct age_softc *sc)
+{
+ struct ifnet *ifp;
+ struct mii_data *mii;
+ uint8_t eaddr[ETHER_ADDR_LEN];
+ bus_addr_t paddr;
+ uint32_t reg, fsize;
+ uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
+ int error;
+
+ AGE_LOCK_ASSERT(sc);
+
+ ifp = sc->age_ifp;
+ mii = device_get_softc(sc->age_miibus);
+
+ /*
+ * Cancel any pending I/O.
+ */
+ age_stop(sc);
+
+ /*
+ * Reset the chip to a known state.
+ */
+ age_reset(sc);
+
+ /* Initialize descriptors. */
+ error = age_init_rx_ring(sc);
+ if (error != 0) {
+ device_printf(sc->age_dev, "no memory for Rx buffers.\n");
+ age_stop(sc);
+ return;
+ }
+ age_init_rr_ring(sc);
+ age_init_tx_ring(sc);
+ age_init_cmb_block(sc);
+ age_init_smb_block(sc);
+
+ /* Reprogram the station address. */
+ bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
+ CSR_WRITE_4(sc, AGE_PAR0,
+ eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
+ CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
+
+ /* Set descriptor base addresses. */
+ paddr = sc->age_rdata.age_tx_ring_paddr;
+ CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
+ paddr = sc->age_rdata.age_rx_ring_paddr;
+ CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
+ paddr = sc->age_rdata.age_rr_ring_paddr;
+ CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
+ paddr = sc->age_rdata.age_tx_ring_paddr;
+ CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
+ paddr = sc->age_rdata.age_cmb_block_paddr;
+ CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
+ paddr = sc->age_rdata.age_smb_block_paddr;
+ CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
+	/* Set Rx and Rx return descriptor counters. */
+ CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
+ ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
+ DESC_RRD_CNT_MASK) |
+ ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
+ /* Set Tx descriptor counter. */
+ CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
+ (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
+
+ /* Tell hardware that we're ready to load descriptors. */
+ CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
+
+ /*
+ * Initialize mailbox register.
+ * Updated producer/consumer index information is exchanged
+	 * through this mailbox register. However the Tx producer and
+	 * the Rx return consumer/Rx producer indices all share the one
+	 * register, which makes it hard to separate the Tx and Rx code
+	 * paths without locking. If the L1 hardware had separate
+	 * mailbox registers for Tx and Rx consumer/producer management
+	 * we could have independent Tx/Rx handlers, and in turn the Rx
+	 * handler could run without any locking.
+ */
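+	/*
+	 * A sketch of what AGE_COMMIT_MBOX() (defined in if_agevar.h)
+	 * is assumed to reduce to, using the MBOX field definitions
+	 * from if_agereg.h:
+	 *
+	 *	CSR_WRITE_4(sc, AGE_MBOX,
+	 *	    ((sc->age_cdata.age_rx_cons << MBOX_RD_PROD_IDX_SHIFT) &
+	 *	    MBOX_RD_PROD_IDX_MASK) |
+	 *	    ((sc->age_cdata.age_rr_cons << MBOX_RRD_CONS_IDX_SHIFT) &
+	 *	    MBOX_RRD_CONS_IDX_MASK) |
+	 *	    ((sc->age_cdata.age_tx_prod << MBOX_TD_PROD_IDX_SHIFT) &
+	 *	    MBOX_TD_PROD_IDX_MASK));
+	 */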
+ AGE_COMMIT_MBOX(sc);
+
+ /* Configure IPG/IFG parameters. */
+ CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
+ ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
+ ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
+ ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
+ ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
+
+ /* Set parameters for half-duplex media. */
+ CSR_WRITE_4(sc, AGE_HDPX_CFG,
+ ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
+ HDPX_CFG_LCOL_MASK) |
+ ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
+ HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
+ ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
+ HDPX_CFG_ABEBT_MASK) |
+ ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
+ HDPX_CFG_JAMIPG_MASK));
+
+ /* Configure interrupt moderation timer. */
+ CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
+ reg = CSR_READ_4(sc, AGE_MASTER_CFG);
+ reg &= ~MASTER_MTIMER_ENB;
+ if (AGE_USECS(sc->age_int_mod) == 0)
+ reg &= ~MASTER_ITIMER_ENB;
+ else
+ reg |= MASTER_ITIMER_ENB;
+ CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
+	if (bootverbose)
+ device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
+ sc->age_int_mod);
+ CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
+
+	/* Set max frame size; don't let the MTU be less than ETHERMTU. */
+ if (ifp->if_mtu < ETHERMTU)
+ sc->age_max_frame_size = ETHERMTU;
+ else
+ sc->age_max_frame_size = ifp->if_mtu;
+ sc->age_max_frame_size += ETHER_HDR_LEN +
+ sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
+ CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
+ /* Configure jumbo frame. */
+ fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
+ CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
+ (((fsize / sizeof(uint64_t)) <<
+ RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
+ ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
+ RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
+ ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
+ RXQ_JUMBO_CFG_RRD_TIMER_MASK));
+
+ /* Configure flow-control parameters. From Linux. */
+ if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
+ /*
+		 * Magic workaround for old L1 revisions.
+		 * It's not known which hw revision requires this magic.
+ */
+ CSR_WRITE_4(sc, 0x12FC, 0x6500);
+ /*
+ * Another magic workaround for flow-control mode
+ * change. From Linux.
+ */
+ CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
+ }
+ /*
+ * TODO
+	 * Should understand how the pause parameters relate to the
+	 * FIFO size and the number of Rx and Rx return descriptors.
+ *
+ * Magic parameters came from Linux.
+ */
+ switch (sc->age_chip_rev) {
+ case 0x8001:
+ case 0x9001:
+ case 0x9002:
+ case 0x9003:
+ rxf_hi = AGE_RX_RING_CNT / 16;
+ rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
+ rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
+ rrd_lo = AGE_RR_RING_CNT / 16;
+ break;
+ default:
+ reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
+ rxf_lo = reg / 16;
+ if (rxf_lo < 192)
+ rxf_lo = 192;
+ rxf_hi = (reg * 7) / 8;
+ if (rxf_hi < rxf_lo)
+ rxf_hi = rxf_lo + 16;
+ reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
+ rrd_lo = reg / 8;
+ rrd_hi = (reg * 7) / 8;
+ if (rrd_lo < 2)
+ rrd_lo = 2;
+ if (rrd_hi < rrd_lo)
+ rrd_hi = rrd_lo + 3;
+ break;
+ }
+ CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
+ ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
+ RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
+ ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
+ RXQ_FIFO_PAUSE_THRESH_HI_MASK));
+ CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
+ ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
+ RXQ_RRD_PAUSE_THRESH_LO_MASK) |
+ ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
+ RXQ_RRD_PAUSE_THRESH_HI_MASK));
+
+ /* Configure RxQ. */
+ CSR_WRITE_4(sc, AGE_RXQ_CFG,
+ ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
+ RXQ_CFG_RD_BURST_MASK) |
+ ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
+ RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
+ ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
+ RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
+ RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
+
+ /* Configure TxQ. */
+ CSR_WRITE_4(sc, AGE_TXQ_CFG,
+ ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
+ TXQ_CFG_TPD_BURST_MASK) |
+ ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
+ TXQ_CFG_TX_FIFO_BURST_MASK) |
+ ((TXQ_CFG_TPD_FETCH_DEFAULT <<
+ TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
+ TXQ_CFG_ENB);
+
+ CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
+ (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
+ TX_JUMBO_TPD_TH_MASK) |
+ ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
+ TX_JUMBO_TPD_IPG_MASK));
+ /* Configure DMA parameters. */
+ CSR_WRITE_4(sc, AGE_DMA_CFG,
+ DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
+ sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
+ sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
+
+ /* Configure CMB DMA write threshold. */
+ CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
+ ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
+ CMB_WR_THRESH_RRD_MASK) |
+ ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
+ CMB_WR_THRESH_TPD_MASK));
+
+ /* Set CMB/SMB timer and enable them. */
+ CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
+ ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
+ ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
+	/* Request SMB updates every second. */
+ CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
+ CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
+
+ /*
+	 * Disable all WOL bits as WOL can interfere with normal Rx
+	 * operation.
+ */
+ CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
+
+ /*
+ * Configure Tx/Rx MACs.
+ * - Auto-padding for short frames.
+ * - Enable CRC generation.
+	 * Start with full-duplex/1000Mbps media. The actual MAC
+	 * reconfiguration follows after link establishment.
+ */
+ CSR_WRITE_4(sc, AGE_MAC_CFG,
+ MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
+ MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
+ ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
+ MAC_CFG_PREAMBLE_MASK));
+ /* Set up the receive filter. */
+ age_rxfilter(sc);
+ age_rxvlan(sc);
+
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ reg |= MAC_CFG_RXCSUM_ENB;
+
+	/* Ack all pending interrupts and clear them. */
+ CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
+ CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
+
+ /* Finally enable Tx/Rx MAC. */
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
+
+ sc->age_flags &= ~AGE_FLAG_LINK;
+ /* Switch to the current media. */
+ mii_mediachg(mii);
+
+ callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+}
+
+static void
+age_stop(struct age_softc *sc)
+{
+ struct ifnet *ifp;
+ struct age_txdesc *txd;
+ struct age_rxdesc *rxd;
+ uint32_t reg;
+ int i;
+
+ AGE_LOCK_ASSERT(sc);
+ /*
+ * Mark the interface down and cancel the watchdog timer.
+ */
+ ifp = sc->age_ifp;
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->age_flags &= ~AGE_FLAG_LINK;
+ callout_stop(&sc->age_tick_ch);
+ sc->age_watchdog_timer = 0;
+
+ /*
+ * Disable interrupts.
+ */
+ CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
+ CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
+ /* Stop CMB/SMB updates. */
+ CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
+ /* Stop Rx/Tx MAC. */
+ age_stop_rxmac(sc);
+ age_stop_txmac(sc);
+ /* Stop DMA. */
+ CSR_WRITE_4(sc, AGE_DMA_CFG,
+ CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
+ /* Stop TxQ/RxQ. */
+ CSR_WRITE_4(sc, AGE_TXQ_CFG,
+ CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
+ CSR_WRITE_4(sc, AGE_RXQ_CFG,
+ CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
+ for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
+ if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
+ break;
+ DELAY(10);
+ }
+ if (i == 0)
+ device_printf(sc->age_dev,
+ "stopping Rx/Tx MACs timed out(0x%08x)!\n", reg);
+
+	/* Free the partially assembled Rx chain, if any. */
+ if (sc->age_cdata.age_rxhead != NULL)
+ m_freem(sc->age_cdata.age_rxhead);
+ AGE_RXCHAIN_RESET(sc);
+ /*
+ * Free RX and TX mbufs still in the queues.
+ */
+ for (i = 0; i < AGE_RX_RING_CNT; i++) {
+ rxd = &sc->age_cdata.age_rxdesc[i];
+ if (rxd->rx_m != NULL) {
+ bus_dmamap_sync(sc->age_cdata.age_rx_tag,
+ rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->age_cdata.age_rx_tag,
+ rxd->rx_dmamap);
+ m_freem(rxd->rx_m);
+ rxd->rx_m = NULL;
+ }
+ }
+ for (i = 0; i < AGE_TX_RING_CNT; i++) {
+ txd = &sc->age_cdata.age_txdesc[i];
+ if (txd->tx_m != NULL) {
+ bus_dmamap_sync(sc->age_cdata.age_tx_tag,
+ txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->age_cdata.age_tx_tag,
+ txd->tx_dmamap);
+ m_freem(txd->tx_m);
+ txd->tx_m = NULL;
+ }
+ }
+}
+
+static void
+age_stop_txmac(struct age_softc *sc)
+{
+ uint32_t reg;
+ int i;
+
+ AGE_LOCK_ASSERT(sc);
+
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ if ((reg & MAC_CFG_TX_ENB) != 0) {
+ reg &= ~MAC_CFG_TX_ENB;
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
+ }
+ /* Stop Tx DMA engine. */
+ reg = CSR_READ_4(sc, AGE_DMA_CFG);
+ if ((reg & DMA_CFG_RD_ENB) != 0) {
+ reg &= ~DMA_CFG_RD_ENB;
+ CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
+ }
+ for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
+ if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
+ (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
+ break;
+ DELAY(10);
+ }
+ if (i == 0)
+ device_printf(sc->age_dev, "stopping TxMAC timeout!\n");
+}
+
+static void
+age_stop_rxmac(struct age_softc *sc)
+{
+ uint32_t reg;
+ int i;
+
+ AGE_LOCK_ASSERT(sc);
+
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ if ((reg & MAC_CFG_RX_ENB) != 0) {
+ reg &= ~MAC_CFG_RX_ENB;
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
+ }
+ /* Stop Rx DMA engine. */
+ reg = CSR_READ_4(sc, AGE_DMA_CFG);
+ if ((reg & DMA_CFG_WR_ENB) != 0) {
+ reg &= ~DMA_CFG_WR_ENB;
+ CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
+ }
+ for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
+ if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
+ (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
+ break;
+ DELAY(10);
+ }
+ if (i == 0)
+ device_printf(sc->age_dev, "stopping RxMAC timeout!\n");
+}
+
+static void
+age_init_tx_ring(struct age_softc *sc)
+{
+ struct age_ring_data *rd;
+ struct age_txdesc *txd;
+ int i;
+
+ AGE_LOCK_ASSERT(sc);
+
+ sc->age_cdata.age_tx_prod = 0;
+ sc->age_cdata.age_tx_cons = 0;
+ sc->age_cdata.age_tx_cnt = 0;
+
+ rd = &sc->age_rdata;
+ bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
+ for (i = 0; i < AGE_TX_RING_CNT; i++) {
+ txd = &sc->age_cdata.age_txdesc[i];
+ txd->tx_desc = &rd->age_tx_ring[i];
+ txd->tx_m = NULL;
+ }
+
+ bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
+ sc->age_cdata.age_tx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static int
+age_init_rx_ring(struct age_softc *sc)
+{
+ struct age_ring_data *rd;
+ struct age_rxdesc *rxd;
+ int i;
+
+ AGE_LOCK_ASSERT(sc);
+
+ sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
+ sc->age_morework = 0;
+ rd = &sc->age_rdata;
+ bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
+ for (i = 0; i < AGE_RX_RING_CNT; i++) {
+ rxd = &sc->age_cdata.age_rxdesc[i];
+ rxd->rx_m = NULL;
+ rxd->rx_desc = &rd->age_rx_ring[i];
+ if (age_newbuf(sc, rxd) != 0)
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
+ sc->age_cdata.age_rx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ return (0);
+}
+
+static void
+age_init_rr_ring(struct age_softc *sc)
+{
+ struct age_ring_data *rd;
+
+ AGE_LOCK_ASSERT(sc);
+
+ sc->age_cdata.age_rr_cons = 0;
+ AGE_RXCHAIN_RESET(sc);
+
+ rd = &sc->age_rdata;
+ bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
+ bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
+ sc->age_cdata.age_rr_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static void
+age_init_cmb_block(struct age_softc *sc)
+{
+ struct age_ring_data *rd;
+
+ AGE_LOCK_ASSERT(sc);
+
+ rd = &sc->age_rdata;
+ bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
+ bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
+ sc->age_cdata.age_cmb_block_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static void
+age_init_smb_block(struct age_softc *sc)
+{
+ struct age_ring_data *rd;
+
+ AGE_LOCK_ASSERT(sc);
+
+ rd = &sc->age_rdata;
+ bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
+ bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
+ sc->age_cdata.age_smb_block_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static int
+age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
+{
+ struct rx_desc *desc;
+ struct mbuf *m;
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
+ int nsegs;
+
+ AGE_LOCK_ASSERT(sc);
+
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ return (ENOBUFS);
+ m->m_len = m->m_pkthdr.len = MCLBYTES;
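+	/*
+	 * The two-byte ETHER_ALIGN adjustment below keeps the IP header
+	 * 32-bit aligned behind the 14-byte Ethernet header.
+	 */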
+ m_adj(m, ETHER_ALIGN);
+
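+	/*
+	 * Load the new mbuf with the spare dmamap first so the old
+	 * buffer stays mapped, and therefore intact, if the load
+	 * fails; the maps are swapped only on success.
+	 */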
+ if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag,
+ sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+ if (rxd->rx_m != NULL) {
+ bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap);
+ }
+ map = rxd->rx_dmamap;
+ rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
+ sc->age_cdata.age_rx_sparemap = map;
+ bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
+ BUS_DMASYNC_PREREAD);
+ rxd->rx_m = m;
+
+ desc = rxd->rx_desc;
+ desc->addr = htole64(segs[0].ds_addr);
+ desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) <<
+ AGE_RD_LEN_SHIFT);
+ return (0);
+}
+
+static void
+age_rxvlan(struct age_softc *sc)
+{
+ struct ifnet *ifp;
+ uint32_t reg;
+
+ AGE_LOCK_ASSERT(sc);
+
+ ifp = sc->age_ifp;
+ reg = CSR_READ_4(sc, AGE_MAC_CFG);
+ reg &= ~MAC_CFG_VLAN_TAG_STRIP;
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
+ reg |= MAC_CFG_VLAN_TAG_STRIP;
+ CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
+}
+
+static void
+age_rxfilter(struct age_softc *sc)
+{
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+ uint32_t crc;
+ uint32_t mchash[2];
+ uint32_t rxcfg;
+
+ AGE_LOCK_ASSERT(sc);
+
+ ifp = sc->age_ifp;
+
+ rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
+ rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
+ if ((ifp->if_flags & IFF_BROADCAST) != 0)
+ rxcfg |= MAC_CFG_BCAST;
+ if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
+ if ((ifp->if_flags & IFF_PROMISC) != 0)
+ rxcfg |= MAC_CFG_PROMISC;
+ if ((ifp->if_flags & IFF_ALLMULTI) != 0)
+ rxcfg |= MAC_CFG_ALLMULTI;
+ CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
+ CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
+ CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
+ return;
+ }
+
+ /* Program new filter. */
+ bzero(mchash, sizeof(mchash));
+
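+	/*
+	 * The 64-bit hash filter works as follows: the MSB of the
+	 * little-endian CRC of each multicast address selects MAR0 or
+	 * MAR1, and bits 30:26 select a bit within that 32-bit
+	 * register.
+	 */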
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &sc->age_ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
+ ifma->ifma_addr), ETHER_ADDR_LEN);
+ mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
+ CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
+ CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
+}
+
+static int
+sysctl_age_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct age_softc *sc;
+ struct age_stats *stats;
+ int error, result;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ if (result != 1)
+ return (error);
+
+ sc = (struct age_softc *)arg1;
+ stats = &sc->age_stat;
+ printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
+ printf("Transmit good frames : %ju\n",
+ (uintmax_t)stats->tx_frames);
+ printf("Transmit good broadcast frames : %ju\n",
+ (uintmax_t)stats->tx_bcast_frames);
+ printf("Transmit good multicast frames : %ju\n",
+ (uintmax_t)stats->tx_mcast_frames);
+ printf("Transmit pause control frames : %u\n",
+ stats->tx_pause_frames);
+ printf("Transmit control frames : %u\n",
+ stats->tx_control_frames);
+ printf("Transmit frames with excessive deferrals : %u\n",
+ stats->tx_excess_defer);
+ printf("Transmit deferrals : %u\n",
+ stats->tx_deferred);
+ printf("Transmit good octets : %ju\n",
+ (uintmax_t)stats->tx_bytes);
+ printf("Transmit good broadcast octets : %ju\n",
+ (uintmax_t)stats->tx_bcast_bytes);
+ printf("Transmit good multicast octets : %ju\n",
+ (uintmax_t)stats->tx_mcast_bytes);
+ printf("Transmit frames 64 bytes : %ju\n",
+ (uintmax_t)stats->tx_pkts_64);
+ printf("Transmit frames 65 to 127 bytes : %ju\n",
+ (uintmax_t)stats->tx_pkts_65_127);
+ printf("Transmit frames 128 to 255 bytes : %ju\n",
+ (uintmax_t)stats->tx_pkts_128_255);
+ printf("Transmit frames 256 to 511 bytes : %ju\n",
+ (uintmax_t)stats->tx_pkts_256_511);
+ printf("Transmit frames 512 to 1024 bytes : %ju\n",
+ (uintmax_t)stats->tx_pkts_512_1023);
+ printf("Transmit frames 1024 to 1518 bytes : %ju\n",
+ (uintmax_t)stats->tx_pkts_1024_1518);
+ printf("Transmit frames 1519 to MTU bytes : %ju\n",
+ (uintmax_t)stats->tx_pkts_1519_max);
+ printf("Transmit single collisions : %u\n",
+ stats->tx_single_colls);
+ printf("Transmit multiple collisions : %u\n",
+ stats->tx_multi_colls);
+ printf("Transmit late collisions : %u\n",
+ stats->tx_late_colls);
+ printf("Transmit abort due to excessive collisions : %u\n",
+ stats->tx_excess_colls);
+ printf("Transmit underruns due to FIFO underruns : %u\n",
+ stats->tx_underrun);
+ printf("Transmit descriptor write-back errors : %u\n",
+ stats->tx_desc_underrun);
+ printf("Transmit frames with length mismatched frame size : %u\n",
+ stats->tx_lenerrs);
+ printf("Transmit frames with truncated due to MTU size : %u\n",
+ stats->tx_lenerrs);
+
+ printf("Receive good frames : %ju\n",
+ (uintmax_t)stats->rx_frames);
+ printf("Receive good broadcast frames : %ju\n",
+ (uintmax_t)stats->rx_bcast_frames);
+ printf("Receive good multicast frames : %ju\n",
+ (uintmax_t)stats->rx_mcast_frames);
+ printf("Receive pause control frames : %u\n",
+ stats->rx_pause_frames);
+ printf("Receive control frames : %u\n",
+ stats->rx_control_frames);
+ printf("Receive CRC errors : %u\n",
+ stats->rx_crcerrs);
+ printf("Receive frames with length errors : %u\n",
+ stats->rx_lenerrs);
+ printf("Receive good octets : %ju\n",
+ (uintmax_t)stats->rx_bytes);
+ printf("Receive good broadcast octets : %ju\n",
+ (uintmax_t)stats->rx_bcast_bytes);
+ printf("Receive good multicast octets : %ju\n",
+ (uintmax_t)stats->rx_mcast_bytes);
+ printf("Receive frames too short : %u\n",
+ stats->rx_runts);
+ printf("Receive fragmented frames : %ju\n",
+ (uintmax_t)stats->rx_fragments);
+ printf("Receive frames 64 bytes : %ju\n",
+ (uintmax_t)stats->rx_pkts_64);
+ printf("Receive frames 65 to 127 bytes : %ju\n",
+ (uintmax_t)stats->rx_pkts_65_127);
+ printf("Receive frames 128 to 255 bytes : %ju\n",
+ (uintmax_t)stats->rx_pkts_128_255);
+ printf("Receive frames 256 to 511 bytes : %ju\n",
+ (uintmax_t)stats->rx_pkts_256_511);
+ printf("Receive frames 512 to 1024 bytes : %ju\n",
+ (uintmax_t)stats->rx_pkts_512_1023);
+ printf("Receive frames 1024 to 1518 bytes : %ju\n",
+ (uintmax_t)stats->rx_pkts_1024_1518);
+ printf("Receive frames 1519 to MTU bytes : %ju\n",
+ (uintmax_t)stats->rx_pkts_1519_max);
+ printf("Receive frames too long : %ju\n",
+	    (uintmax_t)stats->rx_pkts_truncated);
+ printf("Receive frames with FIFO overflow : %u\n",
+ stats->rx_fifo_oflows);
+ printf("Receive frames with return descriptor overflow : %u\n",
+ stats->rx_desc_oflows);
+ printf("Receive frames with alignment errors : %u\n",
+ stats->rx_alignerrs);
+ printf("Receive frames dropped due to address filtering : %ju\n",
+	    (uintmax_t)stats->rx_pkts_filtered);
+
+ return (error);
+}
+
+static int
+sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
+{
+ int error, value;
+
+ if (arg1 == NULL)
+ return (EINVAL);
+ value = *(int *)arg1;
+ error = sysctl_handle_int(oidp, &value, 0, req);
+ if (error || req->newptr == NULL)
+ return (error);
+ if (value < low || value > high)
+ return (EINVAL);
+ *(int *)arg1 = value;
+
+ return (0);
+}
+
+static int
+sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
+{
+ return (sysctl_int_range(oidp, arg1, arg2, req,
+ AGE_PROC_MIN, AGE_PROC_MAX));
+}
+
+static int
+sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
+{
+
+ return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
+ AGE_IM_TIMER_MAX));
+}
diff --git a/sys/dev/age/if_agereg.h b/sys/dev/age/if_agereg.h
new file mode 100644
index 0000000..091eff1
--- /dev/null
+++ b/sys/dev/age/if_agereg.h
@@ -0,0 +1,656 @@
+/*-
+ * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_AGEREG_H
+#define _IF_AGEREG_H
+
+/*
+ * Attansic Technology Corp. PCI vendor ID
+ */
+#define VENDORID_ATTANSIC 0x1969
+
+/*
+ * Attansic L1 device ID
+ */
+#define DEVICEID_ATTANSIC_L1 0x1048
+
+#define AGE_VPD_REG_CONF_START 0x0100
+#define AGE_VPD_REG_CONF_END 0x01FF
+#define AGE_VPD_REG_CONF_SIG 0x5A
+
+#define AGE_SPI_CTRL 0x200
+#define SPI_STAT_NOT_READY 0x00000001
+#define SPI_STAT_WR_ENB 0x00000002
+#define SPI_STAT_WRP_ENB 0x00000080
+#define SPI_INST_MASK 0x000000FF
+#define SPI_START 0x00000100
+#define SPI_INST_START 0x00000800
+#define SPI_VPD_ENB 0x00002000
+#define SPI_LOADER_START 0x00008000
+#define SPI_CS_HI_MASK 0x00030000
+#define SPI_CS_HOLD_MASK 0x000C0000
+#define SPI_CLK_LO_MASK 0x00300000
+#define SPI_CLK_HI_MASK 0x00C00000
+#define SPI_CS_SETUP_MASK 0x03000000
+#define SPI_EPROM_PG_MASK 0x0C000000
+#define SPI_INST_SHIFT 8
+#define SPI_CS_HI_SHIFT 16
+#define SPI_CS_HOLD_SHIFT 18
+#define SPI_CLK_LO_SHIFT 20
+#define SPI_CLK_HI_SHIFT 22
+#define SPI_CS_SETUP_SHIFT 24
+#define SPI_EPROM_PG_SHIFT 26
+#define SPI_WAIT_READY 0x10000000
+
+#define AGE_SPI_ADDR 0x204 /* 16bits */
+
+#define AGE_SPI_DATA 0x208
+
+#define AGE_SPI_CONFIG 0x20C
+
+#define AGE_SPI_OP_PROGRAM 0x210 /* 8bits */
+
+#define AGE_SPI_OP_SC_ERASE 0x211 /* 8bits */
+
+#define AGE_SPI_OP_CHIP_ERASE 0x212 /* 8bits */
+
+#define AGE_SPI_OP_RDID 0x213 /* 8bits */
+
+#define AGE_SPI_OP_WREN 0x214 /* 8bits */
+
+#define AGE_SPI_OP_RDSR 0x215 /* 8bits */
+
+#define AGE_SPI_OP_WRSR 0x216 /* 8bits */
+
+#define AGE_SPI_OP_READ 0x217 /* 8bits */
+
+#define AGE_TWSI_CTRL 0x218
+
+#define AGE_DEV_MISC_CTRL 0x21C
+
+#define AGE_MASTER_CFG 0x1400
+#define MASTER_RESET 0x00000001
+#define MASTER_MTIMER_ENB 0x00000002
+#define MASTER_ITIMER_ENB 0x00000004
+#define MASTER_MANUAL_INT_ENB 0x00000008
+#define MASTER_CHIP_REV_MASK 0x00FF0000
+#define MASTER_CHIP_ID_MASK 0xFF000000
+#define MASTER_CHIP_REV_SHIFT 16
+#define MASTER_CHIP_ID_SHIFT 24
+
+/* Number of usecs per hardware timer tick for L1. */
+#define AGE_TICK_USECS 2
+#define AGE_USECS(x) ((x) / AGE_TICK_USECS)
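+/*
+ * Example: with 2us ticks, AGE_USECS(100) yields 50, the value written
+ * to AGE_IM_TIMER for a 100us interrupt moderation interval.
+ */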
+
+#define AGE_MANUAL_TIMER 0x1404
+
+#define AGE_IM_TIMER 0x1408 /* 16bits */
+#define AGE_IM_TIMER_MIN 0
+#define AGE_IM_TIMER_MAX 130000 /* 130ms */
+#define AGE_IM_TIMER_DEFAULT 100
+
+#define AGE_GPHY_CTRL 0x140C /* 16bits */
+#define GPHY_CTRL_RST 0x0000
+#define GPHY_CTRL_CLR 0x0001
+
+#define AGE_INTR_CLR_TIMER 0x140E /* 16bits */
+
+#define AGE_IDLE_STATUS 0x1410
+#define IDLE_STATUS_RXMAC 0x00000001
+#define IDLE_STATUS_TXMAC 0x00000002
+#define IDLE_STATUS_RXQ 0x00000004
+#define IDLE_STATUS_TXQ 0x00000008
+#define IDLE_STATUS_DMARD 0x00000010
+#define IDLE_STATUS_DMAWR 0x00000020
+#define IDLE_STATUS_SMB 0x00000040
+#define IDLE_STATUS_CMB 0x00000080
+
+#define AGE_MDIO 0x1414
+#define MDIO_DATA_MASK 0x0000FFFF
+#define MDIO_REG_ADDR_MASK 0x001F0000
+#define MDIO_OP_READ 0x00200000
+#define MDIO_OP_WRITE 0x00000000
+#define MDIO_SUP_PREAMBLE 0x00400000
+#define MDIO_OP_EXECUTE 0x00800000
+#define MDIO_CLK_25_4 0x00000000
+#define MDIO_CLK_25_6 0x02000000
+#define MDIO_CLK_25_8 0x03000000
+#define MDIO_CLK_25_10 0x04000000
+#define MDIO_CLK_25_14 0x05000000
+#define MDIO_CLK_25_20 0x06000000
+#define MDIO_CLK_25_28 0x07000000
+#define MDIO_OP_BUSY 0x08000000
+#define MDIO_DATA_SHIFT 0
+#define MDIO_REG_ADDR_SHIFT 16
+
+#define MDIO_REG_ADDR(x) \
+ (((x) << MDIO_REG_ADDR_SHIFT) & MDIO_REG_ADDR_MASK)
+/* Default PHY address. */
+#define AGE_PHY_ADDR 0
+
+#define AGE_PHY_STATUS 0x1418
+
+#define AGE_BIST0 0x141C
+#define BIST0_ENB 0x00000001
+#define BIST0_SRAM_FAIL 0x00000002
+#define BIST0_FUSE_FLAG 0x00000004
+
+#define AGE_BIST1 0x1420
+#define BIST1_ENB 0x00000001
+#define BIST1_SRAM_FAIL 0x00000002
+#define BIST1_FUSE_FLAG 0x00000004
+
+#define AGE_MAC_CFG 0x1480
+#define MAC_CFG_TX_ENB 0x00000001
+#define MAC_CFG_RX_ENB 0x00000002
+#define MAC_CFG_TX_FC 0x00000004
+#define MAC_CFG_RX_FC 0x00000008
+#define MAC_CFG_LOOP 0x00000010
+#define MAC_CFG_FULL_DUPLEX 0x00000020
+#define MAC_CFG_TX_CRC_ENB 0x00000040
+#define MAC_CFG_TX_AUTO_PAD 0x00000080
+#define MAC_CFG_TX_LENCHK 0x00000100
+#define MAC_CFG_RX_JUMBO_ENB 0x00000200
+#define MAC_CFG_PREAMBLE_MASK 0x00003C00
+#define MAC_CFG_VLAN_TAG_STRIP 0x00004000
+#define MAC_CFG_PROMISC 0x00008000
+#define MAC_CFG_TX_PAUSE 0x00010000
+#define MAC_CFG_SCNT 0x00020000
+#define MAC_CFG_SYNC_RST_TX 0x00040000
+#define MAC_CFG_SPEED_MASK 0x00300000
+#define MAC_CFG_SPEED_10_100 0x00100000
+#define MAC_CFG_SPEED_1000 0x00200000
+#define MAC_CFG_DBG_TX_BACKOFF 0x00400000
+#define MAC_CFG_TX_JUMBO_ENB 0x00800000
+#define MAC_CFG_RXCSUM_ENB 0x01000000
+#define MAC_CFG_ALLMULTI 0x02000000
+#define MAC_CFG_BCAST 0x04000000
+#define MAC_CFG_DBG 0x08000000
+#define MAC_CFG_PREAMBLE_SHIFT 10
+#define MAC_CFG_PREAMBLE_DEFAULT 7
+
+#define AGE_IPG_IFG_CFG 0x1484
+#define IPG_IFG_IPGT_MASK 0x0000007F
+#define IPG_IFG_MIFG_MASK 0x0000FF00
+#define IPG_IFG_IPG1_MASK 0x007F0000
+#define IPG_IFG_IPG2_MASK 0x7F000000
+#define IPG_IFG_IPGT_SHIFT 0
+#define IPG_IFG_IPGT_DEFAULT 0x60
+#define IPG_IFG_MIFG_SHIFT 8
+#define IPG_IFG_MIFG_DEFAULT 0x50
+#define IPG_IFG_IPG1_SHIFT 16
+#define IPG_IFG_IPG1_DEFAULT 0x40
+#define IPG_IFG_IPG2_SHIFT 24
+#define IPG_IFG_IPG2_DEFAULT 0x60
+
+/* station address */
+#define AGE_PAR0 0x1488
+#define AGE_PAR1 0x148C
+
+/* 64bit multicast hash register. */
+#define AGE_MAR0 0x1490
+#define AGE_MAR1 0x1494
+
+/* half-duplex parameter configuration. */
+#define AGE_HDPX_CFG 0x1498
+#define HDPX_CFG_LCOL_MASK 0x000003FF
+#define HDPX_CFG_RETRY_MASK 0x0000F000
+#define HDPX_CFG_EXC_DEF_EN 0x00010000
+#define HDPX_CFG_NO_BACK_C 0x00020000
+#define HDPX_CFG_NO_BACK_P 0x00040000
+#define HDPX_CFG_ABEBE 0x00080000
+#define HDPX_CFG_ABEBT_MASK 0x00F00000
+#define HDPX_CFG_JAMIPG_MASK 0x0F000000
+#define HDPX_CFG_LCOL_SHIFT 0
+#define HDPX_CFG_LCOL_DEFAULT 0x37
+#define HDPX_CFG_RETRY_SHIFT 12
+#define HDPX_CFG_RETRY_DEFAULT 0x0F
+#define HDPX_CFG_ABEBT_SHIFT 20
+#define HDPX_CFG_ABEBT_DEFAULT 0x0A
+#define HDPX_CFG_JAMIPG_SHIFT 24
+#define HDPX_CFG_JAMIPG_DEFAULT 0x07
+
+#define AGE_FRAME_SIZE 0x149C
+
+#define AGE_WOL_CFG 0x14A0
+#define WOL_CFG_PATTERN 0x00000001
+#define WOL_CFG_PATTERN_ENB 0x00000002
+#define WOL_CFG_MAGIC 0x00000004
+#define WOL_CFG_MAGIC_ENB 0x00000008
+#define WOL_CFG_LINK_CHG 0x00000010
+#define WOL_CFG_LINK_CHG_ENB 0x00000020
+#define WOL_CFG_PATTERN_DET 0x00000100
+#define WOL_CFG_MAGIC_DET 0x00000200
+#define WOL_CFG_LINK_CHG_DET 0x00000400
+#define WOL_CFG_CLK_SWITCH_ENB 0x00008000
+#define WOL_CFG_PATTERN0 0x00010000
+#define WOL_CFG_PATTERN1 0x00020000
+#define WOL_CFG_PATTERN2 0x00040000
+#define WOL_CFG_PATTERN3 0x00080000
+#define WOL_CFG_PATTERN4 0x00100000
+#define WOL_CFG_PATTERN5 0x00200000
+#define WOL_CFG_PATTERN6 0x00400000
+
+/* WOL pattern length. */
+#define AGE_PATTERN_CFG0 0x14A4
+#define PATTERN_CFG_0_LEN_MASK 0x0000007F
+#define PATTERN_CFG_1_LEN_MASK 0x00007F00
+#define PATTERN_CFG_2_LEN_MASK 0x007F0000
+#define PATTERN_CFG_3_LEN_MASK 0x7F000000
+
+#define AGE_PATTERN_CFG1 0x14A8
+#define PATTERN_CFG_4_LEN_MASK 0x0000007F
+#define PATTERN_CFG_5_LEN_MASK 0x00007F00
+#define PATTERN_CFG_6_LEN_MASK 0x007F0000
+
+#define AGE_SRAM_RD_ADDR 0x1500
+
+#define AGE_SRAM_RD_LEN 0x1504
+
+#define AGE_SRAM_RRD_ADDR 0x1508
+
+#define AGE_SRAM_RRD_LEN 0x150C
+
+#define AGE_SRAM_TPD_ADDR 0x1510
+
+#define AGE_SRAM_TPD_LEN 0x1514
+
+#define AGE_SRAM_TRD_ADDR 0x1518
+
+#define AGE_SRAM_TRD_LEN 0x151C
+
+#define AGE_SRAM_RX_FIFO_ADDR 0x1520
+
+#define AGE_SRAM_RX_FIFO_LEN 0x1524
+
+#define AGE_SRAM_TX_FIFO_ADDR 0x1528
+
+#define AGE_SRAM_TX_FIFO_LEN 0x152C
+
+#define AGE_SRAM_TCPH_ADDR 0x1530
+#define SRAM_TCPH_ADDR_MASK 0x00000FFF
+#define SRAM_PATH_ADDR_MASK 0x0FFF0000
+#define SRAM_TCPH_ADDR_SHIFT 0
+#define SRAM_PATH_ADDR_SHIFT 16
+
+#define AGE_DMA_BLOCK 0x1534
+#define DMA_BLOCK_LOAD 0x00000001
+
+/*
+ * All descriptors and CMB/SMB share the same high address.
+ */
+#define AGE_DESC_ADDR_HI 0x1540
+
+#define AGE_DESC_RD_ADDR_LO 0x1544
+
+#define AGE_DESC_RRD_ADDR_LO 0x1548
+
+#define AGE_DESC_TPD_ADDR_LO 0x154C
+
+#define AGE_DESC_CMB_ADDR_LO 0x1550
+
+#define AGE_DESC_SMB_ADDR_LO 0x1554
+
+#define AGE_DESC_RRD_RD_CNT 0x1558
+#define DESC_RD_CNT_MASK 0x000007FF
+#define DESC_RRD_CNT_MASK 0x07FF0000
+#define DESC_RD_CNT_SHIFT 0
+#define DESC_RRD_CNT_SHIFT 16
+
+#define AGE_DESC_TPD_CNT 0x155C
+#define DESC_TPD_CNT_MASK 0x000003FF
+#define DESC_TPD_CNT_SHIFT 0
+
+#define AGE_TXQ_CFG 0x1580
+#define TXQ_CFG_TPD_BURST_MASK 0x0000001F
+#define TXQ_CFG_ENB 0x00000020
+#define TXQ_CFG_ENHANCED_MODE 0x00000040
+#define TXQ_CFG_TPD_FETCH_THRESH_MASK 0x00003F00
+#define TXQ_CFG_TX_FIFO_BURST_MASK 0xFFFF0000
+#define TXQ_CFG_TPD_BURST_SHIFT 0
+#define TXQ_CFG_TPD_BURST_DEFAULT 4
+#define TXQ_CFG_TPD_FETCH_THRESH_SHIFT 8
+#define TXQ_CFG_TPD_FETCH_DEFAULT 16
+#define TXQ_CFG_TX_FIFO_BURST_SHIFT 16
+#define TXQ_CFG_TX_FIFO_BURST_DEFAULT 256
+
+#define AGE_TX_JUMBO_TPD_TH_IPG 0x1584
+#define TX_JUMBO_TPD_TH_MASK 0x000007FF
+#define TX_JUMBO_TPD_IPG_MASK 0x001F0000
+#define TX_JUMBO_TPD_TH_SHIFT 0
+#define TX_JUMBO_TPD_IPG_SHIFT 16
+#define TX_JUMBO_TPD_IPG_DEFAULT 1
+
+#define AGE_RXQ_CFG 0x15A0
+#define RXQ_CFG_RD_BURST_MASK 0x000000FF
+#define RXQ_CFG_RRD_BURST_THRESH_MASK 0x0000FF00
+#define RXQ_CFG_RD_PREF_MIN_IPG_MASK 0x001F0000
+#define RXQ_CFG_CUT_THROUGH_ENB 0x40000000
+#define RXQ_CFG_ENB 0x80000000
+#define RXQ_CFG_RD_BURST_SHIFT 0
+#define RXQ_CFG_RD_BURST_DEFAULT 8
+#define RXQ_CFG_RRD_BURST_THRESH_SHIFT 8
+#define RXQ_CFG_RRD_BURST_THRESH_DEFAULT 8
+#define RXQ_CFG_RD_PREF_MIN_IPG_SHIFT 16
+#define RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT 1
+
+#define AGE_RXQ_JUMBO_CFG 0x15A4
+#define RXQ_JUMBO_CFG_SZ_THRESH_MASK 0x000007FF
+#define RXQ_JUMBO_CFG_LKAH_MASK 0x00007800
+#define RXQ_JUMBO_CFG_RRD_TIMER_MASK 0xFFFF0000
+#define RXQ_JUMBO_CFG_SZ_THRESH_SHIFT 0
+#define RXQ_JUMBO_CFG_LKAH_SHIFT 11
+#define RXQ_JUMBO_CFG_LKAH_DEFAULT 0x01
+#define RXQ_JUMBO_CFG_RRD_TIMER_SHIFT 16
+
+#define AGE_RXQ_FIFO_PAUSE_THRESH 0x15A8
+#define RXQ_FIFO_PAUSE_THRESH_LO_MASK 0x00000FFF
+#define RXQ_FIFO_PAUSE_THRESH_HI_MASK 0x0FFF0000
+#define RXQ_FIFO_PAUSE_THRESH_LO_SHIFT 0
+#define RXQ_FIFO_PAUSE_THRESH_HI_SHIFT 16
+
+#define AGE_RXQ_RRD_PAUSE_THRESH 0x15AC
+#define RXQ_RRD_PAUSE_THRESH_HI_MASK 0x00000FFF
+#define RXQ_RRD_PAUSE_THRESH_LO_MASK 0x0FFF0000
+#define RXQ_RRD_PAUSE_THRESH_HI_SHIFT 0
+#define RXQ_RRD_PAUSE_THRESH_LO_SHIFT 16
+
+#define AGE_DMA_CFG 0x15C0
+#define DMA_CFG_IN_ORDER 0x00000001
+#define DMA_CFG_ENH_ORDER 0x00000002
+#define DMA_CFG_OUT_ORDER 0x00000004
+#define DMA_CFG_RCB_64 0x00000000
+#define DMA_CFG_RCB_128 0x00000008
+#define DMA_CFG_RD_BURST_128 0x00000000
+#define DMA_CFG_RD_BURST_256 0x00000010
+#define DMA_CFG_RD_BURST_512 0x00000020
+#define DMA_CFG_RD_BURST_1024 0x00000030
+#define DMA_CFG_RD_BURST_2048 0x00000040
+#define DMA_CFG_RD_BURST_4096 0x00000050
+#define DMA_CFG_WR_BURST_128 0x00000000
+#define DMA_CFG_WR_BURST_256 0x00000080
+#define DMA_CFG_WR_BURST_512 0x00000100
+#define DMA_CFG_WR_BURST_1024 0x00000180
+#define DMA_CFG_WR_BURST_2048 0x00000200
+#define DMA_CFG_WR_BURST_4096 0x00000280
+#define DMA_CFG_RD_ENB 0x00000400
+#define DMA_CFG_WR_ENB 0x00000800
+#define DMA_CFG_RD_BURST_MASK 0x07
+#define DMA_CFG_RD_BURST_SHIFT 4
+#define DMA_CFG_WR_BURST_MASK 0x07
+#define DMA_CFG_WR_BURST_SHIFT 7
+
+#define AGE_CSMB_CTRL 0x15D0
+#define CSMB_CTRL_CMB_KICK 0x00000001
+#define CSMB_CTRL_SMB_KICK 0x00000002
+#define CSMB_CTRL_CMB_ENB 0x00000004
+#define CSMB_CTRL_SMB_ENB 0x00000008
+
+/* CMB DMA Write Threshold Register */
+#define AGE_CMB_WR_THRESH 0x15D4
+#define CMB_WR_THRESH_RRD_MASK 0x000007FF
+#define CMB_WR_THRESH_TPD_MASK 0x07FF0000
+#define CMB_WR_THRESH_RRD_SHIFT 0
+#define CMB_WR_THRESH_RRD_DEFAULT 4
+#define CMB_WR_THRESH_TPD_SHIFT 16
+#define CMB_WR_THRESH_TPD_DEFAULT 4
+
+/* RX/TX count-down timer to trigger CMB-write. */
+#define AGE_CMB_WR_TIMER 0x15D8
+#define CMB_WR_TIMER_RX_MASK 0x0000FFFF
+#define CMB_WR_TIMER_TX_MASK 0xFFFF0000
+#define CMB_WR_TIMER_RX_SHIFT 0
+#define CMB_WR_TIMER_TX_SHIFT 16
+
+/* Number of packets received since the last CMB write */
+#define AGE_CMB_RX_PKT_CNT 0x15DC
+
+/* Number of packets transmitted since the last CMB write */
+#define AGE_CMB_TX_PKT_CNT 0x15E0
+
+/* SMB auto DMA timer register */
+#define AGE_SMB_TIMER 0x15E4
+
+#define AGE_MBOX 0x15F0
+#define MBOX_RD_PROD_IDX_MASK 0x000007FF
+#define MBOX_RRD_CONS_IDX_MASK 0x003FF800
+#define MBOX_TD_PROD_IDX_MASK 0xFFC00000
+#define MBOX_RD_PROD_IDX_SHIFT 0
+#define MBOX_RRD_CONS_IDX_SHIFT 11
+#define MBOX_TD_PROD_IDX_SHIFT 22
+
+#define AGE_INTR_STATUS 0x1600
+#define INTR_SMB 0x00000001
+#define INTR_MOD_TIMER 0x00000002
+#define INTR_MANUAL_TIMER 0x00000004
+#define INTR_RX_FIFO_OFLOW 0x00000008
+#define INTR_RD_UNDERRUN 0x00000010
+#define INTR_RRD_OFLOW 0x00000020
+#define INTR_TX_FIFO_UNDERRUN 0x00000040
+#define INTR_LINK_CHG 0x00000080
+#define INTR_HOST_RD_UNDERRUN 0x00000100
+#define INTR_HOST_RRD_OFLOW 0x00000200
+#define INTR_DMA_RD_TO_RST 0x00000400
+#define INTR_DMA_WR_TO_RST 0x00000800
+#define INTR_GPHY 0x00001000
+#define INTR_RX_PKT 0x00010000
+#define INTR_TX_PKT 0x00020000
+#define INTR_TX_DMA 0x00040000
+#define INTR_RX_DMA 0x00080000
+#define INTR_CMB_RX 0x00100000
+#define INTR_CMB_TX 0x00200000
+#define INTR_MAC_RX 0x00400000
+#define INTR_MAC_TX 0x00800000
+#define INTR_UNDERRUN 0x01000000
+#define INTR_FRAME_ERROR 0x02000000
+#define INTR_FRAME_OK 0x04000000
+#define INTR_CSUM_ERROR 0x08000000
+#define INTR_PHY_LINK_DOWN 0x10000000
+#define INTR_DIS_SMB 0x20000000
+#define INTR_DIS_DMA 0x40000000
+#define INTR_DIS_INT 0x80000000
+
+/* Interrupt Mask Register */
+#define AGE_INTR_MASK 0x1604
+
+#define AGE_INTRS \
+ (INTR_SMB | INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | \
+ INTR_CMB_TX | INTR_CMB_RX)
+
+/* Statistics counters collected by the MAC. */
+struct smb {
+ /* Rx stats. */
+ uint32_t rx_frames;
+ uint32_t rx_bcast_frames;
+ uint32_t rx_mcast_frames;
+ uint32_t rx_pause_frames;
+ uint32_t rx_control_frames;
+ uint32_t rx_crcerrs;
+ uint32_t rx_lenerrs;
+ uint32_t rx_bytes;
+ uint32_t rx_runts;
+ uint32_t rx_fragments;
+ uint32_t rx_pkts_64;
+ uint32_t rx_pkts_65_127;
+ uint32_t rx_pkts_128_255;
+ uint32_t rx_pkts_256_511;
+ uint32_t rx_pkts_512_1023;
+ uint32_t rx_pkts_1024_1518;
+ uint32_t rx_pkts_1519_max;
+ uint32_t rx_pkts_truncated;
+ uint32_t rx_fifo_oflows;
+ uint32_t rx_desc_oflows;
+ uint32_t rx_alignerrs;
+ uint32_t rx_bcast_bytes;
+ uint32_t rx_mcast_bytes;
+ uint32_t rx_pkts_filtered;
+ /* Tx stats. */
+ uint32_t tx_frames;
+ uint32_t tx_bcast_frames;
+ uint32_t tx_mcast_frames;
+ uint32_t tx_pause_frames;
+ uint32_t tx_excess_defer;
+ uint32_t tx_control_frames;
+ uint32_t tx_deferred;
+ uint32_t tx_bytes;
+ uint32_t tx_pkts_64;
+ uint32_t tx_pkts_65_127;
+ uint32_t tx_pkts_128_255;
+ uint32_t tx_pkts_256_511;
+ uint32_t tx_pkts_512_1023;
+ uint32_t tx_pkts_1024_1518;
+ uint32_t tx_pkts_1519_max;
+ uint32_t tx_single_colls;
+ uint32_t tx_multi_colls;
+ uint32_t tx_late_colls;
+ uint32_t tx_excess_colls;
+ uint32_t tx_underrun;
+ uint32_t tx_desc_underrun;
+ uint32_t tx_lenerrs;
+ uint32_t tx_pkts_truncated;
+ uint32_t tx_bcast_bytes;
+ uint32_t tx_mcast_bytes;
+ uint32_t updated;
+} __packed;
+
+/* Coalescing message block */
+struct cmb {
+ uint32_t intr_status;
+ uint32_t rprod_cons;
+#define RRD_PROD_MASK 0x0000FFFF
+#define RD_CONS_MASK 0xFFFF0000
+#define RRD_PROD_SHIFT 0
+#define RD_CONS_SHIFT 16
+ uint32_t tpd_cons;
+#define CMB_UPDATED 0x00000001
+#define TPD_CONS_MASK 0xFFFF0000
+#define TPD_CONS_SHIFT 16
+} __packed;
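+/*
+ * Illustrative sketch: a consumer of the CMB would typically recover
+ * the hardware ring indices along these lines (the CMB fields are
+ * little-endian):
+ *
+ *	rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
+ *	    RRD_PROD_SHIFT;
+ *	tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
+ *	    TPD_CONS_SHIFT;
+ */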
+
+/* Rx return descriptor */
+struct rx_rdesc {
+ uint32_t index;
+#define AGE_RRD_NSEGS_MASK 0x000000FF
+#define AGE_RRD_CONS_MASK 0xFFFF0000
+#define AGE_RRD_NSEGS_SHIFT 0
+#define AGE_RRD_CONS_SHIFT 16
+ uint32_t len;
+#define AGE_RRD_CSUM_MASK 0x0000FFFF
+#define AGE_RRD_LEN_MASK 0xFFFF0000
+#define AGE_RRD_CSUM_SHIFT 0
+#define AGE_RRD_LEN_SHIFT 16
+ uint32_t flags;
+#define AGE_RRD_ETHERNET 0x00000080
+#define AGE_RRD_VLAN 0x00000100
+#define AGE_RRD_ERROR 0x00000200
+#define AGE_RRD_IPV4 0x00000400
+#define AGE_RRD_UDP 0x00000800
+#define AGE_RRD_TCP 0x00001000
+#define AGE_RRD_BCAST 0x00002000
+#define AGE_RRD_MCAST 0x00004000
+#define AGE_RRD_PAUSE 0x00008000
+#define AGE_RRD_CRC 0x00010000
+#define AGE_RRD_CODE 0x00020000
+#define AGE_RRD_DRIBBLE 0x00040000
+#define AGE_RRD_RUNT 0x00080000
+#define AGE_RRD_OFLOW 0x00100000
+#define AGE_RRD_TRUNC 0x00200000
+#define AGE_RRD_IPCSUM_NOK 0x00400000
+#define AGE_RRD_TCP_UDPCSUM_NOK 0x00800000
+#define AGE_RRD_LENGTH_NOK 0x01000000
+#define AGE_RRD_DES_ADDR_FILTERED 0x02000000
+ uint32_t vtags;
+#define AGE_RRD_VLAN_MASK 0xFFFF0000
+#define AGE_RRD_VLAN_SHIFT 16
+} __packed;
+
+#define AGE_RX_NSEGS(x) \
+ (((x) & AGE_RRD_NSEGS_MASK) >> AGE_RRD_NSEGS_SHIFT)
+#define AGE_RX_CONS(x) \
+ (((x) & AGE_RRD_CONS_MASK) >> AGE_RRD_CONS_SHIFT)
+#define AGE_RX_CSUM(x) \
+ (((x) & AGE_RRD_CSUM_MASK) >> AGE_RRD_CSUM_SHIFT)
+#define AGE_RX_BYTES(x) \
+ (((x) & AGE_RRD_LEN_MASK) >> AGE_RRD_LEN_SHIFT)
+#define AGE_RX_VLAN(x) \
+ (((x) & AGE_RRD_VLAN_MASK) >> AGE_RRD_VLAN_SHIFT)
+#define AGE_RX_VLAN_TAG(x) \
+ (((x) >> 4) | (((x) & 7) << 13) | (((x) & 8) << 9))
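+/*
+ * The hardware stores the 802.1Q tag in a swapped bit order;
+ * AGE_RX_VLAN_TAG() converts it back to the host-order TCI and is the
+ * inverse of AGE_TX_VLAN_TAG() below. For example, a TCI of 0x2005
+ * (priority 1, VLAN 5) is stored as 0x0051 and recovered intact.
+ */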
+
+/* Rx descriptor. */
+struct rx_desc {
+ uint64_t addr;
+ uint32_t len;
+#define AGE_RD_LEN_MASK 0x0000FFFF
+#define AGE_CONS_UPD_REQ_MASK 0xFFFF0000
+#define AGE_RD_LEN_SHIFT 0
+#define AGE_CONS_UPD_REQ_SHIFT 16
+} __packed;
+
+/* Tx descriptor. */
+struct tx_desc {
+ uint64_t addr;
+ uint32_t len;
+#define AGE_TD_VLAN_MASK 0xFFFF0000
+#define AGE_TD_PKT_INT 0x00008000
+#define AGE_TD_DMA_INT 0x00004000
+#define AGE_TD_BUFLEN_MASK 0x00003FFF
+#define AGE_TD_VLAN_SHIFT 16
+#define AGE_TX_VLAN_TAG(x) \
+ (((x) << 4) | ((x) >> 13) | (((x) >> 9) & 8))
+#define AGE_TD_BUFLEN_SHIFT 0
+#define AGE_TX_BYTES(x) \
+ (((x) << AGE_TD_BUFLEN_SHIFT) & AGE_TD_BUFLEN_MASK)
+ uint32_t flags;
+#define AGE_TD_TSO_MSS 0xFFF80000
+#define AGE_TD_TSO_HDR 0x00040000
+#define AGE_TD_TSO_TCPHDR_LEN 0x0003C000
+#define AGE_TD_IPHDR_LEN 0x00003C00
+#define AGE_TD_LLC_SNAP 0x00000200
+#define AGE_TD_VLAN_TAGGED 0x00000100
+#define AGE_TD_UDPCSUM 0x00000080
+#define AGE_TD_TCPCSUM 0x00000040
+#define AGE_TD_IPCSUM 0x00000020
+#define AGE_TD_TSO_IPV4 0x00000010
+#define AGE_TD_TSO_IPV6 0x00000012
+#define AGE_TD_CSUM 0x00000008
+#define AGE_TD_INSERT_VLAN_TAG 0x00000004
+#define AGE_TD_COALESCE 0x00000002
+#define AGE_TD_EOP 0x00000001
+
+#define AGE_TD_CSUM_PLOADOFFSET 0x00FF0000
+#define AGE_TD_CSUM_XSUMOFFSET 0xFF000000
+#define AGE_TD_CSUM_XSUMOFFSET_SHIFT 24
+#define AGE_TD_CSUM_PLOADOFFSET_SHIFT 16
+#define AGE_TD_TSO_MSS_SHIFT 19
+#define AGE_TD_TSO_TCPHDR_LEN_SHIFT 14
+#define AGE_TD_IPHDR_LEN_SHIFT 10
+} __packed;
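+/*
+ * Illustrative sketch only: a single-segment frame requesting TCP/IP
+ * checksum offload might be encoded as
+ *
+ *	desc->addr = htole64(paddr);
+ *	desc->len = htole32(AGE_TX_BYTES(m->m_len));
+ *	desc->flags = htole32(AGE_TD_IPCSUM | AGE_TD_TCPCSUM | AGE_TD_EOP);
+ *
+ * The header-length and checksum-offset fields are omitted here; see
+ * if_age.c for the authoritative TPD construction.
+ */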
+
+#endif /* _IF_AGEREG_H */
diff --git a/sys/dev/age/if_agevar.h b/sys/dev/age/if_agevar.h
new file mode 100644
index 0000000..fe50eef
--- /dev/null
+++ b/sys/dev/age/if_agevar.h
@@ -0,0 +1,272 @@
+/*-
+ * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_AGEVAR_H
+#define _IF_AGEVAR_H
+
+#define AGE_TX_RING_CNT 256
+#define AGE_RX_RING_CNT 256
+#define AGE_RR_RING_CNT (AGE_TX_RING_CNT + AGE_RX_RING_CNT)
+/* The following ring alignments are just guesses. */
+#define AGE_TX_RING_ALIGN 16
+#define AGE_RX_RING_ALIGN 16
+#define AGE_RR_RING_ALIGN 16
+#define AGE_CMB_ALIGN 16
+#define AGE_SMB_ALIGN 16
+
+#define AGE_TSO_MAXSEGSIZE 4096
+#define AGE_TSO_MAXSIZE (65535 + sizeof(struct ether_vlan_header))
+#define AGE_MAXTXSEGS 32
+
+#define AGE_ADDR_LO(x) ((uint64_t) (x) & 0xFFFFFFFF)
+#define AGE_ADDR_HI(x) ((uint64_t) (x) >> 32)
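+/* e.g. CSR_WRITE_4(sc, reg_hi, AGE_ADDR_HI(paddr)) programs the upper
+ * half of a 64-bit DMA address (reg_hi is a placeholder name). */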
+
+#define AGE_MSI_MESSAGES 1
+#define AGE_MSIX_MESSAGES 1
+
+/* TODO: Determine the real maximum jumbo frame size. */
+#define AGE_JUMBO_FRAMELEN 10240
+#define AGE_JUMBO_MTU \
+ (AGE_JUMBO_FRAMELEN - ETHER_VLAN_ENCAP_LEN - \
+ ETHER_HDR_LEN - ETHER_CRC_LEN)
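+/* With the constants above: 10240 - 4 - 14 - 4 = 10218 bytes. */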
+
+#define AGE_DESC_INC(x, y) ((x) = ((x) + 1) % (y))
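+/* e.g. AGE_DESC_INC(prod, AGE_TX_RING_CNT) advances prod one slot,
+ * wrapping to 0 at the end of the ring. */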
+
+#define AGE_PROC_MIN 30
+#define AGE_PROC_MAX (AGE_RX_RING_CNT - 1)
+#define AGE_PROC_DEFAULT (AGE_RX_RING_CNT / 2)
+
+struct age_txdesc {
+ struct mbuf *tx_m;
+ bus_dmamap_t tx_dmamap;
+ struct tx_desc *tx_desc;
+};
+
+STAILQ_HEAD(age_txdq, age_txdesc);
+
+struct age_rxdesc {
+ struct mbuf *rx_m;
+ bus_dmamap_t rx_dmamap;
+ struct rx_desc *rx_desc;
+};
+
+struct age_chain_data {
+ bus_dma_tag_t age_parent_tag;
+ bus_dma_tag_t age_buffer_tag;
+ bus_dma_tag_t age_tx_tag;
+ struct age_txdesc age_txdesc[AGE_TX_RING_CNT];
+ bus_dma_tag_t age_rx_tag;
+ struct age_rxdesc age_rxdesc[AGE_RX_RING_CNT];
+ bus_dma_tag_t age_tx_ring_tag;
+ bus_dmamap_t age_tx_ring_map;
+ bus_dma_tag_t age_rx_ring_tag;
+ bus_dmamap_t age_rx_ring_map;
+ bus_dmamap_t age_rx_sparemap;
+ bus_dma_tag_t age_rr_ring_tag;
+ bus_dmamap_t age_rr_ring_map;
+ bus_dma_tag_t age_cmb_block_tag;
+ bus_dmamap_t age_cmb_block_map;
+ bus_dma_tag_t age_smb_block_tag;
+ bus_dmamap_t age_smb_block_map;
+
+ int age_tx_prod;
+ int age_tx_cons;
+ int age_tx_cnt;
+ int age_rx_cons;
+ int age_rr_cons;
+ int age_rxlen;
+
+ struct mbuf *age_rxhead;
+ struct mbuf *age_rxtail;
+ struct mbuf *age_rxprev_tail;
+};
+
+struct age_ring_data {
+ struct tx_desc *age_tx_ring;
+ bus_addr_t age_tx_ring_paddr;
+ struct rx_desc *age_rx_ring;
+ bus_addr_t age_rx_ring_paddr;
+ struct rx_rdesc *age_rr_ring;
+ bus_addr_t age_rr_ring_paddr;
+ struct cmb *age_cmb_block;
+ bus_addr_t age_cmb_block_paddr;
+ struct smb *age_smb_block;
+ bus_addr_t age_smb_block_paddr;
+};
+
+#define AGE_TX_RING_SZ \
+ (sizeof(struct tx_desc) * AGE_TX_RING_CNT)
+#define AGE_RX_RING_SZ \
+ (sizeof(struct rx_desc) * AGE_RX_RING_CNT)
+#define AGE_RR_RING_SZ \
+ (sizeof(struct rx_rdesc) * AGE_RR_RING_CNT)
+#define AGE_CMB_BLOCK_SZ sizeof(struct cmb)
+#define AGE_SMB_BLOCK_SZ sizeof(struct smb)
+
+struct age_stats {
+ /* Rx stats. */
+ uint64_t rx_frames;
+ uint64_t rx_bcast_frames;
+ uint64_t rx_mcast_frames;
+ uint32_t rx_pause_frames;
+ uint32_t rx_control_frames;
+ uint32_t rx_crcerrs;
+ uint32_t rx_lenerrs;
+ uint64_t rx_bytes;
+ uint32_t rx_runts;
+ uint64_t rx_fragments;
+ uint64_t rx_pkts_64;
+ uint64_t rx_pkts_65_127;
+ uint64_t rx_pkts_128_255;
+ uint64_t rx_pkts_256_511;
+ uint64_t rx_pkts_512_1023;
+ uint64_t rx_pkts_1024_1518;
+ uint64_t rx_pkts_1519_max;
+ uint64_t rx_pkts_truncated;
+ uint32_t rx_fifo_oflows;
+ uint32_t rx_desc_oflows;
+ uint32_t rx_alignerrs;
+ uint64_t rx_bcast_bytes;
+ uint64_t rx_mcast_bytes;
+ uint64_t rx_pkts_filtered;
+ /* Tx stats. */
+ uint64_t tx_frames;
+ uint64_t tx_bcast_frames;
+ uint64_t tx_mcast_frames;
+ uint32_t tx_pause_frames;
+ uint32_t tx_excess_defer;
+ uint32_t tx_control_frames;
+ uint32_t tx_deferred;
+ uint64_t tx_bytes;
+ uint64_t tx_pkts_64;
+ uint64_t tx_pkts_65_127;
+ uint64_t tx_pkts_128_255;
+ uint64_t tx_pkts_256_511;
+ uint64_t tx_pkts_512_1023;
+ uint64_t tx_pkts_1024_1518;
+ uint64_t tx_pkts_1519_max;
+ uint32_t tx_single_colls;
+ uint32_t tx_multi_colls;
+ uint32_t tx_late_colls;
+ uint32_t tx_excess_colls;
+ uint32_t tx_underrun;
+ uint32_t tx_desc_underrun;
+ uint32_t tx_lenerrs;
+ uint32_t tx_pkts_truncated;
+ uint64_t tx_bcast_bytes;
+ uint64_t tx_mcast_bytes;
+};
+
+/*
+ * Software state per device.
+ */
+struct age_softc {
+ struct ifnet *age_ifp;
+ device_t age_dev;
+ device_t age_miibus;
+ struct resource *age_res[1];
+ struct resource_spec *age_res_spec;
+ struct resource *age_irq[AGE_MSI_MESSAGES];
+ struct resource_spec *age_irq_spec;
+ void *age_intrhand[AGE_MSI_MESSAGES];
+ int age_rev;
+ int age_chip_rev;
+ int age_phyaddr;
+ uint8_t age_eaddr[ETHER_ADDR_LEN];
+ uint32_t age_dma_rd_burst;
+ uint32_t age_dma_wr_burst;
+ int age_flags;
+#define AGE_FLAG_PCIE 0x0001
+#define AGE_FLAG_PCIX 0x0002
+#define AGE_FLAG_MSI 0x0004
+#define AGE_FLAG_MSIX 0x0008
+#define AGE_FLAG_PMCAP 0x0010
+#define AGE_FLAG_DETACH 0x4000
+#define AGE_FLAG_LINK 0x8000
+
+ struct callout age_tick_ch;
+ struct age_stats age_stat;
+ struct age_chain_data age_cdata;
+ struct age_ring_data age_rdata;
+ int age_if_flags;
+ int age_watchdog_timer;
+ int age_process_limit;
+ int age_int_mod;
+ int age_max_frame_size;
+ int age_morework;
+ int age_rr_prod;
+ int age_tpd_cons;
+
+ struct task age_int_task;
+ struct task age_tx_task;
+ struct task age_link_task;
+ struct taskqueue *age_tq;
+ struct mtx age_mtx;
+};
+
+/* Register access macros. */
+#define CSR_WRITE_4(_sc, reg, val) \
+ bus_write_4((_sc)->age_res[0], (reg), (val))
+#define CSR_WRITE_2(_sc, reg, val) \
+ bus_write_2((_sc)->age_res[0], (reg), (val))
+#define CSR_READ_2(_sc, reg) \
+ bus_read_2((_sc)->age_res[0], (reg))
+#define CSR_READ_4(_sc, reg) \
+ bus_read_4((_sc)->age_res[0], (reg))
+
+#define AGE_LOCK(_sc) mtx_lock(&(_sc)->age_mtx)
+#define AGE_UNLOCK(_sc) mtx_unlock(&(_sc)->age_mtx)
+#define AGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->age_mtx, MA_OWNED)
+
+#define AGE_COMMIT_MBOX(_sc) \
+do { \
+ CSR_WRITE_4(_sc, AGE_MBOX, \
+ (((_sc)->age_cdata.age_rx_cons << MBOX_RD_PROD_IDX_SHIFT) & \
+ MBOX_RD_PROD_IDX_MASK) | \
+ (((_sc)->age_cdata.age_rr_cons << \
+ MBOX_RRD_CONS_IDX_SHIFT) & MBOX_RRD_CONS_IDX_MASK) | \
+ (((_sc)->age_cdata.age_tx_prod << MBOX_TD_PROD_IDX_SHIFT) & \
+ MBOX_TD_PROD_IDX_MASK)); \
+} while (0)
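+/*
+ * AGE_COMMIT_MBOX() folds the Rx producer, RRD consumer and Tx
+ * producer indices into a single write of the AGE_MBOX register;
+ * callers should hold the driver lock so the index snapshot is
+ * consistent (an assumption; see the call sites in if_age.c).
+ */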
+
+#define AGE_RXCHAIN_RESET(_sc) \
+do { \
+ (_sc)->age_cdata.age_rxhead = NULL; \
+ (_sc)->age_cdata.age_rxtail = NULL; \
+ (_sc)->age_cdata.age_rxprev_tail = NULL; \
+ (_sc)->age_cdata.age_rxlen = 0; \
+} while (0)
+
+#define AGE_TX_TIMEOUT 5
+#define AGE_RESET_TIMEOUT 100
+#define AGE_TIMEOUT 1000
+#define AGE_PHY_TIMEOUT 1000
+
+#endif /* _IF_AGEVAR_H */