Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/ahci/ahci.c                          |    8
-rw-r--r--  sys/dev/ahci/ahci.h                          |    2
-rw-r--r--  sys/dev/ahci/ahci_mv_fdt.c                   |  156
-rw-r--r--  sys/dev/cesa/cesa.c                          |  144
-rw-r--r--  sys/dev/cesa/cesa.h                          |   16
-rw-r--r--  sys/dev/drm2/drm_pciids.h                    |    1
-rw-r--r--  sys/dev/drm2/i915/i915_drv.c                 |    1
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch.c      |   90
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_8327.c |   30
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_8327.h |    5
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitchvar.h   |   10
-rw-r--r--  sys/dev/etherswitch/e6000sw/e6000sw.c        | 1990
-rw-r--r--  sys/dev/etherswitch/e6000sw/e6000swreg.h     |  322
-rw-r--r--  sys/dev/etherswitch/etherswitch.c            |   95
-rw-r--r--  sys/dev/etherswitch/etherswitch.h            |   51
-rw-r--r--  sys/dev/etherswitch/etherswitch_if.m         |   97
-rw-r--r--  sys/dev/fdt/fdt_common.c                     |   15
-rw-r--r--  sys/dev/fdt/fdt_common.h                     |    6
-rw-r--r--  sys/dev/flash/mx25l.c                        |    3
-rw-r--r--  sys/dev/gpio/gpio_if.m                       |   77
-rw-r--r--  sys/dev/gpio/gpioc.c                         |   47
-rw-r--r--  sys/dev/hyperv/netvsc/if_hn.c                |    9
-rw-r--r--  sys/dev/iicbus/is31fl319x.c                  |  676
-rw-r--r--  sys/dev/iicbus/is31fl319xreg.h               |   63
-rw-r--r--  sys/dev/iicbus/ofw_iicbus.c                  |    2
-rw-r--r--  sys/dev/iicbus/pca9552.c                     |  414
-rw-r--r--  sys/dev/iicbus/pca9552reg.h                  |   42
-rw-r--r--  sys/dev/iicbus/twsi/mv_twsi.c                |   14
-rw-r--r--  sys/dev/iicbus/twsi/twsi.c                   |   12
-rw-r--r--  sys/dev/ismt/ismt.c                          |    6
-rw-r--r--  sys/dev/ixgbe/if_ix.c                        |    2
-rw-r--r--  sys/dev/mii/e1000phy.c                       |    3
-rw-r--r--  sys/dev/mii/micphy.c                         |    7
-rw-r--r--  sys/dev/neta/if_mvneta.c                     | 3608
-rw-r--r--  sys/dev/neta/if_mvneta_fdt.c                 |  225
-rw-r--r--  sys/dev/neta/if_mvnetareg.h                  |  926
-rw-r--r--  sys/dev/neta/if_mvnetavar.h                  |  327
-rw-r--r--  sys/dev/oce/oce_mbox.c                       |    2
-rw-r--r--  sys/dev/ofw/ofw_bus_subr.c                   |   19
-rw-r--r--  sys/dev/ofw/ofw_bus_subr.h                   |    1
-rw-r--r--  sys/dev/ofw/openfirmio.c                     |   10
-rw-r--r--  sys/dev/ofw/openpromio.c                     |    8
-rw-r--r--  sys/dev/sdhci/sdhci_fdt.c                    |   81
-rw-r--r--  sys/dev/spibus/ofw_spibus.c                  |   20
-rw-r--r--  sys/dev/virtio/network/if_vtnetvar.h         |    4
-rw-r--r--  sys/dev/vmware/vmxnet3/if_vmxvar.h           |    8
46 files changed, 9028 insertions, 627 deletions
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index 6ef9c5e..778d034 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -1609,6 +1609,14 @@ ahci_execute_transaction(struct ahci_slot *slot)
}
/*
+ * Some Marvell controllers require additional time
+ * after soft reset to work properly. Delay for 50ms
+ * after soft reset.
+ */
+ if (ch->quirks & AHCI_Q_MRVL_SR_DEL)
+ DELAY(50000);
+
+ /*
* Marvell HBAs with non-RAID firmware do not wait for
* readiness after soft reset, so we have to wait here.
* Marvell RAIDs do not have this problem, but instead
diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h
index 07e78a5..8639bcf 100644
--- a/sys/dev/ahci/ahci.h
+++ b/sys/dev/ahci/ahci.h
@@ -598,6 +598,7 @@ enum ahci_err_type {
#define AHCI_Q_FORCE_PI 0x00040000
#define AHCI_Q_RESTORE_CAP 0x00080000
#define AHCI_Q_NOMSIX 0x00100000
+#define AHCI_Q_MRVL_SR_DEL 0x00200000
#define AHCI_Q_NOCCS 0x00400000
#define AHCI_Q_NOAUX 0x00800000
@@ -624,6 +625,7 @@ enum ahci_err_type {
"\023FORCE_PI" \
"\024RESTORE_CAP" \
"\025NOMSIX" \
+ "\026MRVL_SR_DEL" \
"\027NOCCS" \
"\030NOAUX"
diff --git a/sys/dev/ahci/ahci_mv_fdt.c b/sys/dev/ahci/ahci_mv_fdt.c
new file mode 100644
index 0000000..cf5b853
--- /dev/null
+++ b/sys/dev/ahci/ahci_mv_fdt.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017 Semihalf.
+ * Copyright (c) 2017 Stormshield.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/stdint.h>
+#include <sys/stddef.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/rman.h>
+#include <sys/unistd.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/ahci/ahci.h>
+
+#define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0
+#define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4
+
+#define AHCI_HC_DEVSTR "Marvell AHCI Controller"
+#define AHCI_HC_VENDOR "Marvell"
+
+static device_attach_t ahci_mv_fdt_attach;
+
+static struct ofw_compat_data compatible_data[] = {
+ {"marvell,armada-380-ahci", true},
+ {NULL, false}
+};
+
+static void
+ahci_mv_regret_config(struct ahci_controller *ctlr)
+{
+
+ /*
+ * Enable the regret bit to allow the SATA unit to regret
+ * a request that didn't receive an acknowledge,
+ * avoiding a deadlock.
+ */
+ ATA_OUTL(ctlr->r_mem, AHCI_VENDOR_SPECIFIC_0_ADDR, 0x4);
+ ATA_OUTL(ctlr->r_mem, AHCI_VENDOR_SPECIFIC_0_DATA, 0x80);
+}
+
+static int
+ahci_mv_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_search_compatible(dev, compatible_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, AHCI_HC_DEVSTR);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+ahci_mv_fdt_attach(device_t dev)
+{
+ struct ahci_controller *ctlr;
+ int rc;
+
+ ctlr = device_get_softc(dev);
+ ctlr->dev = dev;
+ ctlr->r_rid = 0;
+ ctlr->quirks = AHCI_Q_2CH;
+ ctlr->numirqs = 1;
+
+ if (ofw_bus_is_compatible(dev, "marvell,armada-380-ahci"))
+ ctlr->quirks |= AHCI_Q_MRVL_SR_DEL;
+
+ /* Allocate memory for controller */
+ ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &ctlr->r_rid, RF_ACTIVE | RF_SHAREABLE);
+ if (ctlr->r_mem == NULL) {
+ device_printf(dev, "Failed to alloc memory for controller\n");
+ return (ENOMEM);
+ }
+
+ /* Reset controller */
+ rc = ahci_ctlr_reset(dev);
+ if (rc != 0) {
+ device_printf(dev, "Failed to reset controller\n");
+ bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
+ return (ENXIO);
+ }
+
+ ahci_mv_regret_config(ctlr);
+
+ rc = ahci_attach(dev);
+ if (rc != 0) {
+ device_printf(dev, "Failed to initialize AHCI, with error %d\n", rc);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static device_method_t ahci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ahci_mv_fdt_probe),
+ DEVMETHOD(device_attach, ahci_mv_fdt_attach),
+ DEVMETHOD(device_detach, ahci_detach),
+ DEVMETHOD(bus_alloc_resource, ahci_alloc_resource),
+ DEVMETHOD(bus_release_resource, ahci_release_resource),
+ DEVMETHOD(bus_setup_intr, ahci_setup_intr),
+ DEVMETHOD(bus_teardown_intr, ahci_teardown_intr),
+ DEVMETHOD(bus_print_child, ahci_print_child),
+ DEVMETHOD(bus_child_location_str, ahci_child_location_str),
+ DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag),
+ DEVMETHOD_END
+};
+
+static devclass_t ahci_devclass;
+static driver_t ahci_driver = {
+ "ahci",
+ ahci_methods,
+ sizeof(struct ahci_controller)
+};
+
+DRIVER_MODULE(ahci, simplebus, ahci_driver, ahci_devclass, NULL, NULL);
+DRIVER_MODULE(ahci, ofwbus, ahci_driver, ahci_devclass, NULL, NULL);
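
The regret-bit write in ahci_mv_regret_config() goes through an address/data
pair of vendor-specific registers rather than a directly mapped register. A
hedged sketch of that access pattern as a reusable helper (hypothetical, not
part of the patch):

    /*
     * Hypothetical helper: a vendor-specific register is reached by
     * latching its address at offset 0xa0 (AHCI_VENDOR_SPECIFIC_0_ADDR)
     * and then writing the value at 0xa4 (AHCI_VENDOR_SPECIFIC_0_DATA).
     */
    static void
    ahci_mv_vendor_write(struct ahci_controller *ctlr, uint32_t addr,
        uint32_t val)
    {
            ATA_OUTL(ctlr->r_mem, AHCI_VENDOR_SPECIFIC_0_ADDR, addr);
            ATA_OUTL(ctlr->r_mem, AHCI_VENDOR_SPECIFIC_0_DATA, val);
    }

With such a helper, the regret configuration reduces to
ahci_mv_vendor_write(ctlr, 0x4, 0x80).
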
diff --git a/sys/dev/cesa/cesa.c b/sys/dev/cesa/cesa.c
index 5b3b06c..3b74d70 100644
--- a/sys/dev/cesa/cesa.c
+++ b/sys/dev/cesa/cesa.c
@@ -69,7 +69,6 @@ __FBSDID("$FreeBSD$");
#include "cryptodev_if.h"
#include <arm/mv/mvreg.h>
-#include <arm/mv/mvwin.h>
#include <arm/mv/mvvar.h>
#include "cesa.h"
@@ -80,7 +79,6 @@ static void cesa_intr(void *);
static int cesa_newsession(device_t, u_int32_t *, struct cryptoini *);
static int cesa_freesession(device_t, u_int64_t);
static int cesa_process(device_t, struct cryptop *, int);
-static int decode_win_cesa_setup(struct cesa_softc *sc);
static struct resource_spec cesa_res_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
@@ -953,11 +951,13 @@ cesa_execute(struct cesa_softc *sc)
ctd = STAILQ_FIRST(&cr->cr_tdesc);
CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);
-#if defined (SOC_MV_ARMADA38X)
- CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2);
-#else
- CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);
-#endif
+
+ if (sc->sc_soc_id == MV_DEV_88F6828 ||
+ sc->sc_soc_id == MV_DEV_88F6820 ||
+ sc->sc_soc_id == MV_DEV_88F6810)
+ CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2);
+ else
+ CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);
CESA_UNLOCK(sc, requests);
}
@@ -968,6 +968,7 @@ cesa_setup_sram(struct cesa_softc *sc)
phandle_t sram_node;
ihandle_t sram_ihandle;
pcell_t sram_handle, sram_reg[2];
+ void *sram_va;
int rv;
rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
@@ -986,15 +987,17 @@ cesa_setup_sram(struct cesa_softc *sc)
/* Store SRAM size to be able to unmap in detach() */
sc->sc_sram_size = sram_reg[1];
-#if defined(SOC_MV_ARMADA38X)
- void *sram_va;
+ if (sc->sc_soc_id != MV_DEV_88F6828 &&
+ sc->sc_soc_id != MV_DEV_88F6820 &&
+ sc->sc_soc_id != MV_DEV_88F6810)
+ return (0);
/* SRAM memory was not mapped in platform_sram_devmap(), map it now */
sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
if (sram_va == NULL)
return (ENOMEM);
sc->sc_sram_base_va = (vm_offset_t)sram_va;
-#endif
+
return (0);
}
@@ -1018,7 +1021,7 @@ static int
cesa_attach(device_t dev)
{
struct cesa_softc *sc;
- uint32_t d, r;
+ uint32_t d, r, val;
int error;
int i;
@@ -1027,34 +1030,40 @@ cesa_attach(device_t dev)
sc->sc_error = 0;
sc->sc_dev = dev;
- /* Check if CESA peripheral device has power turned on */
-#if defined(SOC_MV_KIRKWOOD)
- if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == CPU_PM_CTRL_CRYPTO) {
- device_printf(dev, "not powered on\n");
- return (ENXIO);
- }
-#else
- if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != CPU_PM_CTRL_CRYPTO) {
- device_printf(dev, "not powered on\n");
- return (ENXIO);
- }
-#endif
soc_id(&d, &r);
switch (d) {
case MV_DEV_88F6281:
case MV_DEV_88F6282:
+ /* Check if CESA peripheral device has power turned on */
+ if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) ==
+ CPU_PM_CTRL_CRYPTO) {
+ device_printf(dev, "not powered on\n");
+ return (ENXIO);
+ }
+ sc->sc_tperr = 0;
+ break;
case MV_DEV_88F6828:
+ case MV_DEV_88F6820:
+ case MV_DEV_88F6810:
sc->sc_tperr = 0;
break;
case MV_DEV_MV78100:
case MV_DEV_MV78100_Z0:
+ /* Check if CESA peripheral device has power turned on */
+ if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) !=
+ CPU_PM_CTRL_CRYPTO) {
+ device_printf(dev, "not powered on\n");
+ return (ENXIO);
+ }
sc->sc_tperr = CESA_ICR_TPERR;
break;
default:
return (ENXIO);
}
+ sc->sc_soc_id = d;
+
/* Initialize mutexes */
mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
"CESA Shared Data", MTX_DEF);
@@ -1074,13 +1083,6 @@ cesa_attach(device_t dev)
goto err0;
}
- /* Setup CESA decoding windows */
- error = decode_win_cesa_setup(sc);
- if (error) {
- device_printf(dev, "could not setup decoding windows\n");
- goto err1;
- }
-
/* Acquire SRAM base address */
error = cesa_setup_sram(sc);
if (error) {
@@ -1189,12 +1191,15 @@ cesa_attach(device_t dev)
* - Outstanding reads enabled,
* - No byte-swap.
*/
- CESA_TDMA_WRITE(sc, CESA_TDMA_CR, CESA_TDMA_CR_DBL128 |
- CESA_TDMA_CR_SBL128 | CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS |
-#if defined (SOC_MV_ARMADA38X)
- CESA_TDMA_NUM_OUTSTAND |
-#endif
- CESA_TDMA_CR_ENABLE);
+ val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
+ CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE;
+
+ if (sc->sc_soc_id == MV_DEV_88F6828 ||
+ sc->sc_soc_id == MV_DEV_88F6820 ||
+ sc->sc_soc_id == MV_DEV_88F6810)
+ val |= CESA_TDMA_NUM_OUTSTAND;
+
+ CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val);
/*
* Initialize SA:
@@ -1228,7 +1233,10 @@ cesa_attach(device_t dev)
crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
+ if (sc->sc_soc_id == MV_DEV_88F6828 ||
+ sc->sc_soc_id == MV_DEV_88F6820 ||
+ sc->sc_soc_id == MV_DEV_88F6810)
+ crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
return (0);
err8:
@@ -1246,9 +1254,10 @@ err4:
err3:
bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);
err2:
-#if defined(SOC_MV_ARMADA38X)
- pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
-#endif
+ if (sc->sc_soc_id == MV_DEV_88F6828 ||
+ sc->sc_soc_id == MV_DEV_88F6820 ||
+ sc->sc_soc_id == MV_DEV_88F6810)
+ pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
err1:
bus_release_resources(dev, cesa_res_spec, sc->sc_res);
err0:
@@ -1296,10 +1305,12 @@ cesa_detach(device_t dev)
/* Relase I/O and IRQ resources */
bus_release_resources(dev, cesa_res_spec, sc->sc_res);
-#if defined(SOC_MV_ARMADA38X)
/* Unmap SRAM memory */
- pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
-#endif
+ if (sc->sc_soc_id == MV_DEV_88F6828 ||
+ sc->sc_soc_id == MV_DEV_88F6820 ||
+ sc->sc_soc_id == MV_DEV_88F6810)
+ pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
+
/* Destroy mutexes */
mtx_destroy(&sc->sc_sessions_lock);
mtx_destroy(&sc->sc_requests_lock);
@@ -1686,50 +1697,3 @@ cesa_process(device_t dev, struct cryptop *crp, int hint)
return (0);
}
-
-/*
- * Set CESA TDMA decode windows.
- */
-static int
-decode_win_cesa_setup(struct cesa_softc *sc)
-{
- struct mem_region availmem_regions[FDT_MEM_REGIONS];
- int availmem_regions_sz;
- uint32_t br, cr, i;
-
- /* Grab physical memory regions information from DTS */
- if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz,
- NULL) != 0)
- return (ENXIO);
-
- if (availmem_regions_sz > MV_WIN_CESA_MAX) {
- device_printf(sc->sc_dev, "Too much memory regions, cannot "
- " set CESA windows to cover whole DRAM \n");
- return (ENXIO);
- }
-
- /* Disable and clear all CESA windows */
- for (i = 0; i < MV_WIN_CESA_MAX; i++) {
- CESA_TDMA_WRITE(sc, MV_WIN_CESA_BASE(i), 0);
- CESA_TDMA_WRITE(sc, MV_WIN_CESA_CTRL(i), 0);
- }
-
- /* Fill CESA TDMA decoding windows with information acquired from DTS */
- for (i = 0; i < availmem_regions_sz; i++) {
- br = availmem_regions[i].mr_start;
- cr = availmem_regions[i].mr_size;
-
- /* Don't add entries with size lower than 64KB */
- if (cr & 0xffff0000) {
- cr = (((cr - 1) & 0xffff0000) |
- (MV_WIN_DDR_ATTR(i) << MV_WIN_CPU_ATTR_SHIFT) |
- (MV_WIN_DDR_TARGET << MV_WIN_CPU_TARGET_SHIFT) |
- MV_WIN_CPU_ENABLE_BIT);
- CESA_TDMA_WRITE(sc, MV_WIN_CESA_BASE(i), br);
- CESA_TDMA_WRITE(sc, MV_WIN_CESA_CTRL(i), cr);
- }
- }
-
- return (0);
-}
-
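
The compile-time SOC_MV_ARMADA38X conditionals are replaced throughout this
file by a runtime test of sc_soc_id against the three Armada 38x device IDs,
repeated in cesa_execute(), cesa_setup_sram(), cesa_attach() and the detach
and error paths. A sketch of how the repeated test could be factored
(hypothetical helper, not in the patch):

    /*
     * Hypothetical predicate: true on Armada 38x SoCs (88F6810/20/28),
     * whose CESA engine supports SHA-2 and whose SRAM must be mapped
     * at runtime.
     */
    static __inline bool
    cesa_is_armada38x(const struct cesa_softc *sc)
    {
            return (sc->sc_soc_id == MV_DEV_88F6828 ||
                sc->sc_soc_id == MV_DEV_88F6820 ||
                sc->sc_soc_id == MV_DEV_88F6810);
    }
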
diff --git a/sys/dev/cesa/cesa.h b/sys/dev/cesa/cesa.h
index e8f6372..eb1342a 100644
--- a/sys/dev/cesa/cesa.h
+++ b/sys/dev/cesa/cesa.h
@@ -61,8 +61,8 @@
*/
/* Values below are optimized for requests containing about 1.5 kB of data */
-#define CESA_SA_DESC_PER_REQ 2
-#define CESA_TDMA_DESC_PER_REQ 8
+#define CESA_SA_DESC_PER_REQ 8
+#define CESA_TDMA_DESC_PER_REQ 32
#define CESA_SA_DESCRIPTORS (CESA_SA_DESC_PER_REQ * CESA_REQUESTS)
#define CESA_TDMA_DESCRIPTORS (CESA_TDMA_DESC_PER_REQ * CESA_REQUESTS)
@@ -231,6 +231,7 @@ struct cesa_packet {
struct cesa_softc {
device_t sc_dev;
int32_t sc_cid;
+ uint32_t sc_soc_id;
struct resource *sc_res[RES_CESA_NUM];
void *sc_icookie;
bus_dma_tag_t sc_data_dtag;
@@ -335,10 +336,7 @@ struct cesa_chain_info {
#define CESA_TDMA_CR_ENABLE (1 << 12)
#define CESA_TDMA_CR_FETCHND (1 << 13)
#define CESA_TDMA_CR_ACTIVE (1 << 14)
-
-#if defined (SOC_MV_ARMADA38X)
#define CESA_TDMA_NUM_OUTSTAND (2 << 16)
-#endif
#define CESA_TDMA_ECR 0x08C8
#define CESA_TDMA_ECR_MISS (1 << 0)
@@ -352,18 +350,10 @@ struct cesa_chain_info {
#define CESA_TDMA_EMR_BOTH_HIT CESA_TDMA_ECR_BOTH_HIT
#define CESA_TDMA_EMR_DATA_ERROR CESA_TDMA_ECR_DATA_ERROR
-/* CESA TDMA address decoding registers */
-#define MV_WIN_CESA_CTRL(n) (0x8 * (n) + 0xA04)
-#define MV_WIN_CESA_BASE(n) (0x8 * (n) + 0xA00)
-#define MV_WIN_CESA_MAX 4
-
/* CESA SA registers definitions */
#define CESA_SA_CMD 0x0E00
#define CESA_SA_CMD_ACTVATE (1 << 0)
-
-#if defined (SOC_MV_ARMADA38X)
#define CESA_SA_CMD_SHA2 (1 << 31)
-#endif
#define CESA_SA_DPR 0x0E04
diff --git a/sys/dev/drm2/drm_pciids.h b/sys/dev/drm2/drm_pciids.h
index d861ac8..9afce44 100644
--- a/sys/dev/drm2/drm_pciids.h
+++ b/sys/dev/drm2/drm_pciids.h
@@ -88,6 +88,7 @@
{0x8086, 0x0155, 0, "Intel Valleyview (desktop)"}, \
{0x8086, 0x0157, 0, "Intel Valleyview (mobile)"}, \
{0x8086, 0x0F30, 0, "Intel Valleyview (mobile)"}, \
+ {0x8086, 0x0F31, 0, "Intel Valleyview (mobile)"}, \
{0x8086, 0x2562, 0, "Intel i845G GMCH"}, \
{0x8086, 0x2572, 0, "Intel i865G GMCH"}, \
{0x8086, 0x2582, 0, "Intel i915G"}, \
diff --git a/sys/dev/drm2/i915/i915_drv.c b/sys/dev/drm2/i915/i915_drv.c
index 621316d..e41338d 100644
--- a/sys/dev/drm2/i915/i915_drv.c
+++ b/sys/dev/drm2/i915/i915_drv.c
@@ -416,6 +416,7 @@ static const struct intel_gfx_device_id {
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
{0, 0}
diff --git a/sys/dev/etherswitch/arswitch/arswitch.c b/sys/dev/etherswitch/arswitch/arswitch.c
index 8396f8a..82f22ad 100644
--- a/sys/dev/etherswitch/arswitch/arswitch.c
+++ b/sys/dev/etherswitch/arswitch/arswitch.c
@@ -77,6 +77,14 @@
static SYSCTL_NODE(_debug, OID_AUTO, arswitch, CTLFLAG_RD, 0, "arswitch");
#endif
+/* Map ETHERSWITCH_PORT_LED_* to Atheros pattern codes */
+static int led_pattern_table[] = {
+ [ETHERSWITCH_PORT_LED_DEFAULT] = 0x3,
+ [ETHERSWITCH_PORT_LED_ON] = 0x2,
+ [ETHERSWITCH_PORT_LED_OFF] = 0x0,
+ [ETHERSWITCH_PORT_LED_BLINK] = 0x1
+};
+
static inline int arswitch_portforphy(int phy);
static void arswitch_tick(void *arg);
static int arswitch_ifmedia_upd(struct ifnet *);
@@ -85,6 +93,8 @@ static int ar8xxx_port_vlan_setup(struct arswitch_softc *sc,
etherswitch_port_t *p);
static int ar8xxx_port_vlan_get(struct arswitch_softc *sc,
etherswitch_port_t *p);
+static int arswitch_setled(struct arswitch_softc *sc, int phy, int led,
+ int style);
static int
arswitch_probe(device_t dev)
@@ -188,9 +198,23 @@ arswitch_attach_phys(struct arswitch_softc *sc)
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
+ return (err);
+ }
+
+ if (AR8X16_IS_SWITCH(sc, AR8327)) {
+ int led;
+ char ledname[IFNAMSIZ+4];
+
+ for (led = 0; led < 3; led++) {
+ snprintf(ledname, sizeof(ledname), "%s%dled%d", name,
+ arswitch_portforphy(phy), led + 1);
+ sc->dev_led[phy][led].sc = sc;
+ sc->dev_led[phy][led].phy = phy;
+ sc->dev_led[phy][led].lednum = led;
+ }
}
}
- return (err);
+ return (0);
}
static int
@@ -683,6 +707,38 @@ arswitch_getport(device_t dev, etherswitch_port_t *p)
} else {
return (ENXIO);
}
+
+ if (!arswitch_is_cpuport(sc, p->es_port) &&
+ AR8X16_IS_SWITCH(sc, AR8327)) {
+ int led;
+
+ p->es_nleds = 3;
+ for (led = 0; led < p->es_nleds; led++) {
+ int style;
+ uint32_t val;
+
+ /* Find the right style enum for our pattern. */
+ val = arswitch_readreg(dev,
+ ar8327_led_mapping[p->es_port - 1][led].reg);
+ val = (val >> ar8327_led_mapping[p->es_port - 1][led].shift) & 0x03;
+ for (style = 0; style < ETHERSWITCH_PORT_LED_MAX; style++) {
+ if (led_pattern_table[style] == val)
+ break;
+ }
+
+ /* Can't happen. */
+ if (style == ETHERSWITCH_PORT_LED_MAX)
+ style = ETHERSWITCH_PORT_LED_DEFAULT;
+
+ p->es_led[led] = style;
+ }
+ } else {
+ p->es_nleds = 0;
+ }
+
return (0);
}
@@ -727,7 +783,7 @@ ar8xxx_port_vlan_setup(struct arswitch_softc *sc, etherswitch_port_t *p)
static int
arswitch_setport(device_t dev, etherswitch_port_t *p)
{
- int err;
+ int err, i;
struct arswitch_softc *sc;
struct ifmedia *ifm;
struct mii_data *mii;
@@ -744,9 +800,20 @@ arswitch_setport(device_t dev, etherswitch_port_t *p)
return (err);
}
- /* Do not allow media changes on CPU port. */
+ /* Do not allow media or led changes on CPU port. */
if (arswitch_is_cpuport(sc, p->es_port))
return (0);
+
+ if (AR8X16_IS_SWITCH(sc, AR8327)) {
+ for (i = 0; i < 3; i++) {
+ err = arswitch_setled(sc, p->es_port - 1, i,
+ p->es_led[i]);
+ if (err != 0)
+ return (err);
+ }
+ }
mii = arswitch_miiforport(sc, p->es_port);
if (mii == NULL)
@@ -758,6 +825,23 @@ arswitch_setport(device_t dev, etherswitch_port_t *p)
return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}
+static int
+arswitch_setled(struct arswitch_softc *sc, int phy, int led, int style)
+{
+ int shift;
+
+ if (phy < 0 || phy >= sc->numphys)
+ return (EINVAL);
+
+ if (style < 0 || style >= ETHERSWITCH_PORT_LED_MAX)
+ return (EINVAL);
+
+ shift = ar8327_led_mapping[phy][led].shift;
+ return (arswitch_modifyreg(sc->sc_dev,
+ ar8327_led_mapping[phy][led].reg,
+ 0x03 << shift, led_pattern_table[style] << shift));
+}
+
static void
arswitch_statchg(device_t dev)
{
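
From userland, the per-port LED styles added above are reachable through the
etherswitch(4) character device: fetch the port, change es_led[], and write
it back. A minimal sketch, assuming the standard IOETHERSWITCHGETPORT and
IOETHERSWITCHSETPORT ioctls from etherswitch.h; error paths are abbreviated:

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <dev/etherswitch/etherswitch.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Set the first LED of the given port to the blink pattern. */
    static int
    port_led_blink(int port)
    {
            etherswitch_port_t p;
            int err, fd;

            fd = open("/dev/etherswitch0", O_RDWR);
            if (fd < 0)
                    return (-1);
            memset(&p, 0, sizeof(p));
            p.es_port = port;
            err = ioctl(fd, IOETHERSWITCHGETPORT, &p);
            if (err == 0) {
                    p.es_led[0] = ETHERSWITCH_PORT_LED_BLINK;
                    err = ioctl(fd, IOETHERSWITCHSETPORT, &p);
            }
            close(fd);
            return (err);
    }
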
diff --git a/sys/dev/etherswitch/arswitch/arswitch_8327.c b/sys/dev/etherswitch/arswitch/arswitch_8327.c
index 92e44fc..b0b2541 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_8327.c
+++ b/sys/dev/etherswitch/arswitch/arswitch_8327.c
@@ -75,6 +75,36 @@
* lead to traffic storms/loops.
*/
+/* Map port+led to register+shift */
+struct ar8327_led_mapping ar8327_led_mapping[AR8327_NUM_PHYS][ETHERSWITCH_PORT_MAX_LEDS] =
+{
+ { /* PHY0 */
+ {AR8327_REG_LED_CTRL0, 14 },
+ {AR8327_REG_LED_CTRL1, 14 },
+ {AR8327_REG_LED_CTRL2, 14 }
+ },
+ { /* PHY1 */
+ {AR8327_REG_LED_CTRL3, 8 },
+ {AR8327_REG_LED_CTRL3, 10 },
+ {AR8327_REG_LED_CTRL3, 12 }
+ },
+ { /* PHY2 */
+ {AR8327_REG_LED_CTRL3, 14 },
+ {AR8327_REG_LED_CTRL3, 16 },
+ {AR8327_REG_LED_CTRL3, 18 }
+ },
+ { /* PHY3 */
+ {AR8327_REG_LED_CTRL3, 20 },
+ {AR8327_REG_LED_CTRL3, 22 },
+ {AR8327_REG_LED_CTRL3, 24 }
+ },
+ { /* PHY4 */
+ {AR8327_REG_LED_CTRL0, 30 },
+ {AR8327_REG_LED_CTRL1, 30 },
+ {AR8327_REG_LED_CTRL2, 30 }
+ }
+};
+
static int
ar8327_vlan_op(struct arswitch_softc *sc, uint32_t op, uint32_t vid,
uint32_t data)
diff --git a/sys/dev/etherswitch/arswitch/arswitch_8327.h b/sys/dev/etherswitch/arswitch/arswitch_8327.h
index 1f35d96..e2a5f5e 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_8327.h
+++ b/sys/dev/etherswitch/arswitch/arswitch_8327.h
@@ -85,6 +85,11 @@ struct ar8327_port_cfg {
uint32_t rxpause;
};
+extern struct ar8327_led_mapping {
+ int reg;
+ int shift;
+} ar8327_led_mapping[AR8327_NUM_PHYS][ETHERSWITCH_PORT_MAX_LEDS];
+
extern void ar8327_attach(struct arswitch_softc *sc);
#endif /* __ARSWITCH_8327_H__ */
diff --git a/sys/dev/etherswitch/arswitch/arswitchvar.h b/sys/dev/etherswitch/arswitch/arswitchvar.h
index a322a4f..dc08799 100644
--- a/sys/dev/etherswitch/arswitch/arswitchvar.h
+++ b/sys/dev/etherswitch/arswitch/arswitchvar.h
@@ -48,6 +48,15 @@ typedef enum {
#define ARSWITCH_NUM_PORTS MAX(AR8327_NUM_PORTS, AR8X16_NUM_PORTS)
#define ARSWITCH_NUM_PHYS MAX(AR8327_NUM_PHYS, AR8X16_NUM_PHYS)
+#define ARSWITCH_NUM_LEDS 3
+
+struct arswitch_dev_led {
+ struct arswitch_softc *sc;
+ struct cdev *led;
+ int phy;
+ int lednum;
+};
+
struct arswitch_softc {
struct mtx sc_mtx; /* serialize access to softc */
device_t sc_dev;
@@ -66,6 +75,7 @@ struct arswitch_softc {
char *ifname[ARSWITCH_NUM_PHYS];
device_t miibus[ARSWITCH_NUM_PHYS];
struct ifnet *ifp[ARSWITCH_NUM_PHYS];
+ struct arswitch_dev_led dev_led[ARSWITCH_NUM_PHYS][ARSWITCH_NUM_LEDS];
struct callout callout_tick;
etherswitch_info_t info;
diff --git a/sys/dev/etherswitch/e6000sw/e6000sw.c b/sys/dev/etherswitch/e6000sw/e6000sw.c
index ae552b8..2739dd9 100644
--- a/sys/dev/etherswitch/e6000sw/e6000sw.c
+++ b/sys/dev/etherswitch/e6000sw/e6000sw.c
@@ -28,36 +28,32 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <sys/types.h>
+#include "opt_platform.h"
+
#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/sockio.h>
+#include <sys/bus.h>
+#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
-#include <sys/socket.h>
#include <sys/module.h>
-#include <sys/errno.h>
-#include <sys/bus.h>
-#include <sys/conf.h>
-#include <sys/uio.h>
-#include <sys/fcntl.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <arm/mv/mvwin.h>
-#include <arm/mv/mvreg.h>
-#include <arm/mv/mvvar.h>
-
#include <dev/etherswitch/etherswitch.h>
-#include <dev/mdio/mdio.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
-#include <dev/mge/if_mgevar.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#else
+#include <machine/stdarg.h>
+#endif
#include "e6000swreg.h"
#include "etherswitch_if.h"
@@ -67,78 +63,116 @@ __FBSDID("$FreeBSD$");
MALLOC_DECLARE(M_E6000SW);
MALLOC_DEFINE(M_E6000SW, "e6000sw", "e6000sw switch");
-#define E6000SW_LOCK(_sc) \
- sx_xlock(&(_sc)->sx)
-#define E6000SW_UNLOCK(_sc) \
- sx_unlock(&(_sc)->sx)
-#define E6000SW_LOCK_ASSERT(_sc, _what) \
- sx_assert(&(_sc)->sx, (_what))
-#define E6000SW_TRYLOCK(_sc) \
- sx_tryxlock(&(_sc)->sx)
+#define E6000SW_LOCK(_sc) sx_xlock(&(_sc)->sx)
+#define E6000SW_UNLOCK(_sc) sx_unlock(&(_sc)->sx)
+#define E6000SW_LOCK_ASSERT(_sc, _what) sx_assert(&(_sc)->sx, (_what))
+#define E6000SW_TRYLOCK(_sc) sx_tryxlock(&(_sc)->sx)
+#define E6000SW_MULTICHIP(_sc) (((_sc)->sw_addr != 0) ? 1 : 0)
+#define E6000SW_WAITREADY(_sc, _reg, _bit) \
+ e6000sw_waitready((_sc), REG_GLOBAL, (_reg), (_bit))
+#define E6000SW_WAITREADY2(_sc, _reg, _bit) \
+ e6000sw_waitready((_sc), REG_GLOBAL2, (_reg), (_bit))
+#define MDIO_READ(dev, addr, reg) \
+ MDIO_READREG(device_get_parent(dev), (addr), (reg))
+#define MDIO_WRITE(dev, addr, reg, val) \
+ MDIO_WRITEREG(device_get_parent(dev), (addr), (reg), (val))
typedef struct e6000sw_softc {
device_t dev;
+#ifdef FDT
+ phandle_t node;
+#endif
struct sx sx;
- struct ifnet *ifp[E6000SW_NUM_PHYS];
- char *ifname[E6000SW_NUM_PHYS];
- device_t miibus[E6000SW_NUM_PHYS];
- struct mii_data *mii[E6000SW_NUM_PHYS];
- struct callout tick_callout;
-
+ struct ifnet *ifp[E6000SW_MAX_PORTS];
+ char *ifname[E6000SW_MAX_PORTS];
+ device_t miibus[E6000SW_MAX_PORTS];
+ struct proc *kproc;
+
+ int vlans[E6000SW_NUM_VLANS];
+ uint32_t swid;
+ uint32_t vlan_mode;
uint32_t cpuports_mask;
-
- int vid[E6000SW_NUM_VGROUPS];
- int members[E6000SW_NUM_VGROUPS];
- int vgroup[E6000SW_NUM_PORTS];
+ uint32_t fixed_mask;
+ uint32_t fixed25_mask;
+ uint32_t used_mask;
+ uint32_t ports_mask;
+ int phy_base; /* SMI base addr of PHY regs */
+ int port_base; /* SMI base addr of port regs */
+ int sw_addr;
+ int num_laggs;
+ int num_ports;
+
+ ssize_t iosize;
+ void *iobuf;
} e6000sw_softc_t;
static etherswitch_info_t etherswitch_info = {
- .es_nports = E6000SW_NUM_PORTS,
- .es_nvlangroups = E6000SW_NUM_VGROUPS,
+ .es_nports = 0,
+ .es_nlaggroups = 0,
+ .es_nvlangroups = 0,
+ .es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q,
+ .es_switch_caps = ETHERSWITCH_CAPS_PORTS_MASK |
+ ETHERSWITCH_CAPS_PSTATE | ETHERSWITCH_CAPS_LAGG,
.es_name = "Marvell 6000 series switch"
};
-static void e6000sw_identify(driver_t *driver, device_t parent);
-static int e6000sw_probe(device_t dev);
-static int e6000sw_attach(device_t dev);
-static int e6000sw_detach(device_t dev);
-static int e6000sw_readphy(device_t dev, int phy, int reg);
-static int e6000sw_writephy(device_t dev, int phy, int reg, int data);
-static etherswitch_info_t* e6000sw_getinfo(device_t dev);
-static void e6000sw_lock(device_t dev);
-static void e6000sw_unlock(device_t dev);
-static int e6000sw_getport(device_t dev, etherswitch_port_t *p);
-static int e6000sw_setport(device_t dev, etherswitch_port_t *p);
-static int e6000sw_readreg_wrapper(device_t dev, int addr_reg);
-static int e6000sw_writereg_wrapper(device_t dev, int addr_reg, int val);
-static int e6000sw_readphy_wrapper(device_t dev, int phy, int reg);
-static int e6000sw_writephy_wrapper(device_t dev, int phy, int reg, int data);
-static int e6000sw_getvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg);
-static int e6000sw_setvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg);
-static int e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg);
-static int e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg);
-static void e6000sw_setup(device_t dev, e6000sw_softc_t *sc);
-static void e6000sw_port_vlan_conf(e6000sw_softc_t *sc);
-static void e6000sw_tick(void *arg);
-static void e6000sw_set_atustat(device_t dev, e6000sw_softc_t *sc, int bin,
- int flag);
-static int e6000sw_atu_flush(device_t dev, e6000sw_softc_t *sc, int flag);
-static __inline void e6000sw_writereg(e6000sw_softc_t *sc, int addr, int reg,
- int val);
-static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *sc, int addr,
- int reg);
-static int e6000sw_ifmedia_upd(struct ifnet *ifp);
-static void e6000sw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
-static int e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct
- atu_opt *atu, int flag);
-static int e6000sw_get_pvid(e6000sw_softc_t *sc, int port, int *pvid);
-static int e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid);
-static __inline int e6000sw_cpuport(e6000sw_softc_t *sc, int port);
-static __inline struct mii_data *e6000sw_miiforphy(e6000sw_softc_t *sc,
- unsigned int phy);
-
-static struct proc *e6000sw_kproc;
+static void e6000sw_identify(driver_t *, device_t);
+static int e6000sw_probe(device_t);
+static int e6000sw_attach(device_t);
+static int e6000sw_detach(device_t);
+static int e6000sw_read_xmdio(device_t, int, int, int);
+static int e6000sw_write_xmdio(device_t, int, int, int, int);
+static int e6000sw_readphy(device_t, int, int);
+static int e6000sw_writephy(device_t, int, int, int);
+static etherswitch_info_t* e6000sw_getinfo(device_t);
+static int e6000sw_getconf(device_t, etherswitch_conf_t *);
+static int e6000sw_setconf(device_t, etherswitch_conf_t *);
+static void e6000sw_lock(device_t);
+static void e6000sw_unlock(device_t);
+static int e6000sw_getport(device_t, etherswitch_port_t *);
+static int e6000sw_setport(device_t, etherswitch_port_t *);
+static int e6000sw_set_vlan_mode(e6000sw_softc_t *, uint32_t);
+static int e6000sw_readreg_wrapper(device_t, int);
+static int e6000sw_writereg_wrapper(device_t, int, int);
+static int e6000sw_readphy_wrapper(device_t, int, int);
+static int e6000sw_writephy_wrapper(device_t, int, int, int);
+static int e6000sw_getvgroup_wrapper(device_t, etherswitch_vlangroup_t *);
+static int e6000sw_setvgroup_wrapper(device_t, etherswitch_vlangroup_t *);
+static int e6000sw_setvgroup(device_t, etherswitch_vlangroup_t *);
+static int e6000sw_getvgroup(device_t, etherswitch_vlangroup_t *);
+static int e6000sw_resetlagg(e6000sw_softc_t *);
+static int e6000sw_getlaggroup_wrapper(device_t, etherswitch_laggroup_t *);
+static int e6000sw_setlaggroup_wrapper(device_t, etherswitch_laggroup_t *);
+static int e6000sw_setlaggroup(device_t, etherswitch_laggroup_t *);
+static int e6000sw_getlaggroup(device_t, etherswitch_laggroup_t *);
+static ssize_t e6000sw_getiosize(device_t);
+static ssize_t e6000sw_getioblksize(device_t);
+static void *e6000sw_getiobuf(device_t);
+static int e6000sw_ioread(device_t, off_t, ssize_t);
+static int e6000sw_iowrite(device_t, off_t, ssize_t);
+static void e6000sw_setup(device_t, e6000sw_softc_t *);
+static void e6000sw_tick(void *);
+static void e6000sw_set_atustat(device_t, e6000sw_softc_t *, int, int);
+static int e6000sw_atu_flush(device_t, e6000sw_softc_t *, int);
+static __inline void e6000sw_writereg(e6000sw_softc_t *, int, int, int);
+static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *, int, int);
+static int e6000sw_ifmedia_upd(struct ifnet *);
+static void e6000sw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+static int e6000sw_atu_mac_table(device_t, e6000sw_softc_t *, struct atu_opt *,
+ int);
+static int e6000sw_vtu_flush(e6000sw_softc_t *);
+static int e6000sw_vtu_update(e6000sw_softc_t *, int, int, int, int, int);
+static int e6000sw_waitready(e6000sw_softc_t *, uint32_t, uint32_t, uint32_t);
+static void e6000sw_get_pvid(e6000sw_softc_t *, int, int *);
+static void e6000sw_set_pvid(e6000sw_softc_t *, int, int);
+static __inline bool e6000sw_is_cpuport(e6000sw_softc_t *, int);
+static __inline bool e6000sw_is_fixedport(e6000sw_softc_t *, int);
+static __inline bool e6000sw_is_fixed25port(e6000sw_softc_t *, int);
+static __inline bool e6000sw_is_phyport(e6000sw_softc_t *, int);
+static __inline bool e6000sw_is_portenabled(e6000sw_softc_t *, int);
+static __inline struct mii_data *e6000sw_miiforphy(e6000sw_softc_t *,
+ unsigned int);
static device_method_t e6000sw_methods[] = {
/* device interface */
@@ -156,6 +190,8 @@ static device_method_t e6000sw_methods[] = {
/* etherswitch interface */
DEVMETHOD(etherswitch_getinfo, e6000sw_getinfo),
+ DEVMETHOD(etherswitch_getconf, e6000sw_getconf),
+ DEVMETHOD(etherswitch_setconf, e6000sw_setconf),
DEVMETHOD(etherswitch_lock, e6000sw_lock),
DEVMETHOD(etherswitch_unlock, e6000sw_unlock),
DEVMETHOD(etherswitch_getport, e6000sw_getport),
@@ -166,6 +202,13 @@ static device_method_t e6000sw_methods[] = {
DEVMETHOD(etherswitch_writephyreg, e6000sw_writephy_wrapper),
DEVMETHOD(etherswitch_setvgroup, e6000sw_setvgroup_wrapper),
DEVMETHOD(etherswitch_getvgroup, e6000sw_getvgroup_wrapper),
+ DEVMETHOD(etherswitch_setlaggroup, e6000sw_setlaggroup_wrapper),
+ DEVMETHOD(etherswitch_getlaggroup, e6000sw_getlaggroup_wrapper),
+ DEVMETHOD(etherswitch_getioblksize, e6000sw_getioblksize),
+ DEVMETHOD(etherswitch_getiosize, e6000sw_getiosize),
+ DEVMETHOD(etherswitch_getiobuf, e6000sw_getiobuf),
+ DEVMETHOD(etherswitch_ioread, e6000sw_ioread),
+ DEVMETHOD(etherswitch_iowrite, e6000sw_iowrite),
DEVMETHOD_END
};
@@ -181,6 +224,145 @@ DRIVER_MODULE(etherswitch, e6000sw, etherswitch_driver, etherswitch_devclass, 0,
DRIVER_MODULE(miibus, e6000sw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(e6000sw, mdio, 1, 1, 1);
+static SYSCTL_NODE(_hw, OID_AUTO, e6000sw, CTLFLAG_RD, 0,
+ "Marvell E6000 series Switch Parameters");
+
+static int e6000sw_eeprom_wp = TRUE;
+SYSCTL_INT(_hw_e6000sw, OID_AUTO, eeprom_wp, CTLFLAG_RDTUN, &e6000sw_eeprom_wp,
+ 0, "Enable eeprom write protect.");
+static int e6000sw_default_disabled = FALSE;
+SYSCTL_INT(_hw_e6000sw, OID_AUTO, default_disabled, CTLFLAG_RDTUN,
+ &e6000sw_default_disabled, 0, "Keep ports disabled at boot.");
+
+#undef E6000SW_DEBUG
+#if defined(E6000SW_DEBUG)
+static void
+e6000sw_atu_dump(e6000sw_softc_t *sc, int fid)
+{
+ uint16_t data, mac1, mac2, mac3, reg;
+
+ if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) {
+ device_printf(sc->dev, "ATU unit is busy, cannot access\n");
+ return;
+ }
+
+ /* Set the start MAC address and FID. */
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_FID, fid);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_DATA, 0);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR01, 0);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR23, 0);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR45, 0);
+ reg = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION) & ~ATU_OP_MASK;
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, reg | GET_NEXT_IN_FIB);
+ for (;;) {
+ reg = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION);
+ if ((reg & ATU_OP_MASK) != GET_NEXT_IN_FIB) {
+ device_printf(sc->dev, "Out of sync!\n");
+ return;
+ }
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION,
+ reg | ATU_UNIT_BUSY);
+ if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) {
+ device_printf(sc->dev, "Timeout while reading\n");
+ return;
+ }
+ data = e6000sw_readreg(sc, REG_GLOBAL, ATU_DATA);
+ if ((data & ATU_STATE_MASK) == 0)
+ return;
+
+ mac1 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR01);
+ mac2 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR23);
+ mac3 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR45);
+ if (data & ATU_DATA_LAG)
+ device_printf(sc->dev, "fid: %4d lag: %3d ", fid,
+ (data & ATU_LAG_MASK) >> ATU_LAG_SHIFT);
+ else
+ device_printf(sc->dev, "fid: %4d port: %2d ", fid,
+ ffs((data & ATU_PORT_MASK(sc)) >> ATU_PORT_SHIFT) - 1);
+ printf("MAC: %02x:%02x:%02x:%02x:%02x:%02x (%#x)\n",
+ (mac1 >> 8) & 0xff, mac1 & 0xff,
+ (mac2 >> 8) & 0xff, mac2 & 0xff,
+ (mac3 >> 8) & 0xff, mac3 & 0xff, data);
+ }
+}
+
+#define E6000SW_BUFSZ 32
+
+static void
+e6000sw_vtu_dump(e6000sw_softc_t *sc)
+{
+ char *buf, discard[E6000SW_BUFSZ], tagged[E6000SW_BUFSZ];
+ char unmodified[E6000SW_BUFSZ], untagged[E6000SW_BUFSZ];
+ char tmp[E6000SW_BUFSZ];
+ int i, port, vlan;
+ uint32_t reg;
+
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "VTU unit is busy, cannot access\n");
+ return;
+ }
+
+ /* Start at VID 1. */
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, 0);
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_OPERATION) & ~VTU_OP_MASK;
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, reg | VTU_GET_NEXT);
+ for (;;) {
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_OPERATION);
+ if ((reg & VTU_OP_MASK) != VTU_GET_NEXT) {
+ device_printf(sc->dev, "Out of sync!\n");
+ return;
+ }
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, reg | VTU_BUSY);
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "Timeout while reading\n");
+ return;
+ }
+
+ vlan = e6000sw_readreg(sc, REG_GLOBAL, VTU_VID);
+ if (vlan == VTU_VID_MASK || (vlan & VTU_VID_VALID) == 0)
+ return;
+
+ memset(discard, 0, sizeof(discard));
+ memset(tagged, 0, sizeof(tagged));
+ memset(unmodified, 0, sizeof(unmodified));
+ memset(untagged, 0, sizeof(untagged));
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA);
+ for (i = 0; i < sc->num_ports; i++) {
+ if (i == VTU_PPREG(sc))
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA2);
+ port = (reg >> VTU_PORT(sc, i)) & VTU_PORT_MASK;
+ if (port == VTU_PORT_UNMODIFIED)
+ buf = unmodified;
+ else if (port == VTU_PORT_UNTAGGED)
+ buf = untagged;
+ else if (port == VTU_PORT_TAGGED)
+ buf = tagged;
+ else if (port == VTU_PORT_DISCARD)
+ buf = discard;
+ else
+ buf = NULL;
+ memset(tmp, 0, sizeof(tmp));
+ snprintf(tmp, sizeof(tmp), "%d", i);
+ if (buf != NULL) {
+ if (strlen(buf) > 0)
+ strlcat(buf, ",", E6000SW_BUFSZ);
+ strlcat(buf, tmp, E6000SW_BUFSZ);
+ }
+ }
+
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_FID);
+ device_printf(sc->dev,
+ "fid: %4d%s vlan: %4d discard: %22s tagged: %22s untagged: %22s unmodified: %22s\n",
+ reg & VTU_FID_MASK(sc), (reg & VTU_FID_POLICY) ? "*" : "",
+ vlan & VTU_VID_MASK,
+ strlen(discard) > 0 ? discard : "none",
+ strlen(tagged) > 0 ? tagged : "none",
+ strlen(untagged) > 0 ? untagged : "none",
+ strlen(unmodified) > 0 ? unmodified : "none");
+ }
+}
+#endif
+
static void
e6000sw_identify(driver_t *driver, device_t parent)
{
@@ -192,108 +374,484 @@ e6000sw_identify(driver_t *driver, device_t parent)
static int
e6000sw_probe(device_t dev)
{
+ int is_8190;
e6000sw_softc_t *sc;
const char *description;
- unsigned int id;
+#ifdef FDT
+ phandle_t dsa_node, switch_node;
+#endif
+ is_8190 = 0;
sc = device_get_softc(dev);
- bzero(sc, sizeof(e6000sw_softc_t));
sc->dev = dev;
- /* Lock is necessary due to assertions. */
- sx_init(&sc->sx, "e6000sw");
- E6000SW_LOCK(sc);
- id = e6000sw_readreg(sc, REG_PORT(0), SWITCH_ID);
+ /* Do not set iosize until iobuf is ready. */
+ sc->iosize = -1;
+ sc->iobuf = NULL;
+
+#ifdef FDT
+ dsa_node = fdt_find_compatible(OF_finddevice("/"),
+ "marvell,dsa", 0);
+ switch_node = OF_child(dsa_node);
+ if (switch_node == 0)
+ return (ENXIO);
+ sc->node = switch_node;
+
+ if (OF_getencprop(sc->node, "reg", &sc->sw_addr,
+ sizeof(sc->sw_addr)) < 0)
+ return (ENXIO);
+#else
+ if (resource_int_value(device_get_name(sc->dev),
+ device_get_unit(sc->dev), "addr", &sc->sw_addr) != 0)
+ return (ENXIO);
+ resource_int_value(device_get_name(sc->dev),
+ device_get_unit(sc->dev), "is8190", &is_8190);
+#endif
+ if (sc->sw_addr < 0 || sc->sw_addr > 32)
+ return (ENXIO);
+
+ /* Set defaults for 88E6XXX family. */
+ sc->ports_mask = 0x7f;
+ sc->port_base = 0x10;
+
+ /* 88E6190 with 11 ports uses a different mapping. */
+ if (is_8190 != 0) {
+ sc->port_base = 0;
+ sc->ports_mask = 0x7ff;
+ }
- switch (id & 0xfff0) {
- case 0x3520:
+ /*
+ * Create a temporary lock just to satisfy assertions
+ * when obtaining the switch ID; destroy it immediately afterwards.
+ */
+ sx_init(&sc->sx, "e6000sw_tmp");
+ E6000SW_LOCK(sc);
+ sc->swid = e6000sw_readreg(sc, REG_PORT(sc, 0), SWITCH_ID) & 0xfff0;
+ E6000SW_UNLOCK(sc);
+ sx_destroy(&sc->sx);
+
+ sc->num_laggs = 16;
+ switch (sc->swid) {
+ case MV88E6141:
+ description = "Marvell 88E6141";
+ sc->phy_base = 0x10;
+ sc->num_ports = 6;
+ break;
+ case MV88E6341:
+ description = "Marvell 88E6341";
+ sc->phy_base = 0x10;
+ sc->num_ports = 6;
+ break;
+ case MV88E6352:
description = "Marvell 88E6352";
+ sc->num_ports = 7;
break;
- case 0x1720:
+ case MV88E6172:
description = "Marvell 88E6172";
+ sc->num_ports = 7;
break;
- case 0x1760:
+ case MV88E6176:
description = "Marvell 88E6176";
+ sc->num_ports = 7;
+ break;
+ case MV88E6190:
+ description = "Marvell 88E6190";
+ /* The 88E6190 supports 32 LAGGs; only 16 are used for now. */
+ sc->num_ports = 11;
break;
default:
- E6000SW_UNLOCK(sc);
- sx_destroy(&sc->sx);
- device_printf(dev, "Unrecognized device.\n");
+ device_printf(dev, "Unrecognized device, id 0x%x.\n", sc->swid);
return (ENXIO);
}
device_set_desc(dev, description);
- E6000SW_UNLOCK(sc);
-
return (BUS_PROBE_DEFAULT);
}
+#ifdef FDT
+static int
+e6000sw_parse_child_fdt(e6000sw_softc_t *sc, phandle_t child, int *pport)
+{
+ char *name, *portlabel;
+ int speed;
+ phandle_t fixed_link;
+ uint32_t port;
+
+ if (pport == NULL)
+ return (ENXIO);
+
+ if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0)
+ return (ENXIO);
+ if (port >= sc->num_ports)
+ return (ENXIO);
+ *pport = port;
+
+ if (OF_getprop_alloc(child, "label", 1, (void **)&portlabel) > 0) {
+ if (strncmp(portlabel, "cpu", 3) == 0) {
+ if (bootverbose)
+ device_printf(sc->dev, "CPU port at %d\n", port);
+ sc->cpuports_mask |= (1 << port);
+ sc->fixed_mask |= (1 << port);
+ }
+ free(portlabel, M_OFWPROP);
+ }
+
+ fixed_link = OF_child(child);
+ if (fixed_link != 0 &&
+ OF_getprop_alloc(fixed_link, "name", 1, (void **)&name) > 0) {
+ if (strncmp(name, "fixed-link", 10) == 0) {
+ /* Assume defaults: 1g - full-duplex. */
+ sc->fixed_mask |= (1 << port);
+ if (OF_getencprop(fixed_link, "speed", &speed,
+ sizeof(speed)) > 0) {
+ if (speed == 2500 &&
+ (MVSWITCH(sc, MV88E6141) ||
+ MVSWITCH(sc, MV88E6341))) {
+ sc->fixed25_mask |= (1 << port);
+ }
+ }
+ }
+ free(name, M_OFWPROP);
+ }
+ if (bootverbose) {
+ if ((sc->fixed_mask & (1 << port)) != 0)
+ device_printf(sc->dev, "fixed port at %d\n", port);
+ else
+ device_printf(sc->dev, "PHY at port %d\n", port);
+ }
+
+ return (0);
+}
+#else
+
+static int
+e6000sw_check_hint_val(device_t dev, int *val, char *fmt, ...)
+{
+ char *resname;
+ int err, len;
+ va_list ap;
+
+ len = min(strlen(fmt) * 2, 128);
+ if (len == 0)
+ return (-1);
+ resname = malloc(len, M_E6000SW, M_WAITOK);
+ memset(resname, 0, len);
+ va_start(ap, fmt);
+ vsnprintf(resname, len - 1, fmt, ap);
+ va_end(ap);
+ err = resource_int_value(device_get_name(dev), device_get_unit(dev),
+ resname, val);
+ free(resname, M_E6000SW);
+
+ return (err);
+}
+
+static int
+e6000sw_parse_hinted_port(e6000sw_softc_t *sc, int port)
+{
+ int err, val;
+
+ err = e6000sw_check_hint_val(sc->dev, &val, "port%ddisabled", port);
+ if (err == 0 && val != 0)
+ return (1);
+
+ err = e6000sw_check_hint_val(sc->dev, &val, "port%dcpu", port);
+ if (err == 0 && val != 0) {
+ sc->cpuports_mask |= (1 << port);
+ sc->fixed_mask |= (1 << port);
+ if (bootverbose)
+ device_printf(sc->dev, "CPU port at %d\n", port);
+ }
+ err = e6000sw_check_hint_val(sc->dev, &val, "port%dspeed", port);
+ if (err == 0 && val != 0) {
+ sc->fixed_mask |= (1 << port);
+ if (val == 2500)
+ sc->fixed25_mask |= (1 << port);
+ }
+
+ if (bootverbose) {
+ if ((sc->fixed_mask & (1 << port)) != 0)
+ device_printf(sc->dev, "fixed port at %d\n", port);
+ else
+ device_printf(sc->dev, "PHY at port %d\n", port);
+ }
+
+ return (0);
+}
+#endif
+
+static int
+e6000sw_init_interface(e6000sw_softc_t *sc, int port)
+{
+ char name[IFNAMSIZ];
+
+ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));
+
+ sc->ifp[port] = if_alloc(IFT_ETHER);
+ if (sc->ifp[port] == NULL)
+ return (ENOMEM);
+ sc->ifp[port]->if_softc = sc;
+ sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST |
+ IFF_DRV_RUNNING | IFF_SIMPLEX;
+ sc->ifname[port] = malloc(strlen(name) + 1, M_E6000SW, M_NOWAIT);
+ if (sc->ifname[port] == NULL) {
+ if_free(sc->ifp[port]);
+ return (ENOMEM);
+ }
+ memcpy(sc->ifname[port], name, strlen(name) + 1);
+ if_initname(sc->ifp[port], sc->ifname[port], port);
+
+ return (0);
+}
+
+static int
+e6000sw_attach_miibus(e6000sw_softc_t *sc, int port)
+{
+ int err;
+
+ err = mii_attach(sc->dev, &sc->miibus[port], sc->ifp[port],
+ e6000sw_ifmedia_upd, e6000sw_ifmedia_sts, BMSR_DEFCAPMASK,
+ port + sc->phy_base, MII_OFFSET_ANY, 0);
+ if (err != 0)
+ return (err);
+
+ return (0);
+}
+
+static void
+e6000sw_serdes_power(device_t dev, int port, bool sgmii)
+{
+ uint32_t reg;
+
+ /* SGMII */
+ reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV,
+ E6000SW_SERDES_SGMII_CTL);
+ if (sgmii)
+ reg &= ~E6000SW_SERDES_PDOWN;
+ else
+ reg |= E6000SW_SERDES_PDOWN;
+ e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV,
+ E6000SW_SERDES_SGMII_CTL, reg);
+
+ /* 10GBASE-R/10GBASE-X4/X2 */
+ reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV,
+ E6000SW_SERDES_PCS_CTL1);
+ if (sgmii)
+ reg |= E6000SW_SERDES_PDOWN;
+ else
+ reg &= ~E6000SW_SERDES_PDOWN;
+ e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV,
+ E6000SW_SERDES_PCS_CTL1, reg);
+}
+
static int
e6000sw_attach(device_t dev)
{
+ bool sgmii;
e6000sw_softc_t *sc;
- int phy, err, port;
- char name[IFNAMSIZ];
+#ifdef FDT
+ phandle_t child;
+#endif
+ int err, port;
+ uint32_t reg;
err = 0;
sc = device_get_softc(dev);
+
+ if (E6000SW_MULTICHIP(sc))
+ device_printf(dev, "multi-chip addressing mode (%#x)\n",
+ sc->sw_addr);
+ else
+ device_printf(dev, "single-chip addressing mode\n");
+
+ sx_init(&sc->sx, "e6000sw");
+ sc->iobuf = malloc(E6000SW_IOBUF_BLKSIZE, M_E6000SW, M_WAITOK);
+ sc->iosize = E6000SW_IOBUF_SIZE;
+
E6000SW_LOCK(sc);
- sc->cpuports_mask = E6000SW_CPUPORTS_MASK;
- for (port = 0; port < E6000SW_NUM_PORTS; port++)
- sc->vgroup[port] = E6000SW_PORT_NO_VGROUP;
e6000sw_setup(dev, sc);
- snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));
- for (phy = 0; phy < E6000SW_NUM_PHYS; phy++) {
- sc->ifp[phy] = if_alloc(IFT_ETHER);
- if (sc->ifp[phy] == NULL)
+#ifdef FDT
+ for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
+ err = e6000sw_parse_child_fdt(sc, child, &port);
+ if (err != 0) {
+ device_printf(sc->dev, "failed to parse DTS\n");
goto out_fail;
- sc->ifp[phy]->if_softc = sc;
- sc->ifp[phy]->if_flags |= IFF_UP | IFF_BROADCAST |
- IFF_DRV_RUNNING | IFF_SIMPLEX;
- sc->ifname[phy] = malloc(strlen(name) + 1, M_E6000SW, M_WAITOK);
- if (sc->ifname[phy] == NULL)
+ }
+#else
+ for (port = 0; port < sc->num_ports; port++) {
+ err = e6000sw_parse_hinted_port(sc, port);
+ if (err != 0)
+ continue;
+#endif
+
+ /* Port is in use. */
+ sc->used_mask |= (1 << port);
+
+ err = e6000sw_init_interface(sc, port);
+ if (err != 0) {
+ device_printf(sc->dev, "failed to init interface\n");
goto out_fail;
- bcopy(name, sc->ifname[phy], strlen(name) + 1);
- if_initname(sc->ifp[phy], sc->ifname[phy], phy);
- err = mii_attach(sc->dev, &sc->miibus[phy], sc->ifp[phy],
- e6000sw_ifmedia_upd, e6000sw_ifmedia_sts, BMSR_DEFCAPMASK,
- phy, MII_OFFSET_ANY, 0);
+ }
+
+ if (e6000sw_is_fixedport(sc, port)) {
+ /* Link must be down to change speed force value. */
+ reg = e6000sw_readreg(sc, REG_PORT(sc, port),
+ PSC_CONTROL);
+ reg &= ~PSC_CONTROL_LINK_UP;
+ reg |= PSC_CONTROL_FORCED_LINK;
+ e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL,
+ reg);
+
+ /*
+ * Force speed, full-duplex, EEE off and flow-control
+ * on.
+ */
+ reg &= ~(PSC_CONTROL_SPD2500 | PSC_CONTROL_ALT_SPD |
+ PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON |
+ PSC_CONTROL_FORCED_EEE);
+ if (e6000sw_is_fixed25port(sc, port))
+ reg |= PSC_CONTROL_SPD2500;
+ else
+ reg |= PSC_CONTROL_SPD1000;
+ if (MVSWITCH(sc, MV88E6190) &&
+ e6000sw_is_fixed25port(sc, port))
+ reg |= PSC_CONTROL_ALT_SPD;
+ reg |= PSC_CONTROL_FORCED_DPX | PSC_CONTROL_FULLDPX |
+ PSC_CONTROL_FORCED_LINK | PSC_CONTROL_LINK_UP |
+ PSC_CONTROL_FORCED_SPD;
+ if (!MVSWITCH(sc, MV88E6190))
+ reg |= PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON;
+ if (MVSWITCH(sc, MV88E6141) ||
+ MVSWITCH(sc, MV88E6341) ||
+ MVSWITCH(sc, MV88E6190))
+ reg |= PSC_CONTROL_FORCED_EEE;
+ e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL,
+ reg);
+ /* Power on the SERDES interfaces. */
+ if (MVSWITCH(sc, MV88E6190) &&
+ (port == 9 || port == 10)) {
+ if (e6000sw_is_fixed25port(sc, port))
+ sgmii = false;
+ else
+ sgmii = true;
+ e6000sw_serdes_power(sc->dev, port, sgmii);
+ }
+ }
+
+ /* Don't attach miibus at CPU/fixed ports */
+ if (!e6000sw_is_phyport(sc, port))
+ continue;
+
+ err = e6000sw_attach_miibus(sc, port);
if (err != 0) {
- device_printf(sc->dev,
- "attaching PHY %d failed\n",
- phy);
+ device_printf(sc->dev, "failed to attach miibus\n");
goto out_fail;
}
- sc->mii[phy] = device_get_softc(sc->miibus[phy]);
}
+
+ etherswitch_info.es_nports = sc->num_ports;
+ etherswitch_info.es_nlaggroups = sc->num_laggs;
+ etherswitch_info.es_ports_mask[0] = sc->used_mask;
+
+ /* Reset LAGG settings. */
+ e6000sw_resetlagg(sc);
+
+ /* Default to port vlan. */
+ e6000sw_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT);
+
+ reg = e6000sw_readreg(sc, REG_GLOBAL, SWITCH_GLOBAL_STATUS);
+ if (reg & SWITCH_GLOBAL_STATUS_IR)
+ device_printf(dev, "switch is ready.\n");
E6000SW_UNLOCK(sc);
bus_generic_probe(dev);
- bus_enumerate_hinted_children(dev);
bus_generic_attach(dev);
- kproc_create(e6000sw_tick, sc, &e6000sw_kproc, 0, 0,
- "e6000sw tick kproc");
+ kproc_create(e6000sw_tick, sc, &sc->kproc, 0, 0, "e6000sw tick kproc");
return (0);
out_fail:
+ E6000SW_UNLOCK(sc);
e6000sw_detach(dev);
- return (ENXIO);
+ return (err);
}
-static __inline void
-e6000sw_poll_done(e6000sw_softc_t *sc)
+/* XMDIO/Clause 45 access. */
+static int
+e6000sw_read_xmdio(device_t dev, int phy, int devaddr, int devreg)
{
+ e6000sw_softc_t *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
- while (e6000sw_readreg(sc, REG_GLOBAL2, PHY_CMD) &
- (1 << PHY_CMD_SMI_BUSY))
- continue;
+ reg = devaddr & SMI_CMD_REG_ADDR_MASK;
+ reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK;
+
+ /* Load C45 register address. */
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg);
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
+ reg | SMI_CMD_OP_C45_ADDR);
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
+
+ /* Start C45 read operation. */
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
+ reg | SMI_CMD_OP_C45_READ);
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
+
+ /* Read C45 data. */
+ reg = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG);
+
+ return (reg & PHY_DATA_MASK);
}
+static int
+e6000sw_write_xmdio(device_t dev, int phy, int devaddr, int devreg, int val)
+{
+ e6000sw_softc_t *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
+
+ reg = devaddr & SMI_CMD_REG_ADDR_MASK;
+ reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK;
+
+ /* Load C45 register address. */
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg);
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
+ reg | SMI_CMD_OP_C45_ADDR);
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
+
+ /* Load data and start the C45 write operation. */
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, val);
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
+ reg | SMI_CMD_OP_C45_WRITE);
+
+ return (0);
+}
/*
* PHY registers are paged. Put page index in reg 22 (accessible from every
@@ -306,55 +864,52 @@ e6000sw_readphy(device_t dev, int phy, int reg)
uint32_t val;
sc = device_get_softc(dev);
- val = 0;
-
- if (phy >= E6000SW_NUM_PHYS || reg >= E6000SW_NUM_PHY_REGS) {
+ if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
- e6000sw_poll_done(sc);
- val |= 1 << PHY_CMD_SMI_BUSY;
- val |= PHY_CMD_MODE_MDIO << PHY_CMD_MODE;
- val |= PHY_CMD_OPCODE_READ << PHY_CMD_OPCODE;
- val |= (reg << PHY_CMD_REG_ADDR) & PHY_CMD_REG_ADDR_MASK;
- val |= (phy << PHY_CMD_DEV_ADDR) & PHY_CMD_DEV_ADDR_MASK;
- e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, val);
- e6000sw_poll_done(sc);
- val = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG)
- & PHY_DATA_MASK;
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
+ SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) |
+ ((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
- return (val);
+ val = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG);
+
+ return (val & PHY_DATA_MASK);
}
static int
e6000sw_writephy(device_t dev, int phy, int reg, int data)
{
e6000sw_softc_t *sc;
- uint32_t val;
sc = device_get_softc(dev);
- val = 0;
-
- if (phy >= E6000SW_NUM_PHYS || reg >= E6000SW_NUM_PHY_REGS) {
+ if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
+ if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) {
+ device_printf(dev, "Timeout while waiting for switch\n");
+ return (ETIMEDOUT);
+ }
- e6000sw_poll_done(sc);
- val |= PHY_CMD_MODE_MDIO << PHY_CMD_MODE;
- val |= 1 << PHY_CMD_SMI_BUSY;
- val |= PHY_CMD_OPCODE_WRITE << PHY_CMD_OPCODE;
- val |= (reg << PHY_CMD_REG_ADDR) & PHY_CMD_REG_ADDR_MASK;
- val |= (phy << PHY_CMD_DEV_ADDR) & PHY_CMD_DEV_ADDR_MASK;
e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG,
- data & PHY_DATA_MASK);
- e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, val);
- e6000sw_poll_done(sc);
+ data & PHY_DATA_MASK);
+ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG,
+ SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) |
+ ((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
return (0);
}
@@ -367,8 +922,10 @@ e6000sw_detach(device_t dev)
sc = device_get_softc(dev);
bus_generic_detach(dev);
+ if (sc->iobuf != NULL)
+ free(sc->iobuf, M_E6000SW);
sx_destroy(&sc->sx);
- for (phy = 0; phy < E6000SW_NUM_PHYS; phy++) {
+ for (phy = 0; phy < sc->num_ports; phy++) {
if (sc->miibus[phy] != NULL)
device_delete_child(dev, sc->miibus[phy]);
if (sc->ifp[phy] != NULL)
@@ -383,10 +940,54 @@ e6000sw_detach(device_t dev)
static etherswitch_info_t*
e6000sw_getinfo(device_t dev)
{
+#if defined(E6000SW_DEBUG)
+ int i;
+ struct e6000sw_softc *sc;
+
+ sc = device_get_softc(dev);
+ E6000SW_LOCK(sc);
+ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
+ e6000sw_vtu_dump(sc);
+ for (i = 0; i < etherswitch_info.es_nvlangroups; i++)
+ if (sc->vlans[i] != 0)
+ e6000sw_atu_dump(sc, i + 1);
+ } else
+ e6000sw_atu_dump(sc, 0);
+ E6000SW_UNLOCK(sc);
+#endif
return (&etherswitch_info);
}
+static int
+e6000sw_getconf(device_t dev, etherswitch_conf_t *conf)
+{
+ struct e6000sw_softc *sc;
+
+ /* Return the VLAN mode. */
+ sc = device_get_softc(dev);
+ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
+ conf->vlan_mode = sc->vlan_mode;
+
+ return (0);
+}
+
+static int
+e6000sw_setconf(device_t dev, etherswitch_conf_t *conf)
+{
+ struct e6000sw_softc *sc;
+
+ /* Set the VLAN mode. */
+ sc = device_get_softc(dev);
+ if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) {
+ E6000SW_LOCK(sc);
+ e6000sw_set_vlan_mode(sc, conf->vlan_mode);
+ E6000SW_UNLOCK(sc);
+ }
+
+ return (0);
+}
+
static void
e6000sw_lock(device_t dev)
{
@@ -412,40 +1013,66 @@ e6000sw_unlock(device_t dev)
static int
e6000sw_getport(device_t dev, etherswitch_port_t *p)
{
- struct mii_data *mii;
int err;
struct ifmediareq *ifmr;
+ struct mii_data *mii;
+ uint16_t reg;
- err = 0;
e6000sw_softc_t *sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
+ if (p->es_port >= sc->num_ports || p->es_port < 0)
+ return (EINVAL);
+ if (!e6000sw_is_portenabled(sc, p->es_port))
+ return (0);
+
E6000SW_LOCK(sc);
+ e6000sw_get_pvid(sc, p->es_port, &p->es_pvid);
- if (p->es_port >= E6000SW_NUM_PORTS ||
- p->es_port < 0) {
- err = EINVAL;
- goto out;
+ /* Port state. */
+ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL);
+ switch (reg & PORT_CONTROL_ENABLE) {
+ case PORT_CONTROL_BLOCKING:
+ p->es_state = ETHERSWITCH_PSTATE_BLOCKING;
+ break;
+ case PORT_CONTROL_LEARNING:
+ p->es_state = ETHERSWITCH_PSTATE_LEARNING;
+ break;
+ case PORT_CONTROL_FORWARDING:
+ p->es_state = ETHERSWITCH_PSTATE_FORWARDING;
+ break;
+ default:
+ p->es_state = ETHERSWITCH_PSTATE_DISABLED;
}
- e6000sw_get_pvid(sc, p->es_port, &p->es_pvid);
+ /* Port flags. */
+ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2);
+ if (reg & PORT_CONTROL2_DISC_TAGGED)
+ p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED;
+ if (reg & PORT_CONTROL2_DISC_UNTAGGED)
+ p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
- if (e6000sw_cpuport(sc, p->es_port)) {
- p->es_flags |= ETHERSWITCH_PORT_CPU;
+ err = 0;
+ if (e6000sw_is_fixedport(sc, p->es_port)) {
+ if (e6000sw_is_cpuport(sc, p->es_port))
+ p->es_flags |= ETHERSWITCH_PORT_CPU;
ifmr = &p->es_ifmr;
ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
ifmr->ifm_count = 0;
- ifmr->ifm_current = ifmr->ifm_active =
- IFM_ETHER | IFM_1000_T | IFM_FDX;
+ if (e6000sw_is_fixed25port(sc, p->es_port))
+ ifmr->ifm_active = IFM_2500_KX; /* IFM_2500_T */
+ else
+ ifmr->ifm_active = IFM_1000_T;
+ ifmr->ifm_active |= IFM_ETHER | IFM_FDX;
+ ifmr->ifm_current = ifmr->ifm_active;
ifmr->ifm_mask = 0;
} else {
mii = e6000sw_miiforphy(sc, p->es_port);
err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
&mii->mii_media, SIOCGIFMEDIA);
}
-
-out:
E6000SW_UNLOCK(sc);
+
return (err);
}
@@ -455,32 +1082,194 @@ e6000sw_setport(device_t dev, etherswitch_port_t *p)
e6000sw_softc_t *sc;
int err;
struct mii_data *mii;
+ uint16_t reg;
- err = 0;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
+ if (p->es_port >= sc->num_ports || p->es_port < 0)
+ return (EINVAL);
+ if (!e6000sw_is_portenabled(sc, p->es_port))
+ return (0);
+
+ err = 0;
E6000SW_LOCK(sc);
- if (p->es_port >= E6000SW_NUM_PORTS ||
- p->es_port < 0) {
- err = EINVAL;
- goto out;
+ /* Port state. */
+ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL);
+ reg &= ~PORT_CONTROL_ENABLE;
+ switch (p->es_state) {
+ case ETHERSWITCH_PSTATE_BLOCKING:
+ reg |= PORT_CONTROL_BLOCKING;
+ break;
+ case ETHERSWITCH_PSTATE_LEARNING:
+ reg |= PORT_CONTROL_LEARNING;
+ break;
+ case ETHERSWITCH_PSTATE_FORWARDING:
+ reg |= PORT_CONTROL_FORWARDING;
+ break;
+ default:
+ reg |= PORT_CONTROL_DISABLED;
}
-
+ e6000sw_writereg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL, reg);
+
+ /* Port flags. */
+ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2);
+ if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED)
+ reg |= PORT_CONTROL2_DISC_TAGGED;
+ else
+ reg &= ~PORT_CONTROL2_DISC_TAGGED;
+ if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
+ reg |= PORT_CONTROL2_DISC_UNTAGGED;
+ else
+ reg &= ~PORT_CONTROL2_DISC_UNTAGGED;
+ e6000sw_writereg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2, reg);
if (p->es_pvid != 0)
e6000sw_set_pvid(sc, p->es_port, p->es_pvid);
- if (!e6000sw_cpuport(sc, p->es_port)) {
+ if (e6000sw_is_phyport(sc, p->es_port)) {
mii = e6000sw_miiforphy(sc, p->es_port);
err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media,
SIOCSIFMEDIA);
}
-
-out:
E6000SW_UNLOCK(sc);
+
return (err);
}
+static __inline void
+e6000sw_port_vlan_assign(e6000sw_softc_t *sc, int port, uint32_t fid,
+ uint32_t members)
+{
+ uint32_t reg;
+
+ reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP);
+ reg &= ~sc->ports_mask;
+ reg &= ~PORT_VLAN_MAP_FID_MASK;
+ reg |= members & sc->ports_mask & ~(1 << port);
+ reg |= (fid << PORT_VLAN_MAP_FID) & PORT_VLAN_MAP_FID_MASK;
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VLAN_MAP, reg);
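+ /* FID bits 3:0 live in PORT_VLAN_MAP; bits 7:4 go into PORT_CONTROL1. */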
+ reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1);
+ reg &= ~PORT_CONTROL1_FID_MASK;
+ reg |= (fid >> 4) & PORT_CONTROL1_FID_MASK;
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL1, reg);
+}
+
+static int
+e6000sw_init_vlan(struct e6000sw_softc *sc)
+{
+ int i, port, ret;
+ uint32_t members;
+
+#if defined(E6000SW_DEBUG)
+ e6000sw_vtu_dump(sc);
+#endif
+
+ /* Disable all ports */
+ for (port = 0; port < sc->num_ports; port++) {
+ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL);
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL,
+ (ret & ~PORT_CONTROL_ENABLE));
+ }
+
+ /* Flush VTU. */
+ e6000sw_vtu_flush(sc);
+
+ for (port = 0; port < sc->num_ports; port++) {
+ /* Reset the egress and frame mode. */
+ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL);
+ ret &= ~(PORT_CONTROL_EGRESS | PORT_CONTROL_FRAME);
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, ret);
+
+ /* Set the 802.1q mode. */
+ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL2);
+ ret &= ~PORT_CONTROL2_DOT1Q;
+ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
+ ret |= PORT_CONTROL2_DOT1Q;
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL2, ret);
+ }
+
+ for (port = 0; port < sc->num_ports; port++) {
+ if (!e6000sw_is_portenabled(sc, port))
+ continue;
+
+ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID);
+
+ /* Set port priority */
+ ret &= ~PORT_VID_PRIORITY_MASK;
+
+ /* Set VID map */
+ ret &= ~PORT_VID_DEF_VID_MASK;
+ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
+ ret |= 1;
+ else
+ ret |= (port + 1);
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, ret);
+ }
+
+ /* Assign the member ports to each origin port. */
+ for (port = 0; port < sc->num_ports; port++) {
+ members = 0;
+ if (e6000sw_is_portenabled(sc, port)) {
+ for (i = 0; i < sc->num_ports; i++) {
+ if (i == port || !e6000sw_is_portenabled(sc, i))
+ continue;
+ members |= (1 << i);
+ }
+ }
+ /* Default to FID 0. */
+ e6000sw_port_vlan_assign(sc, port, 0, members);
+ }
+
+ /* Reset internal VLAN table. */
+ for (i = 0; i < nitems(sc->vlans); i++)
+ sc->vlans[i] = 0;
+
+ /* Create default VLAN (1). */
+ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) {
+ sc->vlans[0] = 1;
+ e6000sw_vtu_update(sc, 0, sc->vlans[0], 1, 0, sc->used_mask);
+ }
+
+ if (e6000sw_default_disabled == false) {
+ /* Enable all ports */
+ for (port = 0; port < sc->num_ports; port++) {
+ if (!e6000sw_is_portenabled(sc, port))
+ continue;
+ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL);
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL,
+ (ret | PORT_CONTROL_ENABLE));
+ }
+ }
+
+#if defined(E6000SW_DEBUG)
+ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
+ e6000sw_vtu_dump(sc);
+#endif
+
+ return (0);
+}
+
+static int
+e6000sw_set_vlan_mode(struct e6000sw_softc *sc, uint32_t mode)
+{
+
+ E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
+ switch (mode) {
+ case ETHERSWITCH_VLAN_PORT:
+ sc->vlan_mode = ETHERSWITCH_VLAN_PORT;
+ etherswitch_info.es_nvlangroups = sc->num_ports;
+ return (e6000sw_init_vlan(sc));
+ case ETHERSWITCH_VLAN_DOT1Q:
+ sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
+ etherswitch_info.es_nvlangroups = E6000SW_NUM_VLANS;
+ return (e6000sw_init_vlan(sc));
+ default:
+ return (EINVAL);
+ }
+}
+
/*
* Registers in this switch are divided into sections, specified in the
* documentation. To access any of them, both the section index and the
* register index are required.
*/
@@ -490,9 +1279,11 @@ out:
static int
e6000sw_readreg_wrapper(device_t dev, int addr_reg)
{
-
+ e6000sw_softc_t *sc;
+
+ sc = device_get_softc(dev);
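+ /*
+ * addr_reg encodes the access as (section * 32 + register); e.g.
+ * register 5 of REG_GLOBAL is addressed as 0x1b * 32 + 5.
+ */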
if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) ||
- (addr_reg < (REG_PORT(0) * 32))) {
+ (addr_reg < (REG_PORT(sc, 0) * 32))) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
@@ -504,9 +1295,11 @@ e6000sw_readreg_wrapper(device_t dev, int addr_reg)
static int
e6000sw_writereg_wrapper(device_t dev, int addr_reg, int val)
{
-
+ e6000sw_softc_t *sc;
+
+ sc = device_get_softc(dev);
if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) ||
- (addr_reg < (REG_PORT(0) * 32))) {
+ (addr_reg < (REG_PORT(sc, 0) * 32))) {
device_printf(dev, "Wrong register address.\n");
return (EINVAL);
}
@@ -588,98 +1381,365 @@ e6000sw_getvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg)
return (ret);
}
-static __inline void
-e6000sw_flush_port(e6000sw_softc_t *sc, int port)
+static int
+e6000sw_setlaggroup_wrapper(device_t dev, etherswitch_laggroup_t *lag)
{
+ e6000sw_softc_t *sc;
+ int ret;
+
+ sc = device_get_softc(dev);
+ E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
+
+ E6000SW_LOCK(sc);
+ ret = e6000sw_setlaggroup(dev, lag);
+ E6000SW_UNLOCK(sc);
+
+ return (ret);
+}
+
+static int
+e6000sw_getlaggroup_wrapper(device_t dev, etherswitch_laggroup_t *lag)
+{
+ e6000sw_softc_t *sc;
+ int ret;
+
+ sc = device_get_softc(dev);
+ E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
+
+ E6000SW_LOCK(sc);
+ ret = e6000sw_getlaggroup(dev, lag);
+ E6000SW_UNLOCK(sc);
+
+ return (ret);
+}
+
+static int
+e6000sw_resetlagg(e6000sw_softc_t *sc)
+{
+ int i;
uint32_t reg;
- reg = e6000sw_readreg(sc, REG_PORT(port),
- PORT_VLAN_MAP);
- reg &= ~PORT_VLAN_MAP_TABLE_MASK;
- reg &= ~PORT_VLAN_MAP_FID_MASK;
- e6000sw_writereg(sc, REG_PORT(port),
- PORT_VLAN_MAP, reg);
- if (sc->vgroup[port] != E6000SW_PORT_NO_VGROUP) {
- /*
- * If port belonged somewhere, owner-group
- * should have its entry removed.
- */
- sc->members[sc->vgroup[port]] &= ~(1 << port);
- sc->vgroup[port] = E6000SW_PORT_NO_VGROUP;
+ for (i = 0; i < sc->num_ports; i++) {
+ if (!e6000sw_is_portenabled(sc, i))
+ continue;
+ reg = e6000sw_readreg(sc, REG_PORT(sc, i), PORT_CONTROL1);
+ if (reg & PORT_CONTROL1_LAG_PORT) {
+ /* Disable LAG on port. */
+ reg &= ~PORT_CONTROL1_LAG_PORT;
+ reg &= ~(PORT_CONTROL1_LAG_ID_MASK <<
+ PORT_CONTROL1_LAG_ID_SHIFT);
+ e6000sw_writereg(sc, REG_PORT(sc, i),
+ PORT_CONTROL1, reg);
+ }
}
+ for (i = 0; i < sc->num_laggs; i++)
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MAPPING,
+ i << LAGID_SHIFT | LAG_UPDATE);
+ for (i = 0; i < E6000SW_NUM_LAGMASK; i++)
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MASK,
+ i << LAG_MASKNUM_SHIFT | LAG_UPDATE | sc->ports_mask);
+
+ return (0);
}
-static __inline void
-e6000sw_port_assign_vgroup(e6000sw_softc_t *sc, int port, int fid, int vgroup,
- int members)
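+/*
+ * Program the eight LAG mask entries so that, for every LAG, each mask
+ * leaves exactly one member port enabled, distributing the members in
+ * round-robin order; this is what spreads traffic across a LAG.
+ */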
+static int
+e6000sw_setlaggmask(e6000sw_softc_t *sc)
{
+ int count, cycle, i, m, mask, port;
+ struct lagg_map {
+ int cycle;
+ int lag;
+ int pcount;
+ uint32_t ports;
+ } *map;
uint32_t reg;
- reg = e6000sw_readreg(sc, REG_PORT(port),
- PORT_VLAN_MAP);
- reg &= ~PORT_VLAN_MAP_TABLE_MASK;
- reg &= ~PORT_VLAN_MAP_FID_MASK;
- reg |= members & ~(1 << port);
- reg |= (fid << PORT_VLAN_MAP_FID) & PORT_VLAN_MAP_FID_MASK;
- e6000sw_writereg(sc, REG_PORT(port), PORT_VLAN_MAP,
- reg);
- sc->vgroup[port] = vgroup;
+ map = malloc(sizeof(*map) * sc->num_laggs, M_E6000SW, M_WAITOK);
+ for (i = 0; i < sc->num_laggs; i++) {
+ map[i].lag = 0;
+ map[i].cycle = 0;
+ map[i].ports = 0;
+ map[i].pcount = 0;
+ }
+ count = 0;
+ for (i = 0; i < sc->num_laggs; i++) {
+ /* Read the LAGG ports. */
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MAPPING,
+ i << LAGID_SHIFT);
+ reg = e6000sw_readreg(sc, REG_GLOBAL2, LAG_MAPPING);
+ if ((reg & sc->ports_mask) == 0)
+ continue;
+ map[count].lag = i;
+ map[count].ports = reg & sc->ports_mask;
+ for (port = 0; port < sc->num_ports; port++) {
+ if ((map[count].ports & (1 << port)) == 0)
+ continue;
+ map[count].pcount++;
+ }
+ ++count;
+ }
+
+ for (mask = 0; mask < E6000SW_NUM_LAGMASK; mask++) {
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MASK,
+ mask << LAG_MASKNUM_SHIFT);
+ reg = e6000sw_readreg(sc, REG_GLOBAL2, LAG_MASK);
+ reg |= sc->ports_mask;
+ for (port = 0; port < sc->num_ports; port++) {
+ for (m = 0; m < count; m++) {
+ cycle = mask % map[m].pcount;
+ if ((map[m].ports & (1 << port)) == 0)
+ continue;
+ if (map[m].cycle != cycle)
+ reg &= ~(1 << port);
+ map[m].cycle = (map[m].cycle + 1) % map[m].pcount;
+ }
+ }
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MASK, reg | LAG_UPDATE);
+ }
+
+ free(map, M_E6000SW);
+
+ return (0);
}
static int
-e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
+e6000sw_setlaggroup(device_t dev, etherswitch_laggroup_t *lag)
{
e6000sw_softc_t *sc;
- int port, fid;
+ int i, laggid;
+ uint32_t laggports, reg;
sc = device_get_softc(dev);
+
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
- if (vg->es_vlangroup >= E6000SW_NUM_VGROUPS)
+ laggports = 0;
+ for (i = 0; i < sc->num_ports; i++) {
+ if (!e6000sw_is_portenabled(sc, i))
+ continue;
+ reg = e6000sw_readreg(sc, REG_PORT(sc, i), PORT_CONTROL1);
+ laggid = reg >> PORT_CONTROL1_LAG_ID_SHIFT;
+ laggid &= PORT_CONTROL1_LAG_ID_MASK;
+ if ((lag->es_untagged_ports & (1 << i)) == 0) {
+ if ((reg & PORT_CONTROL1_LAG_PORT) != 0 &&
+ laggid == lag->es_laggroup) {
+ /* Disable LAG on port. */
+ reg &= ~PORT_CONTROL1_LAG_PORT;
+ reg &= ~(PORT_CONTROL1_LAG_ID_MASK <<
+ PORT_CONTROL1_LAG_ID_SHIFT);
+ e6000sw_writereg(sc, REG_PORT(sc, i),
+ PORT_CONTROL1, reg);
+ }
+ continue;
+ }
+ reg |= PORT_CONTROL1_LAG_PORT;
+ laggid = lag->es_laggroup & PORT_CONTROL1_LAG_ID_MASK;
+ reg |= laggid << PORT_CONTROL1_LAG_ID_SHIFT;
+ e6000sw_writereg(sc, REG_PORT(sc, i), PORT_CONTROL1, reg);
+
+ laggports |= (1 << i);
+ }
+
+ /* Update LAG mapping. */
+ reg = (lag->es_laggroup & PORT_CONTROL1_LAG_ID_MASK) << LAGID_SHIFT;
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MAPPING, reg);
+ reg = e6000sw_readreg(sc, REG_GLOBAL2, LAG_MAPPING);
+ reg &= ~sc->ports_mask;
+ reg |= laggports | LAG_UPDATE;
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MAPPING, reg);
+
+ lag->es_lagg_valid = 1;
+
+ return (e6000sw_setlaggmask(sc));
+}
+
+static int
+e6000sw_set_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
+{
+ uint32_t port;
+
+ port = vg->es_vlangroup;
+ if (port >= sc->num_ports)
return (EINVAL);
+
if (vg->es_member_ports != vg->es_untagged_ports) {
- device_printf(dev, "Tagged ports not supported.\n");
+ device_printf(sc->dev, "Tagged ports not supported.\n");
return (EINVAL);
}
- vg->es_untagged_ports &= PORT_VLAN_MAP_TABLE_MASK;
- fid = vg->es_vlangroup + 1;
- for (port = 0; port < E6000SW_NUM_PORTS; port++) {
- if ((sc->members[vg->es_vlangroup] & (1 << port)) ||
- (vg->es_untagged_ports & (1 << port)))
- e6000sw_flush_port(sc, port);
- if (vg->es_untagged_ports & (1 << port))
- e6000sw_port_assign_vgroup(sc, port, fid,
- vg->es_vlangroup, vg->es_untagged_ports);
+ e6000sw_port_vlan_assign(sc, port, 0, vg->es_untagged_ports);
+ vg->es_vid = port | ETHERSWITCH_VID_VALID;
+
+ return (0);
+}
+
+static int
+e6000sw_set_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
+{
+ int i, vlan;
+
+ vlan = vg->es_vid & ETHERSWITCH_VID_MASK;
+
+ /* Setting the VID to 0 removes the entry from the table. */
+ if (vlan == 0) {
+ e6000sw_vtu_update(sc, VTU_PURGE,
+ sc->vlans[vg->es_vlangroup], 0, 0, 0);
+ sc->vlans[vg->es_vlangroup] = 0;
+ return (0);
}
- sc->vid[vg->es_vlangroup] = vg->es_vid;
- sc->members[vg->es_vlangroup] = vg->es_untagged_ports;
+
+ /* Is this VLAN already in the table? */
+ for (i = 0; i < etherswitch_info.es_nvlangroups; i++)
+ if (i != vg->es_vlangroup && vlan == sc->vlans[i])
+ return (EINVAL);
+
+ sc->vlans[vg->es_vlangroup] = vlan;
+ e6000sw_vtu_update(sc, 0, vlan, vg->es_vlangroup + 1,
+ vg->es_member_ports & sc->used_mask,
+ vg->es_untagged_ports & sc->used_mask);
return (0);
}
static int
-e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
+e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
e6000sw_softc_t *sc;
sc = device_get_softc(dev);
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
- if (vg->es_vlangroup >= E6000SW_NUM_VGROUPS)
+ if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT)
+ return (e6000sw_set_port_vlan(sc, vg));
+ else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
+ return (e6000sw_set_dot1q_vlan(sc, vg));
+
+ return (EINVAL);
+}
+
+static int
+e6000sw_getlaggroup(device_t dev, etherswitch_laggroup_t *lag)
+{
+ e6000sw_softc_t *sc;
+ int laggid;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
+
+ lag->es_lagg_valid = 0;
+ lag->es_member_ports = lag->es_untagged_ports = 0;
+ /* Read the LAGG ports. */
+ laggid = lag->es_laggroup & PORT_CONTROL1_LAG_ID_MASK;
+ e6000sw_writereg(sc, REG_GLOBAL2, LAG_MAPPING, laggid << LAGID_SHIFT);
+ reg = e6000sw_readreg(sc, REG_GLOBAL2, LAG_MAPPING);
+ lag->es_member_ports = reg & sc->ports_mask;
+ lag->es_untagged_ports = reg & sc->ports_mask;
+
+ /* Is this LAG group in use? */
+ if (lag->es_untagged_ports != 0)
+ lag->es_lagg_valid = 1;
+
+ return (0);
+}
+
+static int
+e6000sw_get_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
+{
+ uint32_t port, reg;
+
+ port = vg->es_vlangroup;
+ if (port >= sc->num_ports)
+ return (EINVAL);
+
+ if (!e6000sw_is_portenabled(sc, port)) {
+ vg->es_vid = port;
+ return (0);
+ }
+
+ reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP);
+ vg->es_untagged_ports = vg->es_member_ports = reg & sc->ports_mask;
+ vg->es_vid = port | ETHERSWITCH_VID_VALID;
+ vg->es_fid = (reg & PORT_VLAN_MAP_FID_MASK) >> PORT_VLAN_MAP_FID;
+ reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1);
+ vg->es_fid |= (reg & PORT_CONTROL1_FID_MASK) << 4;
+
+ return (0);
+}
+
+static int
+e6000sw_get_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg)
+{
+ int i, port;
+ uint32_t reg;
+
+ vg->es_fid = 0;
+ vg->es_vid = sc->vlans[vg->es_vlangroup];
+ vg->es_untagged_ports = vg->es_member_ports = 0;
+ if (vg->es_vid == 0)
+ return (0);
+
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "VTU unit is busy, cannot access\n");
+ return (EBUSY);
+ }
+
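+ /*
+ * The VTU has no direct lookup; load VID - 1 and issue GET_NEXT,
+ * then check that the returned entry matches the requested VID.
+ */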
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, vg->es_vid - 1);
+
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_OPERATION);
+ reg &= ~VTU_OP_MASK;
+ reg |= VTU_GET_NEXT | VTU_BUSY;
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, reg);
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "Timeout while reading\n");
+ return (EBUSY);
+ }
+
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_VID);
+ if (reg == VTU_VID_MASK || (reg & VTU_VID_VALID) == 0)
+ return (EINVAL);
+ if ((reg & VTU_VID_MASK) != vg->es_vid)
return (EINVAL);
- vg->es_untagged_ports = vg->es_member_ports =
- sc->members[vg->es_vlangroup];
- vg->es_vid = ETHERSWITCH_VID_VALID;
+
+ vg->es_vid |= ETHERSWITCH_VID_VALID;
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA);
+ for (i = 0; i < sc->num_ports; i++) {
+ if (i == VTU_PPREG(sc))
+ reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA2);
+ port = (reg >> VTU_PORT(sc, i)) & VTU_PORT_MASK;
+ if (port == VTU_PORT_UNTAGGED) {
+ vg->es_untagged_ports |= (1 << i);
+ vg->es_member_ports |= (1 << i);
+ } else if (port == VTU_PORT_TAGGED)
+ vg->es_member_ports |= (1 << i);
+ }
return (0);
}
+static int
+e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
+{
+ e6000sw_softc_t *sc;
+
+ sc = device_get_softc(dev);
+ E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
+
+ if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT)
+ return (e6000sw_get_port_vlan(sc, vg));
+ else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
+ return (e6000sw_get_dot1q_vlan(sc, vg));
+
+ return (EINVAL);
+}
+
static __inline struct mii_data*
e6000sw_miiforphy(e6000sw_softc_t *sc, unsigned int phy)
{
- if (phy >= E6000SW_NUM_PHYS)
+ if (!e6000sw_is_phyport(sc, phy))
return (NULL);
return (device_get_softc(sc->miibus[phy]));
@@ -717,13 +1777,42 @@ e6000sw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
ifmr->ifm_status = mii->mii_media_status;
}
+static int
+e6000sw_smi_waitready(e6000sw_softc_t *sc, int phy)
+{
+ int i;
+
+ for (i = 0; i < E6000SW_SMI_TIMEOUT; i++) {
+ if ((MDIO_READ(sc->dev, phy, SMI_CMD) & SMI_CMD_BUSY) == 0)
+ return (0);
+ DELAY(1);
+ }
+
+ return (1);
+}
+
static __inline uint32_t
e6000sw_readreg(e6000sw_softc_t *sc, int addr, int reg)
{
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
- return (MDIO_READREG(device_get_parent(sc->dev), addr, reg));
+ if (!E6000SW_MULTICHIP(sc))
+ return (MDIO_READ(sc->dev, addr, reg) & 0xffff);
+
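+ /*
+ * Multi-chip mode: the switch is reached through its own SMI
+ * command/data register pair at sw_addr, so every register access
+ * becomes a wait/command/wait/data sequence.
+ */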
+ if (e6000sw_smi_waitready(sc, sc->sw_addr)) {
+ printf("e6000sw: readreg timeout\n");
+ return (0xffff);
+ }
+ MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD,
+ SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) |
+ ((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
+ if (e6000sw_smi_waitready(sc, sc->sw_addr)) {
+ printf("e6000sw: readreg timeout\n");
+ return (0xffff);
+ }
+
+ return (MDIO_READ(sc->dev, sc->sw_addr, SMI_DATA) & 0xffff);
}
static __inline void
@@ -732,55 +1821,147 @@ e6000sw_writereg(e6000sw_softc_t *sc, int addr, int reg, int val)
E6000SW_LOCK_ASSERT(sc, SA_XLOCKED);
- MDIO_WRITEREG(device_get_parent(sc->dev), addr, reg, val);
+ if (!E6000SW_MULTICHIP(sc)) {
+ MDIO_WRITE(sc->dev, addr, reg, val);
+ return;
+ }
+
+ if (e6000sw_smi_waitready(sc, sc->sw_addr)) {
+ printf("e6000sw: readreg timeout\n");
+ return;
+ }
+ MDIO_WRITE(sc->dev, sc->sw_addr, SMI_DATA, val);
+ MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD,
+ SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) |
+ ((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK));
+}
+
+static __inline bool
+e6000sw_is_cpuport(e6000sw_softc_t *sc, int port)
+{
+
+ return ((sc->cpuports_mask & (1 << port)) ? true : false);
}
-static __inline int
-e6000sw_cpuport(e6000sw_softc_t *sc, int port)
+static __inline bool
+e6000sw_is_fixedport(e6000sw_softc_t *sc, int port)
{
- return (sc->cpuports_mask & (1 << port));
+ return ((sc->fixed_mask & (1 << port)) ? true : false);
}
-static __inline int
-e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid)
+static __inline bool
+e6000sw_is_fixed25port(e6000sw_softc_t *sc, int port)
{
- e6000sw_writereg(sc, REG_PORT(port), PORT_VID, pvid &
- PORT_VID_DEF_VID_MASK);
+ return ((sc->fixed25_mask & (1 << port)) ? true : false);
+}
- return (0);
+static __inline bool
+e6000sw_is_phyport(e6000sw_softc_t *sc, int port)
+{
+ uint32_t phy_mask;
+ phy_mask = ~(sc->fixed_mask | sc->cpuports_mask);
+
+ return ((phy_mask & (1 << port)) ? true : false);
+}
+
+static __inline bool
+e6000sw_is_portenabled(e6000sw_softc_t *sc, int port)
+{
+
+ return ((sc->used_mask & (1 << port)) ? true : false);
+}
+
+static __inline void
+e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid)
+{
+ uint32_t data;
+
+ data = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID);
+ data &= ~PORT_VID_DEF_VID_MASK;
+ data |= (pvid & PORT_VID_DEF_VID_MASK);
+ e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, data);
}
-static __inline int
+static __inline void
e6000sw_get_pvid(e6000sw_softc_t *sc, int port, int *pvid)
{
if (pvid == NULL)
- return (ENXIO);
+ return;
- *pvid = e6000sw_readreg(sc, REG_PORT(port), PORT_VID) &
+ *pvid = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID) &
PORT_VID_DEF_VID_MASK;
+}
- return (0);
+/*
+ * Convert port status to ifmedia.
+ */
+static void
+e6000sw_update_ifmedia(uint16_t portstatus, u_int *media_status,
+ u_int *media_active)
+{
+ *media_active = IFM_ETHER;
+ *media_status = IFM_AVALID;
+
+ if ((portstatus & PORT_STATUS_LINK_MASK) != 0)
+ *media_status |= IFM_ACTIVE;
+ else {
+ *media_active |= IFM_NONE;
+ return;
+ }
+
+ switch (portstatus & PORT_STATUS_SPEED_MASK) {
+ case PORT_STATUS_SPEED_10:
+ *media_active |= IFM_10_T;
+ break;
+ case PORT_STATUS_SPEED_100:
+ *media_active |= IFM_100_TX;
+ break;
+ case PORT_STATUS_SPEED_1000:
+ *media_active |= IFM_1000_T;
+ break;
+ }
+
+ if ((portstatus & PORT_STATUS_DUPLEX_MASK) != 0)
+ *media_active |= IFM_FDX;
+ else
+ *media_active |= IFM_HDX;
}
static void
-e6000sw_tick (void *arg)
+e6000sw_tick(void *arg)
{
e6000sw_softc_t *sc;
+ struct mii_data *mii;
struct mii_softc *miisc;
- int i;
+ uint16_t portstatus;
+ int port;
sc = arg;
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
+
for (;;) {
E6000SW_LOCK(sc);
- for (i = 0; i < E6000SW_NUM_PHYS; i++) {
- mii_tick(sc->mii[i]);
- LIST_FOREACH(miisc, &sc->mii[i]->mii_phys, mii_list) {
- if (IFM_INST(sc->mii[i]->mii_media.ifm_cur->ifm_media)
+ for (port = 0; port < sc->num_ports; port++) {
+ /* Tick only on PHY ports */
+ if (!e6000sw_is_portenabled(sc, port) ||
+ !e6000sw_is_phyport(sc, port))
+ continue;
+
+ mii = e6000sw_miiforphy(sc, port);
+ if (mii == NULL)
+ continue;
+
+ portstatus = e6000sw_readreg(sc, REG_PORT(sc, port),
+ PORT_STATUS);
+
+ e6000sw_update_ifmedia(portstatus,
+ &mii->mii_media_status, &mii->mii_media_active);
+
+ LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
+ if (IFM_INST(mii->mii_media.ifm_cur->ifm_media)
!= miisc->mii_inst)
continue;
mii_phy_update(miisc, MII_POLLSTAT);
@@ -794,12 +1975,13 @@ e6000sw_tick (void *arg)
static void
e6000sw_setup(device_t dev, e6000sw_softc_t *sc)
{
- uint16_t atu_ctrl, atu_age;
+ uint16_t atu_ctrl;
- /* Set aging time */
- e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL,
- (E6000SW_DEFAULT_AGETIME << ATU_CONTROL_AGETIME) |
- (1 << ATU_CONTROL_LEARN2ALL));
+ /* Set aging time. */
+ atu_ctrl = e6000sw_readreg(sc, REG_GLOBAL, ATU_CONTROL);
+ atu_ctrl &= ~ATU_CONTROL_AGETIME_MASK;
+ atu_ctrl |= E6000SW_DEFAULT_AGETIME << ATU_CONTROL_AGETIME;
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL, atu_ctrl);
/* Send frames with reserved management MAC addresses to the CPU port. */
e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_2x, MGMT_EN_ALL);
@@ -815,70 +1997,9 @@ e6000sw_setup(device_t dev, e6000sw_softc_t *sc)
SWITCH_MGMT_FC_PRI_MASK |
(1 << SWITCH_MGMT_FORCEFLOW));
- /* Set VLAN configuration */
- e6000sw_port_vlan_conf(sc);
-
e6000sw_atu_flush(dev, sc, NO_OPERATION);
e6000sw_atu_mac_table(dev, sc, NULL, NO_OPERATION);
e6000sw_set_atustat(dev, sc, 0, COUNT_ALL);
-
- /* Set ATU AgeTime to 15 seconds */
- atu_age = 1;
-
- atu_ctrl = e6000sw_readreg(sc, REG_GLOBAL, ATU_CONTROL);
-
- /* Set new AgeTime field */
- atu_ctrl &= ~ATU_CONTROL_AGETIME_MASK;
- e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL, atu_ctrl |
- (atu_age << ATU_CONTROL_AGETIME));
-}
-
-static void
-e6000sw_port_vlan_conf(e6000sw_softc_t *sc)
-{
- int port, ret;
- etherswitch_vlangroup_t vg;
- device_t dev;
-
- dev = sc->dev;
- /* Disable all ports */
- for (port = 0; port < E6000SW_NUM_PORTS; port++) {
- ret = e6000sw_readreg(sc, REG_PORT(port), PORT_CONTROL);
- e6000sw_writereg(sc, REG_PORT(port), PORT_CONTROL,
- (ret & ~PORT_CONTROL_ENABLE));
- }
-
- /* Set port priority */
- for (port = 0; port < E6000SW_NUM_PORTS; port++) {
- ret = e6000sw_readreg(sc, REG_PORT(port), PORT_VID);
- ret &= ~PORT_VID_PRIORITY_MASK;
- e6000sw_writereg(sc, REG_PORT(port), PORT_VID, ret);
- }
-
- vg.es_vlangroup = 0;
- vg.es_vid = 0;
- vg.es_member_ports = vg.es_untagged_ports = E6000SW_DEF_VLANGROUP0;
- e6000sw_setvgroup(dev, &vg);
- vg.es_vlangroup = 1;
- vg.es_vid = 1;
- vg.es_member_ports = vg.es_untagged_ports = E6000SW_DEF_VLANGROUP1;
- e6000sw_setvgroup(dev, &vg);
-
- device_printf(dev, "Default vlangroups set.\n");
- /* Set VID map */
- for (port = 0; port < E6000SW_NUM_PORTS; port++) {
- ret = e6000sw_readreg(sc, REG_PORT(port), PORT_VID);
- ret &= ~PORT_VID_DEF_VID_MASK;
- ret |= (port + 1);
- e6000sw_writereg(sc, REG_PORT(port), PORT_VID, ret);
- }
-
- /* Enable all ports */
- for (port = 0; port < E6000SW_NUM_PORTS; port++) {
- ret = e6000sw_readreg(sc, REG_PORT(port), PORT_CONTROL);
- e6000sw_writereg(sc, REG_PORT(port), PORT_CONTROL, (ret |
- PORT_CONTROL_ENABLE));
- }
}
static void
@@ -897,7 +2018,6 @@ e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct atu_opt *atu,
{
uint16_t ret_opt;
uint16_t ret_data;
- int retries;
if (flag == NO_OPERATION)
return (0);
@@ -907,41 +2027,34 @@ e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct atu_opt *atu,
return (EINVAL);
}
- ret_opt = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION);
-
- if (ret_opt & ATU_UNIT_BUSY) {
- device_printf(dev, "ATU unit is busy, cannot access"
- "register\n");
+ if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) {
+ device_printf(dev, "ATU unit is busy, cannot access\n");
return (EBUSY);
- } else {
- if(flag & LOAD_FROM_FIB) {
- ret_data = e6000sw_readreg(sc, REG_GLOBAL, ATU_DATA);
- e6000sw_writereg(sc, REG_GLOBAL2, ATU_DATA, (ret_data &
- ~ENTRY_STATE));
- }
- e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR01, atu->mac_01);
- e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR23, atu->mac_23);
- e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR45, atu->mac_45);
- e6000sw_writereg(sc, REG_GLOBAL, ATU_FID, atu->fid);
-
- e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (ret_opt |
- ATU_UNIT_BUSY | flag));
-
- retries = E6000SW_RETRIES;
- while (--retries & (e6000sw_readreg(sc, REG_GLOBAL,
- ATU_OPERATION) & ATU_UNIT_BUSY))
- DELAY(1);
-
- if (retries == 0)
- device_printf(dev, "Timeout while flushing\n");
- else if (flag & GET_NEXT_IN_FIB) {
- atu->mac_01 = e6000sw_readreg(sc, REG_GLOBAL,
- ATU_MAC_ADDR01);
- atu->mac_23 = e6000sw_readreg(sc, REG_GLOBAL,
- ATU_MAC_ADDR23);
- atu->mac_45 = e6000sw_readreg(sc, REG_GLOBAL,
- ATU_MAC_ADDR45);
- }
+ }
+
+ ret_opt = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION);
+ if (flag & LOAD_FROM_FIB) {
+ ret_data = e6000sw_readreg(sc, REG_GLOBAL, ATU_DATA);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_DATA, (ret_data &
+ ~ENTRY_STATE));
+ }
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR01, atu->mac_01);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR23, atu->mac_23);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR45, atu->mac_45);
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_FID, atu->fid);
+
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION,
+ (ret_opt | ATU_UNIT_BUSY | flag));
+
+ if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY))
+ device_printf(dev, "Timeout while waiting ATU\n");
+ else if (flag & GET_NEXT_IN_FIB) {
+ atu->mac_01 = e6000sw_readreg(sc, REG_GLOBAL,
+ ATU_MAC_ADDR01);
+ atu->mac_23 = e6000sw_readreg(sc, REG_GLOBAL,
+ ATU_MAC_ADDR23);
+ atu->mac_45 = e6000sw_readreg(sc, REG_GLOBAL,
+ ATU_MAC_ADDR45);
}
return (0);
@@ -951,25 +2064,180 @@ static int
e6000sw_atu_flush(device_t dev, e6000sw_softc_t *sc, int flag)
{
uint16_t ret;
- int retries;
if (flag == NO_OPERATION)
return (0);
+ if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) {
+ device_printf(dev, "ATU unit is busy, cannot access\n");
+ return (EBUSY);
+ }
ret = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION);
- if (ret & ATU_UNIT_BUSY) {
- device_printf(dev, "Atu unit is busy, cannot flush\n");
+ e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION,
+ (ret | ATU_UNIT_BUSY | flag));
+ if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY))
+ device_printf(dev, "Timeout while flushing ATU\n");
+
+ return (0);
+}
+
+static int
+e6000sw_waitready(e6000sw_softc_t *sc, uint32_t phy, uint32_t reg,
+ uint32_t busybit)
+{
+ int i;
+
+ for (i = 0; i < E6000SW_RETRIES; i++) {
+ if ((e6000sw_readreg(sc, phy, reg) & busybit) == 0)
+ return (0);
+ DELAY(1);
+ }
+
+ return (1);
+}
+
+static int
+e6000sw_vtu_flush(e6000sw_softc_t *sc)
+{
+
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "VTU unit is busy, cannot access\n");
return (EBUSY);
- } else {
- e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (ret |
- ATU_UNIT_BUSY | flag));
- retries = E6000SW_RETRIES;
- while (--retries & (e6000sw_readreg(sc, REG_GLOBAL,
- ATU_OPERATION) & ATU_UNIT_BUSY))
- DELAY(1);
-
- if (retries == 0)
- device_printf(dev, "Timeout while flushing\n");
+ }
+
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, VTU_FLUSH | VTU_BUSY);
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "Timeout while flushing VTU\n");
+ return (ETIMEDOUT);
+ }
+
+ return (0);
+}
+
+static int
+e6000sw_vtu_update(e6000sw_softc_t *sc, int purge, int vid, int fid,
+ int members, int untagged)
+{
+ int i, op;
+ uint32_t data[2];
+
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "VTU unit is busy, cannot access\n");
+ return (EBUSY);
+ }
+
+ data[0] = (vid & VTU_VID_MASK);
+ if (purge == 0)
+ data[0] |= VTU_VID_VALID;
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, data[0]);
+
+ if (purge == 0) {
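+ /*
+ * Membership is packed VTU_PPREG() ports per data register:
+ * 4-bit fields on most chips, 2-bit fields on the 88E6190.
+ */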
+ data[0] = 0;
+ data[1] = 0;
+ for (i = 0; i < sc->num_ports; i++) {
+ if ((untagged & (1 << i)) != 0)
+ data[i / VTU_PPREG(sc)] |=
+ VTU_PORT_UNTAGGED << VTU_PORT(sc, i);
+ else if ((members & (1 << i)) != 0)
+ data[i / VTU_PPREG(sc)] |=
+ VTU_PORT_TAGGED << VTU_PORT(sc, i);
+ else
+ data[i / VTU_PPREG(sc)] |=
+ VTU_PORT_DISCARD << VTU_PORT(sc, i);
+ }
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA, data[0]);
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA2, data[1]);
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_FID,
+ fid & VTU_FID_MASK(sc));
+ op = VTU_LOAD;
+ } else
+ op = VTU_PURGE;
+
+ e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, op | VTU_BUSY);
+ if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) {
+ device_printf(sc->dev, "Timeout while flushing VTU\n");
+ return (ETIMEDOUT);
+ }
+
+ return (0);
+}
+
+static ssize_t
+e6000sw_getiosize(device_t dev)
+{
+ e6000sw_softc_t *sc;
+
+ sc = device_get_softc(dev);
+
+ return (sc->iosize);
+}
+
+static ssize_t
+e6000sw_getioblksize(device_t dev __unused)
+{
+
+ return (E6000SW_IOBUF_BLKSIZE);
+}
+
+static void *
+e6000sw_getiobuf(device_t dev)
+{
+ e6000sw_softc_t *sc;
+
+ sc = device_get_softc(dev);
+
+ return (sc->iobuf);
+}
+
+static int
+e6000sw_ioread(device_t dev, off_t off, ssize_t len)
+{
+ e6000sw_softc_t *sc;
+ ssize_t resid;
+ uint8_t *iobuf;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ iobuf = (uint8_t *)sc->iobuf;
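+ /* The EEPROM unit transfers a single byte per busy/ready command cycle. */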
+ for (resid = 0; resid < len; resid++) {
+ if (E6000SW_WAITREADY2(sc, EEPROM_CMD, EEPROM_BUSY)) {
+ device_printf(sc->dev, "EEPROM is busy, cannot access\n");
+ return (ETIMEDOUT);
+ }
+ e6000sw_writereg(sc, REG_GLOBAL2, EEPROM_ADDR, off + resid);
+ e6000sw_writereg(sc, REG_GLOBAL2, EEPROM_CMD,
+ EEPROM_READ_CMD | EEPROM_BUSY);
+ if (E6000SW_WAITREADY2(sc, EEPROM_CMD, EEPROM_BUSY)) {
+ device_printf(sc->dev, "EEPROM is busy, cannot access\n");
+ return (ETIMEDOUT);
+ }
+ reg = e6000sw_readreg(sc, REG_GLOBAL2, EEPROM_CMD);
+ iobuf[resid] = reg & EEPROM_DATA_MASK;
+ }
+
+ return (0);
+}
+
+static int
+e6000sw_iowrite(device_t dev, off_t off, ssize_t len)
+{
+ e6000sw_softc_t *sc;
+ ssize_t resid;
+ uint8_t *iobuf;
+
+ if (e6000sw_eeprom_wp)
+ return (EPERM);
+ sc = device_get_softc(dev);
+ iobuf = (uint8_t *)sc->iobuf;
+ for (resid = 0; resid < len; resid++) {
+ if (E6000SW_WAITREADY2(sc, EEPROM_CMD, EEPROM_BUSY)) {
+ device_printf(sc->dev, "EEPROM is busy, cannot access\n");
+ return (ETIMEDOUT);
+ }
+ e6000sw_writereg(sc, REG_GLOBAL2, EEPROM_ADDR, off + resid);
+ e6000sw_writereg(sc, REG_GLOBAL2, EEPROM_CMD,
+ EEPROM_BUSY | EEPROM_WRITE_CMD | EEPROM_WRITE_EN |
+ (iobuf[resid] & EEPROM_DATA_MASK));
}
return (0);
diff --git a/sys/dev/etherswitch/e6000sw/e6000swreg.h b/sys/dev/etherswitch/e6000sw/e6000swreg.h
index b8cdfe3..98f8827 100644
--- a/sys/dev/etherswitch/e6000sw/e6000swreg.h
+++ b/sys/dev/etherswitch/e6000sw/e6000swreg.h
@@ -29,7 +29,7 @@
*/
#ifndef _E6000SWREG_H_
-#define _E6000SWREG_H_
+#define _E6000SWREG_H_
struct atu_opt {
uint16_t mac_01;
@@ -42,144 +42,264 @@ struct atu_opt {
* Definitions for the Marvell 88E6000 series Ethernet Switch.
*/
-#define CPU_PORT 0x5
+/* Switch IDs. */
+#define MV88E6141 0x3400
+#define MV88E6341 0x3410
+#define MV88E6352 0x3520
+#define MV88E6172 0x1720
+#define MV88E6176 0x1760
+#define MV88E6190 0x1900
+
+#define MVSWITCH(_sc, id) ((_sc)->swid == (id))
/*
* Switch Registers
*/
-#define REG_GLOBAL 0x1b
-#define REG_GLOBAL2 0x1c
-#define REG_PORT(p) (0x10 + (p))
+#define REG_GLOBAL 0x1b
+#define REG_GLOBAL2 0x1c
+#define REG_PORT(_sc, p) (((_sc)->port_base) + (p))
-#define REG_NUM_MAX 31
+#define REG_NUM_MAX 31
/*
* Per-Port Switch Registers
*/
-#define PORT_STATUS 0x0
-#define PSC_CONTROL 0x1
-#define SWITCH_ID 0x3
-#define PORT_CONTROL 0x4
-#define PORT_CONTROL_1 0x5
-#define PORT_VLAN_MAP 0x6
-#define PORT_VID 0x7
-#define PORT_ASSOCIATION_VECTOR 0xb
-#define PORT_ATU_CTRL 0xc
-#define RX_COUNTER 0x12
-#define TX_COUNTER 0x13
-
-#define PORT_VID_DEF_VID 0
-#define PORT_VID_DEF_VID_MASK 0xfff
-#define PORT_VID_PRIORITY_MASK 0xc00
-
-#define PORT_CONTROL_ENABLE 0x3
+#define PORT_STATUS 0x0
+#define PORT_STATUS_SPEED_MASK 0x300
+#define PORT_STATUS_SPEED_10 0
+#define PORT_STATUS_SPEED_100 (1 << 8)
+#define PORT_STATUS_SPEED_1000 (2 << 8)
+#define PORT_STATUS_DUPLEX_MASK (1 << 10)
+#define PORT_STATUS_LINK_MASK (1 << 11)
+#define PORT_STATUS_PHY_DETECT_MASK (1 << 12)
+
+#define PSC_CONTROL 0x1
+#define PSC_CONTROL_FORCED_SPD (1 << 13)
+#define PSC_CONTROL_ALT_SPD (1 << 12)
+#define PSC_CONTROL_EEE_ON (1 << 9)
+#define PSC_CONTROL_FORCED_EEE (1 << 8)
+#define PSC_CONTROL_FC_ON (1 << 7)
+#define PSC_CONTROL_FORCED_FC (1 << 6)
+#define PSC_CONTROL_LINK_UP (1 << 5)
+#define PSC_CONTROL_FORCED_LINK (1 << 4)
+#define PSC_CONTROL_FULLDPX (1 << 3)
+#define PSC_CONTROL_FORCED_DPX (1 << 2)
+#define PSC_CONTROL_SPD10G 0x3
+#define PSC_CONTROL_SPD2500 PSC_CONTROL_SPD10G
+#define PSC_CONTROL_SPD1000 0x2
+#define SWITCH_ID 0x3
+#define PORT_CONTROL 0x4
+#define PORT_CONTROL1 0x5
+#define PORT_CONTROL1_LAG_PORT (1 << 14)
+#define PORT_CONTROL1_LAG_ID_MASK 0xf
+#define PORT_CONTROL1_LAG_ID_SHIFT 8
+#define PORT_CONTROL1_FID_MASK 0xf
+#define PORT_VLAN_MAP 0x6
+#define PORT_VID 0x7
+#define PORT_CONTROL2 0x8
+#define PORT_ASSOCIATION_VECTOR 0xb
+#define PORT_ATU_CTRL 0xc
+#define RX_COUNTER 0x12
+#define TX_COUNTER 0x13
+
+#define PORT_VID_DEF_VID 0
+#define PORT_VID_DEF_VID_MASK 0xfff
+#define PORT_VID_PRIORITY_MASK 0xc00
+
+#define PORT_CONTROL_DISABLED 0
+#define PORT_CONTROL_BLOCKING 1
+#define PORT_CONTROL_LEARNING 2
+#define PORT_CONTROL_FORWARDING 3
+#define PORT_CONTROL_ENABLE 3
+#define PORT_CONTROL_FRAME 0x0300
+#define PORT_CONTROL_EGRESS 0x3000
+#define PORT_CONTROL2_DOT1Q 0x0c00
+#define PORT_CONTROL2_DISC_TAGGED (1 << 9)
+#define PORT_CONTROL2_DISC_UNTAGGED (1 << 8)
/* PORT_VLAN fields */
-#define PORT_VLAN_MAP_TABLE_MASK 0x7f
-#define PORT_VLAN_MAP_FID 12
-#define PORT_VLAN_MAP_FID_MASK 0xf000
+#define PORT_VLAN_MAP_FID 12
+#define PORT_VLAN_MAP_FID_MASK 0xf000
+
/*
* Switch Global Register 1 accessed via REG_GLOBAL_ADDR
*/
-#define SWITCH_GLOBAL_STATUS 0
-#define SWITCH_GLOBAL_CONTROL 4
-#define SWITCH_GLOBAL_CONTROL2 28
+#define SWITCH_GLOBAL_STATUS 0
+#define SWITCH_GLOBAL_STATUS_IR (1 << 11)
+#define SWITCH_GLOBAL_CONTROL 4
+#define SWITCH_GLOBAL_CONTROL2 28
-#define MONITOR_CONTROL 26
+#define MONITOR_CONTROL 26
+
+/* VTU operation */
+#define VTU_FID 2
+#define VTU_OPERATION 5
+#define VTU_VID 6
+#define VTU_DATA 7
+#define VTU_DATA2 8
+
+#define VTU_FID_MASK(_sc) (MVSWITCH((_sc), MV88E6190) ? 0xfff : 0xff)
+#define VTU_FID_POLICY (1 << 12)
+#define VTU_PORT_UNMODIFIED 0
+#define VTU_PORT_UNTAGGED 1
+#define VTU_PORT_TAGGED 2
+#define VTU_PORT_DISCARD 3
+#define VTU_PPREG(_sc) (MVSWITCH((_sc), MV88E6190) ? 8 : 4)
+#define VTU_PORT(_sc, p) (((p) % VTU_PPREG(_sc)) * (16 / VTU_PPREG(_sc)))
+#define VTU_PORT_MASK 3
+#define VTU_BUSY (1 << 15)
+#define VTU_VID_VALID (1 << 12)
+#define VTU_VID_MASK 0xfff
+
+/* VTU opcodes */
+#define VTU_OP_MASK (7 << 12)
+#define VTU_NOP (0 << 12)
+#define VTU_FLUSH (1 << 12)
+#define VTU_LOAD (3 << 12)
+#define VTU_PURGE (3 << 12)
+#define VTU_GET_NEXT (4 << 12)
+#define STU_LOAD (5 << 12)
+#define STU_PURGE (5 << 12)
+#define STU_GET_NEXT (6 << 12)
+#define VTU_GET_VIOLATION_DATA (7 << 12)
+#define VTU_CLEAR_VIOLATION_DATA (7 << 12)
/* ATU operation */
-#define ATU_FID 1
-#define ATU_CONTROL 10
-#define ATU_OPERATION 11
-#define ATU_DATA 12
-#define ATU_MAC_ADDR01 13
-#define ATU_MAC_ADDR23 14
-#define ATU_MAC_ADDR45 15
+#define ATU_FID 1
+#define ATU_CONTROL 10
+#define ATU_OPERATION 11
+#define ATU_DATA 12
+#define ATU_MAC_ADDR01 13
+#define ATU_MAC_ADDR23 14
+#define ATU_MAC_ADDR45 15
-#define ATU_UNIT_BUSY (1 << 15)
-#define ENTRY_STATE 0xf
+#define ATU_DATA_LAG (1 << 15)
+#define ATU_PORT_MASK(_sc) (MVSWITCH((_sc), MV88E6190) ? 0xfff0 : 0xff0)
+#define ATU_PORT_SHIFT 4
+#define ATU_LAG_MASK 0xf0
+#define ATU_LAG_SHIFT 4
+#define ATU_STATE_MASK 0xf
+#define ATU_UNIT_BUSY (1 << 15)
+#define ENTRY_STATE 0xf
/* ATU_CONTROL fields */
-#define ATU_CONTROL_AGETIME 4
-#define ATU_CONTROL_AGETIME_MASK 0xff0
-#define ATU_CONTROL_LEARN2ALL 3
+#define ATU_CONTROL_AGETIME 4
+#define ATU_CONTROL_AGETIME_MASK 0xff0
+#define ATU_CONTROL_LEARN2ALL 3
/* ATU opcode */
-#define NO_OPERATION (0 << 0)
-#define FLUSH_ALL (1 << 0)
-#define FLUSH_NON_STATIC (1 << 1)
-#define LOAD_FROM_FIB (3 << 0)
-#define PURGE_FROM_FIB (3 << 0)
-#define GET_NEXT_IN_FIB (1 << 2)
-#define FLUSH_ALL_IN_FIB (5 << 0)
-#define FLUSH_NON_STATIC_IN_FIB (3 << 1)
-#define GET_VIOLATION_DATA (7 << 0)
-#define CLEAR_VIOLATION_DATA (7 << 0)
+#define ATU_OP_MASK (7 << 12)
+#define NO_OPERATION (0 << 12)
+#define FLUSH_ALL (1 << 12)
+#define FLUSH_NON_STATIC (2 << 12)
+#define LOAD_FROM_FIB (3 << 12)
+#define PURGE_FROM_FIB (3 << 12)
+#define GET_NEXT_IN_FIB (4 << 12)
+#define FLUSH_ALL_IN_FIB (5 << 12)
+#define FLUSH_NON_STATIC_IN_FIB (6 << 12)
+#define GET_VIOLATION_DATA (7 << 12)
+#define CLEAR_VIOLATION_DATA (7 << 12)
/* ATU Stats */
-#define COUNT_ALL (0 << 0)
+#define COUNT_ALL (0 << 0)
/*
* Switch Global Register 2 accessed via REG_GLOBAL2_ADDR
*/
-#define MGMT_EN_2x 2
-#define MGMT_EN_0x 3
-#define SWITCH_MGMT 5
-#define ATU_STATS 14
+#define MGMT_EN_2x 2
+#define MGMT_EN_0x 3
+#define SWITCH_MGMT 5
+#define LAG_MASK 7
+#define LAG_MAPPING 8
+#define ATU_STATS 14
-#define MGMT_EN_ALL 0xffff
+#define MGMT_EN_ALL 0xffff
+#define LAG_UPDATE (1 << 15)
+#define LAG_MASKNUM_SHIFT 12
+#define LAGID_SHIFT 11
/* SWITCH_MGMT fields */
-#define SWITCH_MGMT_PRI 0
-#define SWITCH_MGMT_PRI_MASK 7
+#define SWITCH_MGMT_PRI 0
+#define SWITCH_MGMT_PRI_MASK 7
#define SWITCH_MGMT_RSVD2CPU 3
-#define SWITCH_MGMT_FC_PRI 4
-#define SWITCH_MGMT_FC_PRI_MASK (7 << 4)
-#define SWITCH_MGMT_FORCEFLOW 7
+#define SWITCH_MGMT_FC_PRI 4
+#define SWITCH_MGMT_FC_PRI_MASK (7 << 4)
+#define SWITCH_MGMT_FORCEFLOW 7
/* ATU_STATS fields */
-#define ATU_STATS_BIN 14
-#define ATU_STATS_FLAG 12
+#define ATU_STATS_BIN 14
+#define ATU_STATS_FLAG 12
+
+/* Offset of SMI registers in multi-chip setup. */
+#define SMI_CMD 0
+#define SMI_DATA 1
/*
- * PHY registers accessed via 'Switch Global Registers' (REG_GLOBAL2).
+ * 'Switch Global Registers 2' (REG_GLOBAL2).
*/
-#define SMI_PHY_CMD_REG 0x18
-#define SMI_PHY_DATA_REG 0x19
-
-#define PHY_CMD 0x18
-#define PHY_DATA 0x19
-#define PHY_DATA_MASK 0xffff
-
-#define PHY_CMD_SMI_BUSY 15
-#define PHY_CMD_MODE 12
-#define PHY_CMD_MODE_MDIO 1
-#define PHY_CMD_MODE_XMDIO 0
-#define PHY_CMD_OPCODE 10
-#define PHY_CMD_OPCODE_WRITE 1
-#define PHY_CMD_OPCODE_READ 2
-#define PHY_CMD_DEV_ADDR 5
-#define PHY_CMD_DEV_ADDR_MASK 0x3e0
-#define PHY_CMD_REG_ADDR 0
-#define PHY_CMD_REG_ADDR_MASK 0x1f
-
-#define PHY_PAGE_REG 22
-
-#define E6000SW_NUM_PHYS 5
-#define E6000SW_NUM_PHY_REGS 29
-#define E6000SW_CPUPORTS_MASK ((1 << 5) | (1 << 6))
-#define E6000SW_NUM_VGROUPS 8
-#define E6000SW_NUM_PORTS 7
-#define E6000SW_PORT_NO_VGROUP -1
-#define E6000SW_DEFAULT_AGETIME 20
-#define E6000SW_RETRIES 100
-
-
-/* Default vlangroups */
-#define E6000SW_DEF_VLANGROUP0 (1 | (1 << 1) | (1 << 2) | (1 << 3) | \
- (1 << 6))
-#define E6000SW_DEF_VLANGROUP1 ((1 << 4) | (1 << 5))
+
+/* EEPROM registers */
+#define EEPROM_CMD 0x14
+#define EEPROM_BUSY (1 << 15)
+#define EEPROM_READ_CMD (4 << 12)
+#define EEPROM_WRITE_CMD (3 << 12)
+#define EEPROM_WRITE_EN (1 << 10)
+#define EEPROM_DATA_MASK 0xff
+#define EEPROM_ADDR 0x15
+
+/* PHY registers */
+#define SMI_PHY_CMD_REG 0x18
+#define SMI_CMD_BUSY (1 << 15)
+#define SMI_CMD_MODE_C22 (1 << 12)
+#define SMI_CMD_C22_WRITE (1 << 10)
+#define SMI_CMD_C22_READ (2 << 10)
+#define SMI_CMD_OP_C22_WRITE \
+ (SMI_CMD_C22_WRITE | SMI_CMD_BUSY | SMI_CMD_MODE_C22)
+#define SMI_CMD_OP_C22_READ \
+ (SMI_CMD_C22_READ | SMI_CMD_BUSY | SMI_CMD_MODE_C22)
+#define SMI_CMD_C45 (0 << 12)
+#define SMI_CMD_C45_ADDR (0 << 10)
+#define SMI_CMD_C45_WRITE (1 << 10)
+#define SMI_CMD_C45_READ (3 << 10)
+#define SMI_CMD_OP_C45_ADDR \
+ (SMI_CMD_C45_ADDR | SMI_CMD_BUSY | SMI_CMD_C45)
+#define SMI_CMD_OP_C45_WRITE \
+ (SMI_CMD_C45_WRITE | SMI_CMD_BUSY | SMI_CMD_C45)
+#define SMI_CMD_OP_C45_READ \
+ (SMI_CMD_C45_READ | SMI_CMD_BUSY | SMI_CMD_C45)
+#define SMI_CMD_DEV_ADDR 5
+#define SMI_CMD_DEV_ADDR_MASK 0x3e0
+#define SMI_CMD_REG_ADDR_MASK 0x1f
+#define SMI_PHY_DATA_REG 0x19
+#define PHY_DATA_MASK 0xffff
+
+#define PHY_PAGE_REG 22
+
+/*
+ * Scratch and Misc register accessed via
+ * 'Switch Global Registers' (REG_GLOBAL2)
+ */
+#define SCR_AND_MISC_REG 0x1a
+
+#define SCR_AND_MISC_PTR_CFG 0x7000
+#define SCR_AND_MISC_DATA_CFG_MASK 0xf0
+
+/* SERDES registers. */
+#define E6000SW_SERDES_DEV 4
+#define E6000SW_SERDES_PCS_CTL1 0x1000
+#define E6000SW_SERDES_SGMII_CTL 0x2000
+#define E6000SW_SERDES_PDOWN (1 << 11)
+
+#define E6000SW_NUM_VLANS 128
+#define E6000SW_NUM_LAGMASK 8
+#define E6000SW_NUM_PHY_REGS 29
+#define E6000SW_MAX_PORTS 11
+#define E6000SW_DEFAULT_AGETIME 20
+#define E6000SW_RETRIES 100
+#define E6000SW_SMI_TIMEOUT 16
+#define E6000SW_IOBUF_BLKSIZE (4 * 1024) /* 4 KiB block */
+#define E6000SW_IOBUF_SIZE (64 * 1024) /* 64 KiB max. */
#endif /* _E6000SWREG_H_ */
diff --git a/sys/dev/etherswitch/etherswitch.c b/sys/dev/etherswitch/etherswitch.c
index ee9b710..1f4b0dc 100644
--- a/sys/dev/etherswitch/etherswitch.c
+++ b/sys/dev/etherswitch/etherswitch.c
@@ -72,13 +72,16 @@ driver_t etherswitch_driver = {
sizeof(struct etherswitch_softc),
};
-static d_ioctl_t etherswitchioctl;
+static d_ioctl_t etherswitchioctl;
+static d_read_t etherswitchioread;
+static d_write_t etherswitchiowrite;
static struct cdevsw etherswitch_cdevsw = {
.d_version = D_VERSION,
- .d_flags = D_TRACKCLOSE,
.d_ioctl = etherswitchioctl,
.d_name = "etherswitch",
+ .d_read = etherswitchioread,
+ .d_write = etherswitchiowrite,
};
static void
@@ -180,6 +183,14 @@ etherswitchioctl(struct cdev *cdev, u_long cmd, caddr_t data, int flags, struct
error = ETHERSWITCH_SETVGROUP(etherswitch, (etherswitch_vlangroup_t *)data);
break;
+ case IOETHERSWITCHGETLAGGROUP:
+ error = ETHERSWITCH_GETLAGGROUP(etherswitch, (etherswitch_laggroup_t *)data);
+ break;
+
+ case IOETHERSWITCHSETLAGGROUP:
+ error = ETHERSWITCH_SETLAGGROUP(etherswitch, (etherswitch_laggroup_t *)data);
+ break;
+
case IOETHERSWITCHGETPHYREG:
phyreg = (etherswitch_phyreg_t *)data;
phyreg->val = ETHERSWITCH_READPHYREG(etherswitch, phyreg->phy, phyreg->reg);
@@ -207,4 +218,84 @@ etherswitchioctl(struct cdev *cdev, u_long cmd, caddr_t data, int flags, struct
return (error);
}
+static int
+etherswitchioread(struct cdev *cdev, struct uio *uio, int ioflag)
+{
+ device_t etherswitch;
+ int error;
+ ssize_t ioblksize, iosize, len;
+ struct etherswitch_softc *sc;
+ void *iobuf;
+
+ sc = (struct etherswitch_softc *)cdev->si_drv1;
+ etherswitch = device_get_parent(sc->sc_dev);
+ ioblksize = ETHERSWITCH_GETIOBLKSIZE(etherswitch);
+ iosize = ETHERSWITCH_GETIOSIZE(etherswitch);
+ iobuf = ETHERSWITCH_GETIOBUF(etherswitch);
+ if (ioblksize == -1 || iosize == -1 || iobuf == NULL)
+ return (EINVAL);
+ if (uio->uio_offset == iosize)
+ return (0);
+ if (uio->uio_offset > iosize)
+ return (EIO);
+ if (uio->uio_resid > ioblksize)
+ return (EIO);
+
+ error = 0;
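+ /* Copy out one block-aligned segment per driver I/O call. */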
+ while (uio->uio_resid > 0) {
+ if (uio->uio_offset >= iosize)
+ break;
+ len = MIN(ioblksize - (uio->uio_offset & (ioblksize - 1)),
+ uio->uio_resid);
+ error = ETHERSWITCH_IOREAD(etherswitch, uio->uio_offset, len);
+ if (error != 0)
+ break;
+ error = uiomove(iobuf, len, uio);
+ if (error != 0)
+ break;
+ }
+
+ return (error);
+}
+
+static int
+etherswitchiowrite(struct cdev *cdev, struct uio *uio, int ioflag)
+{
+ device_t etherswitch;
+ int error;
+ off_t offset;
+ ssize_t ioblksize, iosize, len;
+ struct etherswitch_softc *sc;
+ void *iobuf;
+
+ sc = (struct etherswitch_softc *)cdev->si_drv1;
+ etherswitch = device_get_parent(sc->sc_dev);
+ ioblksize = ETHERSWITCH_GETIOBLKSIZE(etherswitch);
+ iosize = ETHERSWITCH_GETIOSIZE(etherswitch);
+ iobuf = ETHERSWITCH_GETIOBUF(etherswitch);
+ if (ioblksize == -1 || iosize == -1 || iobuf == NULL)
+ return (EINVAL);
+ if (uio->uio_offset >= iosize)
+ return (EIO);
+ if (uio->uio_resid > ioblksize)
+ return (EIO);
+
+ error = 0;
+ while (uio->uio_resid > 0) {
+ if (uio->uio_offset >= iosize)
+ break;
+ len = MIN(ioblksize - (uio->uio_offset & (ioblksize - 1)),
+ uio->uio_resid);
+ offset = uio->uio_offset;
+ error = uiomove(iobuf, len, uio);
+ if (error != 0)
+ break;
+ error = ETHERSWITCH_IOWRITE(etherswitch, offset, len);
+ if (error != 0)
+ break;
+ }
+
+ return (error);
+}
+
MODULE_VERSION(etherswitch, 1);
diff --git a/sys/dev/etherswitch/etherswitch.h b/sys/dev/etherswitch/etherswitch.h
index 2619019..c047c72 100644
--- a/sys/dev/etherswitch/etherswitch.h
+++ b/sys/dev/etherswitch/etherswitch.h
@@ -14,7 +14,7 @@ extern driver_t etherswitch_driver;
struct etherswitch_reg {
uint16_t reg;
- uint16_t val;
+ uint32_t val;
};
typedef struct etherswitch_reg etherswitch_reg_t;
@@ -36,11 +36,23 @@ typedef struct etherswitch_phyreg etherswitch_phyreg_t;
#define ETHERSWITCH_VLAN_CAPS_BITS \
"\020\1ISL\2PORT\3DOT1Q\4DOT1Q4K\5QinQ"
+#define ETHERSWITCH_CAPS_PORTS_MASK (1 << 0) /* Ports mask */
+#define ETHERSWITCH_CAPS_LAGG (1 << 1) /* LAGG support */
+#define ETHERSWITCH_CAPS_PSTATE (1 << 2) /* Port state */
+#define ETHERSWITCH_CAPS_BITS \
+"\020\1PORTSMASK\2LAGG\3PSTATE"
+
+#define MAX_PORTS 1024
+#define MAX_PORTS_UINT32 (MAX_PORTS / 32) /* 32 port bits per word */
+
struct etherswitch_info {
int es_nports;
int es_nvlangroups;
+ int es_nlaggroups;
char es_name[ETHERSWITCH_NAMEMAX];
uint32_t es_vlan_caps;
+ uint32_t es_switch_caps;
+ uint32_t es_ports_mask[MAX_PORTS_UINT32];
};
typedef struct etherswitch_info etherswitch_info_t;
@@ -61,13 +73,36 @@ typedef struct etherswitch_conf etherswitch_conf_t;
#define ETHERSWITCH_PORT_DROPUNTAGGED (1 << 4)
#define ETHERSWITCH_PORT_DOUBLE_TAG (1 << 5)
#define ETHERSWITCH_PORT_INGRESS (1 << 6)
-#define ETHERSWITCH_PORT_FLAGS_BITS \
-"\020\1CPUPORT\2STRIPTAG\3ADDTAG\4FIRSTLOCK\5DROPUNTAGGED\6QinQ\7INGRESS"
+#define ETHERSWITCH_PORT_DROPTAGGED (1 << 7)
+#define ETHERSWITCH_PORT_FLAGS_BITS \
+"\020\1CPUPORT\2STRIPTAG\3ADDTAG\4FIRSTLOCK\5DROPUNTAGGED\6QinQ\7INGRESS" \
+"\10DROPTAGGED"
+
+#define ETHERSWITCH_PSTATE_DISABLED (1 << 0)
+#define ETHERSWITCH_PSTATE_BLOCKING (1 << 1)
+#define ETHERSWITCH_PSTATE_LEARNING (1 << 2)
+#define ETHERSWITCH_PSTATE_FORWARDING (1 << 3)
+#define ETHERSWITCH_PSTATE_BITS \
+"\020\1DISABLED\2BLOCKING\3LEARNING\4FORWARDING"
+
+#define ETHERSWITCH_PORT_MAX_LEDS 3
+
+enum etherswitch_port_led {
+ ETHERSWITCH_PORT_LED_DEFAULT,
+ ETHERSWITCH_PORT_LED_ON,
+ ETHERSWITCH_PORT_LED_OFF,
+ ETHERSWITCH_PORT_LED_BLINK,
+ ETHERSWITCH_PORT_LED_MAX
+};
+typedef enum etherswitch_port_led etherswitch_port_led_t;
struct etherswitch_port {
int es_port;
int es_pvid;
+ int es_nleds;
uint32_t es_flags;
+ uint32_t es_state;
+ etherswitch_port_led_t es_led[ETHERSWITCH_PORT_MAX_LEDS];
union {
struct ifreq es_uifr;
struct ifmediareq es_uifmr;
@@ -77,6 +112,14 @@ struct etherswitch_port {
};
typedef struct etherswitch_port etherswitch_port_t;
+struct etherswitch_laggroup {
+ int es_lagg_valid;
+ int es_laggroup;
+ int es_member_ports;
+ int es_untagged_ports;
+};
+typedef struct etherswitch_laggroup etherswitch_laggroup_t;
+
struct etherswitch_vlangroup {
int es_vlangroup;
int es_vid;
@@ -99,5 +142,7 @@ typedef struct etherswitch_vlangroup etherswitch_vlangroup_t;
#define IOETHERSWITCHSETPHYREG _IOW('i', 9, etherswitch_phyreg_t)
#define IOETHERSWITCHGETCONF _IOR('i', 10, etherswitch_conf_t)
#define IOETHERSWITCHSETCONF _IOW('i', 11, etherswitch_conf_t)
+#define IOETHERSWITCHGETLAGGROUP _IOWR('i', 12, etherswitch_laggroup_t)
+#define IOETHERSWITCHSETLAGGROUP _IOW('i', 13, etherswitch_laggroup_t)
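+
+/*
+ * Example (hypothetical values): read LAG group 0 through the control
+ * device, e.g. fd = open("/dev/etherswitch0", O_RDWR):
+ *
+ * etherswitch_laggroup_t lag = { .es_laggroup = 0 };
+ * ioctl(fd, IOETHERSWITCHGETLAGGROUP, &lag);
+ */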
#endif
diff --git a/sys/dev/etherswitch/etherswitch_if.m b/sys/dev/etherswitch/etherswitch_if.m
index a2aea02..b684027 100644
--- a/sys/dev/etherswitch/etherswitch_if.m
+++ b/sys/dev/etherswitch/etherswitch_if.m
@@ -25,6 +25,18 @@ CODE {
}
static int
+ null_etherswitch_getlaggroup(device_t dev, etherswitch_laggroup_t *conf)
+ {
+ return (EINVAL);
+ }
+
+ static int
+ null_etherswitch_setlaggroup(device_t dev, etherswitch_laggroup_t *conf)
+ {
+ return (EINVAL);
+ }
+
+ static int
null_etherswitch_getconf(device_t dev, etherswitch_conf_t *conf)
{
return (0);
@@ -35,6 +47,36 @@ CODE {
{
return (0);
}
+
+ static ssize_t
+ null_etherswitch_getioblksize(device_t dev)
+ {
+ return (-1);
+ }
+
+ static ssize_t
+ null_etherswitch_getiosize(device_t dev)
+ {
+ return (-1);
+ }
+
+ static void *
+ null_etherswitch_getiobuf(device_t dev)
+ {
+ return (NULL);
+ }
+
+ static int
+ null_etherswitch_ioread(device_t dev, off_t off, ssize_t len)
+ {
+ return (EIO);
+ }
+
+ static int
+ null_etherswitch_iowrite(device_t dev, off_t off, ssize_t len)
+ {
+ return (EIO);
+ }
};
#
@@ -127,6 +169,22 @@ METHOD int setvgroup {
}
#
+# Get LAGG configuration
+#
+METHOD int getlaggroup {
+ device_t dev;
+ etherswitch_laggroup_t *lag;
+} DEFAULT null_etherswitch_getlaggroup;
+
+#
+# Set LAGG configuration
+#
+METHOD int setlaggroup {
+ device_t dev;
+ etherswitch_laggroup_t *lag;
+} DEFAULT null_etherswitch_setlaggroup;
+
+#
# Get the Switch configuration
#
METHOD int getconf {
@@ -141,3 +199,42 @@ METHOD int setconf {
device_t dev;
etherswitch_conf_t *conf;
} DEFAULT null_etherswitch_setconf;
+
+#
+# Get the IO buffer block size
+#
+METHOD ssize_t getioblksize {
+ device_t dev;
+} DEFAULT null_etherswitch_getioblksize;
+
+#
+# Get the IO buffer size
+#
+METHOD ssize_t getiosize {
+ device_t dev;
+} DEFAULT null_etherswitch_getiosize;
+
+#
+# Get the IO buffer
+#
+METHOD void * getiobuf {
+ device_t dev;
+} DEFAULT null_etherswitch_getiobuf;
+
+#
+# Perform a read operation and save data into IO buffer
+#
+METHOD int ioread {
+ device_t dev;
+ off_t off;
+ ssize_t len;
+} DEFAULT null_etherswitch_ioread;
+
+#
+# Perform a write operation (write the data in the IO buffer)
+#
+METHOD int iowrite {
+ device_t dev;
+ off_t off;
+ ssize_t len;
+} DEFAULT null_etherswitch_iowrite;
diff --git a/sys/dev/fdt/fdt_common.c b/sys/dev/fdt/fdt_common.c
index 4e0d6e2..395b77b 100644
--- a/sys/dev/fdt/fdt_common.c
+++ b/sys/dev/fdt/fdt_common.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#define FDT_TYPE_LEN 64
#define FDT_REG_CELLS 4
+#define FDT_RANGES_SIZE 48
vm_paddr_t fdt_immr_pa;
vm_offset_t fdt_immr_va;
@@ -144,7 +145,7 @@ fdt_get_range_by_busaddr(phandle_t node, u_long addr, u_long *base,
int
fdt_get_range(phandle_t node, int range_id, u_long *base, u_long *size)
{
- pcell_t ranges[6], *rangesptr;
+ pcell_t ranges[FDT_RANGES_SIZE], *rangesptr;
pcell_t addr_cells, size_cells, par_addr_cells;
u_long par_bus_addr, pbase, psize;
int err, len, tuple_size, tuples;
@@ -416,13 +417,13 @@ fdt_addrsize_cells(phandle_t node, int *addr_cells, int *size_cells)
* Retrieve #{address,size}-cells.
*/
cell_size = sizeof(cell);
- if (OF_getprop(node, "#address-cells", &cell, cell_size) < cell_size)
+ if (OF_getencprop(node, "#address-cells", &cell, cell_size) < cell_size)
cell = 2;
- *addr_cells = fdt32_to_cpu((int)cell);
+ *addr_cells = (int)cell;
- if (OF_getprop(node, "#size-cells", &cell, cell_size) < cell_size)
+ if (OF_getencprop(node, "#size-cells", &cell, cell_size) < cell_size)
cell = 1;
- *size_cells = fdt32_to_cpu((int)cell);
+ *size_cells = (int)cell;
if (*addr_cells > 3 || *size_cells > 2)
return (ERANGE);
@@ -537,11 +538,11 @@ fdt_get_phyaddr(phandle_t node, device_t dev, int *phy_addr, void **phy_sc)
phy_node = OF_node_from_xref(phy_handle);
- if (OF_getprop(phy_node, "reg", (void *)&phy_reg,
+ if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
sizeof(phy_reg)) <= 0)
return (ENXIO);
- *phy_addr = fdt32_to_cpu(phy_reg);
+ *phy_addr = phy_reg;
/*
* Search for softc used to communicate with phy.
diff --git a/sys/dev/fdt/fdt_common.h b/sys/dev/fdt/fdt_common.h
index 94f84ff..4d5e47b 100644
--- a/sys/dev/fdt/fdt_common.h
+++ b/sys/dev/fdt/fdt_common.h
@@ -70,12 +70,6 @@ extern vm_paddr_t fdt_immr_pa;
extern vm_offset_t fdt_immr_va;
extern vm_offset_t fdt_immr_size;
-struct fdt_pm_mask_entry {
- char *compat;
- uint32_t mask;
-};
-extern struct fdt_pm_mask_entry fdt_pm_mask_table[];
-
#if defined(FDT_DTB_STATIC)
extern u_char fdt_static_dtb;
#endif
diff --git a/sys/dev/flash/mx25l.c b/sys/dev/flash/mx25l.c
index 4303da8..355c870 100644
--- a/sys/dev/flash/mx25l.c
+++ b/sys/dev/flash/mx25l.c
@@ -122,6 +122,7 @@ struct mx25l_flash_ident flash_devices[] = {
{ "en25q64", 0x1c, 0x3017, 64 * 1024, 128, FL_ERASE_4K },
{ "m25p32", 0x20, 0x2016, 64 * 1024, 64, FL_NONE },
{ "m25p64", 0x20, 0x2017, 64 * 1024, 128, FL_NONE },
+ { "n25q128", 0x20, 0xba18, 64 * 1024, 256, FL_ERASE_4K },
{ "mx25ll32", 0xc2, 0x2016, 64 * 1024, 64, FL_NONE },
{ "mx25ll64", 0xc2, 0x2017, 64 * 1024, 128, FL_NONE },
{ "mx25ll128", 0xc2, 0x2018, 64 * 1024, 256, FL_ERASE_4K | FL_ERASE_32K },
@@ -137,6 +138,7 @@ struct mx25l_flash_ident flash_devices[] = {
{ "w25x32", 0xef, 0x3016, 64 * 1024, 64, FL_ERASE_4K },
{ "w25x64", 0xef, 0x3017, 64 * 1024, 128, FL_ERASE_4K },
{ "w25q32", 0xef, 0x4016, 64 * 1024, 64, FL_ERASE_4K },
+ { "w25q32jv", 0xef, 0x7016, 64 * 1024, 64, FL_ERASE_4K },
{ "w25q64", 0xef, 0x4017, 64 * 1024, 128, FL_ERASE_4K },
{ "w25q64bv", 0xef, 0x4017, 64 * 1024, 128, FL_ERASE_4K },
{ "w25q128", 0xef, 0x4018, 64 * 1024, 256, FL_ERASE_4K },
@@ -392,6 +394,7 @@ mx25l_read(struct mx25l_softc *sc, off_t offset, caddr_t data, off_t count)
txBuf[4] = 0;
}
+ memset(data, 0, count);
cmd.tx_cmd = txBuf;
cmd.rx_cmd = rxBuf;
cmd.tx_data = data;
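
Each flash_devices entry is (name, manufacturer ID, device ID, sector size,
sector count, flags), so the capacity of the newly added parts follows
directly from the table:

	/* n25q128:  256 sectors * 64 KiB = 16 MiB */
	/* w25q32jv:  64 sectors * 64 KiB =  4 MiB */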
diff --git a/sys/dev/gpio/gpio_if.m b/sys/dev/gpio/gpio_if.m
index 70838cd..cbff41d 100644
--- a/sys/dev/gpio/gpio_if.m
+++ b/sys/dev/gpio/gpio_if.m
@@ -63,6 +63,43 @@ CODE {
return (0);
}
+
+ static int
+ gpio_default_pwm_getcaps(device_t dev __unused, int32_t pwm __unused,
+	    uint32_t pin __unused, uint32_t *caps)
+ {
+
+ *caps = 0;
+
+ return (0);
+ }
+
+ static int
+	gpio_default_pwm_max(device_t dev __unused, int *pwmmax)
+ {
+
+ *pwmmax = 0;
+
+ return (0);
+ }
+
+ static int
+ gpio_default_pwm_get(device_t dev __unused, uint32_t pwm __unused,
+ uint32_t pin __unused, uint32_t reg __unused,
+ uint32_t *value __unused)
+ {
+
+ return (EINVAL);
+ }
+
+ static int
+ gpio_default_pwm_set(device_t dev __unused, uint32_t pwm __unused,
+ uint32_t pin __unused, uint32_t reg __unused,
+ uint32_t value __unused)
+ {
+
+ return (EINVAL);
+ }
};
HEADER {
@@ -147,6 +184,46 @@ METHOD int pin_setflags {
};
#
+# Get maximum pwm number
+#
+METHOD int pwm_max {
+ device_t dev;
+ int *maxpwm;
+} DEFAULT gpio_default_pwm_max;
+
+#
+# Get pwm capabilities
+#
+METHOD int pwm_getcaps {
+ device_t dev;
+ int32_t pwm_num;
+ uint32_t pin_num;
+ uint32_t *caps;
+} DEFAULT gpio_default_pwm_getcaps;
+
+#
+# Get pwm settings of pin specified by pin_num
+#
+METHOD int pwm_get {
+ device_t dev;
+ int32_t pwm_num;
+ uint32_t pin_num;
+ uint32_t pwm_reg;
+ uint32_t *pwm_value;
+} DEFAULT gpio_default_pwm_get;
+
+#
+# Set pwm settings of pin specified by pin_num
+#
+METHOD int pwm_set {
+ device_t dev;
+ int32_t pwm_num;
+ uint32_t pin_num;
+ uint32_t pwm_reg;
+ uint32_t pwm_value;
+} DEFAULT gpio_default_pwm_set;
+
+#
# Allow the GPIO controller to map the gpio-specifier on its own.
#
METHOD int map_gpios {
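
A sketch of how a kernel consumer would drive the new methods; a pwm_num of
-1 selects the pin's default PWM unit, matching the convention gpioc(4) uses
below (halving the duty cycle is only illustrative):

	uint32_t duty;

	if (GPIO_PWM_GET(dev, -1, pin, GPIO_PWM_DUTY, &duty) == 0)
		(void)GPIO_PWM_SET(dev, -1, pin, GPIO_PWM_DUTY, duty / 2);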
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 36705ba..c330c98 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -121,9 +121,10 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct thread *td)
{
device_t bus;
- int max_pin, res;
+ int max_pin, max_pwm, res;
struct gpioc_softc *sc = cdev->si_drv1;
struct gpio_pin pin;
+ struct gpio_pwm_req pwmreq;
struct gpio_req req;
struct gpio_access_32 *a32;
struct gpio_config_32 *c32;
@@ -144,9 +145,16 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = GPIO_PIN_GETFLAGS(sc->sc_pdev, pin.gp_pin,
&pin.gp_flags);
/* Fail early */
- if (res)
+ if (res != 0)
+ break;
+ res = GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin,
+ &pin.gp_caps);
+ if (res != 0)
+ break;
+ res = GPIO_PWM_GETCAPS(sc->sc_pdev, -1, pin.gp_pin,
+ &pin.gp_pwm_caps);
+ if (res != 0)
break;
- GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &pin.gp_caps);
GPIOBUS_PIN_GETNAME(bus, pin.gp_pin, pin.gp_name);
bcopy(&pin, arg, sizeof(pin));
break;
@@ -197,6 +205,39 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = GPIO_PIN_CONFIG_32(sc->sc_pdev, c32->first_pin,
c32->num_pins, c32->pin_flags);
break;
+ case GPIOMAXPWM:
+ max_pwm = -1;
+ res = GPIO_PWM_MAX(sc->sc_pdev, &max_pwm);
+ bcopy(&max_pwm, arg, sizeof(max_pwm));
+ break;
+ case GPIOPWMGETCONFIG:
+ bcopy(arg, &pwmreq, sizeof(pwmreq));
+ res = GPIO_PWM_GETCAPS(sc->sc_pdev, pwmreq.gp_pwm,
+ pwmreq.gp_pwm_pin, &pwmreq.gp_pwm_caps);
+ dprintf("pwm getcaps pwm %d pin %d -> caps %#x\n",
+ pwmreq.gp_pwm, pwmreq.gp_pwm_pin,
+ pwmreq.gp_pwm_caps);
+ bcopy(&pwmreq, arg, sizeof(pwmreq));
+ break;
+ case GPIOPWMGET:
+ bcopy(arg, &pwmreq, sizeof(pwmreq));
+ res = GPIO_PWM_GET(sc->sc_pdev, pwmreq.gp_pwm,
+ pwmreq.gp_pwm_pin, pwmreq.gp_pwm_reg,
+ &pwmreq.gp_pwm_value);
+ dprintf("pwm get pwm %d pin %d -> reg %#x %d\n",
+ pwmreq.gp_pwm, pwmreq.gp_pwm_pin, pwmreq.gp_pwm_reg,
+ pwmreq.gp_pwm_value);
+ bcopy(&pwmreq, arg, sizeof(pwmreq));
+ break;
+ case GPIOPWMSET:
+ bcopy(arg, &pwmreq, sizeof(pwmreq));
+ res = GPIO_PWM_SET(sc->sc_pdev, pwmreq.gp_pwm,
+ pwmreq.gp_pwm_pin, pwmreq.gp_pwm_reg,
+ pwmreq.gp_pwm_value);
+ dprintf("pwm set pwm %d pin %d -> reg %#x %d\n",
+ pwmreq.gp_pwm, pwmreq.gp_pwm_pin, pwmreq.gp_pwm_reg,
+ pwmreq.gp_pwm_value);
+ break;
default:
return (ENOTTY);
break;
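
From userland the new ioctls are reached through /dev/gpiocN; a minimal
sketch (error handling trimmed, device path and struct layout assumed from
the handler above):

	#include <sys/types.h>
	#include <sys/gpio.h>
	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <unistd.h>

	static int
	set_pwm_duty(uint32_t pin, uint32_t duty)
	{
		struct gpio_pwm_req req;
		int fd, res;

		fd = open("/dev/gpioc0", O_RDWR);
		if (fd == -1)
			return (-1);
		req.gp_pwm = -1;		/* default PWM unit for this pin */
		req.gp_pwm_pin = pin;
		req.gp_pwm_reg = GPIO_PWM_DUTY;
		req.gp_pwm_value = duty;
		res = ioctl(fd, GPIOPWMSET, &req);
		close(fd);
		return (res);
	}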
diff --git a/sys/dev/hyperv/netvsc/if_hn.c b/sys/dev/hyperv/netvsc/if_hn.c
index 77c43a6..a4179be 100644
--- a/sys/dev/hyperv/netvsc/if_hn.c
+++ b/sys/dev/hyperv/netvsc/if_hn.c
@@ -537,7 +537,7 @@ SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
#ifdef HN_IFSTART_SUPPORT
/* Use ifnet.if_start instead of ifnet.if_transmit */
-static int hn_use_if_start = 0;
+static int hn_use_if_start = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
&hn_use_if_start, 0, "Use if_start TX method");
#endif
@@ -584,7 +584,7 @@ SYSCTL_PROC(_hw_hn, OID_AUTO, vfmap, CTLFLAG_RD | CTLTYPE_STRING,
0, 0, hn_vfmap_sysctl, "A", "VF mapping");
/* Transparent VF */
-static int hn_xpnt_vf = 1;
+static int hn_xpnt_vf = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, vf_transparent, CTLFLAG_RDTUN,
&hn_xpnt_vf, 0, "Transparent VF mod");
@@ -861,7 +861,8 @@ hn_set_hlen(struct mbuf *m_head)
PULLUP_HDR(m_head, ehlen + sizeof(*ip6));
ip6 = mtodo(m_head, ehlen);
- if (ip6->ip6_nxt != IPPROTO_TCP) {
+ if (ip6->ip6_nxt != IPPROTO_TCP &&
+ ip6->ip6_nxt != IPPROTO_UDP) {
m_freem(m_head);
return (NULL);
}
@@ -2363,7 +2364,7 @@ hn_attach(device_t dev)
ifp->if_start = hn_start;
IFQ_SET_MAXLEN(&ifp->if_snd, qdepth);
- ifp->if_snd.ifq_drv_maxlen = qdepth - 1;
+ ifp->if_snd.ifq_drv_maxlen = 0;
IFQ_SET_READY(&ifp->if_snd);
} else
#endif
diff --git a/sys/dev/iicbus/is31fl319x.c b/sys/dev/iicbus/is31fl319x.c
new file mode 100644
index 0000000..1ad7f52
--- /dev/null
+++ b/sys/dev/iicbus/is31fl319x.c
@@ -0,0 +1,676 @@
+/*-
+ * Copyright (c) 2017 Rubicon Communications, LLC (Netgate)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Driver for the ISSI IS31FL319x - 3/6/9 channel light effect LED driver.
+ */
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+
+#include <dev/iicbus/iicbus.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#ifdef FDT
+#include <dev/ofw/ofw_bus.h>
+#endif
+
+#include <dev/iicbus/is31fl319xreg.h>
+
+#include "gpio_if.h"
+#include "iicbus_if.h"
+
+#define IS31FL3193 1
+#define IS31FL3196 2
+#define IS31FL3199 3
+
+static struct ofw_compat_data compat_data[] = {
+ { "issi,is31fl3193", IS31FL3193 },
+ { "issi,is31fl3196", IS31FL3196 },
+ { "issi,is31fl3199", IS31FL3199 },
+ { NULL, 0 }
+};
+
+struct is31fl319x_reg {
+ struct is31fl319x_softc *sc;
+ uint8_t data;
+ uint8_t id;
+ uint8_t reg;
+};
+
+struct is31fl319x_softc {
+ device_t sc_dev;
+ device_t sc_gpio_busdev;
+ int sc_max_pins;
+ uint8_t sc_pwm[IS31FL319X_MAX_PINS];
+ uint8_t sc_conf1;
+ struct is31fl319x_reg sc_t0[IS31FL319X_MAX_PINS];
+ struct is31fl319x_reg sc_t123[IS31FL319X_MAX_PINS / 3];
+ struct is31fl319x_reg sc_t4[IS31FL319X_MAX_PINS];
+};
+
+static __inline int
+is31fl319x_write(device_t dev, uint8_t reg, uint8_t *data, size_t len)
+{
+
+ return (iicdev_writeto(dev, reg, data, len, IIC_INTRWAIT));
+}
+
+static __inline int
+is31fl319x_reg_update(struct is31fl319x_softc *sc, uint8_t reg)
+{
+ uint8_t data = 0;
+
+ return (iicdev_writeto(sc->sc_dev, reg, &data, 1, IIC_INTRWAIT));
+}
+
+static int
+is31fl319x_pwm_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error, led;
+ int32_t enable;
+ struct is31fl319x_softc *sc;
+
+ sc = (struct is31fl319x_softc *)arg1;
+ led = arg2;
+
+ enable = ((sc->sc_conf1 & IS31FL319X_CONF1_PWM(led)) != 0) ? 0 : 1;
+ error = sysctl_handle_int(oidp, &enable, sizeof(enable), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ sc->sc_conf1 &= ~IS31FL319X_CONF1_PWM(led);
+ if (enable == 0)
+ sc->sc_conf1 |= IS31FL319X_CONF1_PWM(led);
+ if (is31fl319x_write(sc->sc_dev, IS31FL319X_CONF1, &sc->sc_conf1,
+ sizeof(sc->sc_conf1)) != 0)
+ return (ENXIO);
+ if (is31fl319x_reg_update(sc, IS31FL319X_DATA_UPDATE) != 0)
+ return (ENXIO);
+ if (is31fl319x_reg_update(sc, IS31FL319X_TIME_UPDATE) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+is31fl319x_pin_timer_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int32_t a, b, ms;
+ struct is31fl319x_reg *timer;
+ struct is31fl319x_softc *sc;
+
+ timer = (struct is31fl319x_reg *)arg1;
+ sc = timer->sc;
+
+ a = timer->data & IS31FL319X_T0_A_MASK;
+ b = (timer->data & IS31FL319X_T0_B_MASK) >> 4;
+ ms = 260 * a * (2 << b);
+ error = sysctl_handle_int(oidp, &ms, sizeof(ms), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ if (ms > IS31FL319X_T0_MAX_TIME)
+ ms = IS31FL319X_T0_MAX_TIME;
+
+ a = b = 0;
+ if (ms >= 260) {
+ ms /= 260;
+		while (ms / (2 << b) > 15)
+			b++;
+ a = ms / (2 << b);
+ }
+ timer->data = (b << 4) | a;
+
+ if (is31fl319x_write(sc->sc_dev, timer->reg, &timer->data,
+ sizeof(timer->data)) != 0)
+ return (ENXIO);
+ if (is31fl319x_reg_update(sc, IS31FL319X_TIME_UPDATE) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+is31fl319x_dt_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int32_t enable;
+ struct is31fl319x_reg *led;
+ struct is31fl319x_softc *sc;
+
+ led = (struct is31fl319x_reg *)arg1;
+ sc = led->sc;
+
+ enable = ((led->data & IS31FL319X_DT) != 0) ? 1 : 0;
+ error = sysctl_handle_int(oidp, &enable, sizeof(enable), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ if (enable)
+ led->data |= IS31FL319X_DT;
+ else
+ led->data &= ~IS31FL319X_DT;
+ if (is31fl319x_write(sc->sc_dev, IS31FL319X_T123(led->id), &led->data,
+ sizeof(led->data)) != 0)
+ return (ENXIO);
+ if (is31fl319x_reg_update(sc, IS31FL319X_TIME_UPDATE) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+is31fl319x_t1t3_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int32_t a, ms;
+ struct is31fl319x_reg *led;
+ struct is31fl319x_softc *sc;
+
+ led = (struct is31fl319x_reg *)arg1;
+ sc = led->sc;
+
+ a = (led->data & IS31FL319X_T1_A_MASK);
+ if (a >= 5 && a <= 6)
+ ms = 0;
+ else if (a == 7)
+ ms = 100;
+ else
+ ms = 260 * (2 << a);
+ error = sysctl_handle_int(oidp, &ms, sizeof(ms), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ if (ms > IS31FL319X_T1_MAX_TIME)
+ ms = IS31FL319X_T1_MAX_TIME;
+
+ a = 0;
+ if (ms == 0)
+ a = 5; /* Breathing function disabled. */
+	else if (ms == 100)
+ a = 7; /* 100 ms */
+ else if (ms >= 260) {
+ ms /= 260;
+		while (ms / (2 << a) > 1)
+			a++;
+ }
+ led->data &= ~IS31FL319X_T1_A_MASK;
+ led->data |= (a & IS31FL319X_T1_A_MASK);
+
+ if (is31fl319x_write(sc->sc_dev, IS31FL319X_T123(led->id), &led->data,
+ sizeof(led->data)) != 0)
+ return (ENXIO);
+ if (is31fl319x_reg_update(sc, IS31FL319X_TIME_UPDATE) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+is31fl319x_t2_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int32_t b, ms;
+ struct is31fl319x_reg *led;
+ struct is31fl319x_softc *sc;
+
+ led = (struct is31fl319x_reg *)arg1;
+ sc = led->sc;
+
+ b = (led->data & IS31FL319X_T2_B_MASK) >> 4;
+ if (b > 0)
+ ms = 260 * (2 << (b - 1));
+ else
+ ms = 0;
+ error = sysctl_handle_int(oidp, &ms, sizeof(ms), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ if (ms > IS31FL319X_T2_MAX_TIME)
+ ms = IS31FL319X_T2_MAX_TIME;
+
+ b = 0;
+ if (ms >= 260) {
+ ms /= 260;
+ b = 1;
+		while (ms / (2 << (b - 1)) > 1)
+			b++;
+ }
+ led->data &= ~IS31FL319X_T2_B_MASK;
+ led->data |= ((b << 4) & IS31FL319X_T2_B_MASK);
+
+ if (is31fl319x_write(sc->sc_dev, IS31FL319X_T123(led->id), &led->data,
+ sizeof(led->data)) != 0)
+ return (ENXIO);
+ if (is31fl319x_reg_update(sc, IS31FL319X_TIME_UPDATE) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static void
+is31fl319x_sysctl_attach(device_t dev)
+{
+ char strbuf[4];
+ struct is31fl319x_softc *sc;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree_node, *led_node, *ledN_node, *pin_node;
+ struct sysctl_oid *pinN_node;
+ struct sysctl_oid_list *tree, *led_tree, *ledN_tree, *pin_tree;
+ struct sysctl_oid_list *pinN_tree;
+ int led, pin;
+
+ ctx = device_get_sysctl_ctx(dev);
+ tree_node = device_get_sysctl_tree(dev);
+ tree = SYSCTL_CHILDREN(tree_node);
+ pin_node = SYSCTL_ADD_NODE(ctx, tree, OID_AUTO, "pin", CTLFLAG_RD,
+ NULL, "Output Pins");
+ pin_tree = SYSCTL_CHILDREN(pin_node);
+
+ sc = device_get_softc(dev);
+ for (pin = 0; pin < sc->sc_max_pins; pin++) {
+
+ snprintf(strbuf, sizeof(strbuf), "%d", pin);
+ pinN_node = SYSCTL_ADD_NODE(ctx, pin_tree, OID_AUTO, strbuf,
+ CTLFLAG_RD, NULL, "Output Pin");
+ pinN_tree = SYSCTL_CHILDREN(pinN_node);
+
+ sc->sc_t0[pin].sc = sc;
+ sc->sc_t0[pin].data = 0;
+ sc->sc_t0[pin].id = pin;
+ sc->sc_t0[pin].reg = IS31FL319X_T0(pin);
+ SYSCTL_ADD_PROC(ctx, pinN_tree, OID_AUTO, "T0",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE,
+ &sc->sc_t0[pin], 0, is31fl319x_pin_timer_sysctl, "IU",
+ "T0 timer in ms");
+ sc->sc_t4[pin].sc = sc;
+ sc->sc_t4[pin].data = 0;
+ sc->sc_t4[pin].id = pin;
+ sc->sc_t4[pin].reg = IS31FL319X_T4(pin);
+ SYSCTL_ADD_PROC(ctx, pinN_tree, OID_AUTO, "T4",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE,
+ &sc->sc_t4[pin], 0, is31fl319x_pin_timer_sysctl, "IU",
+ "T4 timer in ms");
+ }
+ led_node = SYSCTL_ADD_NODE(ctx, tree, OID_AUTO, "led", CTLFLAG_RD,
+ NULL, "RGB LEDs");
+ led_tree = SYSCTL_CHILDREN(led_node);
+ for (led = 0; led < (sc->sc_max_pins / 3); led++) {
+ snprintf(strbuf, sizeof(strbuf), "%d", led);
+ ledN_node = SYSCTL_ADD_NODE(ctx, led_tree, OID_AUTO, strbuf,
+ CTLFLAG_RD, NULL, "RGB LED");
+ ledN_tree = SYSCTL_CHILDREN(ledN_node);
+
+ SYSCTL_ADD_PROC(ctx, ledN_tree, OID_AUTO, "pwm",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, sc, led,
+ is31fl319x_pwm_sysctl, "IU", "Enable the PWM control");
+ sc->sc_t123[led].sc = sc;
+ sc->sc_t123[led].data = 0;
+ sc->sc_t123[led].id = led;
+ sc->sc_t123[led].reg = IS31FL319X_T123(led);
+ SYSCTL_ADD_PROC(ctx, ledN_tree, OID_AUTO, "T1-T3",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE,
+ &sc->sc_t123[led], 0, is31fl319x_t1t3_sysctl, "IU",
+ "T1 and T3 timer");
+ SYSCTL_ADD_PROC(ctx, ledN_tree, OID_AUTO, "DT",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE,
+ &sc->sc_t123[led], 0, is31fl319x_dt_sysctl, "IU",
+ "T3 Double Time (T3 = 2T1)");
+ SYSCTL_ADD_PROC(ctx, ledN_tree, OID_AUTO, "T2",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE,
+ &sc->sc_t123[led], 0, is31fl319x_t2_sysctl, "IU",
+ "T2 timer");
+ }
+}
+
+static int
+is31fl319x_probe(device_t dev)
+{
+ const char *desc;
+ struct is31fl319x_softc *sc;
+#ifdef FDT
+ phandle_t node;
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ sc = device_get_softc(dev);
+ switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
+ case IS31FL3193:
+ desc = "ISSI IS31FL3193 3 channel light effect LED driver";
+ sc->sc_max_pins = 3;
+ break;
+ case IS31FL3196:
+ desc = "ISSI IS31FL3196 6 channel light effect LED driver";
+ sc->sc_max_pins = 6;
+ break;
+ case IS31FL3199:
+ desc = "ISSI IS31FL3199 9 channel light effect LED driver";
+ sc->sc_max_pins = 9;
+ break;
+ default:
+ return (ENXIO);
+ }
+ node = ofw_bus_get_node(dev);
+ if (!OF_hasprop(node, "gpio-controller"))
+ /* Node is not a GPIO controller. */
+ return (ENXIO);
+#else
+ sc = device_get_softc(dev);
+ sc->sc_max_pins = IS31FL319X_MAX_PINS;
+ desc = "ISSI IS31FL319x light effect LED driver";
+#endif
+ device_set_desc(dev, desc);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+is31fl319x_attach(device_t dev)
+{
+ struct is31fl319x_softc *sc;
+ uint8_t data[3];
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+
+ /* Reset the LED driver. */
+ data[0] = 0;
+ if (is31fl319x_write(dev, IS31FL319X_RESET, data, 1) != 0)
+ return (ENXIO);
+
+ /* Disable the shutdown mode. */
+ data[0] = 1;
+ if (is31fl319x_write(dev, IS31FL319X_SHUTDOWN, data, 1) != 0)
+ return (ENXIO);
+
+ /* Attach gpiobus. */
+ sc->sc_gpio_busdev = gpiobus_attach_bus(dev);
+ if (sc->sc_gpio_busdev == NULL)
+ return (ENXIO);
+
+ is31fl319x_sysctl_attach(dev);
+
+	/* Update the boot status LEDs: the kernel is loading. */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 35;
+ if (is31fl319x_write(dev, IS31FL319X_PWM(6), data, sizeof(data)) != 0)
+ return (ENXIO);
+ data[2] = 100;
+ if (is31fl319x_write(dev, IS31FL319X_PWM(3), data, sizeof(data)) != 0)
+ return (ENXIO);
+
+ /* Enable breath on LED 2 and 3. */
+ sc->sc_conf1 |= (6 << 4);
+ if (is31fl319x_write(sc->sc_dev, IS31FL319X_CONF1, &sc->sc_conf1,
+ sizeof(sc->sc_conf1)) != 0)
+ return (ENXIO);
+
+ /* Update register data. */
+ if (is31fl319x_reg_update(sc, IS31FL319X_DATA_UPDATE) != 0)
+ return (ENXIO);
+ if (is31fl319x_reg_update(sc, IS31FL319X_TIME_UPDATE) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static device_t
+is31fl319x_gpio_get_bus(device_t dev)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return (sc->sc_gpio_busdev);
+}
+
+static int
+is31fl319x_gpio_pin_max(device_t dev, int *maxpin)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ *maxpin = sc->sc_max_pins - 1;
+
+ return (0);
+}
+
+static int
+is31fl319x_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
+{
+ const char *buf[] = { "R", "G", "B" };
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ memset(name, 0, GPIOMAXNAME);
+ snprintf(name, GPIOMAXNAME, "%s %d", buf[pin % 3], pin / 3);
+
+ return (0);
+}
+
+static int
+is31fl319x_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ *caps = GPIO_PIN_PWM;
+
+ return (0);
+}
+
+static int
+is31fl319x_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ *flags = GPIO_PIN_PWM;
+
+ return (0);
+}
+
+static int
+is31fl319x_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ if ((flags & GPIO_PIN_PWM) == 0)
+ return (EINVAL);
+
+ return (0);
+}
+
+static int
+is31fl319x_gpio_pin_set(device_t dev, uint32_t pin, uint32_t value)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ if (value != 0)
+ sc->sc_pwm[pin] = IS31FL319X_PWM_MAX;
+ else
+ sc->sc_pwm[pin] = 0;
+ if (is31fl319x_write(dev, IS31FL319X_PWM(pin),
+ &sc->sc_pwm[pin], 1) != 0)
+ return (ENXIO);
+
+ return (is31fl319x_reg_update(sc, IS31FL319X_DATA_UPDATE));
+}
+
+static int
+is31fl319x_gpio_pin_get(device_t dev, uint32_t pin, uint32_t *val)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ *val = (sc->sc_pwm[pin] != 0) ? 1 : 0;
+
+ return (0);
+}
+
+static int
+is31fl319x_gpio_pin_toggle(device_t dev, uint32_t pin)
+{
+ struct is31fl319x_softc *sc;
+ uint32_t val;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ val = (sc->sc_pwm[pin] != 0) ? 1 : 0;
+
+ return (is31fl319x_gpio_pin_set(dev, pin, val ^ 1));
+}
+
+static int
+is31fl319x_gpio_pwm_get(device_t dev, int32_t pwm, uint32_t pin, uint32_t reg,
+ uint32_t *val)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ if (pwm != -1 || reg != GPIO_PWM_DUTY)
+ return (EINVAL);
+
+ *val = (uint32_t)sc->sc_pwm[pin];
+
+ return (0);
+}
+
+static int
+is31fl319x_gpio_pwm_set(device_t dev, int32_t pwm, uint32_t pin, uint32_t reg,
+ uint32_t val)
+{
+ struct is31fl319x_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (pin >= sc->sc_max_pins)
+ return (EINVAL);
+
+ if (pwm != -1 || reg != GPIO_PWM_DUTY)
+ return (EINVAL);
+
+ sc->sc_pwm[pin] = (uint8_t)val;
+ if (is31fl319x_write(dev, IS31FL319X_PWM(pin),
+ &sc->sc_pwm[pin], 1) != 0)
+ return (ENXIO);
+
+ return (is31fl319x_reg_update(sc, IS31FL319X_DATA_UPDATE));
+}
+
+static phandle_t
+is31fl319x_gpio_get_node(device_t bus, device_t dev)
+{
+
+ /* Used by ofw_gpiobus. */
+ return (ofw_bus_get_node(bus));
+}
+
+static device_method_t is31fl319x_methods[] = {
+ DEVMETHOD(device_probe, is31fl319x_probe),
+ DEVMETHOD(device_attach, is31fl319x_attach),
+
+ /* GPIO protocol */
+ DEVMETHOD(gpio_get_bus, is31fl319x_gpio_get_bus),
+ DEVMETHOD(gpio_pin_max, is31fl319x_gpio_pin_max),
+ DEVMETHOD(gpio_pin_getname, is31fl319x_gpio_pin_getname),
+ DEVMETHOD(gpio_pin_getcaps, is31fl319x_gpio_pin_getcaps),
+ DEVMETHOD(gpio_pin_getflags, is31fl319x_gpio_pin_getflags),
+ DEVMETHOD(gpio_pin_setflags, is31fl319x_gpio_pin_setflags),
+ DEVMETHOD(gpio_pin_get, is31fl319x_gpio_pin_get),
+ DEVMETHOD(gpio_pin_set, is31fl319x_gpio_pin_set),
+ DEVMETHOD(gpio_pin_toggle, is31fl319x_gpio_pin_toggle),
+ DEVMETHOD(gpio_pwm_get, is31fl319x_gpio_pwm_get),
+ DEVMETHOD(gpio_pwm_set, is31fl319x_gpio_pwm_set),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_node, is31fl319x_gpio_get_node),
+
+ DEVMETHOD_END
+};
+
+static driver_t is31fl319x_driver = {
+ "gpio",
+ is31fl319x_methods,
+ sizeof(struct is31fl319x_softc),
+};
+
+static devclass_t is31fl319x_devclass;
+
+DRIVER_MODULE(is31fl319x, iicbus, is31fl319x_driver, is31fl319x_devclass,
+ NULL, NULL);
+MODULE_VERSION(is31fl319x, 1);
+MODULE_DEPEND(is31fl319x, iicbus, 1, 1, 1);
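
The timer sysctl handlers above all invert the same register encoding.  For
T0 the register holds (B << 4) | A and the programmed time follows the
driver's formula time_ms = 260 * A * (2 << B); a standalone sketch of the
encode step, mirroring is31fl319x_pin_timer_sysctl():

	/* Sketch: encode milliseconds into the driver's T0 A/B format. */
	static uint8_t
	t0_encode(int ms)
	{
		int a = 0, b = 0;

		if (ms >= 260) {
			ms /= 260;
			while (ms / (2 << b) > 15)
				b++;
			a = ms / (2 << b);
		}
		/* e.g. 6000 ms -> A = 11, B = 0, i.e. 5720 ms programmed */
		return ((b << 4) | a);
	}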
diff --git a/sys/dev/iicbus/is31fl319xreg.h b/sys/dev/iicbus/is31fl319xreg.h
new file mode 100644
index 0000000..d780eb9
--- /dev/null
+++ b/sys/dev/iicbus/is31fl319xreg.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2017 Rubicon Communications, LLC (Netgate)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * ISSI IS31FL319X [3|6|9]-Channel Light Effect LED Driver.
+ */
+
+#ifndef _IS31FL319XREG_H_
+#define _IS31FL319XREG_H_
+
+#define IS31FL319X_SHUTDOWN 0x00
+#define IS31FL319X_LEDCTRL1 0x01
+#define IS31FL319X_LEDCTRL2 0x02
+#define IS31FL319X_CONF1 0x03
+#define	IS31FL319X_CONF1_PWM(led)	(1 << (4 + (led)))
+#define IS31FL319X_CONF2 0x04
+#define IS31FL319X_RAMPMODE 0x05
+#define IS31FL319X_BREATHMARK 0x06
+#define IS31FL319X_PWM(out) (0x07 + (out))
+#define IS31FL319X_PWM_MAX 0xff
+#define IS31FL319X_DATA_UPDATE 0x10
+#define IS31FL319X_T0(out) (0x11 + (out))
+#define IS31FL319X_T0_A_MASK 0x0f
+#define IS31FL319X_T0_B_MASK 0x30
+#define IS31FL319X_T0_MAX_TIME 31200
+#define IS31FL319X_T123(led) (0x1a + (led))
+#define IS31FL319X_T1_A_MASK 0x07
+#define IS31FL319X_T1_MAX_TIME 4160
+#define IS31FL319X_T2_B_MASK 0x70
+#define IS31FL319X_T2_MAX_TIME 16640
+#define IS31FL319X_DT (1 << 7)
+#define IS31FL319X_T4(out) (0x1d + (out))
+#define IS31FL319X_TIME_UPDATE 0x26
+#define IS31FL319X_RESET 0xff
+
+#define IS31FL319X_MAX_PINS 9
+
+#endif /* _IS31FL319XREG_H_ */
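
Offset arithmetic implied by the macros above, taking the last RGB LED of an
IS31FL3199 (led 2, outputs 6..8) as an example:

	IS31FL319X_PWM(6)  = 0x07 + 6 = 0x0d
	IS31FL319X_T0(6)   = 0x11 + 6 = 0x17
	IS31FL319X_T123(2) = 0x1a + 2 = 0x1c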
diff --git a/sys/dev/iicbus/ofw_iicbus.c b/sys/dev/iicbus/ofw_iicbus.c
index ffdb87f..7577886 100644
--- a/sys/dev/iicbus/ofw_iicbus.c
+++ b/sys/dev/iicbus/ofw_iicbus.c
@@ -86,6 +86,8 @@ EARLY_DRIVER_MODULE(ofw_iicbus, iichb, ofw_iicbus_driver, ofw_iicbus_devclass,
0, 0, BUS_PASS_BUS);
EARLY_DRIVER_MODULE(ofw_iicbus, twsi, ofw_iicbus_driver, ofw_iicbus_devclass,
0, 0, BUS_PASS_BUS);
MODULE_VERSION(ofw_iicbus, 1);
MODULE_DEPEND(ofw_iicbus, iicbus, 1, 1, 1);
diff --git a/sys/dev/iicbus/pca9552.c b/sys/dev/iicbus/pca9552.c
new file mode 100644
index 0000000..43fe540
--- /dev/null
+++ b/sys/dev/iicbus/pca9552.c
@@ -0,0 +1,414 @@
+/*-
+ * Copyright (c) 2017 Rubicon Communications, LLC (Netgate)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Driver for the NXP PCA9552 - I2C LED driver with programmable blink rates.
+ */
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+
+#include <dev/iicbus/iicbus.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#ifdef FDT
+#include <dev/ofw/ofw_bus.h>
+#endif
+
+#include <dev/iicbus/pca9552reg.h>
+
+#include "gpio_if.h"
+#include "iicbus_if.h"
+
+#define PCA9552_GPIO_PINS 16
+#define	PCA9552_GPIO_CAPS	(GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
+				    GPIO_PIN_OPENDRAIN)
+
+struct pca9552_softc {
+ device_t sc_dev;
+ device_t sc_gpio_busdev;
+ uint16_t sc_addr;
+};
+
+static int
+pca9552_read(device_t dev, uint16_t addr, uint8_t ctrl, uint8_t *data,
+ size_t len)
+{
+ struct iic_msg msg[2] = {
+ { addr, IIC_M_WR | IIC_M_NOSTOP, 1, &ctrl },
+ { addr, IIC_M_RD, len, data },
+ };
+
+ return (iicbus_transfer(dev, msg, nitems(msg)));
+}
+
+static int
+pca9552_write(device_t dev, uint16_t addr, uint8_t *data, size_t len)
+{
+ struct iic_msg msg[1] = {
+ { addr, IIC_M_WR, len, data },
+ };
+
+ return (iicbus_transfer(dev, msg, nitems(msg)));
+}
+
+static int
+pca9552_probe(device_t dev)
+{
+#ifdef FDT
+ phandle_t node;
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ if (!ofw_bus_is_compatible(dev, "nxp,pca9552"))
+ return (ENXIO);
+ node = ofw_bus_get_node(dev);
+ if (!OF_hasprop(node, "gpio-controller"))
+ /* Node is not a GPIO controller. */
+ return (ENXIO);
+#endif
+ device_set_desc(dev, "NXP PCA9552 LED driver");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+pca9552_period_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error, new;
+ struct pca9552_softc *sc;
+ uint8_t data[2], psc;
+
+ sc = (struct pca9552_softc *)arg1;
+ error = pca9552_read(sc->sc_dev, sc->sc_addr, PCA9552_PSC(arg2), &psc,
+ sizeof(psc));
+ if (error != 0)
+ return (error);
+
+ new = ((((int)psc) + 1) * 1000) / 44;
+ error = sysctl_handle_int(oidp, &new, sizeof(new), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ new = ((new * 44) / 1000) - 1;
+ if (new != psc && new >= 0 && new <= 255) {
+ data[0] = PCA9552_PSC(arg2);
+ data[1] = new;
+ error = pca9552_write(sc->sc_dev, sc->sc_addr, data,
+ sizeof(data));
+ if (error != 0)
+ return (error);
+ }
+
+ return (error);
+}
+
+static int
+pca9552_duty_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ int error, new;
+ struct pca9552_softc *sc;
+ uint8_t data[2], duty;
+
+ sc = (struct pca9552_softc *)arg1;
+ error = pca9552_read(sc->sc_dev, sc->sc_addr, PCA9552_PWM(arg2), &duty,
+ sizeof(duty));
+ if (error != 0)
+ return (error);
+
+ new = duty;
+ error = sysctl_handle_int(oidp, &new, sizeof(new), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (new != duty && new >= 0 && new <= 255) {
+ data[0] = PCA9552_PWM(arg2);
+ data[1] = new;
+ error = pca9552_write(sc->sc_dev, sc->sc_addr, data,
+ sizeof(data));
+ if (error != 0)
+ return (error);
+ }
+
+ return (error);
+}
+
+static int
+pca9552_attach(device_t dev)
+{
+ char pwmbuf[4];
+ int i;
+ struct pca9552_softc *sc;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *pwm_node, *pwmN_node, *tree_node;
+ struct sysctl_oid_list *pwm_tree, *pwmN_tree, *tree;
+ uint8_t data[2];
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+ sc->sc_addr = iicbus_get_addr(dev);
+
+ ctx = device_get_sysctl_ctx(dev);
+ tree_node = device_get_sysctl_tree(dev);
+ tree = SYSCTL_CHILDREN(tree_node);
+
+ /* Reset output. */
+ for (i = 0; i < 4; i++) {
+ data[0] = PCA9552_LS(i * 4);
+ data[1] = 0x55;
+ if (pca9552_write(dev, sc->sc_addr, data, sizeof(data)) != 0)
+ return (ENXIO);
+ }
+
+ /* Attach gpiobus. */
+ sc->sc_gpio_busdev = gpiobus_attach_bus(dev);
+ if (sc->sc_gpio_busdev == NULL)
+ return (ENXIO);
+
+ pwm_node = SYSCTL_ADD_NODE(ctx, tree, OID_AUTO, "pwm",
+ CTLFLAG_RD, NULL, "PWM settings");
+ pwm_tree = SYSCTL_CHILDREN(pwm_node);
+
+ for (i = 0; i < 2; i++) {
+ snprintf(pwmbuf, sizeof(pwmbuf), "%d", i);
+ pwmN_node = SYSCTL_ADD_NODE(ctx, pwm_tree, OID_AUTO, pwmbuf,
+ CTLFLAG_RD, NULL, "PWM settings");
+ pwmN_tree = SYSCTL_CHILDREN(pwmN_node);
+
+ SYSCTL_ADD_PROC(ctx, pwmN_tree, OID_AUTO, "period",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, sc, i,
+ pca9552_period_sysctl, "IU", "PCA9552 PWM period (in ms)");
+ SYSCTL_ADD_PROC(ctx, pwmN_tree, OID_AUTO, "duty",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, sc, i,
+ pca9552_duty_sysctl, "IU", "PCA9552 PWM duty cycle");
+ }
+
+ return (0);
+}
+
+static device_t
+pca9552_gpio_get_bus(device_t dev)
+{
+ struct pca9552_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ return (sc->sc_gpio_busdev);
+}
+
+static int
+pca9552_gpio_pin_max(device_t dev, int *maxpin)
+{
+
+ *maxpin = PCA9552_GPIO_PINS - 1;
+
+ return (0);
+}
+
+static int
+pca9552_gpio_pin_getname(device_t dev, uint32_t pin, char *name)
+{
+
+ if (pin >= PCA9552_GPIO_PINS)
+ return (EINVAL);
+
+ memset(name, 0, GPIOMAXNAME);
+ snprintf(name, GPIOMAXNAME, "LED %d", pin);
+
+ return (0);
+}
+
+static int
+pca9552_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
+{
+
+ if (pin >= PCA9552_GPIO_PINS)
+ return (EINVAL);
+
+ *caps = PCA9552_GPIO_CAPS;
+
+ return (0);
+}
+
+static int
+pca9552_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags)
+{
+ struct pca9552_softc *sc;
+ uint8_t curr;
+
+ if (pin >= PCA9552_GPIO_PINS)
+ return (EINVAL);
+
+ sc = device_get_softc(dev);
+ if (pca9552_read(dev, sc->sc_addr, PCA9552_LS(pin), &curr,
+ sizeof(curr)) != 0)
+ return (ENXIO);
+
+ switch ((curr >> PCA9552_LS_SHIFT(pin)) & 0x3) {
+ case 0:
+ *flags = GPIO_PIN_OUTPUT | GPIO_PIN_OPENDRAIN;
+ break;
+ case 1:
+ *flags = GPIO_PIN_INPUT;
+ break;
+ default:
+ *flags = 0;
+ }
+
+ return (0);
+}
+
+static int
+pca9552_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
+{
+ struct pca9552_softc *sc;
+ uint8_t curr, new[2];
+
+ if (pin >= PCA9552_GPIO_PINS)
+ return (EINVAL);
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == 0)
+ return (0);
+
+ sc = device_get_softc(dev);
+ if (pca9552_read(dev, sc->sc_addr, PCA9552_LS(pin), &curr,
+ sizeof(curr)) != 0)
+ return (ENXIO);
+
+ curr &= ~(0x3 << PCA9552_LS_SHIFT(pin));
+ if ((flags & GPIO_PIN_INPUT) != 0)
+ curr |= (0x1 << PCA9552_LS_SHIFT(pin));
+ new[0] = PCA9552_LS(pin);
+ new[1] = curr;
+ if (pca9552_write(dev, sc->sc_addr, new, sizeof(new)) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+pca9552_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value)
+{
+ struct pca9552_softc *sc;
+ uint8_t curr, new[2];
+
+ if (pin >= PCA9552_GPIO_PINS)
+ return (EINVAL);
+
+ sc = device_get_softc(dev);
+ if (pca9552_read(dev, sc->sc_addr, PCA9552_LS(pin), &curr,
+ sizeof(curr)) != 0)
+ return (ENXIO);
+
+ curr &= ~(0x3 << PCA9552_LS_SHIFT(pin));
+ if (value != 0)
+ curr |= (0x1 << PCA9552_LS_SHIFT(pin));
+ new[0] = PCA9552_LS(pin);
+ new[1] = curr;
+ if (pca9552_write(dev, sc->sc_addr, new, sizeof(new)) != 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static int
+pca9552_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val)
+{
+ struct pca9552_softc *sc;
+ uint8_t data;
+
+ if (pin >= PCA9552_GPIO_PINS)
+ return (EINVAL);
+
+ sc = device_get_softc(dev);
+ if (pca9552_read(dev, sc->sc_addr, PCA9552_INPUT(pin), &data,
+ sizeof(data)) != 0)
+ return (ENXIO);
+
+ *val = ((data & (1 << (pin % 8))) != 0) ? 1 : 0;
+
+ return (0);
+}
+
+static int
+pca9552_gpio_pin_toggle(device_t dev, uint32_t pin)
+{
+ unsigned int val;
+
+ if (pca9552_gpio_pin_get(dev, pin, &val) != 0)
+ return (ENXIO);
+
+ return (pca9552_gpio_pin_set(dev, pin, val ^ 1));
+}
+
+static phandle_t
+pca9552_gpio_get_node(device_t bus, device_t dev)
+{
+
+ /* Used by ofw_gpiobus. */
+ return (ofw_bus_get_node(bus));
+}
+
+static device_method_t pca9552_methods[] = {
+ DEVMETHOD(device_probe, pca9552_probe),
+ DEVMETHOD(device_attach, pca9552_attach),
+
+ /* GPIO protocol */
+ DEVMETHOD(gpio_get_bus, pca9552_gpio_get_bus),
+ DEVMETHOD(gpio_pin_max, pca9552_gpio_pin_max),
+ DEVMETHOD(gpio_pin_getname, pca9552_gpio_pin_getname),
+ DEVMETHOD(gpio_pin_getcaps, pca9552_gpio_pin_getcaps),
+ DEVMETHOD(gpio_pin_getflags, pca9552_gpio_pin_getflags),
+ DEVMETHOD(gpio_pin_setflags, pca9552_gpio_pin_setflags),
+ DEVMETHOD(gpio_pin_get, pca9552_gpio_pin_get),
+ DEVMETHOD(gpio_pin_set, pca9552_gpio_pin_set),
+ DEVMETHOD(gpio_pin_toggle, pca9552_gpio_pin_toggle),
+
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_node, pca9552_gpio_get_node),
+
+ DEVMETHOD_END
+};
+
+static driver_t pca9552_driver = {
+ "gpio",
+ pca9552_methods,
+ sizeof(struct pca9552_softc),
+};
+
+static devclass_t pca9552_devclass;
+
+DRIVER_MODULE(pca9552, iicbus, pca9552_driver, pca9552_devclass, NULL, NULL);
+MODULE_VERSION(pca9552, 1);
+MODULE_DEPEND(pca9552, iicbus, 1, 1, 1);
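
For reference, the conversion implemented by pca9552_period_sysctl() above is
period_ms = (PSC + 1) * 1000 / 44, with the inverse PSC = period_ms * 44 /
1000 - 1, so the mapping round-trips for representable values:

	/* PSC = 43 -> (43 + 1) * 1000 / 44 = 1000 ms */
	/* writing 500 ms -> PSC = 500 * 44 / 1000 - 1 = 21 -> 500 ms */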
diff --git a/sys/dev/iicbus/pca9552reg.h b/sys/dev/iicbus/pca9552reg.h
new file mode 100644
index 0000000..d5a5a43
--- /dev/null
+++ b/sys/dev/iicbus/pca9552reg.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2017 Rubicon Communications, LLC (Netgate)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * NXP PCA9552 LED Driver Registers.
+ */
+
+#ifndef _PCA9552REG_H_
+#define _PCA9552REG_H_
+
+#define PCA9552_INPUT(a) (0x0 + ((a) / 8))
+#define PCA9552_PSC(a) (0x2 + 2 * (a))
+#define PCA9552_PWM(a) (0x3 + 2 * (a))
+#define PCA9552_LS(a) (0x6 + ((a) / 4))
+#define PCA9552_LS_SHIFT(a) (((a) % 4) * 2)
+
+#endif /* _PCA9552REG_H_ */
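
Worked example of the register addressing encoded above, for pin 10:

	PCA9552_LS(10)       = 0x6 + 10 / 4 = 0x8   (selector LS2)
	PCA9552_LS_SHIFT(10) = (10 % 4) * 2 = 4
	PCA9552_INPUT(10)    = 0x0 + 10 / 8 = 0x1   (INPUT1)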
diff --git a/sys/dev/iicbus/twsi/mv_twsi.c b/sys/dev/iicbus/twsi/mv_twsi.c
index 998a197..997263e 100644
--- a/sys/dev/iicbus/twsi/mv_twsi.c
+++ b/sys/dev/iicbus/twsi/mv_twsi.c
@@ -91,6 +91,7 @@ __FBSDID("$FreeBSD$");
#define debugf(fmt, args...)
#endif
+static phandle_t mv_twsi_get_node(device_t, device_t);
static int mv_twsi_probe(device_t);
static int mv_twsi_attach(device_t);
@@ -105,7 +106,10 @@ static device_method_t mv_twsi_methods[] = {
DEVMETHOD(device_probe, mv_twsi_probe),
DEVMETHOD(device_attach, mv_twsi_attach),
- { 0, 0 }
+ /* ofw_bus interface */
+ DEVMETHOD(ofw_bus_get_node, mv_twsi_get_node),
+
+ DEVMETHOD_END
};
DEFINE_CLASS_1(twsi, mv_twsi_driver, mv_twsi_methods,
@@ -117,6 +121,14 @@ DRIVER_MODULE(twsi, simplebus, mv_twsi_driver, mv_twsi_devclass, 0, 0);
DRIVER_MODULE(iicbus, twsi, iicbus_driver, iicbus_devclass, 0, 0);
MODULE_DEPEND(twsi, iicbus, 1, 1, 1);
+static phandle_t
+mv_twsi_get_node(device_t bus, device_t dev)
+{
+
+ /* Used by ofw_iicbus. */
+ return (ofw_bus_get_node(bus));
+}
+
static int
mv_twsi_probe(device_t dev)
{
diff --git a/sys/dev/iicbus/twsi/twsi.c b/sys/dev/iicbus/twsi/twsi.c
index dee0b7a..aa5f943 100644
--- a/sys/dev/iicbus/twsi/twsi.c
+++ b/sys/dev/iicbus/twsi/twsi.c
@@ -114,6 +114,7 @@ twsi_control_clear(struct twsi_softc *sc, uint32_t mask)
uint32_t val;
val = TWSI_READ(sc, sc->reg_control);
+ val &= ~(TWSI_CONTROL_STOP | TWSI_CONTROL_START);
val &= ~mask;
TWSI_WRITE(sc, sc->reg_control, val);
}
@@ -124,6 +125,7 @@ twsi_control_set(struct twsi_softc *sc, uint32_t mask)
uint32_t val;
val = TWSI_READ(sc, sc->reg_control);
+ val &= ~(TWSI_CONTROL_STOP | TWSI_CONTROL_START);
val |= mask;
TWSI_WRITE(sc, sc->reg_control, val);
}
@@ -204,8 +206,8 @@ twsi_locked_start(device_t dev, struct twsi_softc *sc, int32_t mask,
}
TWSI_WRITE(sc, sc->reg_data, slave);
- DELAY(1000);
twsi_clear_iflg(sc);
+ DELAY(1000);
if (twsi_poll_ctrl(sc, timeout, TWSI_CONTROL_IFLG)) {
debugf("timeout sending slave address\n");
@@ -251,7 +253,7 @@ twsi_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
TWSI_WRITE(sc, sc->reg_soft_reset, 0x0);
DELAY(2000);
TWSI_WRITE(sc, sc->reg_baud_rate, param);
- TWSI_WRITE(sc, sc->reg_control, TWSI_CONTROL_TWSIEN | TWSI_CONTROL_ACK);
+ TWSI_WRITE(sc, sc->reg_control, TWSI_CONTROL_TWSIEN);
DELAY(1000);
mtx_unlock(&sc->mutex);
@@ -266,9 +268,10 @@ twsi_stop(device_t dev)
sc = device_get_softc(dev);
mtx_lock(&sc->mutex);
+ twsi_control_clear(sc, TWSI_CONTROL_ACK);
twsi_control_set(sc, TWSI_CONTROL_STOP);
- DELAY(1000);
twsi_clear_iflg(sc);
+ DELAY(1000);
mtx_unlock(&sc->mutex);
return (IIC_NOERR);
@@ -341,8 +344,8 @@ twsi_read(device_t dev, char *buf, int len, int *read, int last, int delay)
else
twsi_control_set(sc, TWSI_CONTROL_ACK);
- DELAY (1000);
twsi_clear_iflg(sc);
+ DELAY(1000);
if (twsi_poll_ctrl(sc, delay, TWSI_CONTROL_IFLG)) {
debugf("timeout reading data\n");
@@ -382,6 +385,7 @@ twsi_write(device_t dev, const char *buf, int len, int *sent, int timeout)
TWSI_WRITE(sc, sc->reg_data, *buf++);
twsi_clear_iflg(sc);
+ DELAY(1000);
if (twsi_poll_ctrl(sc, timeout, TWSI_CONTROL_IFLG)) {
debugf("timeout writing data\n");
rv = IIC_ETIMEOUT;
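
All three hunks above move the DELAY() to after twsi_clear_iflg(): clearing
IFLG is what lets the controller proceed, so the settling delay is only
useful once that has happened.  The resulting per-byte sequence, sketched:

	TWSI_WRITE(sc, sc->reg_data, byte);	/* stage the byte */
	twsi_clear_iflg(sc);			/* let the controller run */
	DELAY(1000);				/* settle */
	if (twsi_poll_ctrl(sc, timeout, TWSI_CONTROL_IFLG))
		rv = IIC_ETIMEOUT;		/* IFLG never re-asserted */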
diff --git a/sys/dev/ismt/ismt.c b/sys/dev/ismt/ismt.c
index 3fbcfed..7d0622c 100644
--- a/sys/dev/ismt/ismt.c
+++ b/sys/dev/ismt/ismt.c
@@ -72,7 +72,7 @@ __FBSDID("$FreeBSD$");
#define ISMT_DESC_LPR 0x80 /* Large Packet Received */
/* Macros */
-#define ISMT_DESC_ADDR_RW(addr, is_read) ((addr << 1) | (is_read))
+#define ISMT_DESC_ADDR_RW(addr, is_read) ((addr) | (is_read))
/* iSMT General Register address offsets (SMBBAR + <addr>) */
#define ISMT_GR_GCTRL 0x000 /* General Control */
@@ -717,6 +717,7 @@ fail:
#define ID_INTEL_S1200_SMT0 0x0c598086
#define ID_INTEL_S1200_SMT1 0x0c5a8086
#define ID_INTEL_C2000_SMT 0x1f158086
+#define ID_INTEL_DNV_SMT 0x19ac8086
static int
ismt_probe(device_t dev)
@@ -733,6 +734,9 @@ ismt_probe(device_t dev)
case ID_INTEL_C2000_SMT:
desc = "Atom Processor C2000 SMBus 2.0";
break;
+ case ID_INTEL_DNV_SMT:
+ desc = "Denverton Host SMBus Controller";
+ break;
default:
return (ENXIO);
}
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 45dface..9cb35f9 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -3028,7 +3028,7 @@ ixgbe_init_locked(struct adapter *adapter)
* need to be kick-started
*/
if (hw->phy.type == ixgbe_phy_none) {
- err = hw->phy.ops.identify(hw);
+ err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
device_printf(dev,
"Unsupported SFP+ module type was detected.\n");
diff --git a/sys/dev/mii/e1000phy.c b/sys/dev/mii/e1000phy.c
index d97c7d5..075368c 100644
--- a/sys/dev/mii/e1000phy.c
+++ b/sys/dev/mii/e1000phy.c
@@ -209,6 +209,9 @@ e1000phy_reset(struct mii_softc *sc)
}
} else {
switch (sc->mii_mpd_model) {
+ case MII_MODEL_xxMARVELL_E1000:
+ reg |= E1000_SCR_AUTO_X_MODE;
+ break;
case MII_MODEL_xxMARVELL_E1111:
case MII_MODEL_xxMARVELL_E1112:
case MII_MODEL_xxMARVELL_E1116:
diff --git a/sys/dev/mii/micphy.c b/sys/dev/mii/micphy.c
index 4f829c8..7b88cf0 100644
--- a/sys/dev/mii/micphy.c
+++ b/sys/dev/mii/micphy.c
@@ -224,6 +224,13 @@ ksz9031_load_values(struct mii_softc *sc, phandle_t node)
ksz90x1_load_values(sc, node, 2, MII_KSZ9031_CLOCK_PAD_SKEW,
"rxc-skew-ps", 0x1f, 0, "txc-skew-ps", 0x1f, 5,
NULL, 0, 0, NULL, 0, 0);
+
+ /*
+ * Fix for errata 5.
+ * Set the device's Auto-Negotiation FLP (Fast Link Pulse) to 16ms.
+ */
+ ksz9031_write(sc, 0, 0x4, 0x6);
+ ksz9031_write(sc, 0, 0x3, 0x1a80);
}
static void
diff --git a/sys/dev/neta/if_mvneta.c b/sys/dev/neta/if_mvneta.c
new file mode 100644
index 0000000..f453642
--- /dev/null
+++ b/sys/dev/neta/if_mvneta.c
@@ -0,0 +1,3608 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/smp.h>
+#include <sys/taskqueue.h>
+#ifdef MVNETA_KTR
+#include <sys/ktr.h>
+#endif
+
+#include <net/ethernet.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp_lro.h>
+
+#include <sys/sockio.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/mdio/mdio.h>
+
+#include <arm/mv/mvreg.h>
+#include <arm/mv/mvvar.h>
+#include <arm/mv/mvwin.h>
+
+#include "if_mvnetareg.h"
+#include "if_mvnetavar.h"
+
+#include "miibus_if.h"
+#include "mdio_if.h"
+
+#ifdef MVNETA_DEBUG
+#define STATIC /* nothing */
+#else
+#define STATIC static
+#endif
+
+#define DASSERT(x) KASSERT((x), (#x))
+
+/* Device Register Initialization */
+STATIC int mvneta_initreg(struct ifnet *);
+
+/* Descriptor Ring Control for each of queues */
+STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
+STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
+STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
+STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
+STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+STATIC int mvneta_dma_create(struct mvneta_softc *);
+
+/* Rx/Tx Queue Control */
+STATIC int mvneta_rx_queue_init(struct ifnet *, int);
+STATIC int mvneta_tx_queue_init(struct ifnet *, int);
+STATIC int mvneta_rx_queue_enable(struct ifnet *, int);
+STATIC int mvneta_tx_queue_enable(struct ifnet *, int);
+STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
+STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
+STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
+STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
+
+/* Interrupt Handlers */
+STATIC void mvneta_disable_intr(struct mvneta_softc *);
+STATIC void mvneta_enable_intr(struct mvneta_softc *);
+STATIC void mvneta_rxtxth_intr(void *);
+STATIC int mvneta_misc_intr(struct mvneta_softc *);
+STATIC void mvneta_tick(void *);
+/* struct ifnet and mii callbacks */
+STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
+STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
+#ifdef MVNETA_MULTIQUEUE
+STATIC int mvneta_transmit(struct ifnet *, struct mbuf *);
+#else /* !MVNETA_MULTIQUEUE */
+STATIC void mvneta_start(struct ifnet *);
+#endif
+STATIC void mvneta_qflush(struct ifnet *);
+STATIC void mvneta_tx_task(void *, int);
+STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
+STATIC void mvneta_init(void *);
+STATIC void mvneta_init_locked(void *);
+STATIC void mvneta_stop(struct mvneta_softc *);
+STATIC void mvneta_stop_locked(struct mvneta_softc *);
+STATIC int mvneta_mediachange(struct ifnet *);
+STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
+STATIC void mvneta_portup(struct mvneta_softc *);
+STATIC void mvneta_portdown(struct mvneta_softc *);
+
+/* Link State Notify */
+STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
+STATIC int mvneta_update_media(struct mvneta_softc *, int);
+STATIC void mvneta_adjust_link(struct mvneta_softc *);
+STATIC void mvneta_update_eee(struct mvneta_softc *);
+STATIC void mvneta_update_fc(struct mvneta_softc *);
+STATIC void mvneta_link_isr(struct mvneta_softc *);
+STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
+STATIC void mvneta_linkup(struct mvneta_softc *);
+STATIC void mvneta_linkdown(struct mvneta_softc *);
+STATIC void mvneta_linkreset(struct mvneta_softc *);
+
+/* Tx Subroutines */
+STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
+STATIC void mvneta_tx_set_csumflag(struct ifnet *,
+ struct mvneta_tx_desc *, struct mbuf *);
+STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
+STATIC void mvneta_tx_drain(struct mvneta_softc *);
+
+/* Rx Subroutines */
+STATIC int mvneta_rx(struct mvneta_softc *, int, int);
+STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
+STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
+STATIC void mvneta_rx_set_csumflag(struct ifnet *,
+ struct mvneta_rx_desc *, struct mbuf *);
+STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
+
+/* MAC address filter */
+STATIC void mvneta_filter_setup(struct mvneta_softc *);
+
+/* sysctl(9) */
+STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
+STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
+STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
+STATIC void sysctl_mvneta_init(struct mvneta_softc *);
+
+/* MIB */
+STATIC void mvneta_clear_mib(struct mvneta_softc *);
+STATIC void mvneta_update_mib(struct mvneta_softc *);
+
+/* Switch */
+STATIC boolean_t mvneta_has_switch(device_t);
+
+#define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
+#define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
+
+STATIC struct mtx mii_mutex;
+STATIC int mii_init = 0;
+
+/* Device */
+STATIC int mvneta_detach(device_t);
+/* MII */
+STATIC int mvneta_miibus_readreg(device_t, int, int);
+STATIC int mvneta_miibus_writereg(device_t, int, int, int);
+
+static device_method_t mvneta_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_detach, mvneta_detach),
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, mvneta_miibus_readreg),
+ DEVMETHOD(miibus_writereg, mvneta_miibus_writereg),
+ /* MDIO interface */
+ DEVMETHOD(mdio_readreg, mvneta_miibus_readreg),
+ DEVMETHOD(mdio_writereg, mvneta_miibus_writereg),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));
+
+DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0);
+DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0);
+MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
+MODULE_DEPEND(mvneta, ether, 1, 1, 1);
+MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
+MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
+
+/*
+ * List of MIB registers and names
+ */
+enum mvneta_mib_idx
+{
+ MVNETA_MIB_RX_GOOD_OCT_IDX,
+ MVNETA_MIB_RX_BAD_OCT_IDX,
+ MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
+ MVNETA_MIB_RX_GOOD_FRAME_IDX,
+ MVNETA_MIB_RX_BAD_FRAME_IDX,
+ MVNETA_MIB_RX_BCAST_FRAME_IDX,
+ MVNETA_MIB_RX_MCAST_FRAME_IDX,
+ MVNETA_MIB_RX_FRAME64_OCT_IDX,
+ MVNETA_MIB_RX_FRAME127_OCT_IDX,
+ MVNETA_MIB_RX_FRAME255_OCT_IDX,
+ MVNETA_MIB_RX_FRAME511_OCT_IDX,
+ MVNETA_MIB_RX_FRAME1023_OCT_IDX,
+ MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
+ MVNETA_MIB_TX_GOOD_OCT_IDX,
+ MVNETA_MIB_TX_GOOD_FRAME_IDX,
+ MVNETA_MIB_TX_EXCES_COL_IDX,
+ MVNETA_MIB_TX_MCAST_FRAME_IDX,
+ MVNETA_MIB_TX_BCAST_FRAME_IDX,
+ MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
+ MVNETA_MIB_FC_SENT_IDX,
+ MVNETA_MIB_FC_GOOD_IDX,
+ MVNETA_MIB_FC_BAD_IDX,
+ MVNETA_MIB_PKT_UNDERSIZE_IDX,
+ MVNETA_MIB_PKT_FRAGMENT_IDX,
+ MVNETA_MIB_PKT_OVERSIZE_IDX,
+ MVNETA_MIB_PKT_JABBER_IDX,
+ MVNETA_MIB_MAC_RX_ERR_IDX,
+ MVNETA_MIB_MAC_CRC_ERR_IDX,
+ MVNETA_MIB_MAC_COL_IDX,
+ MVNETA_MIB_MAC_LATE_COL_IDX,
+};
+
+STATIC struct mvneta_mib_def {
+ uint32_t regnum;
+ int reg64;
+ const char *sysctl_name;
+ const char *desc;
+} mvneta_mib_list[] = {
+ [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
+ "rx_good_oct", "Good Octets Rx"},
+ [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
+ "rx_bad_oct", "Bad Octets Rx"},
+ [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
+ "tx_mac_err", "MAC Transmit Error"},
+ [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
+ "rx_good_frame", "Good Frames Rx"},
+ [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
+ "rx_bad_frame", "Bad Frames Rx"},
+ [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
+ "rx_bcast_frame", "Broadcast Frames Rx"},
+ [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
+ "rx_mcast_frame", "Multicast Frames Rx"},
+ [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
+ "rx_frame_1_64", "Frame Size 1 - 64"},
+ [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
+ "rx_frame_65_127", "Frame Size 65 - 127"},
+ [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
+ "rx_frame_128_255", "Frame Size 128 - 255"},
+ [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
+ "rx_frame_256_511", "Frame Size 256 - 511"},
+ [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
+ "rx_frame_512_1023", "Frame Size 512 - 1023"},
+ [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
+	    "rx_frame_1024_max", "Frame Size 1024 - Max"},
+ [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
+ "tx_good_oct", "Good Octets Tx"},
+ [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
+ "tx_good_frame", "Good Frames Tx"},
+ [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
+ "tx_exces_collision", "Excessive Collision"},
+ [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
+ "tx_mcast_frame", "Multicast Frames Tx"},
+ [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
+ "tx_bcast_frame", "Broadcast Frames Tx"},
+ [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
+ "tx_mac_ctl_err", "Unknown MAC Control"},
+ [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
+ "fc_tx", "Flow Control Tx"},
+ [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
+ "fc_rx_good", "Good Flow Control Rx"},
+ [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
+ "fc_rx_bad", "Bad Flow Control Rx"},
+ [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
+ "pkt_undersize", "Undersized Packets Rx"},
+ [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
+ "pkt_fragment", "Fragmented Packets Rx"},
+ [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
+ "pkt_oversize", "Oversized Packets Rx"},
+ [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
+ "pkt_jabber", "Jabber Packets Rx"},
+ [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
+ "mac_rx_err", "MAC Rx Errors"},
+ [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
+ "mac_crc_err", "MAC CRC Errors"},
+ [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
+ "mac_collision", "MAC Collision"},
+ [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
+ "mac_late_collision", "MAC Late Collision"},
+};
+
+static struct resource_spec res_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE },
+ { -1, 0}
+};
+
+static struct {
+ driver_intr_t *handler;
+ char *description;
+} mvneta_intrs[] = {
+ { mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
+};
+
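+/*
+ * Program the station address. The hardware splits the MAC address
+ * across two registers: MVNETA_MACAH holds the four high bytes and
+ * MVNETA_MACAL the two low bytes.
+ */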
+static int
+mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ mac_l = (addr[4] << 8) | (addr[5]);
+ mac_h = (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | (addr[3] << 0);
+
+ MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
+ MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
+ return (0);
+}
+
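+/*
+ * Look up the MAC address, in order of preference: the U-Boot
+ * environment (uboot.ethaddr / uboot.eth<N>addr), the address found
+ * in the FDT by mvneta_fdt_mac_address(), the address currently
+ * programmed into MACAL/MACAH, and finally a randomized address as
+ * a last resort.
+ */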
+static int
+mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
+{
+ char env[128], *macstr;
+ int count, i;
+ uint32_t mac_l, mac_h;
+ uint32_t tmpmac[ETHER_ADDR_LEN];
+
+ if (device_get_unit(sc->dev) == 0)
+ strlcpy(env, "uboot.ethaddr", sizeof(env));
+ else {
+ env[sizeof(env) - 1] = 0;
+ snprintf(env, sizeof(env) - 1, "uboot.eth%daddr",
+ device_get_unit(sc->dev));
+ }
+ macstr = kern_getenv(env);
+ if (macstr != NULL) {
+ count = sscanf(macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
+ &tmpmac[0], &tmpmac[1],
+ &tmpmac[2], &tmpmac[3],
+ &tmpmac[4], &tmpmac[5]);
+ if (count == ETHER_ADDR_LEN) {
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ addr[i] = tmpmac[i];
+ }
+ freeenv(macstr);
+ return (0);
+ }
+
+#ifdef FDT
+ if (mvneta_fdt_mac_address(sc, addr) == 0)
+ return (0);
+#endif
+ /*
+ * Fall back -- use the currently programmed address.
+ */
+ mac_l = MVNETA_READ(sc, MVNETA_MACAL);
+ mac_h = MVNETA_READ(sc, MVNETA_MACAH);
+ if (mac_l == 0 && mac_h == 0) {
+ /*
+ * Generate pseudo-random MAC.
+ * Set lower part to random number | unit number.
+ */
+ mac_l = arc4random() & ~0xff;
+ mac_l |= device_get_unit(sc->dev) & 0xff;
+ mac_h = arc4random();
+ mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */
+ if (bootverbose) {
+ device_printf(sc->dev,
+ "Could not acquire MAC address. "
+ "Using randomized one.\n");
+ }
+ }
+
+ addr[0] = (mac_h & 0xff000000) >> 24;
+ addr[1] = (mac_h & 0x00ff0000) >> 16;
+ addr[2] = (mac_h & 0x0000ff00) >> 8;
+ addr[3] = (mac_h & 0x000000ff);
+ addr[4] = (mac_l & 0x0000ff00) >> 8;
+ addr[5] = (mac_l & 0x000000ff);
+ return (0);
+}
+
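+/*
+ * Check whether this port is wired to a DSA switch: look up the
+ * "marvell,dsa" node in the device tree and compare its
+ * "dsa,ethernet" phandle with our own node.
+ */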
+STATIC boolean_t
+mvneta_has_switch(device_t self)
+{
+ phandle_t node, switch_node, switch_eth, switch_eth_handle;
+
+ node = ofw_bus_get_node(self);
+ switch_node =
+ ofw_bus_find_compatible(OF_finddevice("/"), "marvell,dsa");
+ switch_eth = 0;
+
+ OF_getencprop(switch_node, "dsa,ethernet",
+ (void*)&switch_eth_handle, sizeof(switch_eth_handle));
+
+ if (switch_eth_handle > 0)
+ switch_eth = OF_node_from_xref(switch_eth_handle);
+
+ /* Return true if dsa,ethernet cell points to us */
+ return (node == switch_eth);
+}
+
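+/*
+ * Create the four busdma tags used by the driver: Tx descriptor
+ * rings (16-byte aligned, single segment), Tx mbufs (up to
+ * MVNETA_TX_SEGLIMIT segments), Rx descriptor rings and Rx buffers
+ * (both 32-byte aligned, single segment). On any failure
+ * mvneta_detach() tears down whatever was already allocated.
+ */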
+STATIC int
+mvneta_dma_create(struct mvneta_softc *sc)
+{
+ size_t maxsize, maxsegsz;
+ int q;
+ int error;
+
+ /*
+ * Create Tx DMA
+ */
+ maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* parent */
+ 16, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ maxsize, /* maxsize */
+ 1, /* nsegments */
+ maxsegsz, /* maxsegsz */
+ BUS_DMA_COHERENT, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &sc->tx_dtag); /* dmat */
+ if (error != 0) {
+ device_printf(sc->dev,
+ "Failed to create DMA tag for Tx descriptors.\n");
+ goto fail;
+ }
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ MVNETA_PACKET_SIZE, /* maxsize */
+ MVNETA_TX_SEGLIMIT, /* nsegments */
+ MVNETA_PACKET_SIZE, /* maxsegsz */
+ BUS_DMA_COHERENT | BUS_DMA_ALLOCNOW, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &sc->txmbuf_dtag);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "Failed to create DMA tag for Tx mbufs.\n");
+ goto fail;
+ }
+
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ error = mvneta_ring_alloc_tx_queue(sc, q);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "Failed to allocate DMA safe memory for TxQ: %d\n", q);
+ goto fail;
+ }
+ }
+
+ /*
+ * Create Rx DMA.
+ */
+ /* Create tag for Rx descriptors */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* parent */
+ 32, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
+ 1, /* nsegments */
+ sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
+ BUS_DMA_COHERENT, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &sc->rx_dtag); /* dmat */
+ if (error != 0) {
+ device_printf(sc->dev,
+ "Failed to create DMA tag for Rx descriptors.\n");
+ goto fail;
+ }
+
+ /* Create tag for Rx buffers */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* parent */
+ 32, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ MVNETA_PACKET_SIZE, 1, /* maxsize, nsegments */
+ MVNETA_PACKET_SIZE, /* maxsegsz */
+ BUS_DMA_COHERENT, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &sc->rxbuf_dtag); /* dmat */
+ if (error != 0) {
+ device_printf(sc->dev,
+ "Failed to create DMA tag for Rx buffers.\n");
+ goto fail;
+ }
+
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
+ device_printf(sc->dev,
+ "Failed to allocate DMA safe memory for RxQ: %d\n", q);
+ goto fail;
+ }
+ }
+
+ return (0);
+fail:
+ mvneta_detach(sc->dev);
+
+ return (error);
+}
+
+/* ARGSUSED */
+int
+mvneta_attach(device_t self)
+{
+ struct mvneta_softc *sc;
+ struct ifnet *ifp;
+ device_t child;
+ int ifm_target;
+ int q, error;
+ uint32_t reg;
+
+ sc = device_get_softc(self);
+ sc->dev = self;
+
+ mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
+
+ error = bus_alloc_resources(self, res_spec, sc->res);
+ if (error) {
+ device_printf(self, "could not allocate resources\n");
+ return (ENXIO);
+ }
+
+ sc->version = MVNETA_READ(sc, MVNETA_PV);
+ device_printf(self, "version is %x\n", sc->version);
+ callout_init(&sc->tick_ch, 0);
+
+ /*
+ * Make sure the DMA engines are in reset state.
+ */
+ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
+ MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
+
+ /*
+ * Disable port snoop for buffers and descriptors
+ * to avoid L2 caching of both without DRAM copy.
+ * Obtain coherency settings from the first MBUS
+ * window attribute.
+ */
+ if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
+ reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
+ reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
+ reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
+ MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
+ }
+
+ /*
+ * MAC address
+ */
+ if (mvneta_get_mac_address(sc, sc->enaddr)) {
+ device_printf(self, "no mac address.\n");
+ return (ENXIO);
+ }
+ mvneta_set_mac_address(sc, sc->enaddr);
+
+ mvneta_disable_intr(sc);
+
+ /* Allocate network interface */
+ ifp = sc->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(self, "if_alloc() failed\n");
+ mvneta_detach(self);
+ return (ENOMEM);
+ }
+ if_initname(ifp, device_get_name(self), device_get_unit(self));
+
+ /*
+ * We can support 802.1Q VLAN-sized frames and jumbo
+ * Ethernet frames.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
+
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+#ifdef MVNETA_MULTIQUEUE
+ ifp->if_transmit = mvneta_transmit;
+ ifp->if_qflush = mvneta_qflush;
+#else /* !MVNETA_MULTIQUEUE */
+ ifp->if_start = mvneta_start;
+ ifp->if_snd.ifq_drv_maxlen = 0;
+ IFQ_SET_MAXLEN(&ifp->if_snd, MVNETA_TX_RING_CNT - 1);
+ IFQ_SET_READY(&ifp->if_snd);
+#endif
+ ifp->if_init = mvneta_init;
+ ifp->if_ioctl = mvneta_ioctl;
+
+ /*
+ * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
+ */
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+
+ /*
+ * VLAN hardware tagging is not supported, but it is
+ * necessary for performing VLAN hardware checksums,
+ * so tagging is done in the driver.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
+
+ /*
+ * Currently IPv6 HW checksum is broken, so make sure it is disabled.
+ */
+ ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+ * Capability advertised but not enabled by default
+ * (not included in if_capenable above):
+ * - Support for Large Receive Offload
+ */
+ ifp->if_capabilities |= IFCAP_LRO;
+
+ ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
+
+ /*
+ * Device DMA Buffer allocation.
+ * Handles resource deallocation in case of failure.
+ */
+ error = mvneta_dma_create(sc);
+ if (error != 0) {
+ mvneta_detach(self);
+ return (error);
+ }
+
+ /* Initialize queues */
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ error = mvneta_ring_init_tx_queue(sc, q);
+ if (error != 0) {
+ mvneta_detach(self);
+ return (error);
+ }
+ }
+
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ error = mvneta_ring_init_rx_queue(sc, q);
+ if (error != 0) {
+ mvneta_detach(self);
+ return (error);
+ }
+ }
+
+ ether_ifattach(ifp, sc->enaddr);
+
+ /*
+ * Enable DMA engines and Initialize Device Registers.
+ */
+ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
+ MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
+ MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
+ mvneta_sc_lock(sc);
+ mvneta_filter_setup(sc);
+ mvneta_sc_unlock(sc);
+ mvneta_initreg(ifp);
+
+ /*
+ * Now MAC is working, setup MII.
+ */
+ if (mii_init == 0) {
+ /*
+ * The MII bus is shared by all MACs and all PHYs in the SoC;
+ * serializing the bus accesses should be safe.
+ */
+ mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
+ mii_init = 1;
+ }
+
+ /* Attach PHY(s) */
+ if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
+ error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
+ mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
+ MII_OFFSET_ANY, 0);
+ if (error != 0) {
+ if (bootverbose) {
+ device_printf(self,
+ "MII attach failed, error: %d\n", error);
+ }
+ ether_ifdetach(sc->ifp);
+ mvneta_detach(self);
+ return (error);
+ }
+ sc->mii = device_get_softc(sc->miibus);
+ sc->phy_attached = 1;
+
+ /* Disable auto-negotiation in MAC - rely on PHY layer */
+ mvneta_update_autoneg(sc, FALSE);
+ } else if (sc->use_inband_status == TRUE) {
+ /* In-band link status */
+ ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
+ mvneta_mediastatus);
+
+ /* Configure media */
+ ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
+ 0, NULL);
+ ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
+ 0, NULL);
+ ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
+ ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
+ 0, NULL);
+ ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
+
+ /* Enable auto-negotiation */
+ mvneta_update_autoneg(sc, TRUE);
+
+ mvneta_sc_lock(sc);
+ if (MVNETA_IS_LINKUP(sc))
+ mvneta_linkup(sc);
+ else
+ mvneta_linkdown(sc);
+ mvneta_sc_unlock(sc);
+
+ } else {
+ /* Fixed-link, use predefined values */
+ mvneta_update_autoneg(sc, FALSE);
+ ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
+ mvneta_mediastatus);
+
+ ifm_target = IFM_ETHER;
+ switch (sc->phy_speed) {
+ case 2500:
+ if (sc->phy_mode != MVNETA_PHY_SGMII &&
+ sc->phy_mode != MVNETA_PHY_QSGMII) {
+ device_printf(self,
+ "2.5G speed can work only in (Q)SGMII mode\n");
+ ether_ifdetach(sc->ifp);
+ mvneta_detach(self);
+ return (ENXIO);
+ }
+ ifm_target |= IFM_2500_KX; /* IFM_2500_T */
+ break;
+ case 1000:
+ ifm_target |= IFM_1000_T;
+ break;
+ case 100:
+ ifm_target |= IFM_100_TX;
+ break;
+ case 10:
+ ifm_target |= IFM_10_T;
+ break;
+ default:
+ ether_ifdetach(sc->ifp);
+ mvneta_detach(self);
+ return (ENXIO);
+ }
+
+ if (sc->phy_fdx)
+ ifm_target |= IFM_FDX;
+ else
+ ifm_target |= IFM_HDX;
+
+ ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
+ ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
+ if_link_state_change(sc->ifp, LINK_STATE_UP);
+
+ if (mvneta_has_switch(self)) {
+ child = device_add_child(sc->dev, "mdio", -1);
+ if (child == NULL) {
+ ether_ifdetach(sc->ifp);
+ mvneta_detach(self);
+ return (ENXIO);
+ }
+ bus_generic_attach(sc->dev);
+ bus_generic_attach(child);
+ }
+
+ /* Configure MAC media */
+ mvneta_update_media(sc, ifm_target);
+ }
+
+ sysctl_mvneta_init(sc);
+
+ callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
+
+ error = bus_setup_intr(self, sc->res[1],
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
+ &sc->ih_cookie[0]);
+ if (error) {
+ device_printf(self, "could not setup %s\n",
+ mvneta_intrs[0].description);
+ ether_ifdetach(sc->ifp);
+ mvneta_detach(self);
+ return (error);
+ }
+
+ return (0);
+}
+
+STATIC int
+mvneta_detach(device_t dev)
+{
+ struct mvneta_softc *sc;
+ struct ifnet *ifp;
+ int q;
+
+ sc = device_get_softc(dev);
+ ifp = sc->ifp;
+
+ mvneta_stop(sc);
+ /* Detach network interface */
+ if (sc->ifp)
+ if_free(sc->ifp);
+
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
+ mvneta_ring_dealloc_rx_queue(sc, q);
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
+ mvneta_ring_dealloc_tx_queue(sc, q);
+
+ if (sc->tx_dtag != NULL)
+ bus_dma_tag_destroy(sc->tx_dtag);
+ if (sc->rx_dtag != NULL)
+ bus_dma_tag_destroy(sc->rx_dtag);
+ if (sc->txmbuf_dtag != NULL)
+ bus_dma_tag_destroy(sc->txmbuf_dtag);
+
+ bus_release_resources(dev, res_spec, sc->res);
+ return (0);
+}
+
+/*
+ * MII
+ */
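+/*
+ * SMI (MDIO) access: wait for the BUSY bit to clear, post a read
+ * opcode with the PHY and register addresses, wait for BUSY to
+ * clear again, then poll READVALID and extract the 16-bit data
+ * field. All accesses are serialized by the SoC-wide mii_mutex.
+ */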
+STATIC int
+mvneta_miibus_readreg(device_t dev, int phy, int reg)
+{
+ struct mvneta_softc *sc;
+ struct ifnet *ifp;
+ uint32_t smi, val;
+ int i;
+
+ sc = device_get_softc(dev);
+ ifp = sc->ifp;
+
+ mtx_lock(&mii_mutex);
+
+ for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+ if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
+ break;
+ DELAY(1);
+ }
+ if (i == MVNETA_PHY_TIMEOUT) {
+ if_printf(ifp, "SMI busy timeout\n");
+ mtx_unlock(&mii_mutex);
+ return (-1);
+ }
+
+ smi = MVNETA_SMI_PHYAD(phy) |
+ MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
+ MVNETA_WRITE(sc, MVNETA_SMI, smi);
+
+ for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+ if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
+ break;
+ DELAY(1);
+ }
+
+ if (i == MVNETA_PHY_TIMEOUT) {
+ if_printf(ifp, "SMI busy timeout\n");
+ mtx_unlock(&mii_mutex);
+ return (-1);
+ }
+ for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+ smi = MVNETA_READ(sc, MVNETA_SMI);
+ if (smi & MVNETA_SMI_READVALID)
+ break;
+ DELAY(1);
+ }
+
+ if (i == MVNETA_PHY_TIMEOUT) {
+ if_printf(ifp, "SMI busy timeout\n");
+ mtx_unlock(&mii_mutex);
+ return (-1);
+ }
+
+ mtx_unlock(&mii_mutex);
+
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i,
+ MVNETA_PHY_TIMEOUT);
+#endif
+
+ val = smi & MVNETA_SMI_DATA_MASK;
+
+#ifdef MVNETA_KTR
+ CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy,
+ reg, val);
+#endif
+ return (val);
+}
+
+STATIC int
+mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
+{
+ struct mvneta_softc *sc;
+ struct ifnet *ifp;
+ uint32_t smi;
+ int i;
+
+ sc = device_get_softc(dev);
+ ifp = sc->ifp;
+#ifdef MVNETA_KTR
+ CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname,
+ phy, reg, val);
+#endif
+
+ mtx_lock(&mii_mutex);
+
+ for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+ if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
+ break;
+ DELAY(1);
+ }
+ if (i == MVNETA_PHY_TIMEOUT) {
+ if_printf(ifp, "SMI busy timeout\n");
+ mtx_unlock(&mii_mutex);
+ return (0);
+ }
+
+ smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
+ MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
+ MVNETA_WRITE(sc, MVNETA_SMI, smi);
+
+ for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
+ if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
+ break;
+ DELAY(1);
+ }
+
+ mtx_unlock(&mii_mutex);
+
+ if (i == MVNETA_PHY_TIMEOUT)
+ if_printf(ifp, "phy write timed out\n");
+
+ return (0);
+}
+
+STATIC void
+mvneta_portup(struct mvneta_softc *sc)
+{
+ int q;
+
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ mvneta_rx_lockq(sc, q);
+ mvneta_rx_queue_enable(sc->ifp, q);
+ mvneta_rx_unlockq(sc, q);
+ }
+
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ mvneta_tx_lockq(sc, q);
+ mvneta_tx_queue_enable(sc->ifp, q);
+ mvneta_tx_unlockq(sc, q);
+ }
+}
+
+STATIC void
+mvneta_portdown(struct mvneta_softc *sc)
+{
+ struct mvneta_rx_ring *rx;
+ struct mvneta_tx_ring *tx;
+ int q, cnt;
+ uint32_t reg;
+
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ rx = MVNETA_RX_RING(sc, q);
+ mvneta_rx_lockq(sc, q);
+ rx->queue_status = MVNETA_QUEUE_DISABLED;
+ mvneta_rx_unlockq(sc, q);
+ }
+
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ tx = MVNETA_TX_RING(sc, q);
+ mvneta_tx_lockq(sc, q);
+ tx->queue_status = MVNETA_QUEUE_DISABLED;
+ mvneta_tx_unlockq(sc, q);
+ }
+
+ /* Wait for all Rx activity to terminate. */
+ reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
+ reg = MVNETA_RQC_DIS(reg);
+ MVNETA_WRITE(sc, MVNETA_RQC, reg);
+ cnt = 0;
+ do {
+ if (cnt >= RX_DISABLE_TIMEOUT) {
+ if_printf(sc->ifp,
+ "timeout for RX stopped. rqc 0x%x\n", reg);
+ break;
+ }
+ cnt++;
+ reg = MVNETA_READ(sc, MVNETA_RQC);
+ } while ((reg & MVNETA_RQC_EN_MASK) != 0);
+
+ /* Wait for all Tx activity to terminate. */
+ reg = MVNETA_READ(sc, MVNETA_PIE);
+ reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
+ MVNETA_WRITE(sc, MVNETA_PIE, reg);
+
+ reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
+ reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
+ MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
+
+ reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
+ reg = MVNETA_TQC_DIS(reg);
+ MVNETA_WRITE(sc, MVNETA_TQC, reg);
+ cnt = 0;
+ do {
+ if (cnt >= TX_DISABLE_TIMEOUT) {
+ if_printf(sc->ifp,
+ "timeout for TX stopped. tqc 0x%x\n", reg);
+ break;
+ }
+ cnt++;
+ reg = MVNETA_READ(sc, MVNETA_TQC);
+ } while ((reg & MVNETA_TQC_EN_MASK) != 0);
+
+ /* Wait until the Tx FIFO is empty. */
+ cnt = 0;
+ do {
+ if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
+ if_printf(sc->ifp,
+ "timeout for TX FIFO drained. ps0 0x%x\n", reg);
+ break;
+ }
+ cnt++;
+ reg = MVNETA_READ(sc, MVNETA_PS0);
+ } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
+ ((reg & MVNETA_PS0_TXINPROG) != 0));
+}
+
+/*
+ * Device Register Initialization
+ * Reset the device registers to the driver's default values.
+ * The device is not enabled here.
+ */
+STATIC int
+mvneta_initreg(struct ifnet *ifp)
+{
+ struct mvneta_softc *sc;
+ int q, i;
+ uint32_t reg;
+
+ sc = ifp->if_softc;
+#ifdef MVNETA_KTR
+ CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname);
+#endif
+
+ /* Disable Legacy WRR, Disable EJP, Release from reset. */
+ MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
+ /* Enable mbus retry. */
+ MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);
+
+ /* Init TX/RX Queue Registers */
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ mvneta_rx_lockq(sc, q);
+ if (mvneta_rx_queue_init(ifp, q) != 0) {
+ device_printf(sc->dev,
+ "initialization failed: cannot initialize queue\n");
+ mvneta_rx_unlockq(sc, q);
+ return (ENOBUFS);
+ }
+ mvneta_rx_unlockq(sc, q);
+ }
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ mvneta_tx_lockq(sc, q);
+ if (mvneta_tx_queue_init(ifp, q) != 0) {
+ device_printf(sc->dev,
+ "initialization failed: cannot initialize queue\n");
+ mvneta_tx_unlockq(sc, q);
+ return (ENOBUFS);
+ }
+ mvneta_tx_unlockq(sc, q);
+ }
+
+ /*
+ * Ethernet Unit Control - disable automatic PHY management by HW.
+ * In case the port uses SMI-controlled PHY, poll its status with
+ * mii_tick() and update MAC settings accordingly.
+ */
+ reg = MVNETA_READ(sc, MVNETA_EUC);
+ reg &= ~MVNETA_EUC_POLLING;
+ MVNETA_WRITE(sc, MVNETA_EUC, reg);
+
+ /* EEE: Low Power Idle */
+ reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
+ reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
+ MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
+
+ reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
+ MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
+
+ reg = MVNETA_LPIC2_MUSTSET;
+ MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
+
+ /* Port MAC Control set 0 */
+ reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */
+ reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */
+ reg |= MVNETA_PMACC0_FRAMESIZELIMIT(MVNETA_MAX_FRAME);
+ MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
+
+ /* Port MAC Control set 2 */
+ reg = MVNETA_READ(sc, MVNETA_PMACC2);
+ switch (sc->phy_mode) {
+ case MVNETA_PHY_QSGMII:
+ reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
+ MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
+ break;
+ case MVNETA_PHY_SGMII:
+ reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
+ MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
+ break;
+ case MVNETA_PHY_RGMII:
+ case MVNETA_PHY_RGMII_ID:
+ reg |= MVNETA_PMACC2_RGMIIEN;
+ break;
+ }
+ reg |= MVNETA_PMACC2_MUSTSET;
+ reg &= ~MVNETA_PMACC2_PORTMACRESET;
+ MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
+
+ /* Port Configuration Extended: enable Tx CRC generation */
+ reg = MVNETA_READ(sc, MVNETA_PXCX);
+ reg &= ~MVNETA_PXCX_TXCRCDIS;
+ MVNETA_WRITE(sc, MVNETA_PXCX, reg);
+
+ /* Clear the MIB counter registers (cleared on read). */
+ for (i = 0; i < nitems(mvneta_mib_list); i++) {
+ if (mvneta_mib_list[i].reg64)
+ MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+ else
+ MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+ }
+ MVNETA_READ(sc, MVNETA_PDFC);
+ MVNETA_READ(sc, MVNETA_POFC);
+
+ /* Set SDC register except IPGINT bits */
+ reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
+ reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
+ reg |= MVNETA_SDC_BLMR;
+ reg |= MVNETA_SDC_BLMT;
+ MVNETA_WRITE(sc, MVNETA_SDC, reg);
+
+ return (0);
+}
+
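+/* Busdma callback: record the physical address of a single-segment load. */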
+STATIC void
+mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+
+ if (error != 0)
+ return;
+ *(bus_addr_t *)arg = segs->ds_addr;
+}
+
+STATIC int
+mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_rx_ring *rx;
+ struct mvneta_buf *rxbuf;
+ bus_dmamap_t dmap;
+ int i, error;
+
+ if (q >= MVNETA_RX_QNUM_MAX)
+ return (EINVAL);
+
+ rx = MVNETA_RX_RING(sc, q);
+ mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
+ /* Allocate DMA memory for Rx descriptors */
+ error = bus_dmamem_alloc(sc->rx_dtag,
+ (void**)&(rx->desc),
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &rx->desc_map);
+ if (error != 0 || rx->desc == NULL)
+ goto fail;
+ error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
+ rx->desc,
+ sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
+ mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+
+ for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
+ error = bus_dmamap_create(sc->rxbuf_dtag, BUS_DMA_COHERENT,
+ &dmap);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "Failed to create DMA map for Rx buffer num: %d\n", i);
+ goto fail;
+ }
+ rxbuf = &rx->rxbuf[i];
+ rxbuf->dmap = dmap;
+ rxbuf->m = NULL;
+ }
+
+ return (0);
+fail:
+ mvneta_ring_dealloc_rx_queue(sc, q);
+ device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
+ return (error);
+}
+
+STATIC int
+mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_tx_ring *tx;
+ int error;
+
+ if (q >= MVNETA_TX_QNUM_MAX)
+ return (EINVAL);
+ tx = MVNETA_TX_RING(sc, q);
+ mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
+ error = bus_dmamem_alloc(sc->tx_dtag,
+ (void**)&(tx->desc),
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &tx->desc_map);
+ if (error != 0 || tx->desc == NULL)
+ goto fail;
+ error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
+ tx->desc,
+ sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
+ mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+
+#ifdef MVNETA_MULTIQUEUE
+ tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
+ &tx->ring_mtx);
+ if (tx->br == NULL) {
+ device_printf(sc->dev,
+ "Could not setup buffer ring for TxQ(%d)\n", q);
+ error = ENOMEM;
+ goto fail;
+ }
+#endif
+
+ return (0);
+fail:
+ mvneta_ring_dealloc_tx_queue(sc, q);
+ device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
+ return (error);
+}
+
+STATIC void
+mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_tx_ring *tx;
+ struct mvneta_buf *txbuf;
+ void *kva;
+ int error;
+ int i;
+
+ if (q >= MVNETA_TX_QNUM_MAX)
+ return;
+ tx = MVNETA_TX_RING(sc, q);
+
+ if (tx->taskq != NULL) {
+ /* Remove task */
+ while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
+ taskqueue_drain(tx->taskq, &tx->task);
+ }
+#ifdef MVNETA_MULTIQUEUE
+ if (tx->br != NULL)
+ drbr_free(tx->br, M_DEVBUF);
+#endif
+
+ if (sc->txmbuf_dtag != NULL) {
+ if (mtx_name(&tx->ring_mtx) != NULL) {
+ /*
+ * Maps are assumed to be loaded only after the mutex is
+ * initialized, so we can skip unloading them when the
+ * mutex was never initialized.
+ */
+ mvneta_tx_lockq(sc, q);
+ mvneta_ring_flush_tx_queue(sc, q);
+ mvneta_tx_unlockq(sc, q);
+ }
+ for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
+ txbuf = &tx->txbuf[i];
+ if (txbuf->dmap != NULL) {
+ error = bus_dmamap_destroy(sc->txmbuf_dtag,
+ txbuf->dmap);
+ if (error != 0) {
+ panic("%s: map busy for Tx descriptor (Q%d, %d)",
+ __func__, q, i);
+ }
+ }
+ }
+ }
+
+ if (tx->desc_pa != 0)
+ bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
+
+ kva = (void *)tx->desc;
+ if (kva != NULL)
+ bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
+
+ if (mtx_name(&tx->ring_mtx) != NULL)
+ mtx_destroy(&tx->ring_mtx);
+
+ memset(tx, 0, sizeof(*tx));
+}
+
+STATIC void
+mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_rx_ring *rx;
+ struct lro_ctrl *lro;
+ void *kva;
+
+ if (q >= MVNETA_RX_QNUM_MAX)
+ return;
+
+ rx = MVNETA_RX_RING(sc, q);
+
+ mvneta_ring_flush_rx_queue(sc, q);
+
+ if (rx->desc_pa != 0)
+ bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
+
+ kva = (void *)rx->desc;
+ if (kva != NULL)
+ bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
+
+ lro = &rx->lro;
+ tcp_lro_free(lro);
+
+ if (mtx_name(&rx->ring_mtx) != NULL)
+ mtx_destroy(&rx->ring_mtx);
+
+ memset(rx, 0, sizeof(*rx));
+}
+
+STATIC int
+mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_rx_ring *rx;
+ struct lro_ctrl *lro;
+ int error;
+
+ if (q >= MVNETA_RX_QNUM_MAX)
+ return (0);
+
+ rx = MVNETA_RX_RING(sc, q);
+ rx->dma = rx->cpu = 0;
+ rx->queue_th_received = MVNETA_RXTH_COUNT;
+ rx->queue_th_time = (get_tclk() / 1000) / 10; /* 0.1 [ms] */
+
+ /* Initialize LRO */
+ rx->lro_enabled = FALSE;
+ if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) {
+ lro = &rx->lro;
+ error = tcp_lro_init(lro);
+ if (error != 0)
+ device_printf(sc->dev, "LRO Initialization failed!\n");
+ else {
+ rx->lro_enabled = TRUE;
+ lro->ifp = sc->ifp;
+ }
+ }
+
+ return (0);
+}
+
+STATIC int
+mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_tx_ring *tx;
+ struct mvneta_buf *txbuf;
+ int i, error;
+
+ if (q >= MVNETA_TX_QNUM_MAX)
+ return (0);
+
+ tx = MVNETA_TX_RING(sc, q);
+
+ /* Tx handle */
+ for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
+ txbuf = &tx->txbuf[i];
+ txbuf->m = NULL;
+ /* The Tx handle needs a DMA map for bus_dmamap_load_mbuf_sg(). */
+ error = bus_dmamap_create(sc->txmbuf_dtag, BUS_DMA_COHERENT,
+ &txbuf->dmap);
+ if (error != 0) {
+ device_printf(sc->dev,
+ "can't create dma map (tx ring %d)\n", i);
+ return (error);
+ }
+ }
+ tx->dma = tx->cpu = 0;
+ tx->used = 0;
+ tx->drv_error = 0;
+ tx->queue_status = MVNETA_QUEUE_DISABLED;
+ tx->queue_hung = FALSE;
+
+ tx->ifp = sc->ifp;
+ tx->qidx = q;
+ TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
+ tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &tx->taskq);
+ taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
+ device_get_nameunit(sc->dev), q);
+
+ return (0);
+}
+
+STATIC void
+mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_tx_ring *tx;
+ struct mvneta_buf *txbuf;
+ int i;
+
+ tx = MVNETA_TX_RING(sc, q);
+ KASSERT_TX_MTX(sc, q);
+
+ /* Tx handle */
+ for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
+ txbuf = &tx->txbuf[i];
+ bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
+ if (txbuf->m != NULL) {
+ m_freem(txbuf->m);
+ txbuf->m = NULL;
+ }
+ }
+ tx->dma = tx->cpu = 0;
+ tx->used = 0;
+}
+
+STATIC void
+mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_rx_ring *rx;
+ struct mvneta_buf *rxbuf;
+ int i;
+
+ rx = MVNETA_RX_RING(sc, q);
+ KASSERT_RX_MTX(sc, q);
+
+ /* Rx handle */
+ for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
+ rxbuf = &rx->rxbuf[i];
+ mvneta_rx_buf_free(sc, rxbuf);
+ }
+ rx->dma = rx->cpu = 0;
+}
+
+/*
+ * Rx/Tx Queue Control
+ */
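+/*
+ * Program the per-queue Rx registers: PRXDQA takes the descriptor
+ * ring's physical address, PRXDQS the buffer size (in 8-byte units)
+ * and ring length, and PRXC the packet offset within each buffer.
+ */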
+STATIC int
+mvneta_rx_queue_init(struct ifnet *ifp, int q)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_rx_ring *rx;
+ uint32_t reg;
+
+ sc = ifp->if_softc;
+ KASSERT_RX_MTX(sc, q);
+ rx = MVNETA_RX_RING(sc, q);
+ DASSERT(rx->desc_pa != 0);
+
+ /* descriptor address */
+ MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
+
+ /* Rx buffer size and descriptor ring size */
+ reg = MVNETA_PRXDQS_BUFFERSIZE(MVNETA_PACKET_SIZE >> 3);
+ reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
+ MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q,
+ MVNETA_READ(sc, MVNETA_PRXDQS(q)));
+#endif
+ /* Rx packet offset address */
+ reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
+ MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q,
+ MVNETA_READ(sc, MVNETA_PRXC(q)));
+#endif
+
+ /* if DMA is not working, register is not updated */
+ DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
+ return (0);
+}
+
+STATIC int
+mvneta_tx_queue_init(struct ifnet *ifp, int q)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_tx_ring *tx;
+ uint32_t reg;
+
+ sc = ifp->if_softc;
+ KASSERT_TX_MTX(sc, q);
+ tx = MVNETA_TX_RING(sc, q);
+ DASSERT(tx->desc_pa != 0);
+
+ /* descriptor address */
+ MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
+
+ /* descriptor ring size */
+ reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
+ MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
+
+ /* if DMA is not working, register is not updated */
+ DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
+ return (0);
+}
+
+STATIC int
+mvneta_rx_queue_enable(struct ifnet *ifp, int q)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_rx_ring *rx;
+ uint32_t reg;
+
+ sc = ifp->if_softc;
+ rx = MVNETA_RX_RING(sc, q);
+ KASSERT_RX_MTX(sc, q);
+
+ /* Set Rx interrupt threshold */
+ reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
+ MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
+
+ reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
+ MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
+
+ /* Unmask RXTX_TH Intr. */
+ reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
+ reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
+ MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
+
+ /* Enable Rx queue */
+ reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
+ reg |= MVNETA_RQC_ENQ(q);
+ MVNETA_WRITE(sc, MVNETA_RQC, reg);
+
+ rx->queue_status = MVNETA_QUEUE_WORKING;
+ return (0);
+}
+
+STATIC int
+mvneta_tx_queue_enable(struct ifnet *ifp, int q)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_tx_ring *tx;
+
+ sc = ifp->if_softc;
+ tx = MVNETA_TX_RING(sc, q);
+ KASSERT_TX_MTX(sc, q);
+
+ /* Enable Tx queue */
+ MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
+
+ tx->queue_status = MVNETA_QUEUE_IDLE;
+ tx->queue_hung = FALSE;
+ return (0);
+}
+
+STATIC __inline void
+mvneta_rx_lockq(struct mvneta_softc *sc, int q)
+{
+
+ DASSERT(q >= 0);
+ DASSERT(q < MVNETA_RX_QNUM_MAX);
+ mtx_lock(&sc->rx_ring[q].ring_mtx);
+}
+
+STATIC __inline void
+mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
+{
+
+ DASSERT(q >= 0);
+ DASSERT(q < MVNETA_RX_QNUM_MAX);
+ mtx_unlock(&sc->rx_ring[q].ring_mtx);
+}
+
+STATIC __inline int __unused
+mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
+{
+
+ DASSERT(q >= 0);
+ DASSERT(q < MVNETA_TX_QNUM_MAX);
+ return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
+}
+
+STATIC __inline void
+mvneta_tx_lockq(struct mvneta_softc *sc, int q)
+{
+
+ DASSERT(q >= 0);
+ DASSERT(q < MVNETA_TX_QNUM_MAX);
+ mtx_lock(&sc->tx_ring[q].ring_mtx);
+}
+
+STATIC __inline void
+mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
+{
+
+ DASSERT(q >= 0);
+ DASSERT(q < MVNETA_TX_QNUM_MAX);
+ mtx_unlock(&sc->tx_ring[q].ring_mtx);
+}
+
+/*
+ * Interrupt Handlers
+ */
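+/*
+ * The controller exposes interrupt mask/cause register pairs
+ * (EUIM/EUIC, PRXTXTIM/PRXTXTIC, PRXTXIM/PRXTXIC, PMIM/PMIC);
+ * disabling interrupts zeroes every mask and cause register along
+ * with the per-queue enable register PIE.
+ */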
+STATIC void
+mvneta_disable_intr(struct mvneta_softc *sc)
+{
+
+ MVNETA_WRITE(sc, MVNETA_EUIM, 0);
+ MVNETA_WRITE(sc, MVNETA_EUIC, 0);
+ MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
+ MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
+ MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
+ MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
+ MVNETA_WRITE(sc, MVNETA_PMIM, 0);
+ MVNETA_WRITE(sc, MVNETA_PMIC, 0);
+ MVNETA_WRITE(sc, MVNETA_PIE, 0);
+}
+
+STATIC void
+mvneta_enable_intr(struct mvneta_softc *sc)
+{
+ uint32_t reg;
+
+ /* Enable the summary bit to check all interrupt causes. */
+ reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
+ reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
+ MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
+
+ if (sc->use_inband_status) {
+ /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
+ MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
+ MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
+ }
+
+ /* Enable All Queue Interrupt */
+ reg = MVNETA_READ(sc, MVNETA_PIE);
+ reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
+ reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
+ MVNETA_WRITE(sc, MVNETA_PIE, reg);
+}
+
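+/*
+ * Aggregated RXTX_TH interrupt: read and acknowledge the cause
+ * register, service the MISC summary (link events) first, then
+ * process the Rx queues flagged in the cause bits.
+ */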
+STATIC void
+mvneta_rxtxth_intr(void *arg)
+{
+ struct mvneta_softc *sc;
+ struct ifnet *ifp;
+ uint32_t ic, queues;
+
+ sc = arg;
+ ifp = sc->ifp;
+#ifdef MVNETA_KTR
+ CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname);
+#endif
+ ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
+ if (ic == 0)
+ return;
+ MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
+
+ /* Ack maintenance interrupts first */
+ if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
+ sc->use_inband_status)) {
+ mvneta_sc_lock(sc);
+ mvneta_misc_intr(sc);
+ mvneta_sc_unlock(sc);
+ }
+ if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ return;
+ /* RxTxTH interrupt */
+ queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
+ if (__predict_true(queues)) {
+#ifdef MVNETA_KTR
+ CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname);
+#endif
+ /* At the moment the driver supports only one Rx queue. */
+ DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
+ mvneta_rx(sc, 0, 0);
+ }
+}
+
+STATIC int
+mvneta_misc_intr(struct mvneta_softc *sc)
+{
+ uint32_t ic;
+ int claimed = 0;
+
+#ifdef MVNETA_KTR
+ CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname);
+#endif
+ KASSERT_SC_MTX(sc);
+
+ for (;;) {
+ ic = MVNETA_READ(sc, MVNETA_PMIC);
+ ic &= MVNETA_READ(sc, MVNETA_PMIM);
+ if (ic == 0)
+ break;
+ MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
+ claimed = 1;
+
+ if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
+ MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
+ mvneta_link_isr(sc);
+ }
+ return (claimed);
+}
+
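+/*
+ * Periodic housekeeping: drain completed Tx, refresh the MIB
+ * counters, poll the PHY, refill Rx rings that ran dry, and run
+ * the Tx watchdog. A hang is only declared if no flow-control
+ * frames arrived during the tick, since a received pause frame
+ * legitimately stalls transmission.
+ */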
+STATIC void
+mvneta_tick(void *arg)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_tx_ring *tx;
+ struct mvneta_rx_ring *rx;
+ int q;
+ uint32_t fc_prev, fc_curr;
+
+ sc = arg;
+
+ /*
+ * This is done before the MIB update to get the right
+ * stats for this tick.
+ */
+ mvneta_tx_drain(sc);
+
+ /* Extract previous flow-control frame received counter. */
+ fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
+ /* Read mib registers (clear by read). */
+ mvneta_update_mib(sc);
+ /* Extract current flow-control frame received counter. */
+ fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
+
+ if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) {
+ mvneta_sc_lock(sc);
+ mii_tick(sc->mii);
+
+ /* Adjust MAC settings */
+ mvneta_adjust_link(sc);
+ mvneta_sc_unlock(sc);
+ }
+
+ /*
+ * If we were unable to refill the Rx queue and left the Rx
+ * function, the ring is left without mbufs and with no way to
+ * call the refill function; refill it here.
+ */
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ rx = MVNETA_RX_RING(sc, q);
+ if (rx->needs_refill == TRUE) {
+ mvneta_rx_lockq(sc, q);
+ mvneta_rx_queue_refill(sc, q);
+ mvneta_rx_unlockq(sc, q);
+ }
+ }
+
+ /*
+ * Watchdog:
+ * - check whether a queue is marked as hung.
+ * - ignore the hung status if we received pause frames,
+ * as the hardware may have paused packet transmission.
+ */
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ /*
+ * We should take the queue lock, but since we only read
+ * the queue status we can do without it; at worst we
+ * misdetect the queue status for one tick.
+ */
+ tx = MVNETA_TX_RING(sc, q);
+
+ if (tx->queue_hung && (fc_curr - fc_prev) == 0)
+ goto timeout;
+ }
+
+ callout_schedule(&sc->tick_ch, hz);
+ return;
+
+timeout:
+ if_printf(sc->ifp, "watchdog timeout\n");
+
+ mvneta_sc_lock(sc);
+ sc->counter_watchdog++;
+ sc->counter_watchdog_mib++;
+ /* Trigger reinitialize sequence. */
+ mvneta_stop_locked(sc);
+ mvneta_init_locked(sc);
+ mvneta_sc_unlock(sc);
+}
+
+STATIC void
+mvneta_qflush(struct ifnet *ifp)
+{
+#ifdef MVNETA_MULTIQUEUE
+ struct mvneta_softc *sc;
+ struct mvneta_tx_ring *tx;
+ struct mbuf *m;
+ size_t q;
+
+ sc = ifp->if_softc;
+
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ tx = MVNETA_TX_RING(sc, q);
+ mvneta_tx_lockq(sc, q);
+ while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
+ m_freem(m);
+ mvneta_tx_unlockq(sc, q);
+ }
+#endif
+ if_qflush(ifp);
+}
+
+STATIC void
+mvneta_tx_task(void *arg, int pending)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_tx_ring *tx;
+ struct ifnet *ifp;
+ int error;
+
+ tx = arg;
+ ifp = tx->ifp;
+ sc = ifp->if_softc;
+
+ mvneta_tx_lockq(sc, tx->qidx);
+ error = mvneta_xmit_locked(sc, tx->qidx);
+ mvneta_tx_unlockq(sc, tx->qidx);
+
+ /* Try again */
+ if (__predict_false(error != 0 && error != ENETDOWN)) {
+ pause("mvneta_tx_task_sleep", 1);
+ taskqueue_enqueue(tx->taskq, &tx->task);
+ }
+}
+
+STATIC int
+mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
+{
+ struct mvneta_tx_ring *tx;
+ struct ifnet *ifp;
+ int error;
+
+ KASSERT_TX_MTX(sc, q);
+ tx = MVNETA_TX_RING(sc, q);
+ error = 0;
+
+ ifp = sc->ifp;
+
+ /* Don't enqueue the packet if the queue is disabled. */
+ if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
+ m_freem(*m);
+ *m = NULL;
+ return (ENETDOWN);
+ }
+
+ /* Reclaim mbufs if above the threshold. */
+ if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
+ mvneta_tx_queue_complete(sc, q);
+
+ /* Do not call transmit path if queue is already too full. */
+ if (__predict_false(tx->used >
+ MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
+ return (ENOBUFS);
+
+ error = mvneta_tx_queue(sc, m, q);
+ if (__predict_false(error != 0))
+ return (error);
+
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, *m);
+
+ /* Set watchdog on */
+ tx->watchdog_time = ticks;
+ tx->queue_status = MVNETA_QUEUE_WORKING;
+
+ return (error);
+}
+
+#ifdef MVNETA_MULTIQUEUE
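+/*
+ * Multiqueue transmit: hash the mbuf's flowid onto a Tx queue, then
+ * prefer the direct path (empty buf_ring plus an uncontended
+ * trylock) before falling back to enqueueing on the buf_ring and
+ * scheduling the per-queue taskqueue.
+ */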
+STATIC int
+mvneta_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_tx_ring *tx;
+ int error;
+ int q;
+
+ sc = ifp->if_softc;
+
+ /* Use the default queue if there is no flow ID, as the thread can migrate. */
+ if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
+ q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
+ else
+ q = 0;
+
+ tx = MVNETA_TX_RING(sc, q);
+
+ /* If the buf_ring is full, start transmitting immediately. */
+ if (buf_ring_full(tx->br)) {
+ mvneta_tx_lockq(sc, q);
+ mvneta_xmit_locked(sc, q);
+ mvneta_tx_unlockq(sc, q);
+ }
+
+ /*
+ * If the buf_ring is empty we will not reorder packets.
+ * If the lock is available, transmit without using the buf_ring.
+ */
+ if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
+ error = mvneta_xmitfast_locked(sc, q, &m);
+ mvneta_tx_unlockq(sc, q);
+ if (__predict_true(error == 0))
+ return (0);
+
+ /* Transmit can fail in fastpath. */
+ if (__predict_false(m == NULL))
+ return (error);
+ }
+
+ /* Enqueue then schedule taskqueue. */
+ error = drbr_enqueue(ifp, tx->br, m);
+ if (__predict_false(error != 0))
+ return (error);
+
+ taskqueue_enqueue(tx->taskq, &tx->task);
+ return (0);
+}
+
+STATIC int
+mvneta_xmit_locked(struct mvneta_softc *sc, int q)
+{
+ struct ifnet *ifp;
+ struct mvneta_tx_ring *tx;
+ struct mbuf *m;
+ int error;
+
+ KASSERT_TX_MTX(sc, q);
+ ifp = sc->ifp;
+ tx = MVNETA_TX_RING(sc, q);
+ error = 0;
+
+ while ((m = drbr_peek(ifp, tx->br)) != NULL) {
+ error = mvneta_xmitfast_locked(sc, q, &m);
+ if (__predict_false(error != 0)) {
+ if (m != NULL)
+ drbr_putback(ifp, tx->br, m);
+ else
+ drbr_advance(ifp, tx->br);
+ break;
+ }
+ drbr_advance(ifp, tx->br);
+ }
+
+ return (error);
+}
+#else /* !MVNETA_MULTIQUEUE */
+STATIC void
+mvneta_start(struct ifnet *ifp)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_tx_ring *tx;
+ int error;
+
+ sc = ifp->if_softc;
+ tx = MVNETA_TX_RING(sc, 0);
+
+ mvneta_tx_lockq(sc, 0);
+ error = mvneta_xmit_locked(sc, 0);
+ mvneta_tx_unlockq(sc, 0);
+ /* Handle retransmit in the background taskq. */
+ if (__predict_false(error != 0 && error != ENETDOWN))
+ taskqueue_enqueue(tx->taskq, &tx->task);
+}
+
+STATIC int
+mvneta_xmit_locked(struct mvneta_softc *sc, int q)
+{
+ struct ifnet *ifp;
+ struct mvneta_tx_ring *tx;
+ struct mbuf *m;
+ int error;
+
+ KASSERT_TX_MTX(sc, q);
+ ifp = sc->ifp;
+ tx = MVNETA_TX_RING(sc, q);
+ error = 0;
+
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+
+ error = mvneta_xmitfast_locked(sc, q, &m);
+ if (__predict_false(error != 0)) {
+ if (m != NULL)
+ IFQ_DRV_PREPEND(&ifp->if_snd, m);
+ break;
+ }
+ }
+
+ return (error);
+}
+#endif
+
+STATIC int
+mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct mvneta_softc *sc;
+ struct mvneta_rx_ring *rx;
+ struct ifreq *ifr;
+ int error, mask;
+ uint32_t flags;
+ char *mediatype;
+ int q;
+
+ error = 0;
+ sc = ifp->if_softc;
+ ifr = (struct ifreq *)data;
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ mvneta_sc_lock(sc);
+ if (ifp->if_flags & IFF_UP) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ flags = ifp->if_flags ^ sc->mvneta_if_flags;
+
+ if (flags != 0)
+ sc->mvneta_if_flags = ifp->if_flags;
+
+ if ((flags & IFF_PROMISC) != 0)
+ mvneta_filter_setup(sc);
+ } else {
+ mvneta_init_locked(sc);
+ sc->mvneta_if_flags = ifp->if_flags;
+ if (sc->phy_attached)
+ mii_mediachg(sc->mii);
+ mvneta_sc_unlock(sc);
+ break;
+ }
+ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ mvneta_stop_locked(sc);
+
+ sc->mvneta_if_flags = ifp->if_flags;
+ mvneta_sc_unlock(sc);
+ break;
+ case SIOCSIFCAP:
+ if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU &&
+ ifr->ifr_reqcap & IFCAP_TXCSUM)
+ ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
+ mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+ if (mask & IFCAP_HWCSUM) {
+ ifp->if_capenable &= ~IFCAP_HWCSUM;
+ ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist = CSUM_IP | CSUM_TCP |
+ CSUM_UDP;
+ else
+ ifp->if_hwassist = 0;
+ }
+ if (mask & IFCAP_LRO) {
+ mvneta_sc_lock(sc);
+ ifp->if_capenable ^= IFCAP_LRO;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ rx = MVNETA_RX_RING(sc, q);
+ rx->lro_enabled = !rx->lro_enabled;
+ }
+ }
+ mvneta_sc_unlock(sc);
+ }
+ VLAN_CAPABILITIES(ifp);
+ break;
+ case SIOCSIFMEDIA:
+ if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
+ IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T ||
+ IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_KX) &&
+ (ifr->ifr_media & IFM_FDX) == 0) {
+ if (IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T)
+ mediatype = "2500Base-T";
+ else if (IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_KX)
+ mediatype = "2500Base-KX";
+ else
+ mediatype = "1000Base-T";
+ device_printf(sc->dev,
+ "%s half-duplex unsupported\n", mediatype);
+ error = EINVAL;
+ break;
+ }
+ case SIOCGIFMEDIA: /* FALLTHROUGH */
+ case SIOCGIFXMEDIA:
+ if (!sc->phy_attached)
+ error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
+ cmd);
+ else
+ error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
+ cmd);
+ break;
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
+ MVNETA_ETHER_SIZE) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ mvneta_sc_lock(sc);
+ if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU) {
+ ifp->if_capenable &= ~IFCAP_TXCSUM;
+ ifp->if_hwassist = 0;
+ } else {
+ ifp->if_capenable |= IFCAP_TXCSUM;
+ ifp->if_hwassist = CSUM_IP | CSUM_TCP |
+ CSUM_UDP;
+ }
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ /* Trigger reinitialize sequence */
+ mvneta_stop_locked(sc);
+ mvneta_init_locked(sc);
+ }
+ mvneta_sc_unlock(sc);
+ }
+ break;
+
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (error);
+}
+
+STATIC void
+mvneta_init_locked(void *arg)
+{
+ struct mvneta_softc *sc;
+ struct ifnet *ifp;
+ uint32_t reg;
+ int q, cpu;
+
+ sc = arg;
+ ifp = sc->ifp;
+
+ if (!device_is_attached(sc->dev) ||
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ return;
+
+ mvneta_disable_intr(sc);
+ callout_stop(&sc->tick_ch);
+
+ /* Get the latest mac address */
+ bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN);
+ mvneta_set_mac_address(sc, sc->enaddr);
+ mvneta_filter_setup(sc);
+
+ /* Start DMA Engine */
+ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
+ MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
+ MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
+
+ /* Enable port */
+ reg = MVNETA_READ(sc, MVNETA_PMACC0);
+ reg |= MVNETA_PMACC0_PORTEN;
+ MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
+
+ /* Allow access to each TXQ/RXQ from both CPU's */
+ for (cpu = 0; cpu < mp_ncpus; ++cpu)
+ MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
+ MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
+
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ mvneta_rx_lockq(sc, q);
+ mvneta_rx_queue_refill(sc, q);
+ mvneta_rx_unlockq(sc, q);
+ }
+
+ if (!sc->phy_attached)
+ mvneta_linkup(sc);
+
+ /* Enable interrupt */
+ mvneta_enable_intr(sc);
+
+ /* Set Counter */
+ callout_schedule(&sc->tick_ch, hz);
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+}
+
+STATIC void
+mvneta_init(void *arg)
+{
+ struct mvneta_softc *sc;
+
+ sc = arg;
+ mvneta_sc_lock(sc);
+ mvneta_init_locked(sc);
+ if (sc->phy_attached)
+ mii_mediachg(sc->mii);
+ mvneta_sc_unlock(sc);
+}
+
+/* ARGSUSED */
+STATIC void
+mvneta_stop_locked(struct mvneta_softc *sc)
+{
+ struct ifnet *ifp;
+ struct mvneta_rx_ring *rx;
+ struct mvneta_tx_ring *tx;
+ uint32_t reg;
+ int q;
+
+ ifp = sc->ifp;
+ if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ mvneta_disable_intr(sc);
+
+ callout_stop(&sc->tick_ch);
+
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+
+ /* Link down */
+ if (sc->linkup == TRUE)
+ mvneta_linkdown(sc);
+
+ /* Reset the MAC Port Enable bit */
+ reg = MVNETA_READ(sc, MVNETA_PMACC0);
+ reg &= ~MVNETA_PMACC0_PORTEN;
+ MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
+
+ /* Disable each of queue */
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ rx = MVNETA_RX_RING(sc, q);
+
+ mvneta_rx_lockq(sc, q);
+ mvneta_ring_flush_rx_queue(sc, q);
+ mvneta_rx_unlockq(sc, q);
+ }
+
+ /*
+ * Hold Reset state of DMA Engine
+ * (must write 0x0 to restart it)
+ */
+ MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
+ MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
+
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ tx = MVNETA_TX_RING(sc, q);
+
+ mvneta_tx_lockq(sc, q);
+ mvneta_ring_flush_tx_queue(sc, q);
+ mvneta_tx_unlockq(sc, q);
+ }
+}
+
+STATIC void
+mvneta_stop(struct mvneta_softc *sc)
+{
+
+ mvneta_sc_lock(sc);
+ mvneta_stop_locked(sc);
+ mvneta_sc_unlock(sc);
+}
+
+STATIC int
+mvneta_mediachange(struct ifnet *ifp)
+{
+ struct mvneta_softc *sc;
+
+ sc = ifp->if_softc;
+
+ if (!sc->phy_attached && !sc->use_inband_status) {
+ /* We shouldn't be here */
+ if_printf(ifp, "Cannot change media in fixed-link mode!\n");
+ return (0);
+ }
+
+ if (sc->use_inband_status) {
+ mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
+ return (0);
+ }
+
+ mvneta_sc_lock(sc);
+
+ /* Update PHY */
+ mii_mediachg(sc->mii);
+
+ mvneta_sc_unlock(sc);
+
+ return (0);
+}
+
+STATIC void
+mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
+{
+ uint32_t psr;
+
+ psr = MVNETA_READ(sc, MVNETA_PSR);
+
+ /* Speed */
+ if (psr & MVNETA_PSR_GMIISPEED)
+ ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
+ else if (psr & MVNETA_PSR_MIISPEED)
+ ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
+ else if (psr & MVNETA_PSR_LINKUP)
+ ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
+
+ /* Duplex */
+ if (psr & MVNETA_PSR_FULLDX)
+ ifmr->ifm_active |= IFM_FDX;
+
+ /* Link */
+ ifmr->ifm_status = IFM_AVALID;
+ if (psr & MVNETA_PSR_LINKUP)
+ ifmr->ifm_status |= IFM_ACTIVE;
+}
+
+STATIC void
+mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct mvneta_softc *sc;
+ struct mii_data *mii;
+
+ sc = ifp->if_softc;
+
+ if (!sc->phy_attached && !sc->use_inband_status) {
+ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+ return;
+ }
+
+ mvneta_sc_lock(sc);
+
+ if (sc->use_inband_status) {
+ mvneta_get_media(sc, ifmr);
+ mvneta_sc_unlock(sc);
+ return;
+ }
+
+ mii = sc->mii;
+ mii_pollstat(mii);
+
+ ifmr->ifm_active = mii->mii_media_active;
+ ifmr->ifm_status = mii->mii_media_status;
+
+ mvneta_sc_unlock(sc);
+}
+
+/*
+ * Link State Notify
+ */
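+/*
+ * In-band autonegotiation is controlled through PANC (enable bits,
+ * forced link pass/fail), PMACC2 (in-band AN mode) and PSOMSCD;
+ * mvneta_update_autoneg() flips all three consistently.
+ */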
+STATIC void
+mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
+{
+ int reg;
+
+ if (enable) {
+ reg = MVNETA_READ(sc, MVNETA_PANC);
+ reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
+ MVNETA_PANC_ANFCEN);
+ reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
+ MVNETA_PANC_INBANDANEN;
+ MVNETA_WRITE(sc, MVNETA_PANC, reg);
+
+ reg = MVNETA_READ(sc, MVNETA_PMACC2);
+ reg |= MVNETA_PMACC2_INBANDANMODE;
+ MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
+
+ reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
+ reg |= MVNETA_PSOMSCD_ENABLE;
+ MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
+ } else {
+ reg = MVNETA_READ(sc, MVNETA_PANC);
+ reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
+ MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
+ MVNETA_PANC_INBANDANEN);
+ MVNETA_WRITE(sc, MVNETA_PANC, reg);
+
+ reg = MVNETA_READ(sc, MVNETA_PMACC2);
+ reg &= ~MVNETA_PMACC2_INBANDANMODE;
+ MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
+
+ reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
+ reg &= ~MVNETA_PSOMSCD_ENABLE;
+ MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
+ }
+}
+
+STATIC int
+mvneta_update_media(struct mvneta_softc *sc, int media)
+{
+ boolean_t autoneg, running;
+ char *type;
+ int reg, err;
+
+ err = 0;
+
+ mvneta_sc_lock(sc);
+
+ mvneta_linkreset(sc);
+
+ running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
+ if (running)
+ mvneta_stop_locked(sc);
+
+ autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
+
+ if (sc->use_inband_status)
+ mvneta_update_autoneg(sc, autoneg);
+
+ mvneta_update_eee(sc);
+ mvneta_update_fc(sc);
+
+ if (!autoneg) {
+ reg = MVNETA_READ(sc, MVNETA_PANC);
+ reg &= ~(MVNETA_PANC_SETGMIISPEED |
+ MVNETA_PANC_SETMIISPEED |
+ MVNETA_PANC_SETFULLDX);
+ if (IFM_SUBTYPE(media) == IFM_1000_T ||
+ IFM_SUBTYPE(media) == IFM_2500_T ||
+ IFM_SUBTYPE(media) == IFM_2500_KX) {
+ if ((media & IFM_FDX) == 0) {
+ if (IFM_SUBTYPE(media) == IFM_2500_T)
+ type = "2500Base-T";
+ else if (IFM_SUBTYPE(media) == IFM_2500_KX)
+ type = "2500Base-KX";
+ else
+ type = "1000Base-T";
+ device_printf(sc->dev,
+ "%s half-duplex unsupported\n", type);
+ err = EINVAL;
+ goto out;
+ }
+ reg |= MVNETA_PANC_SETGMIISPEED;
+ } else if (IFM_SUBTYPE(media) == IFM_100_TX)
+ reg |= MVNETA_PANC_SETMIISPEED;
+
+ if (media & IFM_FDX)
+ reg |= MVNETA_PANC_SETFULLDX;
+
+ MVNETA_WRITE(sc, MVNETA_PANC, reg);
+ }
+out:
+ if (running)
+ mvneta_init_locked(sc);
+ mvneta_sc_unlock(sc);
+ return (err);
+}
+
+STATIC void
+mvneta_adjust_link(struct mvneta_softc *sc)
+{
+ boolean_t phy_linkup;
+ int reg;
+
+ /* Update eee/fc */
+ mvneta_update_eee(sc);
+ mvneta_update_fc(sc);
+
+ /* Check for link change */
+ phy_linkup = (sc->mii->mii_media_status &
+ (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
+
+ if (sc->linkup != phy_linkup)
+ mvneta_linkupdate(sc, phy_linkup);
+
+ /* Don't update media on disabled link */
+ if (!phy_linkup)
+ return;
+
+ /* Check for media type change */
+ if (sc->mvneta_media != sc->mii->mii_media_active) {
+ sc->mvneta_media = sc->mii->mii_media_active;
+
+ reg = MVNETA_READ(sc, MVNETA_PANC);
+ reg &= ~(MVNETA_PANC_SETGMIISPEED |
+ MVNETA_PANC_SETMIISPEED |
+ MVNETA_PANC_SETFULLDX);
+ if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
+ IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T ||
+ IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_KX) {
+ reg |= MVNETA_PANC_SETGMIISPEED;
+ } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
+ reg |= MVNETA_PANC_SETMIISPEED;
+
+ if (sc->mvneta_media & IFM_FDX)
+ reg |= MVNETA_PANC_SETFULLDX;
+
+ MVNETA_WRITE(sc, MVNETA_PANC, reg);
+ }
+}
+
+STATIC void
+mvneta_link_isr(struct mvneta_softc *sc)
+{
+ int linkup;
+
+ KASSERT_SC_MTX(sc);
+
+ linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
+ if (sc->linkup == linkup)
+ return;
+
+ if (linkup == TRUE)
+ mvneta_linkup(sc);
+ else
+ mvneta_linkdown(sc);
+
+#ifdef DEBUG
+ log(LOG_DEBUG,
+ "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
+#endif
+}
+
+STATIC void
+mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
+{
+
+ KASSERT_SC_MTX(sc);
+
+ if (linkup == TRUE)
+ mvneta_linkup(sc);
+ else
+ mvneta_linkdown(sc);
+
+#ifdef DEBUG
+ log(LOG_DEBUG,
+ "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
+#endif
+}
+
+STATIC void
+mvneta_update_eee(struct mvneta_softc *sc)
+{
+ uint32_t reg;
+
+ KASSERT_SC_MTX(sc);
+
+ /* set EEE parameters */
+ reg = MVNETA_READ(sc, MVNETA_LPIC1);
+ if (sc->cf_lpi)
+ reg |= MVNETA_LPIC1_LPIRE;
+ else
+ reg &= ~MVNETA_LPIC1_LPIRE;
+ MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
+}
+
+STATIC void
+mvneta_update_fc(struct mvneta_softc *sc)
+{
+ uint32_t reg;
+
+ KASSERT_SC_MTX(sc);
+
+ reg = MVNETA_READ(sc, MVNETA_PANC);
+ if (sc->cf_fc) {
+ /* Flow control negotiation */
+ reg |= MVNETA_PANC_PAUSEADV;
+ reg |= MVNETA_PANC_ANFCEN;
+ } else {
+ /* Disable flow control negotiation */
+ reg &= ~MVNETA_PANC_PAUSEADV;
+ reg &= ~MVNETA_PANC_ANFCEN;
+ }
+
+ MVNETA_WRITE(sc, MVNETA_PANC, reg);
+}
+
+STATIC void
+mvneta_linkup(struct mvneta_softc *sc)
+{
+ uint32_t reg;
+
+ KASSERT_SC_MTX(sc);
+
+ if (!sc->use_inband_status) {
+ reg = MVNETA_READ(sc, MVNETA_PANC);
+ reg |= MVNETA_PANC_FORCELINKPASS;
+ reg &= ~MVNETA_PANC_FORCELINKFAIL;
+ MVNETA_WRITE(sc, MVNETA_PANC, reg);
+ }
+
+ mvneta_qflush(sc->ifp);
+ mvneta_portup(sc);
+ sc->linkup = TRUE;
+ if_link_state_change(sc->ifp, LINK_STATE_UP);
+}
+
+STATIC void
+mvneta_linkdown(struct mvneta_softc *sc)
+{
+ uint32_t reg;
+
+ KASSERT_SC_MTX(sc);
+
+ if (!sc->use_inband_status) {
+ reg = MVNETA_READ(sc, MVNETA_PANC);
+ reg &= ~MVNETA_PANC_FORCELINKPASS;
+ reg |= MVNETA_PANC_FORCELINKFAIL;
+ MVNETA_WRITE(sc, MVNETA_PANC, reg);
+ }
+
+ mvneta_portdown(sc);
+ mvneta_qflush(sc->ifp);
+ sc->linkup = FALSE;
+ if_link_state_change(sc->ifp, LINK_STATE_DOWN);
+}
+
+STATIC void
+mvneta_linkreset(struct mvneta_softc *sc)
+{
+ struct mii_softc *mii;
+
+ if (sc->phy_attached) {
+ /* Force reset PHY */
+ mii = LIST_FIRST(&sc->mii->mii_phys);
+ if (mii)
+ mii_phy_reset(mii);
+ }
+}
+
+/*
+ * Tx Subroutines
+ */
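+/*
+ * Map an mbuf chain and fill Tx descriptors: the first descriptor
+ * carries the F (first) flag and the checksum commands, the last
+ * the L (last) and padding flags. The hardware is then notified
+ * through PTXSU, at most 255 new descriptors per write.
+ */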
+STATIC int
+mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
+{
+ struct ifnet *ifp;
+ bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
+ struct mbuf *mtmp, *mbuf;
+ struct mvneta_tx_ring *tx;
+ struct mvneta_buf *txbuf;
+ struct mvneta_tx_desc *t;
+ uint32_t ptxsu;
+ int start, used, error, i, txnsegs;
+
+ mbuf = *mbufp;
+ tx = MVNETA_TX_RING(sc, q);
+ DASSERT(tx->used >= 0);
+ DASSERT(tx->used <= MVNETA_TX_RING_CNT);
+ t = NULL;
+ ifp = sc->ifp;
+
+ if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
+ mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
+ if (mbuf == NULL) {
+ tx->drv_error++;
+ *mbufp = NULL;
+ return (ENOBUFS);
+ }
+ mbuf->m_flags &= ~M_VLANTAG;
+ *mbufp = mbuf;
+ }
+
+ if (__predict_false(mbuf->m_next != NULL &&
+ (mbuf->m_pkthdr.csum_flags &
+ (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
+ if (M_WRITABLE(mbuf) == 0) {
+ mtmp = m_dup(mbuf, M_NOWAIT);
+ m_freem(mbuf);
+ if (mtmp == NULL) {
+ tx->drv_error++;
+ *mbufp = NULL;
+ return (ENOBUFS);
+ }
+ *mbufp = mbuf = mtmp;
+ }
+ }
+
+ /* load mbuf using dmamap of 1st descriptor */
+ txbuf = &tx->txbuf[tx->cpu];
+ error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
+ txbuf->dmap, mbuf, txsegs, &txnsegs,
+ BUS_DMA_NOWAIT);
+ if (__predict_false(error != 0)) {
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error);
+#endif
+ /* ENOMEM is the only recoverable error here (except EFBIG). */
+ if (error != ENOMEM) {
+ tx->drv_error++;
+ m_freem(mbuf);
+ *mbufp = NULL;
+ return (ENOBUFS);
+ }
+ return (error);
+ }
+
+ if (__predict_false(txnsegs <= 0
+ || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
+ /* Not enough descriptors, or the mbuf is broken. */
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
+ ifp->if_xname, q, txnsegs);
+#endif
+ bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
+ return (ENOBUFS);
+ }
+ DASSERT(txbuf->m == NULL);
+
+ /* remember mbuf using 1st descriptor */
+ txbuf->m = mbuf;
+ bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ /* load to tx descriptors */
+ start = tx->cpu;
+ used = 0;
+ for (i = 0; i < txnsegs; i++) {
+ t = &tx->desc[tx->cpu];
+ t->command = 0;
+ t->l4ichk = 0;
+ t->flags = 0;
+ if (__predict_true(i == 0)) {
+ /* 1st descriptor */
+ t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
+ t->command |= MVNETA_TX_CMD_F;
+ mvneta_tx_set_csumflag(ifp, t, mbuf);
+ }
+ t->bufptr_pa = txsegs[i].ds_addr;
+ t->bytecnt = txsegs[i].ds_len;
+ tx->cpu = tx_counter_adv(tx->cpu, 1);
+
+ tx->used++;
+ used++;
+ }
+ /* t is last descriptor here */
+ DASSERT(t != NULL);
+ t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
+
+ bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ while (__predict_false(used > 255)) {
+ ptxsu = MVNETA_PTXSU_NOWD(255);
+ MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+ used -= 255;
+ }
+ if (__predict_true(used > 0)) {
+ ptxsu = MVNETA_PTXSU_NOWD(used);
+ MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+ }
+ return (0);
+}
+
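+/*
+ * Translate the mbuf checksum-offload requests into descriptor command
+ * bits.  Only IPv4 is handled here; IPv6 frames get no offload.
+ */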
+STATIC void
+mvneta_tx_set_csumflag(struct ifnet *ifp,
+ struct mvneta_tx_desc *t, struct mbuf *m)
+{
+ struct ether_header *eh;
+ int csum_flags;
+ uint32_t iphl, ipoff;
+ struct ip *ip;
+
+ iphl = ipoff = 0;
+ csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags;
+ eh = mtod(m, struct ether_header *);
+ switch (ntohs(eh->ether_type)) {
+ case ETHERTYPE_IP:
+ ipoff = ETHER_HDR_LEN;
+ break;
+ case ETHERTYPE_IPV6:
+ return;
+ case ETHERTYPE_VLAN:
+ ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ break;
+ }
+
+ if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
+ ip = (struct ip *)(m->m_data + ipoff);
+ iphl = ip->ip_hl << 2;
+ t->command |= MVNETA_TX_CMD_L3_IP4;
+ } else {
+ t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
+ return;
+ }
+
+ /* L3 */
+ if (csum_flags & CSUM_IP) {
+ t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
+ }
+
+ /* L4 */
+ if (csum_flags & CSUM_IP_TCP) {
+ t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
+ t->command |= MVNETA_TX_CMD_L4_TCP;
+ } else if (csum_flags & CSUM_IP_UDP) {
+ t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
+ t->command |= MVNETA_TX_CMD_L4_UDP;
+ } else
+ t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
+
+ t->l4ichk = 0;
+ t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
+ t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
+}
+
+STATIC void
+mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_tx_ring *tx;
+ struct mvneta_buf *txbuf;
+ struct mvneta_tx_desc *t;
+ uint32_t ptxs, ptxsu, ndesc;
+ int i;
+
+ KASSERT_TX_MTX(sc, q);
+
+ tx = MVNETA_TX_RING(sc, q);
+ if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
+ return;
+
+ ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
+ ndesc = MVNETA_PTXS_GET_TBC(ptxs);
+
+ if (__predict_false(ndesc == 0)) {
+ if (tx->used == 0)
+ tx->queue_status = MVNETA_QUEUE_IDLE;
+ else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
+ ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
+ tx->queue_hung = TRUE;
+ return;
+ }
+
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
+ sc->ifp->if_xname, q, ndesc);
+#endif
+
+ bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+ for (i = 0; i < ndesc; i++) {
+ t = &tx->desc[tx->dma];
+#ifdef MVNETA_KTR
+ if (t->flags & MVNETA_TX_F_ES)
+ CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
+ sc->ifp->if_xname, q, tx->dma);
+#endif
+ txbuf = &tx->txbuf[tx->dma];
+ if (__predict_true(txbuf->m != NULL)) {
+ DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
+ bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
+ m_freem(txbuf->m);
+ txbuf->m = NULL;
+ } else
+ DASSERT((t->command & MVNETA_TX_CMD_F) == 0);
+ tx->dma = tx_counter_adv(tx->dma, 1);
+ tx->used--;
+ }
+ DASSERT(tx->used >= 0);
+ DASSERT(tx->used <= MVNETA_TX_RING_CNT);
+ while (__predict_false(ndesc > 255)) {
+ ptxsu = MVNETA_PTXSU_NORB(255);
+ MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+ ndesc -= 255;
+ }
+ if (__predict_true(ndesc > 0)) {
+ ptxsu = MVNETA_PTXSU_NORB(ndesc);
+ MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
+ }
+#ifdef MVNETA_KTR
+ CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
+ sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used);
+#endif
+
+ tx->watchdog_time = ticks;
+
+ if (tx->used == 0)
+ tx->queue_status = MVNETA_QUEUE_IDLE;
+}
+
+/*
+ * Do a final TX complete when TX is idle.
+ */
+STATIC void
+mvneta_tx_drain(struct mvneta_softc *sc)
+{
+ struct mvneta_tx_ring *tx;
+ int q;
+
+ /*
+ * Handle trailing mbuf on TX queue.
+ * The check is done locklessly to avoid TX path contention.
+ */
+ for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
+ tx = MVNETA_TX_RING(sc, q);
+ if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
+ tx->used > 0) {
+ mvneta_tx_lockq(sc, q);
+ mvneta_tx_queue_complete(sc, q);
+ mvneta_tx_unlockq(sc, q);
+ }
+ }
+}
+
+/*
+ * Rx Subroutines
+ */
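+/*
+ * Process up to 'count' received frames on queue q (a non-positive count
+ * means no limit).  Returns non-zero when frames are still pending.
+ */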
+STATIC int
+mvneta_rx(struct mvneta_softc *sc, int q, int count)
+{
+ uint32_t prxs, npkt;
+ int more;
+
+ more = 0;
+ mvneta_rx_lockq(sc, q);
+ prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
+ npkt = MVNETA_PRXS_GET_ODC(prxs);
+ if (__predict_false(npkt == 0))
+ goto out;
+
+ if (count > 0 && npkt > count) {
+ more = 1;
+ npkt = count;
+ }
+ mvneta_rx_queue(sc, q, npkt);
+out:
+ mvneta_rx_unlockq(sc, q);
+ return (more);
+}
+
+/*
+ * Helper routine for updating PRXSU register of a given queue.
+ * Counts larger than 255 (the width of the 8-bit field) are split
+ * across multiple writes.
+ */
+STATIC __inline void
+mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
+{
+ uint32_t prxsu;
+
+ while (__predict_false(processed > 255)) {
+ prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
+ MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+ processed -= 255;
+ }
+ prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
+ MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+}
+
+static __inline void
+mvneta_prefetch(void *p)
+{
+
+ __builtin_prefetch(p);
+}
+
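+/*
+ * Receive up to npkt frames on queue q.  The ring is replenished every
+ * MVNETA_RX_REFILL_COUNT processed descriptors, so the hardware is not
+ * starved of buffers while a large batch is drained.
+ */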
+STATIC void
+mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
+{
+ struct ifnet *ifp;
+ struct mvneta_rx_ring *rx;
+ struct mvneta_rx_desc *r;
+ struct mvneta_buf *rxbuf;
+ struct mbuf *m;
+ struct lro_ctrl *lro;
+ struct lro_entry *queued;
+ void *pktbuf;
+ int i, pktlen, processed, ndma;
+
+ KASSERT_RX_MTX(sc, q);
+
+ ifp = sc->ifp;
+ rx = MVNETA_RX_RING(sc, q);
+ processed = 0;
+
+ if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
+ return;
+
+ bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+ for (i = 0; i < npkt; i++) {
+ /* Prefetch next desc, rxbuf. */
+ ndma = rx_counter_adv(rx->dma, 1);
+ mvneta_prefetch(&rx->desc[ndma]);
+ mvneta_prefetch(&rx->rxbuf[ndma]);
+
+ /* get descriptor and packet */
+ r = &rx->desc[rx->dma];
+ rxbuf = &rx->rxbuf[rx->dma];
+ m = rxbuf->m;
+ rxbuf->m = NULL;
+ DASSERT(m != NULL);
+ bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
+ /* Prefetch mbuf header. */
+ mvneta_prefetch(m);
+
+ processed++;
+ /* Drop desc with error status or not in a single buffer. */
+ DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
+ (MVNETA_RX_F|MVNETA_RX_L));
+ if (__predict_false((r->status & MVNETA_RX_ES) ||
+ (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
+ (MVNETA_RX_F|MVNETA_RX_L)))
+ goto rx_error;
+
+ /*
+ * [ OFF | MH | PKT | CRC ]
+ * bytecnt cover MH, PKT, CRC
+ */
+ pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
+ pktbuf = (uint8_t *)r->bufptr_va + MVNETA_PACKET_OFFSET +
+ MVNETA_HWHEADER_SIZE;
+
+ /* Prefetch mbuf data. */
+ mvneta_prefetch(pktbuf);
+
+ /* Write value to mbuf (avoid read). */
+ m->m_data = pktbuf;
+ m->m_len = m->m_pkthdr.len = pktlen;
+ m->m_pkthdr.rcvif = ifp;
+ mvneta_rx_set_csumflag(ifp, r, m);
+
+ /* Increase rx_dma before releasing the lock. */
+ rx->dma = ndma;
+
+ if (__predict_false(rx->lro_enabled &&
+ ((r->status & MVNETA_RX_L3_IP) != 0) &&
+ ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
+ (m->m_pkthdr.csum_flags &
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
+ if (rx->lro.lro_cnt != 0) {
+ if (tcp_lro_rx(&rx->lro, m, 0) == 0)
+ goto rx_done;
+ }
+ }
+
+ mvneta_rx_unlockq(sc, q);
+ (*ifp->if_input)(ifp, m);
+ mvneta_rx_lockq(sc, q);
+ /*
+ * Check whether this queue has been disabled in the
+ * meantime.  If so, flush pending LRO work and exit.
+ */
+ if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
+ goto rx_lro;
+rx_done:
+ /* Refresh receive ring to avoid stall and minimize jitter. */
+ if (processed >= MVNETA_RX_REFILL_COUNT) {
+ mvneta_prxsu_update(sc, q, processed);
+ mvneta_rx_queue_refill(sc, q);
+ processed = 0;
+ }
+ continue;
+rx_error:
+ m_freem(m);
+ rx->dma = ndma;
+ /* Refresh receive ring to avoid stall and minimize jitter. */
+ if (processed >= MVNETA_RX_REFILL_COUNT) {
+ mvneta_prxsu_update(sc, q, processed);
+ mvneta_rx_queue_refill(sc, q);
+ processed = 0;
+ }
+ }
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
+#endif
+ /* DMA status update */
+ mvneta_prxsu_update(sc, q, processed);
+ /* Refill the rest of buffers if there are any to refill */
+ mvneta_rx_queue_refill(sc, q);
+
+rx_lro:
+ /*
+ * Flush any outstanding LRO work
+ */
+ lro = &rx->lro;
+ while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
+ LIST_REMOVE(queued, next);
+ tcp_lro_flush(lro, queued);
+ }
+}
+
+STATIC void
+mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
+{
+
+ bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
+ /* This will remove all data at once */
+ m_freem(rxbuf->m);
+}
+
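+/*
+ * Top up queue q with fresh mbuf clusters.  The number of free slots is
+ * derived from PRXS (occupied plus non-occupied descriptor counters);
+ * newly written descriptors are announced via PRXSU in chunks of at
+ * most 255.
+ */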
+STATIC void
+mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
+{
+ struct mvneta_rx_ring *rx;
+ struct mvneta_rx_desc *r;
+ struct mvneta_buf *rxbuf;
+ bus_dma_segment_t segs;
+ struct mbuf *m;
+ uint32_t prxs, prxsu, ndesc;
+ int npkt, refill, nsegs, error;
+
+ KASSERT_RX_MTX(sc, q);
+
+ rx = MVNETA_RX_RING(sc, q);
+ prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
+ ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
+ refill = MVNETA_RX_RING_CNT - ndesc;
+#ifdef MVNETA_KTR
+ CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
+ refill);
+#endif
+ if (__predict_false(refill <= 0))
+ return;
+
+ for (npkt = 0; npkt < refill; npkt++) {
+ rxbuf = &rx->rxbuf[rx->cpu];
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (__predict_false(m == NULL)) {
+ error = ENOBUFS;
+ break;
+ }
+ m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+
+ error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
+ m, &segs, &nsegs, BUS_DMA_NOWAIT);
+ if (__predict_false(error != 0 || nsegs != 1)) {
+ /* Failed to load Rx mbuf DMA map; retry the refill later. */
+ m_freem(m);
+ break;
+ }
+
+ /* Add the packet to the ring */
+ rxbuf->m = m;
+ r = &rx->desc[rx->cpu];
+ r->bufptr_pa = segs.ds_addr;
+ r->bufptr_va = (uint32_t)m->m_data;
+
+ rx->cpu = rx_counter_adv(rx->cpu, 1);
+ }
+ if (npkt == 0) {
+ if (refill == MVNETA_RX_RING_CNT)
+ rx->needs_refill = TRUE;
+ return;
+ }
+
+ rx->needs_refill = FALSE;
+ bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ while (__predict_false(npkt > 255)) {
+ prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
+ MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+ npkt -= 255;
+ }
+ if (__predict_true(npkt > 0)) {
+ prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
+ MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+ }
+}
+
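+/*
+ * Convert the Rx descriptor status bits into mbuf checksum flags:
+ * a valid IPv4 header yields CSUM_L3_CALC|CSUM_L3_VALID, and a good
+ * TCP/UDP checksum additionally marks the L4 data valid with a
+ * pseudo-checksum of 0xffff.
+ */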
+STATIC __inline void
+mvneta_rx_set_csumflag(struct ifnet *ifp,
+ struct mvneta_rx_desc *r, struct mbuf *m)
+{
+ uint32_t csum_flags;
+
+ csum_flags = 0;
+ if (__predict_false((r->status &
+ (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
+ return; /* not a IP packet */
+
+ /* L3 */
+ if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
+ MVNETA_RX_IP_HEADER_OK))
+ csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
+
+ if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
+ (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
+ /* L4 */
+ switch (r->status & MVNETA_RX_L4_MASK) {
+ case MVNETA_RX_L4_TCP:
+ case MVNETA_RX_L4_UDP:
+ csum_flags |= CSUM_L4_CALC;
+ if (__predict_true((r->status &
+ MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
+ csum_flags |= CSUM_L4_VALID;
+ m->m_pkthdr.csum_data = htons(0xffff);
+ }
+ break;
+ case MVNETA_RX_L4_OTH:
+ default:
+ break;
+ }
+ }
+ m->m_pkthdr.csum_flags = csum_flags;
+}
+
+/*
+ * MAC address filter
+ */
+STATIC void
+mvneta_filter_setup(struct mvneta_softc *sc)
+{
+ struct ifnet *ifp;
+ uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
+ uint32_t pxc;
+ int i;
+
+ KASSERT_SC_MTX(sc);
+
+ memset(dfut, 0, sizeof(dfut));
+ memset(dfsmt, 0, sizeof(dfsmt));
+ memset(dfomt, 0, sizeof(dfomt));
+
+ ifp = sc->ifp;
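+ /* XXX: Multicast filtering is not implemented yet; force ALLMULTI. */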
+ ifp->if_flags |= IFF_ALLMULTI;
+ if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
+ for (i = 0; i < MVNETA_NDFSMT; i++) {
+ dfsmt[i] = dfomt[i] =
+ MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+ MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+ MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+ MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+ }
+ }
+
+ pxc = MVNETA_READ(sc, MVNETA_PXC);
+ pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
+ MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
+ pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
+ pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
+ pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
+ pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
+ pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
+ pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
+ if (ifp->if_flags & IFF_BROADCAST) {
+ pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
+ }
+ if (ifp->if_flags & IFF_PROMISC) {
+ pxc |= MVNETA_PXC_UPM;
+ }
+ MVNETA_WRITE(sc, MVNETA_PXC, pxc);
+
+ /* Set Destination Address Filter Unicast Table */
+ if (ifp->if_flags & IFF_PROMISC) {
+ /* pass all unicast addresses */
+ for (i = 0; i < MVNETA_NDFUT; i++) {
+ dfut[i] =
+ MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+ MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+ MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+ MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+ }
+ } else {
+ i = sc->enaddr[5] & 0xf; /* last nibble */
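+ /* Four table entries per 32-bit word: word i>>2, byte lane i&3. */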
+ dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+ }
+ MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
+
+ /* Set Destination Address Filter Multicast Tables */
+ MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
+ MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
+}
+
+/*
+ * sysctl(9)
+ */
+STATIC int
+sysctl_read_mib(SYSCTL_HANDLER_ARGS)
+{
+ struct mvneta_sysctl_mib *arg;
+ struct mvneta_softc *sc;
+ uint64_t val;
+
+ arg = (struct mvneta_sysctl_mib *)arg1;
+ if (arg == NULL)
+ return (EINVAL);
+
+ sc = arg->sc;
+ if (sc == NULL)
+ return (EINVAL);
+ if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
+ return (EINVAL);
+
+ mvneta_sc_lock(sc);
+ val = arg->counter;
+ mvneta_sc_unlock(sc);
+ return (sysctl_handle_64(oidp, &val, 0, req));
+}
+
+STATIC int
+sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
+{
+ struct mvneta_softc *sc;
+ int err, val;
+
+ val = 0;
+ sc = (struct mvneta_softc *)arg1;
+ if (sc == NULL)
+ return (EINVAL);
+
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0)
+ return (err);
+
+ if (val < 0 || val > 1)
+ return (EINVAL);
+
+ if (val == 1) {
+ mvneta_sc_lock(sc);
+ mvneta_clear_mib(sc);
+ mvneta_sc_unlock(sc);
+ }
+
+ return (0);
+}
+
+STATIC int
+sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
+{
+ struct mvneta_sysctl_queue *arg;
+ struct mvneta_rx_ring *rx;
+ struct mvneta_softc *sc;
+ uint32_t reg, time_mvtclk;
+ int err, time_us;
+
+ rx = NULL;
+ arg = (struct mvneta_sysctl_queue *)arg1;
+ if (arg == NULL)
+ return (EINVAL);
+ if (arg->queue < 0 || arg->queue >= MVNETA_RX_QNUM_MAX)
+ return (EINVAL);
+ if (arg->rxtx != MVNETA_SYSCTL_RX)
+ return (EINVAL);
+
+ sc = arg->sc;
+ if (sc == NULL)
+ return (EINVAL);
+
+ /* Read the current threshold timer and convert it to microseconds. */
+ mvneta_sc_lock(sc);
+ mvneta_rx_lockq(sc, arg->queue);
+ rx = MVNETA_RX_RING(sc, arg->queue);
+ time_mvtclk = rx->queue_th_time;
+ time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / get_tclk();
+ mvneta_rx_unlockq(sc, arg->queue);
+ mvneta_sc_unlock(sc);
+
+ err = sysctl_handle_int(oidp, &time_us, 0, req);
+ if (err != 0)
+ return (err);
+
+ mvneta_sc_lock(sc);
+ mvneta_rx_lockq(sc, arg->queue);
+
+ /* Update the threshold timer; the valid range is 0-1 second. */
+ if (time_us < 0 || time_us > (1000 * 1000)) {
+ mvneta_rx_unlockq(sc, arg->queue);
+ mvneta_sc_unlock(sc);
+ return (EINVAL);
+ }
+ time_mvtclk =
+ (uint64_t)get_tclk() * (uint64_t)time_us / (1000ULL * 1000ULL);
+ rx->queue_th_time = time_mvtclk;
+ reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
+ MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
+ mvneta_rx_unlockq(sc, arg->queue);
+ mvneta_sc_unlock(sc);
+
+ return (0);
+}
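+
+/*
+ * Example (assuming unit 0):
+ *     # sysctl dev.mvneta.0.rx.queue0.threshold_timer_us=1000
+ * would set a 1 ms interrupt coalescing threshold timer on Rx queue 0.
+ */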
+
+STATIC void
+sysctl_mvneta_init(struct mvneta_softc *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *children;
+ struct sysctl_oid_list *rxchildren;
+ struct sysctl_oid_list *qchildren, *mchildren;
+ struct sysctl_oid *tree;
+ int i, q;
+ struct mvneta_sysctl_queue *rxarg;
+#define MVNETA_SYSCTL_NAME(num) "queue" # num
+ static const char *sysctl_queue_names[] = {
+ MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
+ MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
+ MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
+ MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
+ };
+#undef MVNETA_SYSCTL_NAME
+
+#define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
+ static const char *sysctl_queue_descrs[] = {
+ MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
+ MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
+ MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
+ MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
+ };
+#undef MVNETA_SYSCTL_DESCR
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
+
+ tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
+ CTLFLAG_RD, 0, "NETA RX");
+ rxchildren = SYSCTL_CHILDREN(tree);
+ tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
+ CTLFLAG_RD, 0, "NETA MIB");
+ mchildren = SYSCTL_CHILDREN(tree);
+
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
+ CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
+ CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
+
+ /*
+ * MIB access
+ */
+ /* dev.mvneta.[unit].mib.<mibs> */
+ for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
+ const char *name = mvneta_mib_list[i].sysctl_name;
+ const char *desc = mvneta_mib_list[i].desc;
+ struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
+
+ mib_arg->sc = sc;
+ mib_arg->index = i;
+ SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, name,
+ CTLTYPE_U64|CTLFLAG_RD, (void *)mib_arg, 0,
+ sysctl_read_mib, "QU", desc);
+ }
+ SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
+ CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
+ SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
+ CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
+ SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
+ CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
+
+ SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
+ CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0,
+ sysctl_clear_mib, "I", "Reset MIB counters");
+
+ for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+ rxarg = &sc->sysctl_rx_queue[q];
+
+ rxarg->sc = sc;
+ rxarg->queue = q;
+ rxarg->rxtx = MVNETA_SYSCTL_RX;
+
+ /* dev.mvneta.[unit].rx.[queue] */
+ tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
+ sysctl_queue_names[q], CTLFLAG_RD, 0,
+ sysctl_queue_descrs[q]);
+ qchildren = SYSCTL_CHILDREN(tree);
+
+ /* dev.mvneta.[unit].rx.[queue].threshold_timer_us */
+ SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
+ CTLTYPE_UINT | CTLFLAG_RW, rxarg, 0,
+ sysctl_set_queue_rxthtime, "I",
+ "interrupt coalescing threshold timer [us]");
+ }
+}
+
+/*
+ * MIB
+ */
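+/*
+ * The port MIB counters are clear-on-read: reading a counter resets it,
+ * which both mvneta_clear_mib() and mvneta_update_mib() rely on.
+ */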
+STATIC void
+mvneta_clear_mib(struct mvneta_softc *sc)
+{
+ int i;
+
+ KASSERT_SC_MTX(sc);
+
+ for (i = 0; i < nitems(mvneta_mib_list); i++) {
+ if (mvneta_mib_list[i].reg64)
+ MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+ else
+ MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+ sc->sysctl_mib[i].counter = 0;
+ }
+ MVNETA_READ(sc, MVNETA_PDFC);
+ sc->counter_pdfc = 0;
+ MVNETA_READ(sc, MVNETA_POFC);
+ sc->counter_pofc = 0;
+ sc->counter_watchdog = 0;
+}
+
+STATIC void
+mvneta_update_mib(struct mvneta_softc *sc)
+{
+ struct mvneta_tx_ring *tx;
+ int i;
+ uint64_t val;
+ uint32_t reg;
+
+ for (i = 0; i < nitems(mvneta_mib_list); i++) {
+ if (mvneta_mib_list[i].reg64)
+ val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+ else
+ val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+
+ if (val == 0)
+ continue;
+
+ sc->sysctl_mib[i].counter += val;
+ switch (mvneta_mib_list[i].regnum) {
+ case MVNETA_MIB_RX_GOOD_OCT:
+ /* if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val); */
+ break;
+ case MVNETA_MIB_RX_BAD_FRAME:
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
+ break;
+ case MVNETA_MIB_RX_GOOD_FRAME:
+ if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
+ break;
+ case MVNETA_MIB_RX_MCAST_FRAME:
+ if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
+ break;
+ case MVNETA_MIB_TX_GOOD_OCT:
+ /* if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val); */
+ break;
+ case MVNETA_MIB_TX_GOOD_FRAME:
+ if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
+ break;
+ case MVNETA_MIB_TX_MCAST_FRAME:
+ if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
+ break;
+ case MVNETA_MIB_MAC_COL:
+ if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
+ break;
+ case MVNETA_MIB_TX_MAC_TRNS_ERR:
+ case MVNETA_MIB_TX_EXCES_COL:
+ case MVNETA_MIB_MAC_LATE_COL:
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
+ break;
+ }
+ }
+
+ reg = MVNETA_READ(sc, MVNETA_PDFC);
+ sc->counter_pdfc += reg;
+ if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
+ reg = MVNETA_READ(sc, MVNETA_POFC);
+ sc->counter_pofc += reg;
+ if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
+
+ /* TX watchdog. */
+ if (sc->counter_watchdog_mib > 0) {
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
+ sc->counter_watchdog_mib = 0;
+ }
+ /*
+ * TX driver errors:
+ * We do not take queue locks so as not to disrupt the TX path.
+ * At worst we miss a single drv_error increment, which the next
+ * MIB update will pick up. We may also clear the counter while
+ * the TX path is incrementing it, but since we only clear a
+ * non-zero counter, at most one error can be lost.
+ */
+ for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
+ tx = MVNETA_TX_RING(sc, i);
+
+ if (tx->drv_error > 0) {
+ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
+ tx->drv_error = 0;
+ }
+ }
+}
diff --git a/sys/dev/neta/if_mvneta_fdt.c b/sys/dev/neta/if_mvneta_fdt.c
new file mode 100644
index 0000000..67b31bd
--- /dev/null
+++ b/sys/dev/neta/if_mvneta_fdt.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/taskqueue.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp_lro.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include "if_mvnetareg.h"
+#include "if_mvnetavar.h"
+
+#define PHY_MODE_MAXLEN 10
+#define INBAND_STATUS_MAXLEN 16
+
+static int mvneta_fdt_probe(device_t);
+static int mvneta_fdt_attach(device_t);
+
+static device_method_t mvneta_fdt_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, mvneta_fdt_probe),
+ DEVMETHOD(device_attach, mvneta_fdt_attach),
+
+ /* End */
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(mvneta, mvneta_fdt_driver, mvneta_fdt_methods,
+ sizeof(struct mvneta_softc), mvneta_driver);
+
+static devclass_t mvneta_fdt_devclass;
+
+DRIVER_MODULE(mvneta, ofwbus, mvneta_fdt_driver, mvneta_fdt_devclass, 0, 0);
+DRIVER_MODULE(mvneta, simplebus, mvneta_fdt_driver, mvneta_fdt_devclass, 0, 0);
+
+static int mvneta_fdt_phy_acquire(device_t);
+
+static int
+mvneta_fdt_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "marvell,armada-370-neta"))
+ return (ENXIO);
+
+ device_set_desc(dev, "NETA controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+mvneta_fdt_attach(device_t dev)
+{
+ int err;
+
+ /* Try to fetch PHY information from FDT */
+ err = mvneta_fdt_phy_acquire(dev);
+ if (err != 0)
+ return (err);
+
+ return (mvneta_attach(dev));
+}
+
+static int
+mvneta_fdt_phy_acquire(device_t dev)
+{
+ struct mvneta_softc *sc;
+ phandle_t node, child, phy_handle;
+ char phymode[PHY_MODE_MAXLEN];
+ char managed[INBAND_STATUS_MAXLEN];
+ char *name;
+
+ sc = device_get_softc(dev);
+ node = ofw_bus_get_node(dev);
+
+ /* PHY mode is crucial */
+ if (OF_getprop(node, "phy-mode", phymode, sizeof(phymode)) <= 0) {
+ device_printf(dev, "Failed to acquire PHY mode from FDT.\n");
+ return (ENXIO);
+ }
+
+ if (strncmp(phymode, "rgmii-id", 8) == 0)
+ sc->phy_mode = MVNETA_PHY_RGMII_ID;
+ else if (strncmp(phymode, "rgmii", 5) == 0)
+ sc->phy_mode = MVNETA_PHY_RGMII;
+ else if (strncmp(phymode, "sgmii", 5) == 0)
+ sc->phy_mode = MVNETA_PHY_SGMII;
+ else if (strncmp(phymode, "qsgmii", 6) == 0)
+ sc->phy_mode = MVNETA_PHY_QSGMII;
+ else
+ sc->phy_mode = MVNETA_PHY_SGMII;
+
+ /* Check if in-band link status will be used */
+ if (OF_getprop(node, "managed", managed, sizeof(managed)) > 0) {
+ if (strncmp(managed, "in-band-status", 14) == 0) {
+ sc->use_inband_status = TRUE;
+ device_printf(dev, "Use in-band link status.\n");
+ return (0);
+ }
+ }
+
+ if (OF_getencprop(node, "phy", (void *)&phy_handle,
+ sizeof(phy_handle)) <= 0) {
+ /* Test for fixed-link (present i.e. in 388-gp) */
+ for (child = OF_child(node); child != 0; child = OF_peer(child)) {
+ if (OF_getprop_alloc(child,
+ "name", 1, (void **)&name) <= 0) {
+ continue;
+ }
+ if (strncmp(name, "fixed-link", 10) == 0) {
+ free(name, M_OFWPROP);
+ if (OF_getencprop(child, "speed",
+ &sc->phy_speed, sizeof(sc->phy_speed)) <= 0) {
+ if (bootverbose) {
+ device_printf(dev,
+ "No PHY information.\n");
+ }
+ return (ENXIO);
+ }
+ if (OF_hasprop(child, "full-duplex"))
+ sc->phy_fdx = TRUE;
+ else
+ sc->phy_fdx = FALSE;
+
+ /* Keep this flag just for the record */
+ sc->phy_addr = MII_PHY_ANY;
+
+ return (0);
+ }
+ free(name, M_OFWPROP);
+ }
+ if (bootverbose) {
+ device_printf(dev,
+ "Could not find PHY information in FDT.\n");
+ }
+ return (ENXIO);
+ } else {
+ phy_handle = OF_instance_to_package(phy_handle);
+ if (OF_getencprop(phy_handle, "reg", &sc->phy_addr,
+ sizeof(sc->phy_addr)) <= 0) {
+ device_printf(dev,
+ "Could not find PHY address in FDT.\n");
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
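+
+/*
+ * Sketch of the FDT properties consumed above (property names as in the
+ * code; "&phy0" is an arbitrary example label):
+ *
+ *     phy-mode = "rgmii-id";          selects the MVNETA_PHY_* mode
+ *     managed = "in-band-status";     optional, enables in-band link status
+ *     phy = <&phy0>;                  or a "fixed-link" child node with
+ *                                     "speed" and optional "full-duplex"
+ */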
+
+int
+mvneta_fdt_mac_address(struct mvneta_softc *sc, uint8_t *addr)
+{
+ phandle_t node;
+ uint8_t lmac[ETHER_ADDR_LEN];
+ uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};
+ int len;
+
+ /*
+ * Retrieve hw address from the device tree.
+ */
+ node = ofw_bus_get_node(sc->dev);
+ if (node == 0)
+ return (ENXIO);
+
+ len = OF_getprop(node, "local-mac-address", (void *)lmac, sizeof(lmac));
+ if (len != ETHER_ADDR_LEN)
+ return (ENOENT);
+
+ if (memcmp(lmac, zeromac, ETHER_ADDR_LEN) == 0) {
+ /* Invalid MAC address (all zeros) */
+ return (EINVAL);
+ }
+ memcpy(addr, lmac, ETHER_ADDR_LEN);
+
+ return (0);
+}
diff --git a/sys/dev/neta/if_mvnetareg.h b/sys/dev/neta/if_mvnetareg.h
new file mode 100644
index 0000000..ccc4644
--- /dev/null
+++ b/sys/dev/neta/if_mvnetareg.h
@@ -0,0 +1,926 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * Copyright (c) 2015 Internet Initiative Japan Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _IF_MVNETAREG_H_
+#define _IF_MVNETAREG_H_
+
+#if BYTE_ORDER == BIG_ENDIAN
+#error "BIG ENDIAN not supported"
+#endif
+
+#define MVNETA_SIZE 0x4000
+
+#define MVNETA_NWINDOW 6
+#define MVNETA_NREMAP 4
+
+#define MVNETA_MAX_QUEUE_SIZE 8
+#define MVNETA_RX_QNUM_MAX 1
+/* XXX: Currently multi-queue can be used on the Tx side only */
+#ifdef MVNETA_MULTIQUEUE
+#define MVNETA_TX_QNUM_MAX 2
+#else
+#define MVNETA_TX_QNUM_MAX 1
+#endif
+
+#if (MVNETA_TX_QNUM_MAX & (MVNETA_TX_QNUM_MAX - 1)) != 0
+#error "MVNETA_TX_QNUM_MAX should be a power of 2"
+#endif
+#if (MVNETA_RX_QNUM_MAX & (MVNETA_RX_QNUM_MAX - 1)) != 0
+#error "MVNETA_RX_QNUM_MAX should be a power of 2"
+#endif
+
+#define MVNETA_QUEUE(n) (1 << (n))
+#define MVNETA_QUEUE_ALL 0xff
+#define MVNETA_TX_QUEUE_ALL ((1<<MVNETA_TX_QNUM_MAX)-1)
+#define MVNETA_RX_QUEUE_ALL ((1<<MVNETA_RX_QNUM_MAX)-1)
+
+/*
+ * Ethernet Unit Registers
+ * GbE0 BASE 0x00007.0000 SIZE 0x4000
+ * GbE1 BASE 0x00007.4000 SIZE 0x4000
+ *
+ * TBD: reasonable bus space submapping....
+ */
+/* Address Decoder Registers */
+#define MVNETA_BASEADDR(n) (0x2200 + ((n) << 3)) /* Base Address */
+#define MVNETA_S(n) (0x2204 + ((n) << 3)) /* Size */
+#define MVNETA_HA(n) (0x2280 + ((n) << 2)) /* High Address Remap */
+#define MVNETA_BARE 0x2290 /* Base Address Enable */
+#define MVNETA_EPAP 0x2294 /* Ethernet Port Access Protect */
+
+/* Global Miscellaneous Registers */
+#define MVNETA_PHYADDR 0x2000
+#define MVNETA_SMI 0x2004
+#define MVNETA_EUDA 0x2008 /* Ethernet Unit Default Address */
+#define MVNETA_EUDID 0x200c /* Ethernet Unit Default ID */
+#define MVNETA_MBUS_CONF 0x2010 /* MBUS configuration */
+#define MVNETA_MBUS_RETRY_EN 0x20 /* MBUS transactions retry enable */
+#define MVNETA_EUIC 0x2080 /* Ethernet Unit Interrupt Cause */
+#define MVNETA_EUIM 0x2084 /* Ethernet Unit Interrupt Mask */
+#define MVNETA_EUEA 0x2094 /* Ethernet Unit Error Address */
+#define MVNETA_EUIAE 0x2098 /* Ethernet Unit Internal Addr Error */
+#define MVNETA_EUC 0x20b0 /* Ethernet Unit Control */
+
+/* Miscellaneous Registers */
+#define MVNETA_SDC 0x241c /* SDMA Configuration */
+
+/* Networking Controller Miscellaneous Registers */
+#define MVNETA_PACC 0x2500 /* Port Acceleration Mode */
+#define MVNETA_PV 0x25bc /* Port Version */
+
+/* Rx DMA Hardware Parser Registers */
+#define MVNETA_EVLANE 0x2410 /* VLAN EtherType */
+#define MVNETA_MACAL 0x2414 /* MAC Address Low */
+#define MVNETA_MACAH 0x2418 /* MAC Address High */
+#define MVNETA_NDSCP 7
+#define MVNETA_DSCP(n) (0x2420 + ((n) << 2))
+#define MVNETA_VPT2P 0x2440 /* VLAN Priority Tag to Priority */
+#define MVNETA_ETP 0x24bc /* Ethernet Type Priority */
+#define MVNETA_NDFSMT 64
+#define MVNETA_DFSMT(n) (0x3400 + ((n) << 2))
+ /* Destination Address Filter Special Multicast Table */
+#define MVNETA_NDFOMT 64
+#define MVNETA_DFOMT(n) (0x3500 + ((n) << 2))
+ /* Destination Address Filter Other Multicast Table */
+#define MVNETA_NDFUT 4
+#define MVNETA_DFUT(n) (0x3600 + ((n) << 2))
+ /* Destination Address Filter Unicast Table */
+
+/* Rx DMA Miscellaneous Registers */
+#define MVNETA_PMFS 0x247c /* Port Rx Minimal Frame Size */
+#define MVNETA_PDFC 0x2484 /* Port Rx Discard Frame Counter */
+#define MVNETA_POFC 0x2488 /* Port Overrun Frame Counter */
+#define MVNETA_RQC 0x2680 /* Receive Queue Command */
+
+/* Rx DMA Networking Controller Miscellaneous Registers */
+#define MVNETA_PRXC(q) (0x1400 + ((q) << 2)) /*Port RX queues Config*/
+#define MVNETA_PRXSNP(q) (0x1420 + ((q) << 2)) /* Port RX queues Snoop */
+#define MVNETA_PRXDQA(q) (0x1480 + ((q) << 2)) /*P RXqueues desc Q Addr*/
+#define MVNETA_PRXDQS(q) (0x14a0 + ((q) << 2)) /*P RXqueues desc Q Size*/
+#define MVNETA_PRXDQTH(q) (0x14c0 + ((q) << 2)) /*P RXqueues desc Q Thrs*/
+#define MVNETA_PRXS(q) (0x14e0 + ((q) << 2)) /*Port RX queues Status */
+#define MVNETA_PRXSU(q) (0x1500 + ((q) << 2)) /*P RXqueues Stat Update*/
+#define MVNETA_PRXDI(q) (0x1520 + ((q) << 2)) /*P RXqueues Desc Index*/
+#define MVNETA_PRXINIT 0x1cc0 /* Port RX Initialization */
+
+/* Rx DMA Wake on LAN Registers 0x3690 - 0x36b8 */
+
+/* Tx DMA Miscellaneous Registers */
+#define MVNETA_TQC 0x2448 /* Transmit Queue Command */
+#define MVNETA_TQC_1 0x24e4
+#define MVNETA_PXTFTT 0x2478 /* Port Tx FIFO Threshold */
+#define MVNETA_TXBADFCS 0x3cc0 /*Tx Bad FCS Transmitted Pckts Counter*/
+#define MVNETA_TXDROPPED 0x3cc4 /* Tx Dropped Packets Counter */
+
+/* Tx DMA Networking Controller Miscellaneous Registers */
+#define MVNETA_PTXDQA(q) (0x3c00 + ((q) << 2)) /*P TXqueues desc Q Addr*/
+#define MVNETA_PTXDQS(q) (0x3c20 + ((q) << 2)) /*P TXqueues desc Q Size*/
+#define MVNETA_PTXS(q) (0x3c40 + ((q) << 2)) /* Port TX queues Status*/
+#define MVNETA_PTXSU(q) (0x3c60 + ((q) << 2)) /*P TXqueues Stat Update*/
+#define MVNETA_PTXDI(q) (0x3c80 + ((q) << 2)) /* P TXqueues Desc Index*/
+#define MVNETA_TXTBC(q) (0x3ca0 + ((q) << 2)) /* TX Trans-ed Buf Count*/
+#define MVNETA_PTXINIT 0x3cf0 /* Port TX Initialization */
+
+/* Tx DMA Packet Modification Registers */
+#define MVNETA_NMH 15
+#define MVNETA_TXMH(n) (0x3d44 + ((n) << 2))
+#define MVNETA_TXMTU 0x3d88
+
+/* Tx DMA Queue Arbiter Registers (Version 1) */
+#define MVNETA_TQFPC_V1 0x24dc /* Transmit Queue Fixed Priority Cfg */
+#define MVNETA_TQTBC_V1 0x24e0 /* Transmit Queue Token-Bucket Cfg */
+#define MVNETA_MTU_V1 0x24e8 /* MTU */
+#define MVNETA_PMTBS_V1 0x24ec /* Port Max Token-Bucket Size */
+#define MVNETA_TQTBCOUNT_V1(q) (0x2700 + ((q) << 4))
+ /* Transmit Queue Token-Bucket Counter */
+#define MVNETA_TQTBCONFIG_V1(q) (0x2704 + ((q) << 4))
+ /* Transmit Queue Token-Bucket Configuration */
+#define MVNETA_PTTBC_V1 0x2740 /* Port Transmit Bucket Counter */
+
+/* Tx DMA Queue Arbiter Registers (Version 3) */
+#define MVNETA_TQC1_V3 0x3e00 /* Transmit Queue Command1 */
+#define MVNETA_TQFPC_V3 0x3e04 /* Transmit Queue Fixed Priority Cfg */
+#define MVNETA_BRC_V3 0x3e08 /* Basic Refill No of Clocks */
+#define MVNETA_MTU_V3 0x3e0c /* MTU */
+#define MVNETA_PREFILL_V3 0x3e10 /* Port Bucket Refill */
+#define MVNETA_PMTBS_V3 0x3e14 /* Port Max Token-Bucket Size */
+#define MVNETA_QREFILL_V3(q) (0x3e20 + ((q) << 2))
+ /* Transmit Queue Refill */
+#define MVNETA_QMTBS_V3(q) (0x3e40 + ((q) << 2))
+ /* Transmit Queue Max Token-Bucket Size */
+#define MVNETA_QTTBC_V3(q) (0x3e60 + ((q) << 2))
+ /* Transmit Queue Token-Bucket Counter */
+#define MVNETA_TQAC_V3(q) (0x3e80 + ((q) << 2))
+ /* Transmit Queue Arbiter Cfg */
+#define MVNETA_TQIPG_V3(q) (0x3ea0 + ((q) << 2))
+ /* Transmit Queue IPG(valid q=2..3) */
+#define MVNETA_HITKNINLOPKT_V3 0x3eb0 /* High Token in Low Packet */
+#define MVNETA_HITKNINASYNCPKT_V3 0x3eb4 /* High Token in Async Packet */
+#define MVNETA_LOTKNINASYNCPKT_V3 0x3eb8 /* Low Token in Async Packet */
+#define MVNETA_TS_V3 0x3ebc /* Token Speed */
+
+/* RX_TX DMA Registers */
+#define MVNETA_PXC 0x2400 /* Port Configuration */
+#define MVNETA_PXCX 0x2404 /* Port Configuration Extend */
+#define MVNETA_MH 0x2454 /* Marvell Header */
+
+/* Serial(SMI/MII) Registers */
+#define MVNETA_PSC0 0x243c /* Port Serial Control0 */
+#define MVNETA_PS0 0x2444 /* Ethernet Port Status */
+#define MVNETA_PSERDESCFG 0x24a0 /* Serdes Configuration */
+#define MVNETA_PSERDESSTS 0x24a4 /* Serdes Status */
+#define MVNETA_PSOMSCD 0x24f4 /* One mS Clock Divider */
+#define MVNETA_PSPFCCD 0x24f8 /* Periodic Flow Control Clock Divider*/
+
+/* Gigabit Ethernet MAC Serial Parameters Configuration Registers */
+#define MVNETA_PSPC 0x2c14 /* Port Serial Parameters Config */
+#define MVNETA_PSP1C 0x2c94 /* Port Serial Parameters 1 Config */
+
+/* Gigabit Ethernet Auto-Negotiation Configuration Registers */
+#define MVNETA_PANC 0x2c0c /* Port Auto-Negotiation Configuration*/
+
+/* Gigabit Ethernet MAC Control Registers */
+#define MVNETA_PMACC0 0x2c00 /* Port MAC Control 0 */
+#define MVNETA_PMACC1 0x2c04 /* Port MAC Control 1 */
+#define MVNETA_PMACC2 0x2c08 /* Port MAC Control 2 */
+#define MVNETA_PMACC3 0x2c48 /* Port MAC Control 3 */
+#define MVNETA_CCFCPST(p) (0x2c58 + ((p) << 2)) /*CCFC Port Speed Timerp*/
+#define MVNETA_PMACC4 0x2c90 /* Port MAC Control 4 */
+
+/* Gigabit Ethernet MAC Interrupt Registers */
+#define MVNETA_PIC 0x2c20
+#define MVNETA_PIM 0x2c24
+
+/* Gigabit Ethernet Low Power Idle Registers */
+#define MVNETA_LPIC0 0x2cc0 /* LowPowerIdle control 0 */
+#define MVNETA_LPIC1 0x2cc4 /* LPI control 1 */
+#define MVNETA_LPIC2 0x2cc8 /* LPI control 2 */
+#define MVNETA_LPIS 0x2ccc /* LPI status */
+#define MVNETA_LPIC 0x2cd0 /* LPI counter */
+
+/* Gigabit Ethernet MAC PRBS Check Status Registers */
+#define MVNETA_PPRBSS 0x2c38 /* Port PRBS Status */
+#define MVNETA_PPRBSEC 0x2c3c /* Port PRBS Error Counter */
+
+/* Gigabit Ethernet MAC Status Registers */
+#define MVNETA_PSR 0x2c10 /* Port Status Register0 */
+
+/* Networking Controller Interrupt Registers */
+#define MVNETA_PCP2Q(cpu) (0x2540 + ((cpu) << 2))
+#define MVNETA_PRXITTH(q) (0x2580 + ((q) << 2))
+ /* Port Rx Interrupt Threshold */
+#define MVNETA_PRXTXTIC 0x25a0 /*Port RX_TX Threshold Interrupt Cause*/
+#define MVNETA_PRXTXTIM 0x25a4 /*Port RX_TX Threshold Interrupt Mask */
+#define MVNETA_PRXTXIC 0x25a8 /* Port RX_TX Interrupt Cause */
+#define MVNETA_PRXTXIM 0x25ac /* Port RX_TX Interrupt Mask */
+#define MVNETA_PMIC 0x25b0 /* Port Misc Interrupt Cause */
+#define MVNETA_PMIM 0x25b4 /* Port Misc Interrupt Mask */
+#define MVNETA_PIE 0x25b8 /* Port Interrupt Enable */
+#define MVNETA_PSNPCFG 0x25e4 /* Port Snoop Config */
+#define MVNETA_PSNPCFG_DESCSNP_MASK (0x3 << 4)
+#define MVNETA_PSNPCFG_BUFSNP_MASK (0x3 << 8)
+
+/* Miscellaneous Interrupt Registers */
+#define MVNETA_PEUIAE 0x2494 /* Port Internal Address Error */
+
+/* SGMII PHY Registers */
+#define MVNETA_PPLLC 0x2e04 /* Power and PLL Control */
+#define MVNETA_TESTC0 0x2e54 /* PHY Test Control 0 */
+#define MVNETA_TESTPRBSEC0 0x2e7c /* PHY Test PRBS Error Counter 0 */
+#define MVNETA_TESTPRBSEC1 0x2e80 /* PHY Test PRBS Error Counter 1 */
+#define MVNETA_TESTOOB0 0x2e84 /* PHY Test OOB 0 */
+#define MVNETA_DLE 0x2e8c /* Digital Loopback Enable */
+#define MVNETA_RCS 0x2f18 /* Reference Clock Select */
+#define MVNETA_COMPHYC 0x2f18 /* COMPHY Control */
+
+/*
+ * Ethernet MAC MIB Registers
+ * GbE0 BASE 0x00007.3000
+ * GbE1 BASE 0x00007.7000
+ */
+/* MAC MIB Counters 0x3000 - 0x307c */
+#define MVNETA_PORTMIB_BASE 0x3000
+#define MVNETA_PORTMIB_SIZE 0x0080
+#define MVNETA_PORTMIB_NOCOUNTER 30
+
+/* Rx */
+#define MVNETA_MIB_RX_GOOD_OCT 0x00 /* 64bit */
+#define MVNETA_MIB_RX_BAD_OCT 0x08
+#define MVNETA_MIB_RX_GOOD_FRAME 0x10
+#define MVNETA_MIB_RX_BAD_FRAME 0x14
+#define MVNETA_MIB_RX_BCAST_FRAME 0x18
+#define MVNETA_MIB_RX_MCAST_FRAME 0x1c
+#define MVNETA_MIB_RX_FRAME64_OCT 0x20
+#define MVNETA_MIB_RX_FRAME127_OCT 0x24
+#define MVNETA_MIB_RX_FRAME255_OCT 0x28
+#define MVNETA_MIB_RX_FRAME511_OCT 0x2c
+#define MVNETA_MIB_RX_FRAME1023_OCT 0x30
+#define MVNETA_MIB_RX_FRAMEMAX_OCT 0x34
+
+/* Tx */
+#define MVNETA_MIB_TX_MAC_TRNS_ERR 0x0c
+#define MVNETA_MIB_TX_GOOD_OCT 0x38 /* 64bit */
+#define MVNETA_MIB_TX_GOOD_FRAME 0x40
+#define MVNETA_MIB_TX_EXCES_COL 0x44
+#define MVNETA_MIB_TX_MCAST_FRAME 0x48
+#define MVNETA_MIB_TX_BCAST_FRAME 0x4c
+#define MVNETA_MIB_TX_MAC_CTL_ERR 0x50
+
+/* Flow Control */
+#define MVNETA_MIB_FC_SENT 0x54
+#define MVNETA_MIB_FC_GOOD 0x58
+#define MVNETA_MIB_FC_BAD 0x5c
+
+/* Packet Processing */
+#define MVNETA_MIB_PKT_UNDERSIZE 0x60
+#define MVNETA_MIB_PKT_FRAGMENT 0x64
+#define MVNETA_MIB_PKT_OVERSIZE 0x68
+#define MVNETA_MIB_PKT_JABBER 0x6c
+
+/* MAC Layer Errors */
+#define MVNETA_MIB_MAC_RX_ERR 0x70
+#define MVNETA_MIB_MAC_CRC_ERR 0x74
+#define MVNETA_MIB_MAC_COL 0x78
+#define MVNETA_MIB_MAC_LATE_COL 0x7c
+
+/* END OF REGISTER NUMBERS */
+
+/*
+ *
+ * Register Formats
+ *
+ */
+/*
+ * Address Decoder Registers
+ */
+/* Base Address (MVNETA_BASEADDR) */
+#define MVNETA_BASEADDR_TARGET(target) ((target) & 0xf)
+#define MVNETA_BASEADDR_ATTR(attr) (((attr) & 0xff) << 8)
+#define MVNETA_BASEADDR_BASE(base) ((base) & 0xffff0000)
+
+/* Size (MVNETA_S) */
+#define MVNETA_S_SIZE(size) (((size) - 1) & 0xffff0000)
+
+/* Base Address Enable (MVNETA_BARE) */
+#define MVNETA_BARE_EN_MASK ((1 << MVNETA_NWINDOW) - 1)
+#define MVNETA_BARE_EN(win) ((1 << (win)) & MVNETA_BARE_EN_MASK)
+
+/* Ethernet Port Access Protect (MVNETA_EPAP) */
+#define MVNETA_EPAP_AC_NAC 0x0 /* No access allowed */
+#define MVNETA_EPAP_AC_RO 0x1 /* Read Only */
+#define MVNETA_EPAP_AC_FA 0x3 /* Full access (r/w) */
+#define MVNETA_EPAP_EPAR(win, ac) ((ac) << ((win) * 2))
+
+/*
+ * Global Miscellaneous Registers
+ */
+/* PHY Address (MVNETA_PHYADDR) */
+#define MVNETA_PHYADDR_PHYAD(phy) ((phy) & 0x1f)
+#define MVNETA_PHYADDR_GET_PHYAD(reg) ((reg) & 0x1f)
+
+/* SMI register fields (MVNETA_SMI) */
+#define MVNETA_SMI_DATA_MASK 0x0000ffff
+#define MVNETA_SMI_PHYAD(phy) (((phy) & 0x1f) << 16)
+#define MVNETA_SMI_REGAD(reg) (((reg) & 0x1f) << 21)
+#define MVNETA_SMI_OPCODE_WRITE (0 << 26)
+#define MVNETA_SMI_OPCODE_READ (1 << 26)
+#define MVNETA_SMI_READVALID (1 << 27)
+#define MVNETA_SMI_BUSY (1 << 28)
+
+/* Ethernet Unit Default ID (MVNETA_EUDID) */
+#define MVNETA_EUDID_DIDR_MASK 0x0000000f
+#define MVNETA_EUDID_DIDR(id) ((id) & 0x0f)
+#define MVNETA_EUDID_DATTR_MASK 0x00000ff0
+#define MVNETA_EUDID_DATTR(attr) (((attr) & 0xff) << 4)
+
+/* Ethernet Unit Interrupt Cause (MVNETA_EUIC) */
+#define MVNETA_EUIC_ETHERINTSUM (1 << 0)
+#define MVNETA_EUIC_PARITY (1 << 1)
+#define MVNETA_EUIC_ADDRVIOL (1 << 2)
+#define MVNETA_EUIC_ADDRVNOMATCH (1 << 3)
+#define MVNETA_EUIC_SMIDONE (1 << 4)
+#define MVNETA_EUIC_COUNTWA (1 << 5)
+#define MVNETA_EUIC_INTADDRERR (1 << 7)
+#define MVNETA_EUIC_PORT0DPERR (1 << 9)
+#define MVNETA_EUIC_TOPDPERR (1 << 12)
+
+/* Ethernet Unit Internal Addr Error (MVNETA_EUIAE) */
+#define MVNETA_EUIAE_INTADDR_MASK 0x000001ff
+#define MVNETA_EUIAE_INTADDR(addr) ((addr) & 0x1ff)
+#define MVNETA_EUIAE_GET_INTADDR(addr) ((addr) & 0x1ff)
+
+/* Ethernet Unit Control (MVNETA_EUC) */
+#define MVNETA_EUC_POLLING (1 << 1)
+#define MVNETA_EUC_PORTRESET (1 << 24)
+#define MVNETA_EUC_RAMSINITIALIZATIONCOMPLETED (1 << 25)
+
+/*
+ * Miscellaneous Registers
+ */
+/* SDMA Configuration (MVNETA_SDC) */
+#define MVNETA_SDC_RXBSZ(x) ((x) << 1)
+#define MVNETA_SDC_RXBSZ_MASK MVNETA_SDC_RXBSZ(7)
+#define MVNETA_SDC_RXBSZ_1_64BITWORDS MVNETA_SDC_RXBSZ(0)
+#define MVNETA_SDC_RXBSZ_2_64BITWORDS MVNETA_SDC_RXBSZ(1)
+#define MVNETA_SDC_RXBSZ_4_64BITWORDS MVNETA_SDC_RXBSZ(2)
+#define MVNETA_SDC_RXBSZ_8_64BITWORDS MVNETA_SDC_RXBSZ(3)
+#define MVNETA_SDC_RXBSZ_16_64BITWORDS MVNETA_SDC_RXBSZ(4)
+#define MVNETA_SDC_BLMR (1 << 4)
+#define MVNETA_SDC_BLMT (1 << 5)
+#define MVNETA_SDC_SWAPMODE (1 << 6)
+#define MVNETA_SDC_TXBSZ(x) ((x) << 22)
+#define MVNETA_SDC_TXBSZ_MASK MVNETA_SDC_TXBSZ(7)
+#define MVNETA_SDC_TXBSZ_1_64BITWORDS MVNETA_SDC_TXBSZ(0)
+#define MVNETA_SDC_TXBSZ_2_64BITWORDS MVNETA_SDC_TXBSZ(1)
+#define MVNETA_SDC_TXBSZ_4_64BITWORDS MVNETA_SDC_TXBSZ(2)
+#define MVNETA_SDC_TXBSZ_8_64BITWORDS MVNETA_SDC_TXBSZ(3)
+#define MVNETA_SDC_TXBSZ_16_64BITWORDS MVNETA_SDC_TXBSZ(4)
+
+/*
+ * Networking Controller Miscellaneous Registers
+ */
+/* Port Acceleration Mode (MVNETA_PACC) */
+#define MVNETA_PACC_ACCELERATIONMODE_MASK 0x7
+#define MVNETA_PACC_ACCELERATIONMODE_EDM 0x1 /* Enhanced Desc Mode */
+
+/* Port Version (MVNETA_PV) */
+#define MVNETA_PV_VERSION_MASK 0xff
+#define MVNETA_PV_VERSION(v) ((v) & 0xff)
+#define MVNETA_PV_GET_VERSION(reg) ((reg) & 0xff)
+
+/*
+ * Rx DMA Hardware Parser Registers
+ */
+/* Ether Type Priority (MVNETA_ETP) */
+#define MVNETA_ETP_ETHERTYPEPRIEN (1 << 0) /* EtherType Prio Ena */
+#define MVNETA_ETP_ETHERTYPEPRIFRSTEN (1 << 1)
+#define MVNETA_ETP_ETHERTYPEPRIQ (0x7 << 2) /*EtherType Prio Queue*/
+#define MVNETA_ETP_ETHERTYPEPRIVAL (0xffff << 5) /*EtherType Prio Value*/
+#define MVNETA_ETP_FORCEUNICSTHIT (1 << 21) /* Force Unicast hit */
+
+/* Destination Address Filter Registers (MVNETA_DF{SM,OM,U}T) */
+#define MVNETA_DF(n, x) ((x) << (8 * (n)))
+#define MVNETA_DF_PASS (1 << 0)
+#define MVNETA_DF_QUEUE(q) ((q) << 1)
+#define MVNETA_DF_QUEUE_ALL ((MVNETA_RX_QNUM_MAX-1) << 1)
+#define MVNETA_DF_QUEUE_MASK ((MVNETA_RX_QNUM_MAX-1) << 1)
+
+/*
+ * Rx DMA Miscellaneous Registers
+ */
+/* Port Rx Minimal Frame Size (MVNETA_PMFS) */
+#define MVNETA_PMFS_RXMFS(rxmfs) (((rxmfs) - 40) & 0x7c)
+
+/* Receive Queue Command (MVNETA_RQC) */
+#define MVNETA_RQC_EN_MASK (0xff << 0) /* Enable Q */
+#define MVNETA_RQC_ENQ(q) (1 << (0 + (q)))
+#define MVNETA_RQC_EN(n) ((n) << 0)
+#define MVNETA_RQC_DIS_MASK (0xff << 8) /* Disable Q */
+#define MVNETA_RQC_DISQ(q) (1 << (8 + (q)))
+#define MVNETA_RQC_DIS(n) ((n) << 8)
+
+/*
+ * Rx DMA Networking Controller Miscellaneous Registers
+ */
+/* Port RX queues Configuration (MVNETA_PRXC) */
+#define MVNETA_PRXC_PACKETOFFSET(o) (((o) & 0xf) << 8)
+
+/* Port RX queues Snoop (MVNETA_PRXSNP) */
+#define MVNETA_PRXSNP_SNOOPNOOFBYTES(b) (((b) & 0x3fff) << 0)
+#define MVNETA_PRXSNP_L2DEPOSITNOOFBYTES(b) (((b) & 0x3fff) << 16)
+
+/* Port RX queues Descriptors Queue Size (MVNETA_PRXDQS) */
+#define MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(s) (((s) & 0x3fff) << 0)
+#define MVNETA_PRXDQS_BUFFERSIZE(s) (((s) & 0x1fff) << 19)
+
+/* Port RX queues Descriptors Queue Threshold (MVNETA_PRXDQTH) */
+ /* Occupied Descriptors Threshold */
+#define MVNETA_PRXDQTH_ODT(x) (((x) & 0x3fff) << 0)
+ /* Non Occupied Descriptors Threshold */
+#define MVNETA_PRXDQTH_NODT(x) (((x) & 0x3fff) << 16)
+
+/* Port RX queues Status (MVNETA_PRXS) */
+ /* Occupied Descriptors Counter */
+#define MVNETA_PRXS_ODC(x) (((x) & 0x3fff) << 0)
+ /* Non Occupied Descriptors Counter */
+#define MVNETA_PRXS_NODC(x) (((x) & 0x3fff) << 16)
+#define MVNETA_PRXS_GET_ODC(reg) (((reg) >> 0) & 0x3fff)
+#define MVNETA_PRXS_GET_NODC(reg) (((reg) >> 16) & 0x3fff)
+
+/* Port RX queues Status Update (MVNETA_PRXSU) */
+#define MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(x) (((x) & 0xff) << 0)
+#define MVNETA_PRXSU_NOOFNEWDESCRIPTORS(x) (((x) & 0xff) << 16)
+
+/* Port RX Initialization (MVNETA_PRXINIT) */
+#define MVNETA_PRXINIT_RXDMAINIT (1 << 0)
+
+/*
+ * Tx DMA Miscellaneous Registers
+ */
+/* Transmit Queue Command (MVNETA_TQC) */
+#define MVNETA_TQC_EN_MASK (0xff << 0)
+#define MVNETA_TQC_ENQ(q) (1 << ((q) + 0))/* Enable Q */
+#define MVNETA_TQC_EN(n) ((n) << 0)
+#define MVNETA_TQC_DIS_MASK (0xff << 8)
+#define MVNETA_TQC_DISQ(q) (1 << ((q) + 8))/* Disable Q */
+#define MVNETA_TQC_DIS(n) ((n) << 8)
+
+/*
+ * Tx DMA Networking Controller Miscellaneous Registers
+ */
+/* Port TX queues Descriptors Queue Size (MVNETA_PTXDQS) */
+ /* Descriptors Queue Size */
+#define MVNETA_PTXDQS_DQS_MASK (0x3fff << 0)
+#define MVNETA_PTXDQS_DQS(x) (((x) & 0x3fff) << 0)
+ /* Transmitted Buffer Threshold */
+#define MVNETA_PTXDQS_TBT_MASK (0x3fff << 16)
+#define MVNETA_PTXDQS_TBT(x) (((x) & 0x3fff) << 16)
+
+/* Port TX queues Status (MVNETA_PTXS) */
+ /* Transmitted Buffer Counter */
+#define MVNETA_PTXS_TBC(x) (((x) & 0x3fff) << 16)
+
+#define MVNETA_PTXS_GET_TBC(reg) (((reg) >> 16) & 0x3fff)
+ /* Pending Descriptors Counter */
+#define MVNETA_PTXS_PDC(x) ((x) & 0x3fff)
+#define MVNETA_PTXS_GET_PDC(x) ((x) & 0x3fff)
+
+/* Port TX queues Status Update (MVNETA_PTXSU) */
+ /* Number Of Written Descriptors */
+#define MVNETA_PTXSU_NOWD(x) (((x) & 0xff) << 0)
+ /* Number Of Released Buffers */
+#define MVNETA_PTXSU_NORB(x) (((x) & 0xff) << 16)
+
+/* TX Transmitted Buffers Counter (MVNETA_TXTBC) */
+ /* Transmitted Buffers Counter */
+#define MVNETA_TXTBC_TBC(x) (((x) & 0x3fff) << 16)
+
+/* Port TX Initialization (MVNETA_PTXINIT) */
+#define MVNETA_PTXINIT_TXDMAINIT (1 << 0)
+
+/*
+ * Tx DMA Queue Arbiter Registers (Version 1 )
+ */
+/* Transmit Queue Fixed Priority Configuration */
+#define MVNETA_TQFPC_EN(q) (1 << (q))
+
+/*
+ * RX_TX DMA Registers
+ */
+/* Port Configuration (MVNETA_PXC) */
+#define MVNETA_PXC_UPM (1 << 0) /* Uni Promisc mode */
+#define MVNETA_PXC_RXQ(q) ((q) << 1)
+#define MVNETA_PXC_RXQ_MASK MVNETA_PXC_RXQ(7)
+#define MVNETA_PXC_RXQARP(q) ((q) << 4)
+#define MVNETA_PXC_RXQARP_MASK MVNETA_PXC_RXQARP(7)
+#define MVNETA_PXC_RB (1 << 7) /* Rej mode of MAC */
+#define MVNETA_PXC_RBIP (1 << 8)
+#define MVNETA_PXC_RBARP (1 << 9)
+#define MVNETA_PXC_AMNOTXES (1 << 12)
+#define MVNETA_PXC_RBARPF (1 << 13)
+#define MVNETA_PXC_TCPCAPEN (1 << 14)
+#define MVNETA_PXC_UDPCAPEN (1 << 15)
+#define MVNETA_PXC_TCPQ(q) ((q) << 16)
+#define MVNETA_PXC_TCPQ_MASK MVNETA_PXC_TCPQ(7)
+#define MVNETA_PXC_UDPQ(q) ((q) << 19)
+#define MVNETA_PXC_UDPQ_MASK MVNETA_PXC_UDPQ(7)
+#define MVNETA_PXC_BPDUQ(q) ((q) << 22)
+#define MVNETA_PXC_BPDUQ_MASK MVNETA_PXC_BPDUQ(7)
+#define MVNETA_PXC_RXCS (1 << 25)
+
+/* Port Configuration Extend (MVNETA_PXCX) */
+#define MVNETA_PXCX_SPAN (1 << 1)
+#define MVNETA_PXCX_TXCRCDIS (1 << 3)
+
+/* Marvell Header (MVNETA_MH) */
+#define MVNETA_MH_MHEN (1 << 0)
+#define MVNETA_MH_DAPREFIX (0x3 << 1)
+#define MVNETA_MH_SPID (0xf << 4)
+#define MVNETA_MH_MHMASK (0x3 << 8)
+#define MVNETA_MH_MHMASK_8QUEUES (0x0 << 8)
+#define MVNETA_MH_MHMASK_4QUEUES (0x1 << 8)
+#define MVNETA_MH_MHMASK_2QUEUES (0x3 << 8)
+#define MVNETA_MH_DSAEN_MASK (0x3 << 10)
+#define MVNETA_MH_DSAEN_DISABLE (0x0 << 10)
+#define MVNETA_MH_DSAEN_NONEXTENDED (0x1 << 10)
+#define MVNETA_MH_DSAEN_EXTENDED (0x2 << 10)
+
+/*
+ * Serial(SMI/MII) Registers
+ */
+#define MVNETA_PSOMSCD_ENABLE (1UL<<31)
+#define MVNETA_PSERDESCFG_QSGMII (0x0667)
+#define MVNETA_PSERDESCFG_SGMII (0x0cc7)
+/* Port Serial Control0 (MVNETA_PSC0) */
+#define MVNETA_PSC0_FORCE_FC_MASK (0x3 << 5)
+#define MVNETA_PSC0_FORCE_FC(fc) (((fc) & 0x3) << 5)
+#define MVNETA_PSC0_FORCE_FC_PAUSE MVNETA_PSC0_FORCE_FC(0x1)
+#define MVNETA_PSC0_FORCE_FC_NO_PAUSE MVNETA_PSC0_FORCE_FC(0x0)
+#define MVNETA_PSC0_FORCE_BP_MASK (0x3 << 7)
+#define MVNETA_PSC0_FORCE_BP(fc) (((fc) & 0x3) << 7)
+#define MVNETA_PSC0_FORCE_BP_JAM MVNETA_PSC0_FORCE_BP(0x1)
+#define MVNETA_PSC0_FORCE_BP_NO_JAM MVNETA_PSC0_FORCE_BP(0x0)
+#define MVNETA_PSC0_DTE_ADV (1 << 14)
+#define MVNETA_PSC0_IGN_RXERR (1 << 28)
+#define MVNETA_PSC0_IGN_COLLISION (1 << 29)
+#define MVNETA_PSC0_IGN_CARRIER (1 << 30)
+
+/* Ethernet Port Status0 (MVNETA_PS0) */
+#define MVNETA_PS0_TXINPROG (1 << 0)
+#define MVNETA_PS0_TXFIFOEMP (1 << 8)
+#define MVNETA_PS0_RXFIFOEMPTY (1 << 16)
+
+/*
+ * Gigabit Ethernet MAC Serial Parameters Configuration Registers
+ */
+#define MVNETA_PSPC_MUST_SET (1 << 3 | 1 << 4 | 1 << 5 | 0x23 << 6)
+#define MVNETA_PSP1C_MUST_SET (1 << 0 | 1 << 1 | 1 << 2)
+
+/*
+ * Gigabit Ethernet Auto-Negotiation Configuration Registers
+ */
+/* Port Auto-Negotiation Configuration (MVNETA_PANC) */
+#define MVNETA_PANC_FORCELINKFAIL (1 << 0)
+#define MVNETA_PANC_FORCELINKPASS (1 << 1)
+#define MVNETA_PANC_INBANDANEN (1 << 2)
+#define MVNETA_PANC_INBANDANBYPASSEN (1 << 3)
+#define MVNETA_PANC_INBANDRESTARTAN (1 << 4)
+#define MVNETA_PANC_SETMIISPEED (1 << 5)
+#define MVNETA_PANC_SETGMIISPEED (1 << 6)
+#define MVNETA_PANC_ANSPEEDEN (1 << 7)
+#define MVNETA_PANC_SETFCEN (1 << 8)
+#define MVNETA_PANC_PAUSEADV (1 << 9)
+#define MVNETA_PANC_ANFCEN (1 << 11)
+#define MVNETA_PANC_SETFULLDX (1 << 12)
+#define MVNETA_PANC_ANDUPLEXEN (1 << 13)
+#define MVNETA_PANC_MUSTSET (1 << 15)
+
+/*
+ * Gigabit Ethernet MAC Control Registers
+ */
+/* Port MAC Control 0 (MVNETA_PMACC0) */
+#define MVNETA_PMACC0_PORTEN (1 << 0)
+#define MVNETA_PMACC0_PORTTYPE (1 << 1)
+#define MVNETA_PMACC0_FRAMESIZELIMIT(x) ((((x) >> 1) << 2) & 0x7ffc)
+#define MVNETA_PMACC0_FRAMESIZELIMIT_MASK (0x7ffc)
+#define MVNETA_PMACC0_MUSTSET (1 << 15)
+
+/* Port MAC Control 1 (MVNETA_PMACC1) */
+#define MVNETA_PMACC1_PCSLB (1 << 6)
+
+/* Port MAC Control 2 (MVNETA_PMACC2) */
+#define MVNETA_PMACC2_INBANDANMODE (1 << 0)
+#define MVNETA_PMACC2_PCSEN (1 << 3)
+#define MVNETA_PMACC2_RGMIIEN (1 << 4)
+#define MVNETA_PMACC2_PADDINGDIS (1 << 5)
+#define MVNETA_PMACC2_PORTMACRESET (1 << 6)
+#define MVNETA_PMACC2_PRBSCHECKEN (1 << 10)
+#define MVNETA_PMACC2_PRBSGENEN (1 << 11)
+#define MVNETA_PMACC2_SDTT_MASK (3 << 12) /* Select Data To Transmit */
+#define MVNETA_PMACC2_SDTT_RM (0 << 12) /* Regular Mode */
+#define MVNETA_PMACC2_SDTT_PRBS (1 << 12) /* PRBS Mode */
+#define MVNETA_PMACC2_SDTT_ZC (2 << 12) /* Zero Constant */
+#define MVNETA_PMACC2_SDTT_OC (3 << 12) /* One Constant */
+#define MVNETA_PMACC2_MUSTSET (3 << 14)
+
+/* Port MAC Control 3 (MVNETA_PMACC3) */
+#define MVNETA_PMACC3_IPG_MASK 0x7f80
+
+/*
+ * Gigabit Ethernet MAC Interrupt Registers
+ */
+/* Port Interrupt Cause/Mask (MVNETA_PIC/MVNETA_PIM) */
+#define MVNETA_PI_INTSUM (1 << 0)
+#define MVNETA_PI_LSC (1 << 1) /* LinkStatus Change */
+#define MVNETA_PI_ACOP (1 << 2) /* AnCompleted OnPort */
+#define MVNETA_PI_AOOR (1 << 5) /* AddressOut Of Range */
+#define MVNETA_PI_SSC (1 << 6) /* SyncStatus Change */
+#define MVNETA_PI_PRBSEOP (1 << 7) /* PRBS error on port */
+#define MVNETA_PI_QSGMIIPRBSE (1 << 10) /* QSGMII PRBS error */
+#define MVNETA_PI_PCSRXPRLPI (1 << 11) /* PCS Rx path received LPI*/
+#define MVNETA_PI_PCSTXPRLPI (1 << 12) /* PCS Tx path received LPI*/
+#define MVNETA_PI_MACRXPRLPI (1 << 13) /* MAC Rx path received LPI*/
+#define MVNETA_PI_MIBCCD (1 << 14) /* MIB counters copy done */
+#define MVNETA_PI_MIBCWA (1 << 15) /* MIB counter wrap around */
+
+/*
+ * Gigabit Ethernet MAC Low Power Idle Registers
+ */
+/* LPI Control 0 (MVNETA_LPIC0) */
+#define MVNETA_LPIC0_LILIMIT(x) (((x) & 0xff) << 0)
+#define MVNETA_LPIC0_TSLIMIT(x) (((x) & 0xff) << 8)
+
+/* LPI Control 1 (MVNETA_LPIC1) */
+#define MVNETA_LPIC1_LPIRE (1 << 0) /* LPI request enable */
+#define MVNETA_LPIC1_LPIRF (1 << 1) /* LPI request force */
+#define MVNETA_LPIC1_LPIMM (1 << 2) /* LPI manual mode */
+#define MVNETA_LPIC1_TWLIMIT(x) (((x) & 0xfff) << 4)
+
+/* LPI Control 2 (MVNETA_LPIC2) */
+#define MVNETA_LPIC2_MUSTSET 0x17d
+
+/* LPI Status (MVNETA_LPIS) */
+#define MVNETA_LPIS_PCSRXPLPIS (1 << 0) /* PCS Rx path LPI status */
+#define MVNETA_LPIS_PCSTXPLPIS (1 << 1) /* PCS Tx path LPI status */
+#define MVNETA_LPIS_MACRXPLPIS (1 << 2)/* MAC Rx path LP idle status */
+#define MVNETA_LPIS_MACTXPLPWS (1 << 3)/* MAC Tx path LP wait status */
+#define MVNETA_LPIS_MACTXPLPIS (1 << 4)/* MAC Tx path LP idle status */
+
+/*
+ * Gigabit Ethernet MAC PRBS Check Status Registers
+ */
+/* Port PRBS Status (MVNETA_PPRBSS) */
+#define MVNETA_PPRBSS_PRBSCHECKLOCKED (1 << 0)
+#define MVNETA_PPRBSS_PRBSCHECKRDY (1 << 1)
+
+/*
+ * Gigabit Ethernet MAC Status Registers
+ */
+/* Port Status Register (MVNETA_PSR) */
+#define MVNETA_PSR_LINKUP (1 << 0)
+#define MVNETA_PSR_GMIISPEED (1 << 1)
+#define MVNETA_PSR_MIISPEED (1 << 2)
+#define MVNETA_PSR_FULLDX (1 << 3)
+#define MVNETA_PSR_RXFCEN (1 << 4)
+#define MVNETA_PSR_TXFCEN (1 << 5)
+#define MVNETA_PSR_PRP (1 << 6) /* Port Rx Pause */
+#define MVNETA_PSR_PTP (1 << 7) /* Port Tx Pause */
+#define MVNETA_PSR_PDP			(1 << 8) /* Port is Doing Back-Pressure */
+#define MVNETA_PSR_SYNCFAIL10MS (1 << 10)
+#define MVNETA_PSR_ANDONE (1 << 11)
+#define MVNETA_PSR_IBANBA (1 << 12) /* InBand AutoNeg BypassAct */
+#define MVNETA_PSR_SYNCOK (1 << 14)
+
+/*
+ * Networking Controller Interrupt Registers
+ */
+/* Port CPU to Queue */
+#define MVNETA_MAXCPU 2
+#define MVNETA_PCP2Q_TXQEN(q) (1 << ((q) + 8))
+#define MVNETA_PCP2Q_TXQEN_MASK (0xff << 8)
+#define MVNETA_PCP2Q_RXQEN(q) (1 << ((q) + 0))
+#define MVNETA_PCP2Q_RXQEN_MASK (0xff << 0)
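/*
 * Illustrative sketch (editorial, not part of the patch): composing a
 * CPU-to-queue map value that routes one RX and one TX queue to a CPU.
 */
static __inline uint32_t
mvneta_cpu2q_value(int rxq, int txq)
{

	return (MVNETA_PCP2Q_RXQEN(rxq) | MVNETA_PCP2Q_TXQEN(txq));
}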
+
+/* Port RX_TX Interrupt Threshold */
+#define MVNETA_PRXITTH_RITT(t) ((t) & 0xffffff)
+
+/* Port RX_TX Threshold Interrupt Cause/Mask (MVNETA_PRXTXTIC/MVNETA_PRXTXTIM) */
+#define MVNETA_PRXTXTI_TBTCQ(q) (1 << ((q) + 0))
+#define MVNETA_PRXTXTI_TBTCQ_MASK (0xff << 0)
+#define MVNETA_PRXTXTI_GET_TBTCQ(reg) (((reg) >> 0) & 0xff)
+					/* Tx Buffer Threshold Cross Queue */
+#define MVNETA_PRXTXTI_RBICTAPQ(q) (1 << ((q) + 8))
+#define MVNETA_PRXTXTI_RBICTAPQ_MASK (0xff << 8)
+#define MVNETA_PRXTXTI_GET_RBICTAPQ(reg) (((reg) >> 8) & 0xff)
+					/* Rx Buffer Int. Coalescing Th. Pri. Alert Q */
+#define MVNETA_PRXTXTI_RDTAQ(q) (1 << ((q) + 16))
+#define MVNETA_PRXTXTI_RDTAQ_MASK (0xff << 16)
+#define MVNETA_PRXTXTI_GET_RDTAQ(reg) (((reg) >> 16) & 0xff)
+					/* Rx Descriptor Threshold Alert Queue */
+#define MVNETA_PRXTXTI_PRXTXICSUMMARY (1 << 29) /* PRXTXI summary */
+#define MVNETA_PRXTXTI_PTXERRORSUMMARY	(1 << 30) /* PTXERROR summary */
+#define MVNETA_PRXTXTI_PMISCICSUMMARY (1UL << 31) /* PMISCIC summary */
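/*
 * Illustrative sketch (editorial, not part of the patch): testing a
 * threshold cause register for pending RX descriptor alerts with the
 * accessors above.
 */
static __inline int
mvneta_rx_desc_alert_pending(uint32_t cause)
{

	return (MVNETA_PRXTXTI_GET_RDTAQ(cause) != 0);
}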
+
+/* Port RX_TX Interrupt Cause/Mask (MVNETA_PRXTXIC/MVNETA_PRXTXIM) */
+#define MVNETA_PRXTXI_TBRQ(q) (1 << ((q) + 0))
+#define MVNETA_PRXTXI_TBRQ_MASK (0xff << 0)
+#define MVNETA_PRXTXI_GET_TBRQ(reg) (((reg) >> 0) & 0xff)
+#define MVNETA_PRXTXI_RPQ(q) (1 << ((q) + 8))
+#define MVNETA_PRXTXI_RPQ_MASK (0xff << 8)
+#define MVNETA_PRXTXI_GET_RPQ(reg) (((reg) >> 8) & 0xff)
+#define MVNETA_PRXTXI_RREQ(q) (1 << ((q) + 16))
+#define MVNETA_PRXTXI_RREQ_MASK (0xff << 16)
+#define MVNETA_PRXTXI_GET_RREQ(reg) (((reg) >> 16) & 0xff)
+#define MVNETA_PRXTXI_PRXTXTHICSUMMARY (1 << 29)
+#define MVNETA_PRXTXI_PTXERRORSUMMARY (1 << 30)
+#define MVNETA_PRXTXI_PMISCICSUMMARY (1UL << 31)
+
+/* Port Misc Interrupt Cause/Mask (MVNETA_PMIC/MVNETA_PMIM) */
+#define MVNETA_PMI_PHYSTATUSCHNG (1 << 0)
+#define MVNETA_PMI_LINKCHANGE (1 << 1)
+#define MVNETA_PMI_IAE (1 << 7) /* Internal Address Error */
+#define MVNETA_PMI_RXOVERRUN (1 << 8)
+#define MVNETA_PMI_RXCRCERROR (1 << 9)
+#define MVNETA_PMI_RXLARGEPACKET (1 << 10)
+#define MVNETA_PMI_TXUNDRN (1 << 11)
+#define MVNETA_PMI_PRBSERROR (1 << 12)
+#define MVNETA_PMI_PSCSYNCCHANGE (1 << 13)
+#define MVNETA_PMI_SRSE (1 << 14) /* SerdesRealignSyncError */
+#define MVNETA_PMI_TREQ(q) (1 << ((q) + 24)) /* TxResourceErrorQ */
+#define MVNETA_PMI_TREQ_MASK (0xff << 24) /* TxResourceErrorQ */
+
+/* Port Interrupt Enable (MVNETA_PIE) */
+#define MVNETA_PIE_RXPKTINTRPTENB(q) (1 << ((q) + 0))
+#define MVNETA_PIE_TXPKTINTRPTENB(q) (1 << ((q) + 8))
+#define MVNETA_PIE_RXPKTINTRPTENB_MASK (0xff << 0)
+#define MVNETA_PIE_TXPKTINTRPTENB_MASK (0xff << 8)
+
+/*
+ * Miscellaneous Interrupt Registers
+ */
+#define MVNETA_PEUIAE_ADDR_MASK (0x3fff)
+#define MVNETA_PEUIAE_ADDR(addr) ((addr) & 0x3fff)
+#define MVNETA_PEUIAE_GET_ADDR(reg) ((reg) & 0x3fff)
+
+/*
+ * SGMII PHY Registers
+ */
+/* Power and PLL Control (MVNETA_PPLLC) */
+#define MVNETA_PPLLC_REF_FREF_SEL_MASK (0xf << 0)
+#define MVNETA_PPLLC_PHY_MODE_MASK (7 << 5)
+#define MVNETA_PPLLC_PHY_MODE_SATA (0 << 5)
+#define MVNETA_PPLLC_PHY_MODE_SAS (1 << 5)
+#define MVNETA_PPLLC_PLL_LOCK (1 << 8)
+#define MVNETA_PPLLC_PU_DFE (1 << 10)
+#define MVNETA_PPLLC_PU_TX_INTP (1 << 11)
+#define MVNETA_PPLLC_PU_TX (1 << 12)
+#define MVNETA_PPLLC_PU_RX (1 << 13)
+#define MVNETA_PPLLC_PU_PLL (1 << 14)
+
+/* Digital Loopback Enable (MVNETA_DLE) */
+#define MVNETA_DLE_LOCAL_SEL_BITS_MASK (3 << 10)
+#define MVNETA_DLE_LOCAL_SEL_BITS_10BITS (0 << 10)
+#define MVNETA_DLE_LOCAL_SEL_BITS_20BITS (1 << 10)
+#define MVNETA_DLE_LOCAL_SEL_BITS_40BITS (2 << 10)
+#define MVNETA_DLE_LOCAL_RXPHER_TO_TX_EN (1 << 12)
+#define MVNETA_DLE_LOCAL_ANA_TX2RX_LPBK_EN (1 << 13)
+#define MVNETA_DLE_LOCAL_DIG_TX2RX_LPBK_EN (1 << 14)
+#define MVNETA_DLE_LOCAL_DIG_RX2TX_LPBK_EN (1 << 15)
+
+/* Reference Clock Select (MVNETA_RCS) */
+#define MVNETA_RCS_REFCLK_SEL (1 << 10)
+
+/*
+ * DMA descriptors
+ */
+struct mvneta_tx_desc {
+ /* LITTLE_ENDIAN */
+ uint32_t command; /* off 0x00: commands */
+ uint16_t l4ichk; /* initial checksum */
+	uint16_t bytecnt;	/* off 0x04: buffer byte count */
+ uint32_t bufptr_pa; /* off 0x08: buffer ptr(PA) */
+ uint32_t flags; /* off 0x0c: flags */
+ uint32_t reserved0; /* off 0x10 */
+ uint32_t reserved1; /* off 0x14 */
+ uint32_t reserved2; /* off 0x18 */
+ uint32_t reserved3; /* off 0x1c */
+};
+
+struct mvneta_rx_desc {
+ /* LITTLE_ENDIAN */
+ uint32_t status; /* status and flags */
+ uint16_t reserved0;
+ uint16_t bytecnt; /* buffer byte count */
+ uint32_t bufptr_pa; /* packet buffer pointer */
+ uint32_t reserved1;
+ uint32_t bufptr_va;
+ uint16_t reserved2;
+ uint16_t l4chk; /* L4 checksum */
+ uint32_t reserved3;
+ uint32_t reserved4;
+};
+
+/*
+ * Received packet command header:
+ * network controller => software
+ * the controller parses the packet and sets some flags.
+ */
+#define MVNETA_RX_IPV4_FRAGMENT (1UL << 31) /* Fragment Indicator */
+#define MVNETA_RX_L4_CHECKSUM_OK (1 << 30) /* L4 Checksum */
+/* bit 29 reserved */
+#define MVNETA_RX_U (1 << 28) /* Unknown Destination */
+#define MVNETA_RX_F (1 << 27) /* First buffer */
+#define MVNETA_RX_L (1 << 26) /* Last buffer */
+#define MVNETA_RX_IP_HEADER_OK (1 << 25) /* IP Header is OK */
+#define MVNETA_RX_L3_IP (1 << 24) /* IP Type 0:IP6 1:IP4 */
+#define MVNETA_RX_L2_EV2 (1 << 23) /* Ethernet v2 frame */
+#define MVNETA_RX_L4_MASK (3 << 21) /* L4 Type */
+#define MVNETA_RX_L4_TCP (0x00 << 21)
+#define MVNETA_RX_L4_UDP (0x01 << 21)
+#define MVNETA_RX_L4_OTH		(0x02 << 21)
+#define MVNETA_RX_BPDU (1 << 20) /* BPDU frame */
+#define MVNETA_RX_VLAN (1 << 19) /* VLAN tag found */
+#define MVNETA_RX_EC_MASK (3 << 17) /* Error code */
+#define MVNETA_RX_EC_CE (0x00 << 17) /* CRC error */
+#define MVNETA_RX_EC_OR (0x01 << 17) /* FIFO overrun */
+#define MVNETA_RX_EC_MF			(0x02 << 17) /* Max. frame len */
+#define MVNETA_RX_EC_RE			(0x03 << 17) /* Resource error */
+#define MVNETA_RX_ES (1 << 16) /* Error summary */
+/* bit 15:0 reserved */
+
+/*
+ * Transmit packet command header:
+ * software => network controller
+ */
+#define MVNETA_TX_CMD_L4_CHECKSUM_MASK (0x3 << 30) /* Do L4 Checksum */
+#define MVNETA_TX_CMD_L4_CHECKSUM_FRAG (0x0 << 30)
+#define MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG (0x1 << 30)
+#define MVNETA_TX_CMD_L4_CHECKSUM_NONE (0x2 << 30)
+#define MVNETA_TX_CMD_PACKET_OFFSET_MASK (0x7f << 23) /* Payload offset */
+#define MVNETA_TX_CMD_W_PACKET_OFFSET(v) (((v) & 0x7f) << 23)
+/* bit 22 reserved */
+#define MVNETA_TX_CMD_F (1 << 21) /* First buffer */
+#define MVNETA_TX_CMD_L (1 << 20) /* Last buffer */
+#define MVNETA_TX_CMD_PADDING (1 << 19) /* Pad short frame */
+#define MVNETA_TX_CMD_IP4_CHECKSUM (1 << 18) /* Do IPv4 Checksum */
+#define MVNETA_TX_CMD_L3_IP4 (0 << 17)
+#define MVNETA_TX_CMD_L3_IP6 (1 << 17)
+#define MVNETA_TX_CMD_L4_TCP (0 << 16)
+#define MVNETA_TX_CMD_L4_UDP (1 << 16)
+/* bit 15:13 reserved */
+#define MVNETA_TX_CMD_IP_HEADER_LEN_MASK (0x1f << 8) /* IP header len >> 2 */
+#define MVNETA_TX_CMD_IP_HEADER_LEN(v) (((v) & 0x1f) << 8)
+/* bit 7 reserved */
+#define MVNETA_TX_CMD_L3_OFFSET_MASK (0x7f << 0) /* offset of L3 hdr. */
+#define MVNETA_TX_CMD_L3_OFFSET(v) (((v) & 0x7f) << 0)
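/*
 * Illustrative sketch (editorial, not part of the patch): composing a
 * TX command word for a single-buffer IPv4/TCP frame with hardware
 * checksums; "iphlen" and "l3off" are hypothetical byte offsets
 * supplied by the caller.
 */
static __inline uint32_t
mvneta_tx_cmd_tcp4(int iphlen, int l3off)
{

	return (MVNETA_TX_CMD_F | MVNETA_TX_CMD_L | MVNETA_TX_CMD_PADDING |
	    MVNETA_TX_CMD_IP4_CHECKSUM | MVNETA_TX_CMD_L3_IP4 |
	    MVNETA_TX_CMD_L4_TCP | MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG |
	    MVNETA_TX_CMD_IP_HEADER_LEN(iphlen >> 2) |
	    MVNETA_TX_CMD_L3_OFFSET(l3off));
}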
+
+/*
+ * Transmit packet extra attributes
+ * and error status returned from network controller.
+ */
+#define MVNETA_TX_F_DSA_TAG (3 << 30) /* DSA Tag */
+/* bit 29:8 reserved */
+#define MVNETA_TX_F_MH_SEL (0xf << 4) /* Marvell Header */
+/* bit 3 reserved */
+#define MVNETA_TX_F_EC_MASK (3 << 1) /* Error code */
+#define MVNETA_TX_F_EC_LC (0x00 << 1) /* Late Collision */
+#define MVNETA_TX_F_EC_UR (0x01 << 1) /* Underrun */
+#define MVNETA_TX_F_EC_RL		(0x02 << 1) /* Excess. Collision */
+#define MVNETA_TX_F_EC_RESERVED		(0x03 << 1)
+#define MVNETA_TX_F_ES (1 << 0) /* Error summary */
+
+#define MVNETA_ERROR_SUMMARY (1 << 0)
+#define MVNETA_BUFFER_OWNED_MASK (1UL << 31)
+#define MVNETA_BUFFER_OWNED_BY_HOST (0UL << 31)
+#define MVNETA_BUFFER_OWNED_BY_DMA (1UL << 31)
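/*
 * Illustrative sketch (editorial, not part of the patch): testing
 * descriptor ownership with the masks above.
 */
static __inline int
mvneta_desc_owned_by_dma(uint32_t status)
{

	return ((status & MVNETA_BUFFER_OWNED_MASK) ==
	    MVNETA_BUFFER_OWNED_BY_DMA);
}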
+
+#endif /* _IF_MVNETAREG_H_ */
diff --git a/sys/dev/neta/if_mvnetavar.h b/sys/dev/neta/if_mvnetavar.h
new file mode 100644
index 0000000..912e9d7
--- /dev/null
+++ b/sys/dev/neta/if_mvnetavar.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _IF_MVNETAVAR_H_
+#define _IF_MVNETAVAR_H_
+#include <net/if.h>
+
+#define MVNETA_HWHEADER_SIZE 2 /* Marvell Header */
+#define MVNETA_ETHER_SIZE 22 /* Maximum ether size */
+#define MVNETA_MAX_CSUM_MTU 1600 /* Port1,2 hw limit */
+
+/*
+ * Limit support for frame up to hw csum limit
+ * until jumbo frame support is added.
+ */
+#define MVNETA_MAX_FRAME (MVNETA_MAX_CSUM_MTU + MVNETA_ETHER_SIZE)
+
+/*
+ * Default limit of queue length
+ *
+ * Queue 0 is the lowest priority and queue 7 is the highest priority.
+ * IP packets are received on queue 7 by default.
+ */
+#define MVNETA_TX_RING_CNT 512
+#define MVNETA_RX_RING_CNT 256
+
+#define MVNETA_BUFRING_SIZE 1024
+
+#define MVNETA_PACKET_OFFSET 64
+#define MVNETA_PACKET_SIZE MCLBYTES
+
+#define MVNETA_RXTH_COUNT 128
+#define MVNETA_RX_REFILL_COUNT 8
+#define MVNETA_TX_RECLAIM_COUNT 32
+
+/*
+ * Device Register access
+ */
+#define MVNETA_READ(sc, reg) \
+ bus_read_4((sc)->res[0], (reg))
+#define MVNETA_WRITE(sc, reg, val) \
+ bus_write_4((sc)->res[0], (reg), (val))
+
+#define MVNETA_READ_REGION(sc, reg, val, c) \
+ bus_read_region_4((sc)->res[0], (reg), (val), (c))
+#define MVNETA_WRITE_REGION(sc, reg, val, c) \
+ bus_write_region_4((sc)->res[0], (reg), (val), (c))
+
+#define MVNETA_READ_MIB_4(sc, reg) \
+ bus_read_4((sc)->res[0], MVNETA_PORTMIB_BASE + (reg))
+#define MVNETA_READ_MIB_8(sc, reg) \
+ bus_read_8((sc)->res[0], MVNETA_PORTMIB_BASE + (reg))
+
+#define MVNETA_IS_LINKUP(sc) \
+ (MVNETA_READ((sc), MVNETA_PSR) & MVNETA_PSR_LINKUP)
+
+#define MVNETA_IS_QUEUE_SET(queues, q) \
+ ((((queues) >> (q)) & 0x1))
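/*
 * Illustrative sketch (editorial, not part of the patch): finding the
 * first queue enabled in a bitmap with MVNETA_IS_QUEUE_SET(); "queues"
 * is a hypothetical bitmap with one bit per queue.
 */
static __inline int
mvneta_first_queue_set(uint32_t queues, int nqueues)
{
	int q;

	for (q = 0; q < nqueues; q++)
		if (MVNETA_IS_QUEUE_SET(queues, q))
			return (q);
	return (-1);
}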
+
+/*
+ * EEE: Low Power Idle config
+ * The default timer is the duration of an MTU-sized frame transmission.
+ * The timer can be negotiated via the LLDP protocol, but that is not
+ * supported.
+ */
+#define MVNETA_LPI_TS (ETHERMTU * 8 / 1000) /* [us] */
+#define MVNETA_LPI_TW (ETHERMTU * 8 / 1000) /* [us] */
+#define MVNETA_LPI_LI (ETHERMTU * 8 / 1000) /* [us] */
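/*
 * Editorial note (not part of the patch): at 1 Gb/s an ETHERMTU (1500
 * byte) frame takes 1500 * 8 / 1000 = 12 us to transmit, which is the
 * default value programmed for the Ts/Tw/Li timers above.
 */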
+
+/*
+ * DMA Descriptor
+ *
+ * The Ethernet device has 8 RX/TX DMA queues. Each queue has its own
+ * descriptor list, and descriptors are simply indexed by a counter
+ * inside the device.
+ */
+#define MVNETA_TX_SEGLIMIT 32
+
+#define MVNETA_QUEUE_IDLE 1
+#define MVNETA_QUEUE_WORKING 2
+#define MVNETA_QUEUE_DISABLED 3
+
+struct mvneta_buf {
+ struct mbuf * m; /* pointer to related mbuf */
+ bus_dmamap_t dmap;
+};
+
+struct mvneta_rx_ring {
+ int queue_status;
+ /* Real descriptors array. shared by RxDMA */
+ struct mvneta_rx_desc *desc;
+ bus_dmamap_t desc_map;
+ bus_addr_t desc_pa;
+
+	/* Management entries for each of the descriptors */
+ struct mvneta_buf rxbuf[MVNETA_RX_RING_CNT];
+
+ /* locks */
+ struct mtx ring_mtx;
+
+ /* Index */
+ int dma;
+ int cpu;
+
+ /* Limit */
+ int queue_th_received;
+ int queue_th_time; /* [Tclk] */
+
+ /* LRO */
+ struct lro_ctrl lro;
+ boolean_t lro_enabled;
+	/* Is this queue out of mbufs */
+ boolean_t needs_refill;
+} __aligned(CACHE_LINE_SIZE);
+
+struct mvneta_tx_ring {
+ /* Index of this queue */
+ int qidx;
+ /* IFNET pointer */
+ struct ifnet *ifp;
+ /* Ring buffer for IFNET */
+ struct buf_ring *br;
+ /* Real descriptors array. shared by TxDMA */
+ struct mvneta_tx_desc *desc;
+ bus_dmamap_t desc_map;
+ bus_addr_t desc_pa;
+
+	/* Management entries for each of the descriptors */
+ struct mvneta_buf txbuf[MVNETA_TX_RING_CNT];
+
+ /* locks */
+ struct mtx ring_mtx;
+
+ /* Index */
+ int used;
+ int dma;
+ int cpu;
+
+ /* watchdog */
+#define MVNETA_WATCHDOG_TXCOMP (hz / 10) /* 100ms */
+#define MVNETA_WATCHDOG (10 * hz) /* 10s */
+ int watchdog_time;
+ int queue_status;
+ boolean_t queue_hung;
+
+ /* Task */
+ struct task task;
+ struct taskqueue *taskq;
+
+ /* Stats */
+ uint32_t drv_error;
+} __aligned(CACHE_LINE_SIZE);
+
+static __inline int
+tx_counter_adv(int ctr, int n)
+{
+
+ ctr += n;
+ while (__predict_false(ctr >= MVNETA_TX_RING_CNT))
+ ctr -= MVNETA_TX_RING_CNT;
+
+ return (ctr);
+}
+
+static __inline int
+rx_counter_adv(int ctr, int n)
+{
+
+ ctr += n;
+ while (__predict_false(ctr >= MVNETA_RX_RING_CNT))
+ ctr -= MVNETA_RX_RING_CNT;
+
+ return (ctr);
+}
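/*
 * Illustrative sketch (editorial, not part of the patch): advancing a
 * TX ring's software DMA index after the controller reports "n"
 * reclaimed descriptors.
 */
static __inline void
mvneta_tx_ring_reclaimed(struct mvneta_tx_ring *tx, int n)
{

	tx->dma = tx_counter_adv(tx->dma, n);
	tx->used -= n;
}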
+
+/*
+ * Timeout control
+ */
+#define MVNETA_PHY_TIMEOUT 10000 /* msec */
+#define RX_DISABLE_TIMEOUT 0x1000000 /* times */
+#define TX_DISABLE_TIMEOUT 0x1000000 /* times */
+#define TX_FIFO_EMPTY_TIMEOUT 0x1000000 /* times */
+
+/*
+ * Debug
+ */
+#define KASSERT_SC_MTX(sc) \
+ KASSERT(mtx_owned(&(sc)->mtx), ("SC mutex not owned"))
+#define KASSERT_BM_MTX(sc) \
+ KASSERT(mtx_owned(&(sc)->bm.bm_mtx), ("BM mutex not owned"))
+#define KASSERT_RX_MTX(sc, q) \
+ KASSERT(mtx_owned(&(sc)->rx_ring[(q)].ring_mtx),\
+ ("RX mutex not owned"))
+#define KASSERT_TX_MTX(sc, q) \
+ KASSERT(mtx_owned(&(sc)->tx_ring[(q)].ring_mtx),\
+ ("TX mutex not owned"))
+
+/*
+ * sysctl(9) parameters
+ */
+struct mvneta_sysctl_queue {
+ struct mvneta_softc *sc;
+ int rxtx;
+ int queue;
+};
+#define MVNETA_SYSCTL_RX 0
+#define MVNETA_SYSCTL_TX 1
+
+struct mvneta_sysctl_mib {
+ struct mvneta_softc *sc;
+ int index;
+ uint64_t counter;
+};
+
+enum mvneta_phy_mode {
+ MVNETA_PHY_QSGMII,
+ MVNETA_PHY_SGMII,
+ MVNETA_PHY_RGMII,
+ MVNETA_PHY_RGMII_ID
+};
+
+/*
+ * Ethernet Device main context
+ */
+DECLARE_CLASS(mvneta_driver);
+
+struct mvneta_softc {
+ device_t dev;
+ uint32_t version;
+ /*
+	 * mtx must be held by interface functions called to/from
+	 * other frameworks: the interrupt handler, sysctl handler,
+	 * ioctl handler, and so on.
+ */
+ struct mtx mtx;
+ struct resource *res[2];
+ void *ih_cookie[1];
+
+ struct ifnet *ifp;
+ uint32_t mvneta_if_flags;
+ uint32_t mvneta_media;
+
+ int phy_attached;
+ enum mvneta_phy_mode phy_mode;
+ int phy_addr;
+ int phy_speed; /* PHY speed */
+ boolean_t phy_fdx; /* Full duplex mode */
+ boolean_t use_inband_status; /* In-band link status */
+
+ /*
+ * Link State control
+ */
+ boolean_t linkup;
+ device_t miibus;
+ struct mii_data *mii;
+ uint8_t enaddr[ETHER_ADDR_LEN];
+ struct ifmedia mvneta_ifmedia;
+
+ bus_dma_tag_t rx_dtag;
+ bus_dma_tag_t rxbuf_dtag;
+ bus_dma_tag_t tx_dtag;
+ bus_dma_tag_t txmbuf_dtag;
+ struct mvneta_rx_ring rx_ring[MVNETA_RX_QNUM_MAX];
+ struct mvneta_tx_ring tx_ring[MVNETA_TX_QNUM_MAX];
+
+ /*
+	 * Maintenance clock
+ */
+ struct callout tick_ch;
+
+ int cf_lpi;
+ int cf_fc;
+ int debug;
+
+ /*
+ * Sysctl interfaces
+ */
+ struct mvneta_sysctl_queue sysctl_rx_queue[MVNETA_RX_QNUM_MAX];
+ struct mvneta_sysctl_queue sysctl_tx_queue[MVNETA_TX_QNUM_MAX];
+
+ /*
+ * MIB counter
+ */
+ struct mvneta_sysctl_mib sysctl_mib[MVNETA_PORTMIB_NOCOUNTER];
+ uint64_t counter_pdfc;
+ uint64_t counter_pofc;
+ uint32_t counter_watchdog; /* manual reset when clearing mib */
+ uint32_t counter_watchdog_mib; /* reset after each mib update */
+};
+#define MVNETA_RX_RING(sc, q) \
+ (&(sc)->rx_ring[(q)])
+#define MVNETA_TX_RING(sc, q) \
+ (&(sc)->tx_ring[(q)])
+
+int mvneta_attach(device_t);
+
+#ifdef FDT
+int mvneta_fdt_mac_address(struct mvneta_softc *, uint8_t *);
+#endif
+
+#endif /* _IF_MVNETAVAR_H_ */
diff --git a/sys/dev/oce/oce_mbox.c b/sys/dev/oce/oce_mbox.c
index cb2ae81..c9a997d 100644
--- a/sys/dev/oce/oce_mbox.c
+++ b/sys/dev/oce/oce_mbox.c
@@ -864,7 +864,7 @@ oce_rxf_set_promiscuous(POCE_SOFTC sc, uint8_t enable)
req->iface_flags = MBX_RX_IFACE_FLAGS_PROMISCUOUS;
if (enable & 0x02)
- req->iface_flags = MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
+ req->iface_flags |= MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
req->if_id = sc->if_id;
diff --git a/sys/dev/ofw/ofw_bus_subr.c b/sys/dev/ofw/ofw_bus_subr.c
index fa72583..5a97edd 100644
--- a/sys/dev/ofw/ofw_bus_subr.c
+++ b/sys/dev/ofw/ofw_bus_subr.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include "ofw_bus_if.h"
#define OFW_COMPAT_LEN 255
+#define OFW_STATUS_LEN 16
int
ofw_bus_gen_setup_devinfo(struct ofw_bus_devinfo *obd, phandle_t node)
@@ -179,6 +180,24 @@ ofw_bus_status_okay(device_t dev)
return (0);
}
+int
+ofw_bus_node_status_okay(phandle_t node)
+{
+ char status[OFW_STATUS_LEN];
+ int len;
+
+ len = OF_getproplen(node, "status");
+ if (len <= 0)
+ return (1);
+
+ OF_getprop(node, "status", status, OFW_STATUS_LEN);
+	if ((len == 5 && (bcmp(status, "okay", len) == 0)) ||
+	    (len == 3 && (bcmp(status, "ok", len) == 0)))
+ return (1);
+
+ return (0);
+}
+
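/*
 * Illustrative usage sketch (editorial, not part of this change):
 * skipping children whose "status" property is not okay while walking
 * a bus node; "node" is a hypothetical parent handle.
 */
static void
example_walk_enabled_children(phandle_t node)
{
	phandle_t child;

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		if (!ofw_bus_node_status_okay(child))
			continue;
		/* Probe/attach only enabled children here. */
	}
}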
static int
ofw_bus_node_is_compatible_int(const char *compat, int len,
const char *onecompat)
diff --git a/sys/dev/ofw/ofw_bus_subr.h b/sys/dev/ofw/ofw_bus_subr.h
index 6532a16..6058696 100644
--- a/sys/dev/ofw/ofw_bus_subr.h
+++ b/sys/dev/ofw/ofw_bus_subr.h
@@ -100,6 +100,7 @@ int ofw_bus_intr_by_rid(device_t, phandle_t, int, phandle_t *, int *,
/* Helper to get device status property */
const char *ofw_bus_get_status(device_t dev);
int ofw_bus_status_okay(device_t dev);
+int ofw_bus_node_status_okay(phandle_t node);
/* Helper to get node's interrupt parent */
phandle_t ofw_bus_find_iparent(phandle_t);
diff --git a/sys/dev/ofw/openfirmio.c b/sys/dev/ofw/openfirmio.c
index 5803ec1..0079310 100644
--- a/sys/dev/ofw/openfirmio.c
+++ b/sys/dev/ofw/openfirmio.c
@@ -100,8 +100,6 @@ openfirm_getstr(int len, const char *user, char **cpp)
return (ENAMETOOLONG);
*cpp = cp = malloc(len + 1, M_TEMP, M_WAITOK);
- if (cp == NULL)
- return (ENOMEM);
error = copyin(user, cp, len);
cp[len] = '\0';
return (error);
@@ -173,10 +171,6 @@ openfirm_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags,
if (len <= 0)
break;
value = malloc(len, M_TEMP, M_WAITOK);
- if (value == NULL) {
- error = ENOMEM;
- break;
- }
len = OF_getprop(node, name, (void *)value, len);
error = copyout(value, of->of_buf, len);
break;
@@ -199,10 +193,6 @@ openfirm_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags,
if (error)
break;
value = malloc(of->of_buflen, M_TEMP, M_WAITOK);
- if (value == NULL) {
- error = ENOMEM;
- break;
- }
error = copyin(of->of_buf, value, of->of_buflen);
if (error)
break;
diff --git a/sys/dev/ofw/openpromio.c b/sys/dev/ofw/openpromio.c
index 8ba6d3a..e2a345b 100644
--- a/sys/dev/ofw/openpromio.c
+++ b/sys/dev/ofw/openpromio.c
@@ -151,18 +151,10 @@ openprom_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags,
break;
}
prop = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
- if (prop == NULL) {
- error = ENOMEM;
- break;
- }
error = copyinstr(&oprom->oprom_array, prop, len, &done);
if (error != 0)
break;
buf = malloc(OPROMMAXPARAM, M_TEMP, M_WAITOK | M_ZERO);
- if (buf == NULL) {
- error = ENOMEM;
- break;
- }
node = openprom_node;
switch (cmd) {
case OPROMGETPROP:
diff --git a/sys/dev/sdhci/sdhci_fdt.c b/sys/dev/sdhci/sdhci_fdt.c
index 2e1b30e..20ba0b7 100644
--- a/sys/dev/sdhci/sdhci_fdt.c
+++ b/sys/dev/sdhci/sdhci_fdt.c
@@ -57,10 +57,25 @@ __FBSDID("$FreeBSD$");
#include "mmcbr_if.h"
#include "sdhci_if.h"
-#define MAX_SLOTS 6
+#define MAX_SLOTS 6
+#define SDHCI_FDT_ARMADA38X 1
+#define SDHCI_FDT_GENERIC 2
+#define SDHCI_FDT_XLNX_ZY7 3
+
+#define MV_SDIO3_CONF 0
+#define MV_SDIO3_CONF_FB_CLK (1 << 2)
+#define MV_SDIO3_CONF_CLK_INV (1 << 0)
+
+static struct ofw_compat_data compat_data[] = {
+ { "marvell,armada-380-sdhci", SDHCI_FDT_ARMADA38X },
+ { "sdhci_generic", SDHCI_FDT_GENERIC },
+ { "xlnx,zy7_sdhci", SDHCI_FDT_XLNX_ZY7 },
+ { NULL, 0 }
+};
struct sdhci_fdt_softc {
device_t dev; /* Controller device */
+ u_int devid; /* Device ID */
u_int quirks; /* Chip specific quirks */
u_int caps; /* If we override SDHCI_CAPABILITIES */
uint32_t max_clk; /* Max possible freq */
@@ -70,6 +85,9 @@ struct sdhci_fdt_softc {
int num_slots; /* Number of slots on this controller*/
struct sdhci_slot slots[MAX_SLOTS];
struct resource *mem_res[MAX_SLOTS]; /* Memory resource */
+
+ bool wp_inverted; /* WP pin is inverted */
+ bool no_18v; /* No 1.8V support */
};
static uint8_t
@@ -110,8 +128,15 @@ static uint32_t
sdhci_fdt_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
struct sdhci_fdt_softc *sc = device_get_softc(dev);
+ uint32_t val32;
- return (bus_read_4(sc->mem_res[slot->num], off));
+ val32 = bus_read_4(sc->mem_res[slot->num], off);
+ if (off == SDHCI_CAPABILITIES && sc->no_18v)
+ val32 &= ~SDHCI_CAN_VDD_180;
+ if (sc->devid == SDHCI_FDT_ARMADA38X && off == SDHCI_CAPABILITIES2)
+ val32 &= ~(SDHCI_CAN_SDR104 | SDHCI_TUNE_SDR50);
+
+ return (val32);
}
static void
@@ -152,6 +177,33 @@ sdhci_fdt_intr(void *arg)
}
static int
+sdhci_fdt_get_ro(device_t bus, device_t dev)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(bus);
+
+ return (sdhci_generic_get_ro(bus, dev) ^ sc->wp_inverted);
+}
+
+static void
+sdhci_fdt_mv_init(device_t dev)
+{
+ int rid;
+ struct resource *res;
+ uint32_t reg;
+
+ rid = 2;
+ res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (res == NULL) {
+ device_printf(dev, "SDIO3-conf register not present.\n");
+ return;
+ }
+ reg = bus_read_4(res, MV_SDIO3_CONF);
+ reg &= ~MV_SDIO3_CONF_CLK_INV;
+ bus_write_4(res, MV_SDIO3_CONF, reg);
+ bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(res), res);
+}
+
+static int
sdhci_fdt_probe(device_t dev)
{
struct sdhci_fdt_softc *sc = device_get_softc(dev);
@@ -165,13 +217,22 @@ sdhci_fdt_probe(device_t dev)
if (!ofw_bus_status_okay(dev))
return (ENXIO);
- if (ofw_bus_is_compatible(dev, "sdhci_generic")) {
+ sc->devid = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ switch (sc->devid) {
+ case SDHCI_FDT_ARMADA38X:
+ sc->quirks = SDHCI_QUIRK_BROKEN_AUTO_STOP;
+ device_set_desc(dev, "ARMADA38X SDHCI controller");
+ break;
+ case SDHCI_FDT_GENERIC:
device_set_desc(dev, "generic fdt SDHCI controller");
- } else if (ofw_bus_is_compatible(dev, "xlnx,zy7_sdhci")) {
+ break;
+ case SDHCI_FDT_XLNX_ZY7:
sc->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
device_set_desc(dev, "Zynq-7000 generic fdt SDHCI controller");
- } else
+ break;
+ default:
return (ENXIO);
+ }
node = ofw_bus_get_node(dev);
@@ -182,6 +243,10 @@ sdhci_fdt_probe(device_t dev)
sc->num_slots = cid;
if ((OF_getencprop(node, "max-frequency", &cid, sizeof(cid))) > 0)
sc->max_clk = cid;
+ if (OF_hasprop(node, "no-1-8-v"))
+ sc->no_18v = true;
+ if (OF_hasprop(node, "wp-inverted"))
+ sc->wp_inverted = true;
return (0);
}
@@ -231,6 +296,10 @@ sdhci_fdt_attach(device_t dev)
}
device_printf(dev, "%d slot(s) allocated\n", sc->num_slots);
+ /* Platform init. */
+ if (sc->devid == SDHCI_FDT_ARMADA38X && sc->num_slots == 1)
+ sdhci_fdt_mv_init(dev);
+
/* Activate the interrupt */
err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
NULL, sdhci_fdt_intr, sc, &sc->intrhand);
@@ -279,7 +348,7 @@ static device_method_t sdhci_fdt_methods[] = {
/* mmcbr_if */
DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
DEVMETHOD(mmcbr_request, sdhci_generic_request),
- DEVMETHOD(mmcbr_get_ro, sdhci_generic_get_ro),
+ DEVMETHOD(mmcbr_get_ro, sdhci_fdt_get_ro),
DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),
diff --git a/sys/dev/spibus/ofw_spibus.c b/sys/dev/spibus/ofw_spibus.c
index 1da3dae..6c7cc92 100644
--- a/sys/dev/spibus/ofw_spibus.c
+++ b/sys/dev/spibus/ofw_spibus.c
@@ -80,6 +80,7 @@ ofw_spibus_attach(device_t dev)
phandle_t child;
pcell_t clock, paddr;
device_t childdev;
+ uint32_t mode = SPIBUS_MODE_NONE;
sc->dev = dev;
@@ -103,6 +104,24 @@ ofw_spibus_attach(device_t dev)
}
/*
+ * Try to get the cpol/cpha mode
+ */
+ if (OF_hasprop(child, "spi-cpol"))
+ mode = SPIBUS_MODE_CPOL;
+ if (OF_hasprop(child, "spi-cpha")) {
+ if (mode == SPIBUS_MODE_CPOL)
+ mode = SPIBUS_MODE_CPOL_CPHA;
+ else
+ mode = SPIBUS_MODE_CPHA;
+ }
+
+ /*
+ * Try to get the CS polarity
+ */
+ if (OF_hasprop(child, "spi-cs-high"))
+ paddr |= SPIBUS_CS_HIGH;
+
+ /*
* Get the maximum clock frequency for device, zero means
* use the default bus speed.
*
@@ -124,6 +143,7 @@ ofw_spibus_attach(device_t dev)
continue;
dinfo->opd_dinfo.cs = paddr;
dinfo->opd_dinfo.clock = clock;
+ dinfo->opd_dinfo.mode = mode;
if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, child) !=
0) {
free(dinfo, M_DEVBUF);
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index 15436d9..de3525c 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -29,6 +29,10 @@
#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H
+#ifdef ALTQ
+#define VTNET_LEGACY_TX
+#endif
+
struct vtnet_softc;
struct vtnet_statistics {
diff --git a/sys/dev/vmware/vmxnet3/if_vmxvar.h b/sys/dev/vmware/vmxnet3/if_vmxvar.h
index 8613516..711e694 100644
--- a/sys/dev/vmware/vmxnet3/if_vmxvar.h
+++ b/sys/dev/vmware/vmxnet3/if_vmxvar.h
@@ -20,6 +20,10 @@
#ifndef _IF_VMXVAR_H
#define _IF_VMXVAR_H
+#ifdef ALTQ
+#define VMXNET3_LEGACY_TX
+#endif
+
struct vmxnet3_softc;
struct vmxnet3_dma_alloc {
@@ -131,7 +135,7 @@ struct vmxnet3_txq_stats {
struct vmxnet3_txqueue {
struct mtx vxtxq_mtx;
struct vmxnet3_softc *vxtxq_sc;
-#ifndef VMXNET3_TX_LEGACY
+#ifndef VMXNET3_LEGACY_TX
struct buf_ring *vxtxq_br;
#endif
int vxtxq_id;
@@ -142,7 +146,7 @@ struct vmxnet3_txqueue {
struct vmxnet3_txq_stats vxtxq_stats;
struct vmxnet3_txq_shared *vxtxq_ts;
struct sysctl_oid_list *vxtxq_sysctl;
-#ifndef VMXNET3_TX_LEGACY
+#ifndef VMXNET3_LEGACY_TX
struct task vxtxq_defrtask;
#endif
char vxtxq_name[16];