Diffstat (limited to 'sys/dev/bce/if_bce.c')
-rw-r--r--	sys/dev/bce/if_bce.c	2182
1 file changed, 1533 insertions(+), 649 deletions(-)
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index e50229f..2ecbcd9 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2006-2007 Broadcom Corporation
+ * Copyright (c) 2006-2008 Broadcom Corporation
* David Christensen <davidch@broadcom.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,13 +34,15 @@ __FBSDID("$FreeBSD$");
/*
* The following controllers are supported by this driver:
* BCM5706C A2, A3
+ * BCM5706S A2, A3
* BCM5708C B1, B2
+ * BCM5708S B1, B2
*
* The following controllers are not supported by this driver:
- * BCM5706C A0, A1
- * BCM5706S A0, A1, A2, A3
- * BCM5708C A0, B0
- * BCM5708S A0, B0, B1, B2
+ * BCM5706C A0, A1 (pre-production)
+ * BCM5706S A0, A1 (pre-production)
+ * BCM5708C A0, B0 (pre-production)
+ * BCM5708S A0, B0 (pre-production)
*/
#include "opt_bce.h"
@@ -77,10 +79,10 @@ __FBSDID("$FreeBSD$");
int bce_debug_dma_map_addr_failure = 0;
/* Controls how often to simulate a bootcode failure. */
- int bce_debug_bootcode_running_failure = 0;
+ int bce_debug_bootcode_running_failure = 0;
#endif
-
-
+
+
/****************************************************************************/
/* PCI Device ID Table */
/* */
@@ -218,14 +220,20 @@ static int bce_shutdown (device_t);
/* BCE Debug Data Structure Dump Routines */
/****************************************************************************/
#ifdef BCE_DEBUG
+static u32 bce_ctx_rd (struct bce_softc *, u32, u32);
static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
-static void bce_dump_tx_mbuf_chain (struct bce_softc *, int, int);
-static void bce_dump_rx_mbuf_chain (struct bce_softc *, int, int);
+static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int);
+static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int);
+static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int);
static void bce_dump_txbd (struct bce_softc *, int, struct tx_bd *);
static void bce_dump_rxbd (struct bce_softc *, int, struct rx_bd *);
+static void bce_dump_pgbd (struct bce_softc *, int, struct rx_bd *);
static void bce_dump_l2fhdr (struct bce_softc *, int, struct l2_fhdr *);
-static void bce_dump_tx_chain (struct bce_softc *, int, int);
-static void bce_dump_rx_chain (struct bce_softc *, int, int);
+static void bce_dump_ctx (struct bce_softc *, u16);
+static void bce_dump_ftqs (struct bce_softc *);
+static void bce_dump_tx_chain (struct bce_softc *, u16, int);
+static void bce_dump_rx_chain (struct bce_softc *, u16, int);
+static void bce_dump_pg_chain (struct bce_softc *, u16, int);
static void bce_dump_status_block (struct bce_softc *);
static void bce_dump_stats_block (struct bce_softc *);
static void bce_dump_driver_state (struct bce_softc *);
@@ -285,13 +293,19 @@ static void bce_stop (struct bce_softc *);
static int bce_reset (struct bce_softc *, u32);
static int bce_chipinit (struct bce_softc *);
static int bce_blockinit (struct bce_softc *);
-static int bce_get_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
+static int bce_get_rx_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *, u32 *);
+static int bce_get_pg_buf (struct bce_softc *, struct mbuf *, u16 *, u16 *);
static int bce_init_tx_chain (struct bce_softc *);
-static void bce_fill_rx_chain (struct bce_softc *);
+static void bce_free_tx_chain (struct bce_softc *);
+
static int bce_init_rx_chain (struct bce_softc *);
+static void bce_fill_rx_chain (struct bce_softc *);
static void bce_free_rx_chain (struct bce_softc *);
-static void bce_free_tx_chain (struct bce_softc *);
+
+static int bce_init_pg_chain (struct bce_softc *);
+static void bce_fill_pg_chain (struct bce_softc *);
+static void bce_free_pg_chain (struct bce_softc *);
static int bce_tx_encap (struct bce_softc *, struct mbuf **);
static void bce_start_locked (struct ifnet *);
@@ -305,10 +319,11 @@ static void bce_init_locked (struct bce_softc *);
static void bce_init (void *);
static void bce_mgmt_init_locked (struct bce_softc *sc);
-static void bce_init_context (struct bce_softc *);
+static void bce_init_ctx (struct bce_softc *);
static void bce_get_mac_addr (struct bce_softc *);
static void bce_set_mac_addr (struct bce_softc *);
static void bce_phy_intr (struct bce_softc *);
+static inline u16 bce_get_hw_rx_cons(struct bce_softc *);
static void bce_rx_intr (struct bce_softc *);
static void bce_tx_intr (struct bce_softc *);
static void bce_disable_intr (struct bce_softc *);
@@ -330,20 +345,28 @@ static void bce_add_sysctls (struct bce_softc *);
/* FreeBSD device dispatch table. */
/****************************************************************************/
static device_method_t bce_methods[] = {
- /* Device interface */
+ /* Device interface (device_if.h) */
DEVMETHOD(device_probe, bce_probe),
DEVMETHOD(device_attach, bce_attach),
DEVMETHOD(device_detach, bce_detach),
DEVMETHOD(device_shutdown, bce_shutdown),
+/* Supported by device interface but not used here. */
+/* DEVMETHOD(device_identify, bce_identify), */
+/* DEVMETHOD(device_suspend, bce_suspend), */
+/* DEVMETHOD(device_resume, bce_resume), */
+/* DEVMETHOD(device_quiesce, bce_quiesce), */
- /* bus interface */
+ /* Bus interface (bus_if.h) */
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
- /* MII interface */
+ /* MII interface (miibus_if.h) */
DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
DEVMETHOD(miibus_statchg, bce_miibus_statchg),
+/* Supported by MII interface but not used here. */
+/* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */
+/* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */
{ 0, 0 }
};
@@ -370,16 +393,22 @@ DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
static int bce_tso_enable = TRUE;
static int bce_msi_enable = 1;
+SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
+
/* Allowable values are TRUE or FALSE */
TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
-/* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */
-TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
-
-SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
SYSCTL_UINT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
"TSO Enable/Disable");
+
+/* Allowable values are 0 (IRQ only) and 1 (IRQ or MSI) */
+TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
SYSCTL_UINT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
"MSI | INTx selector");
+
+/* ToDo: Add tunable to enable/disable strict MTU handling. */
+/* Currently allows "loose" RX MTU checking (i.e. sets the */
+/* h/w RX MTU to the size of the largest receive buffer, or */
+/* 2048 bytes). */
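For reference, a minimal sketch of how such a tunable could be wired up, following the same TUNABLE_INT()/SYSCTL_UINT() pattern used above (the hw.bce.strict_rx_mtu name and its default are hypothetical, not part of this change):

/* Hypothetical sketch only.  Allowable values are TRUE or FALSE. */
static int bce_strict_rx_mtu = FALSE;
TUNABLE_INT("hw.bce.strict_rx_mtu", &bce_strict_rx_mtu);
SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN,
    &bce_strict_rx_mtu, 0, "Enable/Disable strict RX frame size checking");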
/****************************************************************************/
/* Device probe function. */
@@ -460,21 +489,19 @@ bce_attach(device_t dev)
struct bce_softc *sc;
struct ifnet *ifp;
u32 val;
- int count, mbuf, rid, rc = 0;
+ int count, rid, rc = 0;
sc = device_get_softc(dev);
sc->bce_dev = dev;
DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
- mbuf = device_get_unit(dev);
+ sc->bce_unit = device_get_unit(dev);
/* Set initial device and PHY flags */
sc->bce_flags = 0;
sc->bce_phy_flags = 0;
- sc->bce_unit = mbuf;
-
pci_enable_busmaster(dev);
/* Allocate PCI memory resources. */
@@ -572,18 +599,14 @@ bce_attach(device_t dev)
DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
__FUNCTION__, sc->bce_shmem_base);
+ /* Fetch the bootcode revision. */
sc->bce_fw_ver = REG_RD_IND(sc, sc->bce_shmem_base +
BCE_DEV_INFO_BC_REV);
- DBPRINT(sc, BCE_INFO_FIRMWARE, "%s(): bce_fw_ver = 0x%08X\n",
- __FUNCTION__, sc->bce_fw_ver);
/* Check if any management firmware is running. */
val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
- if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED)) {
+ if (val & (BCE_PORT_FEATURE_ASF_ENABLED | BCE_PORT_FEATURE_IMD_ENABLED))
sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
- DBPRINT(sc, BCE_INFO_LOAD, "%s(): BCE_MFW_ENABLE_FLAG\n",
- __FUNCTION__);
- }
/* Get PCI bus information (speed and type). */
val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
@@ -757,8 +780,10 @@ bce_attach(device_t dev)
ifp->if_capenable = ifp->if_capabilities;
- /* Assume a standard 1500 byte MTU size for mbuf allocations. */
- sc->mbuf_alloc_size = MCLBYTES;
+ /* Use standard mbuf sizes for buffer allocation. */
+ sc->rx_bd_mbuf_alloc_size = MHLEN;
+ sc->pg_bd_mbuf_alloc_size = MCLBYTES;
+
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
@@ -810,7 +835,7 @@ bce_attach(device_t dev)
*/
/* Print some important debugging info. */
- DBRUN(BCE_INFO, bce_dump_driver_state(sc));
+ DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
/* Add the supported sysctls to the kernel. */
bce_add_sysctls(sc);
@@ -843,6 +868,9 @@ bce_attach(device_t dev)
if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
printf("2.5G ");
printf(")\n");
+
+ DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
+ __FUNCTION__, sc);
goto bce_attach_exit;
@@ -881,17 +909,19 @@ bce_detach(device_t dev)
ether_poll_deregister(ifp);
#endif
+ /* Stop and reset the controller. */
+ BCE_LOCK(sc);
+
/* Stop the pulse so the bootcode can go to driver absent state. */
callout_stop(&sc->bce_pulse_callout);
- /* Stop and reset the controller. */
- BCE_LOCK(sc);
bce_stop(sc);
if (sc->bce_flags & BCE_NO_WOL_FLAG)
msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
else
msg = BCE_DRV_MSG_CODE_UNLOAD;
bce_reset(sc, msg);
+
BCE_UNLOCK(sc);
ether_ifdetach(ifp);
@@ -994,6 +1024,33 @@ bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}
+
+#ifdef BCE_DEBUG
+/****************************************************************************/
+/* Context memory read. */
+/* */
+/* The NetXtreme II controller uses context memory to track connection */
+/* information for L2 and higher network protocols. */
+/* */
+/* Returns: */
+/* The requested 32 bit value of context memory. */
+/****************************************************************************/
+static u32
+bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 offset)
+{
+ u32 val;
+
+ offset += cid_addr;
+ REG_WR(sc, BCE_CTX_DATA_ADR, offset);
+ val = REG_RD(sc, BCE_CTX_DATA);
+
+	DBPRINT(sc, BCE_EXCESSIVE, "%s(): cid_addr = 0x%08X, offset = 0x%08X, "
+ "val = 0x%08X\n", __FUNCTION__, cid_addr, offset, val);
+
+ return(val);
+}
+#endif
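As an illustration (a sketch, not part of the patch), the new bce_dump_ctx() routine declared earlier can be built on bce_ctx_rd(), mirroring how bce_init_ctx() zeroes the same words with CTX_WR():

/* Hypothetical debug loop: dump the quick context of the RX connection. */
u32 offset;
for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
	BCE_PRINTF("0x%04X: 0x%08X\n", offset,
	    bce_ctx_rd(sc, GET_CID_ADDR(RX_CID), offset));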
+
/****************************************************************************/
/* Context memory write. */
@@ -1212,7 +1269,8 @@ bce_miibus_statchg(device_t dev)
val |= BCE_EMAC_MODE_PORT_GMII;
break;
default:
- DBPRINT(sc, BCE_INFO, "Enabling default GMII interface.\n");
+ DBPRINT(sc, BCE_INFO, "Unknown speed, enabling default GMII "
+ "interface.\n");
val |= BCE_EMAC_MODE_PORT_GMII;
}
@@ -1226,7 +1284,7 @@ bce_miibus_statchg(device_t dev)
REG_WR(sc, BCE_EMAC_MODE, val);
#if 0
- /* Todo: Enable flow control support in brgphy and bge. */
+ /* ToDo: Enable flow control support in brgphy and bge. */
/* FLAG0 is set if RX is enabled and FLAG1 if TX is enabled */
if (mii->mii_media_active & IFM_FLAG0)
BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
@@ -2095,11 +2153,13 @@ bce_dma_free(struct bce_softc *sc)
DBPRINT(sc,BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
/* Destroy the status block. */
- if (sc->status_block != NULL)
+ if (sc->status_block != NULL) {
bus_dmamem_free(
sc->status_tag,
sc->status_block,
sc->status_map);
+ sc->status_block = NULL;
+ }
if (sc->status_map != NULL) {
bus_dmamap_unload(
@@ -2107,18 +2167,23 @@ bce_dma_free(struct bce_softc *sc)
sc->status_map);
bus_dmamap_destroy(sc->status_tag,
sc->status_map);
+ sc->status_map = NULL;
}
- if (sc->status_tag != NULL)
+ if (sc->status_tag != NULL) {
bus_dma_tag_destroy(sc->status_tag);
+ sc->status_tag = NULL;
+ }
/* Destroy the statistics block. */
- if (sc->stats_block != NULL)
+ if (sc->stats_block != NULL) {
bus_dmamem_free(
sc->stats_tag,
sc->stats_block,
sc->stats_map);
+ sc->stats_block = NULL;
+ }
if (sc->stats_map != NULL) {
bus_dmamap_unload(
@@ -2126,19 +2191,24 @@ bce_dma_free(struct bce_softc *sc)
sc->stats_map);
bus_dmamap_destroy(sc->stats_tag,
sc->stats_map);
+ sc->stats_map = NULL;
}
- if (sc->stats_tag != NULL)
+ if (sc->stats_tag != NULL) {
bus_dma_tag_destroy(sc->stats_tag);
+ sc->stats_tag = NULL;
+ }
/* Free, unmap and destroy all TX buffer descriptor chain pages. */
for (i = 0; i < TX_PAGES; i++ ) {
- if (sc->tx_bd_chain[i] != NULL)
+ if (sc->tx_bd_chain[i] != NULL) {
bus_dmamem_free(
sc->tx_bd_chain_tag,
sc->tx_bd_chain[i],
sc->tx_bd_chain_map[i]);
+ sc->tx_bd_chain[i] = NULL;
+ }
if (sc->tx_bd_chain_map[i] != NULL) {
bus_dmamap_unload(
@@ -2147,22 +2217,26 @@ bce_dma_free(struct bce_softc *sc)
bus_dmamap_destroy(
sc->tx_bd_chain_tag,
sc->tx_bd_chain_map[i]);
+ sc->tx_bd_chain_map[i] = NULL;
}
-
}
/* Destroy the TX buffer descriptor tag. */
- if (sc->tx_bd_chain_tag != NULL)
+ if (sc->tx_bd_chain_tag != NULL) {
bus_dma_tag_destroy(sc->tx_bd_chain_tag);
+ sc->tx_bd_chain_tag = NULL;
+ }
/* Free, unmap and destroy all RX buffer descriptor chain pages. */
for (i = 0; i < RX_PAGES; i++ ) {
- if (sc->rx_bd_chain[i] != NULL)
+ if (sc->rx_bd_chain[i] != NULL) {
bus_dmamem_free(
sc->rx_bd_chain_tag,
sc->rx_bd_chain[i],
sc->rx_bd_chain_map[i]);
+ sc->rx_bd_chain[i] = NULL;
+ }
if (sc->rx_bd_chain_map[i] != NULL) {
bus_dmamap_unload(
@@ -2171,12 +2245,43 @@ bce_dma_free(struct bce_softc *sc)
bus_dmamap_destroy(
sc->rx_bd_chain_tag,
sc->rx_bd_chain_map[i]);
+ sc->rx_bd_chain_map[i] = NULL;
}
}
/* Destroy the RX buffer descriptor tag. */
- if (sc->rx_bd_chain_tag != NULL)
+ if (sc->rx_bd_chain_tag != NULL) {
bus_dma_tag_destroy(sc->rx_bd_chain_tag);
+ sc->rx_bd_chain_tag = NULL;
+ }
+
+
+ /* Free, unmap and destroy all page buffer descriptor chain pages. */
+ for (i = 0; i < PG_PAGES; i++ ) {
+ if (sc->pg_bd_chain[i] != NULL) {
+ bus_dmamem_free(
+ sc->pg_bd_chain_tag,
+ sc->pg_bd_chain[i],
+ sc->pg_bd_chain_map[i]);
+ sc->pg_bd_chain[i] = NULL;
+ }
+
+ if (sc->pg_bd_chain_map[i] != NULL) {
+ bus_dmamap_unload(
+ sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i]);
+ bus_dmamap_destroy(
+ sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i]);
+ sc->pg_bd_chain_map[i] = NULL;
+ }
+ }
+
+ /* Destroy the page buffer descriptor tag. */
+ if (sc->pg_bd_chain_tag != NULL) {
+ bus_dma_tag_destroy(sc->pg_bd_chain_tag);
+ sc->pg_bd_chain_tag = NULL;
+ }
/* Unload and destroy the TX mbuf maps. */
@@ -2186,13 +2291,15 @@ bce_dma_free(struct bce_softc *sc)
sc->tx_mbuf_map[i]);
bus_dmamap_destroy(sc->tx_mbuf_tag,
sc->tx_mbuf_map[i]);
+ sc->tx_mbuf_map[i] = NULL;
}
}
/* Destroy the TX mbuf tag. */
- if (sc->tx_mbuf_tag != NULL)
+ if (sc->tx_mbuf_tag != NULL) {
bus_dma_tag_destroy(sc->tx_mbuf_tag);
-
+ sc->tx_mbuf_tag = NULL;
+ }
/* Unload and destroy the RX mbuf maps. */
for (i = 0; i < TOTAL_RX_BD; i++) {
@@ -2201,17 +2308,38 @@ bce_dma_free(struct bce_softc *sc)
sc->rx_mbuf_map[i]);
bus_dmamap_destroy(sc->rx_mbuf_tag,
sc->rx_mbuf_map[i]);
+ sc->rx_mbuf_map[i] = NULL;
}
}
/* Destroy the RX mbuf tag. */
- if (sc->rx_mbuf_tag != NULL)
+ if (sc->rx_mbuf_tag != NULL) {
bus_dma_tag_destroy(sc->rx_mbuf_tag);
+ sc->rx_mbuf_tag = NULL;
+ }
+ /* Unload and destroy the page mbuf maps. */
+ for (i = 0; i < TOTAL_PG_BD; i++) {
+ if (sc->pg_mbuf_map[i] != NULL) {
+ bus_dmamap_unload(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[i]);
+ bus_dmamap_destroy(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[i]);
+ sc->pg_mbuf_map[i] = NULL;
+ }
+ }
+
+ /* Destroy the page mbuf tag. */
+ if (sc->pg_mbuf_tag != NULL) {
+ bus_dma_tag_destroy(sc->pg_mbuf_tag);
+ sc->pg_mbuf_tag = NULL;
+ }
/* Destroy the parent tag */
- if (sc->parent_tag != NULL)
+ if (sc->parent_tag != NULL) {
bus_dma_tag_destroy(sc->parent_tag);
+ sc->parent_tag = NULL;
+ }
DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
@@ -2506,7 +2634,7 @@ bce_dma_alloc(device_t dev)
/*
* Create a DMA tag for the RX buffer descriptor chain,
- * allocate and clear the memory, and fetch the physical
+ * allocate and clear the memory, and fetch the physical
* address of the blocks.
*/
if (bus_dma_tag_create(sc->parent_tag,
@@ -2564,16 +2692,19 @@ bce_dma_alloc(device_t dev)
/*
* Create a DMA tag for RX mbufs.
- */
+ */
+ max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
+ MCLBYTES : sc->rx_bd_mbuf_alloc_size);
+
if (bus_dma_tag_create(sc->parent_tag,
1,
BCE_DMA_BOUNDARY,
sc->max_bus_addr,
BUS_SPACE_MAXADDR,
NULL, NULL,
- MJUM9BYTES,
- BCE_MAX_SEGMENTS,
- MJUM9BYTES,
+ max_size,
+ 1,
+ max_seg_size,
0,
NULL, NULL,
&sc->rx_mbuf_tag)) {
@@ -2594,6 +2725,99 @@ bce_dma_alloc(device_t dev)
}
}
+ /*
+ * Create a DMA tag for the page buffer descriptor chain,
+ * allocate and clear the memory, and fetch the physical
+ * address of the blocks.
+ */
+ if (bus_dma_tag_create(sc->parent_tag,
+ BCM_PAGE_SIZE,
+ BCE_DMA_BOUNDARY,
+ BUS_SPACE_MAXADDR,
+ sc->max_bus_addr,
+ NULL, NULL,
+ BCE_PG_CHAIN_PAGE_SZ,
+ 1,
+ BCE_PG_CHAIN_PAGE_SZ,
+ 0,
+ NULL, NULL,
+ &sc->pg_bd_chain_tag)) {
+ BCE_PRINTF("%s(%d): Could not allocate page descriptor chain DMA tag!\n",
+ __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
+
+ for (i = 0; i < PG_PAGES; i++) {
+
+ if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
+ (void **)&sc->pg_bd_chain[i],
+ BUS_DMA_NOWAIT,
+ &sc->pg_bd_chain_map[i])) {
+ BCE_PRINTF("%s(%d): Could not allocate page descriptor chain "
+ "DMA memory!\n", __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
+
+ bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
+
+ error = bus_dmamap_load(sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i],
+ sc->pg_bd_chain[i],
+ BCE_PG_CHAIN_PAGE_SZ,
+ bce_dma_map_addr,
+ &busaddr,
+ BUS_DMA_NOWAIT);
+
+ if (error) {
+ BCE_PRINTF("%s(%d): Could not map page descriptor chain DMA memory!\n",
+ __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
+
+ sc->pg_bd_chain_paddr[i] = busaddr;
+ /* DRC - Fix for 64 bit systems. */
+ DBPRINT(sc, BCE_INFO, "pg_bd_chain_paddr[%d] = 0x%08X\n",
+ i, (u32) sc->pg_bd_chain_paddr[i]);
+ }
+
+ /*
+ * Create a DMA tag for page mbufs.
+ */
+	max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
+		MCLBYTES : sc->pg_bd_mbuf_alloc_size);
+
+ if (bus_dma_tag_create(sc->parent_tag,
+ 1,
+ BCE_DMA_BOUNDARY,
+ sc->max_bus_addr,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ max_size,
+ 1,
+ max_seg_size,
+ 0,
+ NULL, NULL,
+ &sc->pg_mbuf_tag)) {
+ BCE_PRINTF("%s(%d): Could not allocate page mbuf DMA tag!\n",
+ __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
+
+ /* Create DMA maps for the page mbuf clusters. */
+ for (i = 0; i < TOTAL_PG_BD; i++) {
+ if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
+ &sc->pg_mbuf_map[i])) {
+ BCE_PRINTF("%s(%d): Unable to create page mbuf DMA map!\n",
+ __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
+ }
+
bce_dma_alloc_exit:
DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
@@ -2728,7 +2952,12 @@ bce_load_rv2p_fw(struct bce_softc *sc, u32 *rv2p_code,
{
int i;
u32 val;
-
+
+ /* Set the page size used by RV2P. */
+ if (rv2p_proc == RV2P_PROC2) {
+ BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
+ }
+
for (i = 0; i < rv2p_code_len; i += 8) {
REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
rv2p_code++;
@@ -3045,6 +3274,53 @@ bce_init_cpus(struct bce_softc *sc)
DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
bce_load_cpu_fw(sc, &cpu_reg, &fw);
+
+ /* Initialize the Command Processor. */
+ cpu_reg.mode = BCE_CP_CPU_MODE;
+ cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
+ cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
+ cpu_reg.state = BCE_CP_CPU_STATE;
+ cpu_reg.state_value_clear = 0xffffff;
+ cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
+ cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
+ cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
+ cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
+ cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
+ cpu_reg.spad_base = BCE_CP_SCRATCH;
+ cpu_reg.mips_view_base = 0x8000000;
+
+ fw.ver_major = bce_CP_b06FwReleaseMajor;
+ fw.ver_minor = bce_CP_b06FwReleaseMinor;
+ fw.ver_fix = bce_CP_b06FwReleaseFix;
+ fw.start_addr = bce_CP_b06FwStartAddr;
+
+ fw.text_addr = bce_CP_b06FwTextAddr;
+ fw.text_len = bce_CP_b06FwTextLen;
+ fw.text_index = 0;
+ fw.text = bce_CP_b06FwText;
+
+ fw.data_addr = bce_CP_b06FwDataAddr;
+ fw.data_len = bce_CP_b06FwDataLen;
+ fw.data_index = 0;
+ fw.data = bce_CP_b06FwData;
+
+ fw.sbss_addr = bce_CP_b06FwSbssAddr;
+ fw.sbss_len = bce_CP_b06FwSbssLen;
+ fw.sbss_index = 0;
+ fw.sbss = bce_CP_b06FwSbss;
+
+ fw.bss_addr = bce_CP_b06FwBssAddr;
+ fw.bss_len = bce_CP_b06FwBssLen;
+ fw.bss_index = 0;
+ fw.bss = bce_CP_b06FwBss;
+
+ fw.rodata_addr = bce_CP_b06FwRodataAddr;
+ fw.rodata_len = bce_CP_b06FwRodataLen;
+ fw.rodata_index = 0;
+ fw.rodata = bce_CP_b06FwRodata;
+
+ DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
+ bce_load_cpu_fw(sc, &cpu_reg, &fw);
}
@@ -3057,29 +3333,30 @@ bce_init_cpus(struct bce_softc *sc)
/* Nothing. */
/****************************************************************************/
static void
-bce_init_context(struct bce_softc *sc)
+bce_init_ctx(struct bce_softc *sc)
{
- u32 vcid;
+ u32 vcid = 96;
- vcid = 96;
while (vcid) {
u32 vcid_addr, pcid_addr, offset;
+ int i;
vcid--;
vcid_addr = GET_CID_ADDR(vcid);
pcid_addr = vcid_addr;
- REG_WR(sc, BCE_CTX_VIRT_ADDR, 0x00);
- REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
+ for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
+ vcid_addr += (i << PHY_CTX_SHIFT);
+ pcid_addr += (i << PHY_CTX_SHIFT);
- /* Zero out the context. */
- for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
- CTX_WR(sc, 0x00, offset, 0);
- }
+ REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
+ REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
- REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
- REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
+ /* Zero out the context. */
+ for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
+ CTX_WR(sc, vcid_addr, offset, 0);
+ }
}
}
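The rewritten loop now programs CTX_SIZE / PHY_CTX_SIZE page-table entries per virtual CID and zeroes each PHY_CTX_SIZE-byte chunk, instead of a single entry as before. A hypothetical sanity check that could sit under BCE_DEBUG, assuming the macros keep their usual header definitions:

/* Sketch only: the mapping loop assumes CTX_SIZE is an even multiple
 * of PHY_CTX_SIZE; a partial trailing chunk would never be zeroed. */
KASSERT((CTX_SIZE % PHY_CTX_SIZE) == 0,
    ("%s(): CTX_SIZE is not a multiple of PHY_CTX_SIZE!", __FUNCTION__));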
@@ -3182,6 +3459,7 @@ bce_stop(struct bce_softc *sc)
bce_disable_intr(sc);
/* Free RX buffers. */
+ bce_free_pg_chain(sc);
bce_free_rx_chain(sc);
/* Free TX buffers. */
@@ -3353,7 +3631,7 @@ bce_chipinit(struct bce_softc *sc)
BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
/* Initialize context mapping and zero out the quick contexts. */
- bce_init_context(sc);
+ bce_init_ctx(sc);
/* Initialize the on-boards CPUs */
bce_init_cpus(sc);
@@ -3495,32 +3773,25 @@ bce_blockinit_exit:
/****************************************************************************/
-/* Encapsulate an mbuf cluster into the rx_bd chain. */
-/* */
-/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
-/* This routine will map an mbuf cluster into 1 or more rx_bd's as */
-/* necessary. */
-/* */
-/* Todo: Consider writing the hardware mailboxes here to make rx_bd's */
-/* available to the hardware as soon as possible. */
+/* Encapsulate an mbuf into the rx_bd chain. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
-bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
- u32 *prod_bseq)
+bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
+ u16 *chain_prod, u32 *prod_bseq)
{
bus_dmamap_t map;
bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
struct mbuf *m_new = NULL;
struct rx_bd *rxbd;
- int i, nsegs, error, rc = 0;
+ int nsegs, error, rc = 0;
#ifdef BCE_DEBUG
u16 debug_chain_prod = *chain_prod;
#endif
- DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
+ DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Entering %s()\n",
__FUNCTION__);
/* Make sure the inputs are valid. */
@@ -3528,67 +3799,44 @@ bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
BCE_PRINTF("%s(%d): RX producer out of range: 0x%04X > 0x%04X\n",
__FILE__, __LINE__, *chain_prod, (u16) MAX_RX_BD));
- DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
+ DBPRINT(sc, BCE_VERBOSE, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
"prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
+ /* Update some debug statistic counters */
+ DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
+ sc->rx_low_watermark = sc->free_rx_bd);
+ DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
+
/* Check whether this is a new mbuf allocation. */
if (m == NULL) {
/* Simulate an mbuf allocation failure. */
DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
sc->mbuf_alloc_failed++;
- sc->mbuf_sim_alloc_failed++;
+ sc->debug_mbuf_sim_alloc_failed++;
rc = ENOBUFS;
- goto bce_get_buf_exit);
+ goto bce_get_rx_buf_exit);
/* This is a new mbuf allocation. */
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
if (m_new == NULL) {
-
- DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf header allocation failed!\n",
- __FILE__, __LINE__);
-
sc->mbuf_alloc_failed++;
-
rc = ENOBUFS;
- goto bce_get_buf_exit;
+ goto bce_get_rx_buf_exit;
}
- DBRUNIF(1, sc->rx_mbuf_alloc++);
-
- /* Simulate an mbuf cluster allocation failure. */
- DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
- m_freem(m_new);
- sc->rx_mbuf_alloc--;
- sc->mbuf_alloc_failed++;
- sc->mbuf_sim_alloc_failed++;
- rc = ENOBUFS;
- goto bce_get_buf_exit);
-
- /* Attach a cluster to the mbuf. */
- m_cljget(m_new, M_DONTWAIT, sc->mbuf_alloc_size);
- if (!(m_new->m_flags & M_EXT)) {
-
- DBPRINT(sc, BCE_WARN, "%s(%d): RX mbuf chain allocation failed!\n",
- __FILE__, __LINE__);
-
- m_freem(m_new);
- DBRUNIF(1, sc->rx_mbuf_alloc--);
-
- sc->mbuf_alloc_failed++;
- rc = ENOBUFS;
- goto bce_get_buf_exit;
- }
-
- /* Initialize the mbuf cluster. */
- m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
+ DBRUN(sc->debug_rx_mbuf_alloc++);
} else {
/* Reuse an existing mbuf. */
m_new = m;
- m_new->m_len = m_new->m_pkthdr.len = sc->mbuf_alloc_size;
- m_new->m_data = m_new->m_ext.ext_buf;
}
+ M_ASSERTPKTHDR(m_new);
+
+ m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
+
+ /* ToDo: Consider calling m_fragment() to test error handling. */
+
/* Map the mbuf cluster into device memory. */
map = sc->rx_mbuf_map[*chain_prod];
error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, map, m_new,
@@ -3600,69 +3848,150 @@ bce_get_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod, u16 *chain_prod,
__FILE__, __LINE__);
m_freem(m_new);
- DBRUNIF(1, sc->rx_mbuf_alloc--);
+ DBRUN(sc->debug_rx_mbuf_alloc--);
rc = ENOBUFS;
- goto bce_get_buf_exit;
+ goto bce_get_rx_buf_exit;
}
+
+ /* All mbufs must map to a single segment. */
+ KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
+ __FUNCTION__, nsegs));
- /* Make sure there is room in the receive chain. */
- if (nsegs > sc->free_rx_bd) {
- bus_dmamap_unload(sc->rx_mbuf_tag, map);
-
- m_freem(m_new);
- DBRUNIF(1, sc->rx_mbuf_alloc--);
-
- rc = EFBIG;
- goto bce_get_buf_exit;
- }
-
-#ifdef BCE_DEBUG
- /* Track the distribution of buffer segments. */
- sc->rx_mbuf_segs[nsegs]++;
-#endif
-
- /* Update some debug statistic counters */
- DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
- sc->rx_low_watermark = sc->free_rx_bd);
- DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
+ /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
- /* Setup the rx_bd for the first segment. */
+ /* Setup the rx_bd for the segment. */
rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr));
rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr));
rxbd->rx_bd_len = htole32(segs[0].ds_len);
- rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
+ rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
*prod_bseq += segs[0].ds_len;
- for (i = 1; i < nsegs; i++) {
+ /* Save the mbuf and update our counter. */
+ sc->rx_mbuf_ptr[*chain_prod] = m_new;
+ sc->free_rx_bd -= nsegs;
+
+ DBRUNMSG(BCE_EXCESSIVE, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
+ nsegs));
+
+ DBPRINT(sc, BCE_VERBOSE, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
+ "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
- *prod = NEXT_RX_BD(*prod);
- *chain_prod = RX_CHAIN_IDX(*prod);
+bce_get_rx_buf_exit:
+ DBPRINT(sc, (BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD), "Exiting %s()\n",
+ __FUNCTION__);
- rxbd = &sc->rx_bd_chain[RX_PAGE(*chain_prod)][RX_IDX(*chain_prod)];
+ return(rc);
+}
- rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
- rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
- rxbd->rx_bd_len = htole32(segs[i].ds_len);
- rxbd->rx_bd_flags = 0;
- *prod_bseq += segs[i].ds_len;
+
+/****************************************************************************/
+/* Encapsulate an mbuf cluster into the page chain. */
+/* */
+/* Returns: */
+/* 0 for success, positive value for failure. */
+/****************************************************************************/
+static int
+bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
+ u16 *prod_idx)
+{
+ bus_dmamap_t map;
+ bus_addr_t busaddr;
+ struct mbuf *m_new = NULL;
+ struct rx_bd *pgbd;
+ int error, rc = 0;
+#ifdef BCE_DEBUG
+ u16 debug_prod_idx = *prod_idx;
+#endif
+
+ DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Entering %s()\n",
+ __FUNCTION__);
+
+ /* Make sure the inputs are valid. */
+ DBRUNIF((*prod_idx > MAX_PG_BD),
+ BCE_PRINTF("%s(%d): page producer out of range: 0x%04X > 0x%04X\n",
+ __FILE__, __LINE__, *prod_idx, (u16) MAX_PG_BD));
+
+ DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, "
+ "chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
+
+ /* Update counters if we've hit a new low or run out of pages. */
+ DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
+ sc->pg_low_watermark = sc->free_pg_bd);
+ DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
+
+ /* Check whether this is a new mbuf allocation. */
+ if (m == NULL) {
+
+ /* Simulate an mbuf allocation failure. */
+ DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
+ sc->mbuf_alloc_failed++;
+ sc->debug_mbuf_sim_alloc_failed++;
+ rc = ENOBUFS;
+ goto bce_get_pg_buf_exit);
+
+ /* This is a new mbuf allocation. */
+ m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
+ if (m_new == NULL) {
+ sc->mbuf_alloc_failed++;
+ rc = ENOBUFS;
+ goto bce_get_pg_buf_exit;
+ }
+
+ DBRUN(sc->debug_pg_mbuf_alloc++);
+ } else {
+ /* Reuse an existing mbuf. */
+ m_new = m;
+ m_new->m_data = m_new->m_ext.ext_buf;
}
- rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
+ m_new->m_len = sc->pg_bd_mbuf_alloc_size;
+
+ /* ToDo: Consider calling m_fragment() to test error handling. */
+
+ /* Map the mbuf cluster into device memory. */
+ map = sc->pg_mbuf_map[*prod_idx];
+ error = bus_dmamap_load(sc->pg_mbuf_tag, map, mtod(m_new, void *),
+ sc->pg_bd_mbuf_alloc_size, bce_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
+
+ /* Handle any mapping errors. */
+ if (error) {
+ BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
+ __FILE__, __LINE__);
+
+ m_freem(m_new);
+ DBRUN(sc->debug_pg_mbuf_alloc--);
+
+ rc = ENOBUFS;
+ goto bce_get_pg_buf_exit;
+ }
+
+ /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREWRITE) here? */
+
+ /*
+ * The page chain uses the same rx_bd data structure
+ * as the receive chain but doesn't require a byte sequence (bseq).
+ */
+ pgbd = &sc->pg_bd_chain[PG_PAGE(*prod_idx)][PG_IDX(*prod_idx)];
+
+ pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(busaddr));
+ pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(busaddr));
+ pgbd->rx_bd_len = htole32(sc->pg_bd_mbuf_alloc_size);
+ pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
/* Save the mbuf and update our counter. */
- sc->rx_mbuf_ptr[*chain_prod] = m_new;
- sc->free_rx_bd -= nsegs;
+ sc->pg_mbuf_ptr[*prod_idx] = m_new;
+ sc->free_pg_bd--;
- DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod,
- nsegs));
+ DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx,
+ 1));
- DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
- "prod_bseq = 0x%08X\n", __FUNCTION__, *prod, *chain_prod, *prod_bseq);
+ DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, "
+ "prod_idx = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
-bce_get_buf_exit:
+bce_get_pg_buf_exit:
DBPRINT(sc, (BCE_VERBOSE_RESET | BCE_VERBOSE_RECV), "Exiting %s()\n",
__FUNCTION__);
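Note that, unlike bce_get_rx_buf() above, this function loads the bare cluster with plain bus_dmamap_load(). A sketch of why one segment is guaranteed, assuming the pg_mbuf_tag parameters created in bce_dma_alloc():

/* The pg_mbuf_tag was created with nsegments == 1 and a maximum segment
 * size of at least pg_bd_mbuf_alloc_size, so a contiguous cluster from
 * m_getcl() maps to exactly one DMA segment (the load fails rather than
 * splitting) and each page buffer consumes exactly one rx_bd. */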
@@ -3691,8 +4020,8 @@ bce_init_tx_chain(struct bce_softc *sc)
sc->tx_prod_bseq = 0;
sc->used_tx_bd = 0;
sc->max_tx_bd = USABLE_TX_BD;
- DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
- DBRUNIF(1, sc->tx_full_count = 0);
+ DBRUN(sc->tx_hi_watermark = USABLE_TX_BD);
+ DBRUN(sc->tx_full_count = 0);
/*
 * The NetXtreme II supports a linked-list structure called
@@ -3734,7 +4063,7 @@ bce_init_tx_chain(struct bce_softc *sc)
val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
- DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
+ DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
@@ -3763,7 +4092,7 @@ bce_free_tx_chain(struct bce_softc *sc)
BUS_DMASYNC_POSTWRITE);
m_freem(sc->tx_mbuf_ptr[i]);
sc->tx_mbuf_ptr[i] = NULL;
- DBRUNIF(1, sc->tx_mbuf_alloc--);
+ DBRUN(sc->debug_tx_mbuf_alloc--);
}
}
@@ -3774,72 +4103,16 @@ bce_free_tx_chain(struct bce_softc *sc)
sc->used_tx_bd = 0;
/* Check if we lost any mbufs in the process. */
- DBRUNIF((sc->tx_mbuf_alloc),
+ DBRUNIF((sc->debug_tx_mbuf_alloc),
BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
"from tx chain!\n",
- __FILE__, __LINE__, sc->tx_mbuf_alloc));
+ __FILE__, __LINE__, sc->debug_tx_mbuf_alloc));
DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
/****************************************************************************/
-/* Add mbufs to the RX chain until its full or an mbuf allocation error */
-/* occurs. */
-/* */
-/* Returns: */
-/* Nothing */
-/****************************************************************************/
-static void
-bce_fill_rx_chain(struct bce_softc *sc)
-{
- u16 prod, chain_prod;
- u32 prod_bseq;
-#ifdef BCE_DEBUG
- int rx_mbuf_alloc_before, free_rx_bd_before;
-#endif
-
- DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
-
- prod = sc->rx_prod;
- prod_bseq = sc->rx_prod_bseq;
-
-#ifdef BCE_DEBUG
- rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
- free_rx_bd_before = sc->free_rx_bd;
-#endif
-
- /* Keep filling the RX chain until it's full. */
- while (sc->free_rx_bd > 0) {
- chain_prod = RX_CHAIN_IDX(prod);
- if (bce_get_buf(sc, NULL, &prod, &chain_prod, &prod_bseq)) {
- /* Bail out if we can't add an mbuf to the chain. */
- break;
- }
- prod = NEXT_RX_BD(prod);
- }
-
-#if 0
- DBRUNIF((sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
- BCE_PRINTF("%s(): Installed %d mbufs in %d rx_bd entries.\n",
- __FUNCTION__, (sc->rx_mbuf_alloc - rx_mbuf_alloc_before),
- (free_rx_bd_before - sc->free_rx_bd)));
-#endif
-
- /* Save the RX chain producer index. */
- sc->rx_prod = prod;
- sc->rx_prod_bseq = prod_bseq;
-
- /* Tell the chip about the waiting rx_bd's. */
- REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
- REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
-
- DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
-
-}
-
-
-/****************************************************************************/
/* Allocate memory and initialize the RX data structures. */
/* */
/* Returns: */
@@ -3859,9 +4132,9 @@ bce_init_rx_chain(struct bce_softc *sc)
sc->rx_cons = 0;
sc->rx_prod_bseq = 0;
sc->free_rx_bd = USABLE_RX_BD;
- sc->max_rx_bd = USABLE_RX_BD;
- DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
- DBRUNIF(1, sc->rx_empty_count = 0);
+ sc->max_rx_bd = USABLE_RX_BD;
+ DBRUN(sc->rx_low_watermark = sc->max_rx_bd);
+ DBRUN(sc->rx_empty_count = 0);
/* Initialize the RX next pointer chain entries. */
for (i = 0; i < RX_PAGES; i++) {
@@ -3892,11 +4165,9 @@ bce_init_rx_chain(struct bce_softc *sc)
val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
-
/* Fill up the RX chain. */
bce_fill_rx_chain(sc);
-
for (i = 0; i < RX_PAGES; i++) {
bus_dmamap_sync(
sc->rx_bd_chain_tag,
@@ -3904,7 +4175,7 @@ bce_init_rx_chain(struct bce_softc *sc)
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
- DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
+ DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
@@ -3913,6 +4184,50 @@ bce_init_rx_chain(struct bce_softc *sc)
/****************************************************************************/
+/* Add mbufs to the RX chain until it's full or an mbuf allocation error   */
+/* occurs. */
+/* */
+/* Returns: */
+/* Nothing */
+/****************************************************************************/
+static void
+bce_fill_rx_chain(struct bce_softc *sc)
+{
+ u16 prod, prod_idx;
+ u32 prod_bseq;
+
+ DBPRINT(sc, BCE_VERBOSE_RECV, "Entering %s()\n", __FUNCTION__);
+
+ prod = sc->rx_prod;
+ prod_bseq = sc->rx_prod_bseq;
+
+ /* Keep filling the RX chain until it's full. */
+ while (sc->free_rx_bd > 0) {
+ prod_idx = RX_CHAIN_IDX(prod);
+ if (bce_get_rx_buf(sc, NULL, &prod, &prod_idx, &prod_bseq)) {
+ /* Bail out if we can't add an mbuf to the chain. */
+ break;
+ }
+ prod = NEXT_RX_BD(prod);
+ }
+
+ /* Save the RX chain producer index. */
+ sc->rx_prod = prod;
+ sc->rx_prod_bseq = prod_bseq;
+
+ DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
+ BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
+ __FUNCTION__, sc->rx_prod));
+
+ /* Tell the chip about the waiting rx_bd's. */
+ REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
+ REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
+
+ DBPRINT(sc, BCE_VERBOSE_RECV, "Exiting %s()\n", __FUNCTION__);
+}
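A worked example of the producer arithmetic (chain-page geometry assumed from if_bcereg.h): NEXT_RX_BD() skips the next-page-pointer slot at the end of each chain page, which is what the DBRUNIF() sanity check above verifies:

/* With 255 usable rx_bd entries per chain page:
 *	prod = 253 -> 254	normal increment
 *	prod = 254 -> 256	slot 255 (next-page pointer) skipped
 * so prod never rests on a page-boundary entry. */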
+
+
+/****************************************************************************/
/* Free memory and clear the RX data structures. */
/* */
/* Returns: */
@@ -3922,15 +4237,11 @@ static void
bce_free_rx_chain(struct bce_softc *sc)
{
int i;
-#ifdef BCE_DEBUG
- int rx_mbuf_alloc_before;
-#endif
DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
-#ifdef BCE_DEBUG
- rx_mbuf_alloc_before = sc->rx_mbuf_alloc;
-#endif
+ /* Clear the jumbo page chain support. */
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, 0);
/* Free any mbufs still in the RX mbuf chain. */
for (i = 0; i < TOTAL_RX_BD; i++) {
@@ -3940,14 +4251,10 @@ bce_free_rx_chain(struct bce_softc *sc)
BUS_DMASYNC_POSTREAD);
m_freem(sc->rx_mbuf_ptr[i]);
sc->rx_mbuf_ptr[i] = NULL;
- DBRUNIF(1, sc->rx_mbuf_alloc--);
+ DBRUN(sc->debug_rx_mbuf_alloc--);
}
}
- DBRUNIF((rx_mbuf_alloc_before - sc->rx_mbuf_alloc),
- BCE_PRINTF("%s(): Released %d mbufs.\n",
- __FUNCTION__, (rx_mbuf_alloc_before - sc->rx_mbuf_alloc)));
-
/* Clear each RX chain page. */
for (i = 0; i < RX_PAGES; i++)
bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
@@ -3955,9 +4262,160 @@ bce_free_rx_chain(struct bce_softc *sc)
sc->free_rx_bd = sc->max_rx_bd;
/* Check if we lost any mbufs in the process. */
- DBRUNIF((sc->rx_mbuf_alloc),
- BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs from rx chain!\n",
- __FILE__, __LINE__, sc->rx_mbuf_alloc));
+ DBRUNIF((sc->debug_rx_mbuf_alloc),
+ BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
+ __FUNCTION__, sc->debug_rx_mbuf_alloc));
+
+ DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
+}
+
+
+/****************************************************************************/
+/* Allocate memory and initialize the page data structures. */
+/* Assumes that bce_init_rx_chain() has not already been called. */
+/* */
+/* Returns: */
+/* 0 for success, positive value for failure. */
+/****************************************************************************/
+static int
+bce_init_pg_chain(struct bce_softc *sc)
+{
+ struct rx_bd *pgbd;
+ int i, rc = 0;
+ u32 val;
+
+ DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
+
+ /* Initialize the page producer and consumer indices. */
+ sc->pg_prod = 0;
+ sc->pg_cons = 0;
+ sc->free_pg_bd = USABLE_PG_BD;
+ sc->max_pg_bd = USABLE_PG_BD;
+ DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
+ DBRUN(sc->pg_empty_count = 0);
+
+ /* Initialize the page next pointer chain entries. */
+ for (i = 0; i < PG_PAGES; i++) {
+ int j;
+
+ pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
+
+ /* Check if we've reached the last page. */
+ if (i == (PG_PAGES - 1))
+ j = 0;
+ else
+ j = i + 1;
+
+ /* Setup the chain page pointers. */
+ pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
+ pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
+ }
+
+ /* Point the hardware to the first page in the page chain. */
+ val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_HI, val);
+ val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_PG_BDHADDR_LO, val);
+
+ /* Configure the rx_bd and page chain mbuf cluster size. */
+ val = (sc->rx_bd_mbuf_alloc_size << 16) | sc->pg_bd_mbuf_alloc_size;
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_PG_BUF_SIZE, val);
+
+ /* Configure the context reserved for jumbo support. */
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RBDC_KEY,
+ BCE_L2CTX_RBDC_JUMBO_KEY);
+
+ /* Fill up the page chain. */
+ bce_fill_pg_chain(sc);
+
+ for (i = 0; i < PG_PAGES; i++) {
+ bus_dmamap_sync(
+ sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i],
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
+
+ DBRUNMSG(BCE_VERBOSE_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
+ DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
+
+ return(rc);
+}
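The next-pointer entries written above link the page chain into a ring, just as for the TX and RX chains: the last rx_bd slot of page i holds the bus address of page i + 1, and the last page points back to page 0. A hypothetical BCE_DEBUG check of ring closure:

/* Sketch only: verify the final next-page pointer wraps to page 0. */
pgbd = &sc->pg_bd_chain[PG_PAGES - 1][USABLE_PG_BD_PER_PAGE];
KASSERT(pgbd->rx_bd_haddr_lo ==
    htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[0])),
    ("%s(): page chain ring is not closed!", __FUNCTION__));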
+
+/****************************************************************************/
+/* Add mbufs to the page chain until it's full or an mbuf allocation error */
+/* occurs. */
+/* */
+/* Returns: */
+/* Nothing */
+/****************************************************************************/
+static void
+bce_fill_pg_chain(struct bce_softc *sc)
+{
+ u16 prod, prod_idx;
+
+ DBPRINT(sc, BCE_EXCESSIVE_RECV, "Entering %s()\n", __FUNCTION__);
+
+ prod = sc->pg_prod;
+
+ /* Keep filling the page chain until it's full. */
+ while (sc->free_pg_bd > 0) {
+ prod_idx = PG_CHAIN_IDX(prod);
+ if (bce_get_pg_buf(sc, NULL, &prod, &prod_idx)) {
+ /* Bail out if we can't add an mbuf to the chain. */
+ break;
+ }
+ prod = NEXT_PG_BD(prod);
+ }
+
+ /* Save the page chain producer index. */
+ sc->pg_prod = prod;
+
+ DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
+ BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
+ __FUNCTION__, sc->pg_prod));
+
+ /* Tell the chip about the new rx_bd's in the page chain. */
+ REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_PG_BDIDX, sc->pg_prod);
+
+ DBPRINT(sc, BCE_EXCESSIVE_RECV, "Exiting %s()\n", __FUNCTION__);
+}
+
+
+/****************************************************************************/
+/* Free memory and clear the page data structures. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_free_pg_chain(struct bce_softc *sc)
+{
+ int i;
+
+ DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
+
+ /* Free any mbufs still in the mbuf page chain. */
+ for (i = 0; i < TOTAL_PG_BD; i++) {
+ if (sc->pg_mbuf_ptr[i] != NULL) {
+ if (sc->pg_mbuf_map[i] != NULL)
+ bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i],
+ BUS_DMASYNC_POSTREAD);
+ m_freem(sc->pg_mbuf_ptr[i]);
+ sc->pg_mbuf_ptr[i] = NULL;
+ DBRUN(sc->debug_pg_mbuf_alloc--);
+ }
+ }
+
+	/* Clear each page chain page. */
+ for (i = 0; i < PG_PAGES; i++)
+ bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
+
+ sc->free_pg_bd = sc->max_pg_bd;
+
+ /* Check if we lost any mbufs in the process. */
+ DBRUNIF((sc->debug_pg_mbuf_alloc),
+ BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
+ __FUNCTION__, sc->debug_pg_mbuf_alloc));
DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __FUNCTION__);
}
@@ -4060,7 +4518,7 @@ bce_phy_intr(struct bce_softc *sc)
/* Handle any changes if the link state has changed. */
if (new_link_state != old_link_state) {
- DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
+ DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
sc->bce_link = 0;
callout_stop(&sc->bce_tick_callout);
@@ -4086,6 +4544,24 @@ bce_phy_intr(struct bce_softc *sc)
/****************************************************************************/
+/* Reads the receive consumer value from the status block (skipping over */
+/* chain page pointer if necessary). */
+/* */
+/* Returns: */
+/* hw_cons */
+/****************************************************************************/
+static inline u16
+bce_get_hw_rx_cons(struct bce_softc *sc)
+{
+ u16 hw_cons = sc->status_block->status_rx_quick_consumer_index0;
+
+ if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
+ hw_cons++;
+
+ return hw_cons;
+}
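A worked example of the skip (per-page BD count assumed from if_bcereg.h): the last rx_bd of every chain page is a next-page pointer that never carries a buffer, so a consumer value landing on that slot must be advanced past it:

/* With 255 usable rx_bd entries per page (USABLE_RX_BD_PER_PAGE == 255):
 *	hw_cons = 255:	(255 & 255) == 255, so hw_cons++ -> 256,
 *			the first usable entry of the next page. */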
+
+/****************************************************************************/
/* Handles received frame interrupt events. */
/* */
/* Returns: */
@@ -4094,63 +4570,47 @@ bce_phy_intr(struct bce_softc *sc)
static void
bce_rx_intr(struct bce_softc *sc)
{
- struct status_block *sblk = sc->status_block;
- struct ifnet *ifp = sc->bce_ifp;
- u16 hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
- u32 sw_prod_bseq;
- struct l2_fhdr *l2fhdr;
-
- DBRUNIF(1, sc->rx_interrupts++);
+ struct ifnet *ifp = sc->bce_ifp;
+ struct l2_fhdr *l2fhdr;
+ unsigned int pages, pkt_len, rem_len;
+ u16 sw_rx_cons, sw_rx_cons_idx, sw_pg_cons, sw_pg_cons_idx, hw_rx_cons;
+ u32 status;
+
+#ifdef BCE_DEBUG
+ u32 rx_intr_start, rx_intr_end;
+ rx_intr_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
+ sc->rx_interrupts++;
+#endif
/* Prepare the RX chain pages to be accessed by the host CPU. */
for (int i = 0; i < RX_PAGES; i++)
bus_dmamap_sync(sc->rx_bd_chain_tag,
sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
- /* Get the hardware's view of the RX consumer index. */
- hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
- if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
- hw_cons++;
+ /* Prepare the page chain pages to be accessed by the host CPU. */
+ for (int i = 0; i < PG_PAGES; i++)
+ bus_dmamap_sync(sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTWRITE);
- /* Get working copies of the driver's view of the RX indices. */
- sw_cons = sc->rx_cons;
- sw_prod = sc->rx_prod;
- sw_prod_bseq = sc->rx_prod_bseq;
-
- DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
- "sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
- __FUNCTION__, sw_prod, sw_cons, sw_prod_bseq);
+ /* Get the hardware's view of the RX consumer index. */
+ hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
- /* Prevent speculative reads from getting ahead of the status block. */
- bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
- BUS_SPACE_BARRIER_READ);
+ /* Get working copies of the driver's view of the consumer indices. */
+ sw_rx_cons = sc->rx_cons;
+ sw_pg_cons = sc->pg_cons;
/* Update some debug statistics counters */
DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
sc->rx_low_watermark = sc->free_rx_bd);
- DBRUNIF((sc->free_rx_bd == USABLE_RX_BD), sc->rx_empty_count++);
-
- /* Scan through the receive chain as long as there is work to do */
- while (sw_cons != hw_cons) {
- struct mbuf *m;
- struct rx_bd *rxbd;
- unsigned int len;
- u32 status;
+ DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++);
- /* Clear the mbuf pointer. */
- m = NULL;
+ /* Scan through the receive chain as long as there is work to do */
+ /* ToDo: Consider setting a limit on the number of packets processed. */
+ while (sw_rx_cons != hw_rx_cons) {
+ struct mbuf *m0;
/* Convert the producer/consumer indices to an actual rx_bd index. */
- sw_chain_cons = RX_CHAIN_IDX(sw_cons);
- sw_chain_prod = RX_CHAIN_IDX(sw_prod);
-
- /* Get the used rx_bd. */
- rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)][RX_IDX(sw_chain_cons)];
- sc->free_rx_bd++;
-
- DBRUN(BCE_VERBOSE_RECV,
- BCE_PRINTF("%s(): ", __FUNCTION__);
- bce_dump_rxbd(sc, sw_chain_cons, rxbd));
+ sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons);
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING) {
@@ -4160,192 +4620,244 @@ bce_rx_intr(struct bce_softc *sc)
}
#endif
- /* The mbuf is stored with the last rx_bd entry of a packet. */
- if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
-
- /* Validate that this is the last rx_bd. */
- DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
- BCE_PRINTF("%s(%d): Unexpected mbuf found in rx_bd[0x%04X]!\n",
- __FILE__, __LINE__, sw_chain_cons);
- bce_breakpoint(sc));
-
+ /* Unmap the mbuf from DMA space. */
+ bus_dmamap_sync(sc->rx_mbuf_tag,
+ sc->rx_mbuf_map[sw_rx_cons_idx],
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->rx_mbuf_tag,
+ sc->rx_mbuf_map[sw_rx_cons_idx]);
+
+ /* Remove the mbuf from the RX chain. */
+ m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx];
+ sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL;
+ DBRUN(sc->debug_rx_mbuf_alloc--);
+ sc->free_rx_bd++;
+
+ /*
+		 * Frames received on the NetXtreme II are prepended
+ * with an l2_fhdr structure which provides status
+ * information about the received frame (including
+ * VLAN tags and checksum info). The frames are also
+ * automatically adjusted to align the IP header
+ * (i.e. two null bytes are inserted before the
+ * Ethernet header). As a result the data DMA'd by
+ * the controller into the mbuf is as follows:
+ * +---------+-----+---------------------+-----+
+ * | l2_fhdr | pad | packet data | FCS |
+ * +---------+-----+---------------------+-----+
+ * The l2_fhdr needs to be checked and skipped and
+ * the FCS needs to be stripped before sending the
+ * packet up the stack.
+ */
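		/*
		 * For example: a minimum-size 64-byte Ethernet frame
		 * arrives as l2_fhdr + 2-byte pad + 60 bytes of packet
		 * data + 4-byte FCS, with l2_fhdr_pkt_len reporting 64
		 * (data plus FCS); after the m_adj() calls below only
		 * the 60 data bytes remain.
		 */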
+ l2fhdr = mtod(m0, struct l2_fhdr *);
+
+ /* Get the packet data + FCS length and the status. */
+ pkt_len = l2fhdr->l2_fhdr_pkt_len;
+ status = l2fhdr->l2_fhdr_status;
+
+ /*
+ * Skip over the l2_fhdr and pad, resulting in the
+ * following data in the mbuf:
+ * +---------------------+-----+
+ * | packet data | FCS |
+ * +---------------------+-----+
+ */
+ m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
+
+ /*
+ * Check whether the received frame fits in a single
+ * mbuf or not (i.e. packet data + FCS <=
+ * sc->rx_bd_mbuf_alloc_size bytes).
+ */
+ if (pkt_len > m0->m_len) {
/*
- * ToDo: If the received packet is small enough
- * to fit into a single, non-M_EXT mbuf,
- * allocate a new mbuf here, copy the data to
- * that mbuf, and recycle the mapped jumbo frame.
+ * The received frame is larger than a single mbuf.
+ * If the frame was a TCP frame then only the TCP
+ * header is placed in the mbuf, the remaining
+ * payload (including FCS) is placed in the page
+ * chain, the SPLIT flag is set, and the header
+ * length is placed in the IP checksum field.
+ * If the frame is not a TCP frame then the mbuf
+ * is filled and the remaining bytes are placed
+ * in the page chain.
*/
+ if (status & L2_FHDR_STATUS_SPLIT)
+ m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
+
+ rem_len = pkt_len - m0->m_len;
+
+ /* Calculate how many pages to pull off the page chain. */
+ /* ToDo: The following assumes that mbuf clusters are 2KB. */
+ pages = (rem_len + sc->pg_bd_mbuf_alloc_size) >> 11;
+
+ /* Pull mbufs off the page chain for the remaining data. */
+ while (rem_len > 0) {
+ struct mbuf *m_pg;
+
+ sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
+
+ /* Remove the mbuf from the page chain. */
+ m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
+ sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
+ DBRUN(sc->debug_pg_mbuf_alloc--);
+ sc->free_pg_bd++;
+
+ /* Unmap the page chain mbuf from DMA space. */
+ bus_dmamap_sync(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[sw_pg_cons_idx],
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[sw_pg_cons_idx]);
+
+ /* Adjust the mbuf length. */
+ if (rem_len < m_pg->m_len) {
+ /* The mbuf chain is complete. */
+ m_pg->m_len = rem_len;
+ rem_len = 0;
+ } else {
+ /* More packet data is waiting. */
+ rem_len -= m_pg->m_len;
+ }
+
+ /* Concatenate the mbuf cluster to the mbuf. */
+ m_cat(m0, m_pg);
+
+ sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
+ }
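			/*
			 * For example, with a 2048-byte page buffer size and
			 * rem_len of 3000 bytes the loop above pulls two
			 * clusters: the first is consumed whole (rem_len
			 * 3000 -> 952), the second is trimmed to m_len = 952,
			 * and m_cat() completes the chain.
			 */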
- /* Unmap the mbuf from DMA space. */
- bus_dmamap_sync(sc->rx_mbuf_tag,
- sc->rx_mbuf_map[sw_chain_cons],
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->rx_mbuf_tag,
- sc->rx_mbuf_map[sw_chain_cons]);
-
- /* Remove the mbuf from the RX chain. */
- m = sc->rx_mbuf_ptr[sw_chain_cons];
- sc->rx_mbuf_ptr[sw_chain_cons] = NULL;
-
+ /* Set the total packet length. */
+ m0->m_pkthdr.len = pkt_len;
+
+ } else {
/*
- * Frames received on the NetXteme II are prepended
- * with an l2_fhdr structure which provides status
- * information about the received frame (including
- * VLAN tags and checksum info). The frames are also
- * automatically adjusted to align the IP header
- * (i.e. two null bytes are inserted before the
- * Ethernet header).
+ * The received packet is small and fits in a
+ * single mbuf (i.e. the l2_fhdr + pad + packet +
+ * FCS <= MHLEN). In other words, the packet is
+ * 154 bytes or less in size.
*/
- l2fhdr = mtod(m, struct l2_fhdr *);
-
- len = l2fhdr->l2_fhdr_pkt_len;
- status = l2fhdr->l2_fhdr_status;
-
- DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
- BCE_PRINTF("Simulating l2_fhdr status error.\n");
- status = status | L2_FHDR_ERRORS_PHY_DECODE);
-
- /* Watch for unusual sized frames. */
- DBRUNIF(((len < BCE_MIN_MTU) || (len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
- BCE_PRINTF("%s(%d): Unusual frame size found. "
- "Min(%d), Actual(%d), Max(%d)\n",
- __FILE__, __LINE__, (int) BCE_MIN_MTU,
- len, (int) BCE_MAX_JUMBO_ETHER_MTU_VLAN);
- bce_dump_mbuf(sc, m);
- bce_breakpoint(sc));
-
- len -= ETHER_CRC_LEN;
-
- /* Check the received frame for errors. */
- if (status & (L2_FHDR_ERRORS_BAD_CRC |
- L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
- L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
-
- /* Log the error and release the mbuf. */
- ifp->if_ierrors++;
- DBRUNIF(1, sc->l2fhdr_status_errors++);
-
- /* Todo: Reuse the mbuf to improve performance. */
-
- m_freem(m);
- m = NULL;
- goto bce_rx_int_next_rx;
- }
- /* Skip over the l2_fhdr when passing the data up the stack. */
- m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
-
- /* Adjust the packet length to match the received data. */
- m->m_pkthdr.len = m->m_len = len;
-
- /* Send the packet to the appropriate interface. */
- m->m_pkthdr.rcvif = ifp;
-
- DBRUN(BCE_VERBOSE_RECV,
- struct ether_header *eh;
- eh = mtod(m, struct ether_header *);
- BCE_PRINTF("%s(): to: %6D, from: %6D, type: 0x%04X\n",
- __FUNCTION__, eh->ether_dhost, ":",
- eh->ether_shost, ":", htons(eh->ether_type)));
-
- /* Validate the checksum if offload enabled. */
- if (ifp->if_capenable & IFCAP_RXCSUM) {
-
- /* Check for an IP datagram. */
- if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
- m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
-
- /* Check if the IP checksum is valid. */
- if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
- m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
- else
- DBPRINT(sc, BCE_WARN_SEND,
- "%s(): Invalid IP checksum = 0x%04X!\n",
- __FUNCTION__, l2fhdr->l2_fhdr_ip_xsum);
- }
+ /* Set the total packet length. */
+ m0->m_pkthdr.len = m0->m_len = pkt_len;
+ }
+
+ /* Remove the trailing Ethernet FCS. */
+ m_adj(m0, -ETHER_CRC_LEN);
+
+ /* Check that the resulting mbuf chain is valid. */
+ DBRUN(m_sanity(m0, FALSE));
+
+ DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
+ BCE_PRINTF("Simulating l2_fhdr status error.\n");
+ status = status | L2_FHDR_ERRORS_PHY_DECODE);
+
+ /* Check the received frame for errors. */
+ if (status & (L2_FHDR_ERRORS_BAD_CRC |
+ L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
+ L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) {
+
+ /* Log the error and release the mbuf. */
+ ifp->if_ierrors++;
+ DBRUN(sc->l2fhdr_status_errors++);
+
+ m_freem(m0);
+ m0 = NULL;
+ goto bce_rx_int_next_rx;
+ }
- /* Check for a valid TCP/UDP frame. */
- if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
- L2_FHDR_STATUS_UDP_DATAGRAM)) {
-
- /* Check for a good TCP/UDP checksum. */
- if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
- L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
- m->m_pkthdr.csum_data =
- l2fhdr->l2_fhdr_tcp_udp_xsum;
- m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
- | CSUM_PSEUDO_HDR);
- } else
- DBPRINT(sc, BCE_WARN_SEND,
- "%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
- __FUNCTION__, l2fhdr->l2_fhdr_tcp_udp_xsum);
+ /* Send the packet to the appropriate interface. */
+ m0->m_pkthdr.rcvif = ifp;
+
+ /* Assume no hardware checksum. */
+ m0->m_pkthdr.csum_flags = 0;
+
+ /* Validate the checksum if offload enabled. */
+ if (ifp->if_capenable & IFCAP_RXCSUM) {
+
+ /* Check for an IP datagram. */
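+			/* (Split frames are skipped here: on those the ip_xsum
+			 *  field carries the header length instead of a checksum,
+			 *  as noted in the l2_fhdr dump format below.) */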
+ if (!(status & L2_FHDR_STATUS_SPLIT) &&
+ (status & L2_FHDR_STATUS_IP_DATAGRAM)) {
+ m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+
+ /* Check if the IP checksum is valid. */
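+				/* (The controller reports the raw ones-complement
+				 *  sum, which is 0xffff for a good header, so
+				 *  XORing with 0xffff yields zero when valid.) */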
+ if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0)
+ m0->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ }
+
+ /* Check for a valid TCP/UDP frame. */
+ if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
+ L2_FHDR_STATUS_UDP_DATAGRAM)) {
+
+ /* Check for a good TCP/UDP checksum. */
+ if ((status & (L2_FHDR_ERRORS_TCP_XSUM |
+ L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
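+				/* (CSUM_DATA_VALID | CSUM_PSEUDO_HDR tells the
+				 *  stack the payload checksum, pseudo-header
+				 *  included, was already verified in hardware.) */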
+ m0->m_pkthdr.csum_data =
+ l2fhdr->l2_fhdr_tcp_udp_xsum;
+ m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
+ | CSUM_PSEUDO_HDR);
}
- }
-
+ }
+ }
- /*
- * If we received a packet with a vlan tag,
- * attach that information to the packet.
- */
- if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
- DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): VLAN tag = 0x%04X\n",
- __FUNCTION__, l2fhdr->l2_fhdr_vlan_tag);
+ /*
+ * If we received a packet with a vlan tag,
+ * attach that information to the packet.
+ */
+ if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
#if __FreeBSD_version < 700000
- VLAN_INPUT_TAG(ifp, m, l2fhdr->l2_fhdr_vlan_tag, continue);
+ VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue);
#else
- m->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
- m->m_flags |= M_VLANTAG;
+ m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag;
+ m0->m_flags |= M_VLANTAG;
#endif
- }
+ }
- /* Pass the mbuf off to the upper layers. */
- ifp->if_ipackets++;
+ /* Pass the mbuf off to the upper layers. */
+ ifp->if_ipackets++;
bce_rx_int_next_rx:
- sw_prod = NEXT_RX_BD(sw_prod);
- }
-
- sw_cons = NEXT_RX_BD(sw_cons);
+ sw_rx_cons = NEXT_RX_BD(sw_rx_cons);
/* If we have a packet, pass it up the stack */
- if (m) {
+ if (m0) {
/* Make sure we don't lose our place when we release the lock. */
- sc->rx_cons = sw_cons;
+ sc->rx_cons = sw_rx_cons;
+ sc->pg_cons = sw_pg_cons;
- DBPRINT(sc, BCE_VERBOSE_RECV, "%s(): Passing received frame up.\n",
- __FUNCTION__);
BCE_UNLOCK(sc);
- (*ifp->if_input)(ifp, m);
- DBRUNIF(1, sc->rx_mbuf_alloc--);
+ (*ifp->if_input)(ifp, m0);
BCE_LOCK(sc);
/* Recover our place. */
- sw_cons = sc->rx_cons;
+ sw_rx_cons = sc->rx_cons;
+ sw_pg_cons = sc->pg_cons;
}
/* Refresh hw_cons to see if there's new work */
- if (sw_cons == hw_cons) {
- hw_cons = sc->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
- if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
- hw_cons++;
- }
-
- /* Prevent speculative reads from getting ahead of the status block. */
- bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
- BUS_SPACE_BARRIER_READ);
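+		/* (bce_get_hw_rx_cons() folds in the chain page boundary
+		 *  skip that was previously done inline here.) */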
+ if (sw_rx_cons == hw_rx_cons)
+ hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
}
- /* No new packets to process. Refill the RX chain and exit. */
- sc->rx_cons = sw_cons;
+ /* No new packets to process. Refill the RX and page chains and exit. */
+ sc->pg_cons = sw_pg_cons;
+ bce_fill_pg_chain(sc);
+
+ sc->rx_cons = sw_rx_cons;
bce_fill_rx_chain(sc);
for (int i = 0; i < RX_PAGES; i++)
bus_dmamap_sync(sc->rx_bd_chain_tag,
sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
+ for (int i = 0; i < PG_PAGES; i++)
+ bus_dmamap_sync(sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
+
DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
- __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
+ __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
+
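+	/* (Debug builds time the RX path with the chip's free-running
+	 *  25MHz timer via BCE_TIME_DELTA.) */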
+ DBRUN(rx_intr_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
+ sc->rx_intr_time += (u64) BCE_TIME_DELTA(rx_intr_start, rx_intr_end));
}
@@ -4361,11 +4873,15 @@ bce_tx_intr(struct bce_softc *sc)
struct status_block *sblk = sc->status_block;
struct ifnet *ifp = sc->bce_ifp;
u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons;
+
+#ifdef BCE_DEBUG
+ u32 tx_intr_start, tx_intr_end;
+ tx_intr_start = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
+ sc->tx_interrupts++;
+#endif
BCE_LOCK_ASSERT(sc);
- DBRUNIF(1, sc->tx_interrupts++);
-
/* Get the hardware's view of the TX consumer index. */
hw_tx_cons = sc->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
@@ -4397,7 +4913,7 @@ bce_tx_intr(struct bce_softc *sc)
(int) MAX_TX_BD);
bce_breakpoint(sc));
- DBRUNIF(1, txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
+ DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
[TX_IDX(sw_tx_chain_cons)]);
DBRUNIF((txbd == NULL),
@@ -4405,7 +4921,7 @@ bce_tx_intr(struct bce_softc *sc)
__FILE__, __LINE__, sw_tx_chain_cons);
bce_breakpoint(sc));
- DBRUN(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
+ DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__);
bce_dump_txbd(sc, sw_tx_chain_cons, txbd));
/*
@@ -4421,7 +4937,7 @@ bce_tx_intr(struct bce_softc *sc)
"txmbuf == NULL!\n", __FILE__, __LINE__);
bce_breakpoint(sc));
- DBRUN(BCE_INFO_SEND,
+ DBRUNMSG(BCE_INFO_SEND,
BCE_PRINTF("%s(): Unloading map/freeing mbuf "
"from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons));
@@ -4432,7 +4948,7 @@ bce_tx_intr(struct bce_softc *sc)
/* Free the mbuf. */
m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]);
sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
- DBRUNIF(1, sc->tx_mbuf_alloc--);
+ DBRUN(sc->debug_tx_mbuf_alloc--);
ifp->if_opackets++;
}
@@ -4463,6 +4979,8 @@ bce_tx_intr(struct bce_softc *sc)
}
sc->tx_cons = sw_tx_cons;
+ DBRUN(tx_intr_end = REG_RD(sc, BCE_TIMER_25MHZ_FREE_RUN);
+ sc->tx_intr_time += (u64) BCE_TIME_DELTA(tx_intr_start, tx_intr_end));
}
@@ -4514,7 +5032,7 @@ static void
bce_init_locked(struct bce_softc *sc)
{
struct ifnet *ifp;
- u32 ether_mtu;
+ u32 ether_mtu = 0;
DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __FUNCTION__);
@@ -4550,40 +5068,39 @@ bce_init_locked(struct bce_softc *sc)
bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN);
bce_set_mac_addr(sc);
- /* Calculate and program the Ethernet MTU size. */
- ether_mtu = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu +
- ETHER_CRC_LEN;
-
- DBPRINT(sc, BCE_INFO_MISC, "%s(): setting mtu = %d\n",__FUNCTION__, ether_mtu);
-
- /*
- * Program the mtu, enabling jumbo frame
- * support if necessary. Also set the mbuf
- * allocation count for RX frames.
- */
- if (ether_mtu > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
- REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
+ /* Calculate and program the hardware Ethernet MTU size. */
+ if (ifp->if_mtu <= sc->pg_bd_mbuf_alloc_size)
+ /* Be generous on receive if we have room. */
+ ether_mtu = sc->pg_bd_mbuf_alloc_size;
+ else
+ ether_mtu = ifp->if_mtu;
+
+ ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
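+	/* (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN add
+	 *  14 + 4 + 4 = 22 bytes of framing overhead.) */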
+
+ DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__,
+ ether_mtu);
+
+ /* Program the mtu, enabling jumbo frame support if necessary. */
+ if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN))
+ REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
+ min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
- sc->mbuf_alloc_size = MJUM9BYTES;
- } else {
+ else
REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
- sc->mbuf_alloc_size = MCLBYTES;
- }
- /* Calculate the RX Ethernet frame size for rx_bd's. */
- sc->max_frame_size = sizeof(struct l2_fhdr) + 2 + ether_mtu + 8;
-
- DBPRINT(sc, BCE_INFO_RECV,
- "%s(): mclbytes = %d, mbuf_alloc_size = %d, "
- "max_frame_size = %d\n",
- __FUNCTION__, (int) MCLBYTES, sc->mbuf_alloc_size, sc->max_frame_size);
+ DBPRINT(sc, BCE_INFO_LOAD,
+ "%s(): rx_bd_mbuf_alloc_size = %d, pg_bd_mbuf_alloc_size = %d\n",
+ __FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->pg_bd_mbuf_alloc_size);
/* Program appropriate promiscuous/multicast filtering. */
bce_set_rx_mode(sc);
+ /* Init page buffer descriptor chain. */
+ bce_init_pg_chain(sc);
+
/* Init RX buffer descriptor chain. */
bce_init_rx_chain(sc);
-
+
/* Init TX buffer descriptor chain. */
bce_init_tx_chain(sc);
@@ -4662,7 +5179,7 @@ bce_mgmt_init_locked_exit:
static void
bce_init(void *xsc)
{
- struct bce_softc *sc = xsc;
+ struct bce_softc *sc = xsc;
BCE_LOCK(sc);
bce_init_locked(sc);
@@ -4767,7 +5284,7 @@ bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
flags |= (((ip_hlen + tcp_hlen - 40) >> 2) << 8);
bce_tx_encap_skip_tso:
- DBRUNIF(1, sc->requested_tso_frames++);
+ DBRUN(sc->requested_tso_frames++);
}
}
@@ -4792,7 +5309,7 @@ bce_tx_encap_skip_tso:
/* The mbuf is too fragmented for our DMA mapping. */
DBPRINT(sc, BCE_WARN, "%s(): fragmented mbuf (%d pieces)\n",
__FUNCTION__, nsegs);
- DBRUNIF(1, bce_dump_mbuf(sc, m0););
+ DBRUN(bce_dump_mbuf(sc, m0););
/* Try to defrag the mbuf. */
m0 = m_defrag(*m_head, M_DONTWAIT);
@@ -4856,7 +5373,7 @@ bce_tx_encap_skip_tso:
/*
* Cycle through each mbuf segment that makes up
* the outgoing frame, gathering the mapping info
- * for that segment and creating a tx_bd to for
+ * for that segment and creating a tx_bd for
* the mbuf.
*/
for (i = 0; i < nsegs ; i++) {
@@ -4878,7 +5395,7 @@ bce_tx_encap_skip_tso:
/* Set the END flag on the last TX buffer descriptor. */
txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
- DBRUN(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
+ DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs));
DBPRINT(sc, BCE_INFO_SEND,
"%s(): End: prod = 0x%04X, chain_prod = %04X, "
@@ -4901,9 +5418,9 @@ bce_tx_encap_skip_tso:
DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark),
sc->tx_hi_watermark = sc->used_tx_bd);
DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++);
- DBRUNIF(1, sc->tx_mbuf_alloc++);
+	DBRUN(sc->debug_tx_mbuf_alloc++);
- DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, nsegs));
+ DBRUNMSG(BCE_EXCESSIVE_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1));
/* prod points to the next free tx_bd at this point. */
sc->tx_prod = prod;
@@ -5245,7 +5762,7 @@ bce_watchdog(struct bce_softc *sc)
BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n",
__FILE__, __LINE__);
- DBRUN(BCE_VERBOSE_SEND,
+ DBRUNMSG(BCE_VERBOSE_SEND,
bce_dump_driver_state(sc);
bce_dump_status_block(sc));
@@ -5345,7 +5862,7 @@ bce_intr(void *xsc)
DBPRINT(sc, BCE_EXCESSIVE, "Entering %s()\n", __FUNCTION__);
BCE_LOCK(sc);
- DBRUNIF(1, sc->interrupts_generated++);
+ DBRUN(sc->interrupts_generated++);
#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING) {
@@ -5391,12 +5908,12 @@ bce_intr(void *xsc)
(sc->status_block->status_attn_bits_ack &
~STATUS_ATTN_BITS_LINK_STATE))) {
- DBRUN(1, sc->unexpected_attentions++);
+ DBRUN(sc->unexpected_attentions++);
BCE_PRINTF("%s(%d): Fatal attention detected: 0x%08X\n",
__FILE__, __LINE__, sc->status_block->status_attn_bits);
- DBRUN(BCE_FATAL,
+ DBRUNMSG(BCE_FATAL,
if (bce_debug_unexpected_attention == 0)
bce_breakpoint(sc));
@@ -5802,15 +6319,19 @@ bce_tick(void *xsc)
BCE_LOCK_ASSERT(sc);
+ /* Schedule the next tick. */
+ callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
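+	/* (Rescheduling first keeps the tick period a steady hz,
+	 *  independent of how long the work below takes.) */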
+
/* Update the statistics from the hardware statistics block. */
bce_stats_update(sc);
+ /* Top off the receive and page chains. */
+ bce_fill_pg_chain(sc);
+ bce_fill_rx_chain(sc);
+
/* Check that chip hasn't hung. */
bce_watchdog(sc);
- /* Schedule the next tick. */
- callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc);
-
/* If link is up already up then we're done. */
if (sc->bce_link)
goto bce_tick_locked_exit;
@@ -5942,7 +6463,7 @@ bce_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS)
if (result == 1) {
sc = (struct bce_softc *)arg1;
- bce_dump_rx_chain(sc, 0, sc->max_rx_bd);
+ bce_dump_rx_chain(sc, 0, TOTAL_RX_BD);
}
return error;
@@ -5978,6 +6499,34 @@ bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
/****************************************************************************/
+/* Provides a sysctl interface to allow dumping the page chain. */
+/* */
+/* Returns: */
+/* 0 for success, positive value for failure. */
+/****************************************************************************/
+static int
+bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int result;
+ struct bce_softc *sc;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ sc = (struct bce_softc *)arg1;
+ bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
+ }
+
+ return error;
+}
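+/* (Usage sketch, assuming unit 0 and the standard dev.<driver>.<unit>
+ * sysctl tree: `sysctl dev.bce.0.dump_pg_chain=1` triggers the dump;
+ * writing any value other than 1 is a no-op.) */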
+
+
+/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary registers in the */
/* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */
/* */
@@ -6116,62 +6665,42 @@ bce_add_sysctls(struct bce_softc *sc)
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"unexpected_attentions",
CTLFLAG_RD, &sc->unexpected_attentions,
- 0, "unexpected attentions");
+ 0, "Unexpected attentions");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"lost_status_block_updates",
CTLFLAG_RD, &sc->lost_status_block_updates,
- 0, "lost status block updates");
+ 0, "Lost status block updates");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "mbuf_sim_alloc_failed",
- CTLFLAG_RD, &sc->mbuf_sim_alloc_failed,
- 0, "mbuf cluster simulated allocation failures");
+ "debug_mbuf_sim_alloc_failed",
+ CTLFLAG_RD, &sc->debug_mbuf_sim_alloc_failed,
+ 0, "Simulated mbuf cluster allocation failures");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"requested_tso_frames",
CTLFLAG_RD, &sc->requested_tso_frames,
- 0, "The number of TSO frames received");
-
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[1]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[1],
- 0, "mbuf cluster with 1 segment");
-
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[2]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[2],
- 0, "mbuf cluster with 2 segments");
+ 0, "Number of TSO frames received");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[3]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[3],
- 0, "mbuf cluster with 3 segments");
-
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[4]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[4],
- 0, "mbuf cluster with 4 segments");
-
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[5]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[5],
- 0, "mbuf cluster with 5 segments");
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "rx_interrupts",
+ CTLFLAG_RD, &sc->rx_interrupts,
+ 0, "Number of RX interrupts");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[6]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[6],
- 0, "mbuf cluster with 6 segments");
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ "tx_interrupts",
+ CTLFLAG_RD, &sc->tx_interrupts,
+ 0, "Number of TX interrupts");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[7]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[7],
- 0, "mbuf cluster with 7 segments");
+ SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ "rx_intr_time",
+ CTLFLAG_RD, &sc->rx_intr_time,
+ "RX interrupt time");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
- "rx_mbuf_segs[8]",
- CTLFLAG_RD, &sc->rx_mbuf_segs[8],
- 0, "mbuf cluster with 8 segments");
+ SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ "tx_intr_time",
+ CTLFLAG_RD, &sc->tx_intr_time,
+ "TX interrupt time");
#endif
@@ -6487,6 +7016,11 @@ bce_add_sysctls(struct bce_softc *sc)
bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
+ "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
+ (void *)sc, 0,
+ bce_sysctl_dump_pg_chain, "I", "Dump page chain");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"breakpoint", CTLTYPE_INT | CTLFLAG_RW,
(void *)sc, 0,
bce_sysctl_breakpoint, "I", "Driver breakpoint");
@@ -6554,7 +7088,6 @@ bce_unfreeze_controller(struct bce_softc *sc)
static void
bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
{
- u32 val_hi, val_lo;
struct mbuf *mp = m;
if (m == NULL) {
@@ -6563,68 +7096,25 @@ bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
}
while (mp) {
- val_hi = BCE_ADDR_HI(mp);
- val_lo = BCE_ADDR_LO(mp);
- BCE_PRINTF("mbuf: vaddr = 0x%08X:%08X, m_len = %d, m_flags = ( ",
- val_hi, val_lo, mp->m_len);
-
- if (mp->m_flags & M_EXT)
- printf("M_EXT ");
- if (mp->m_flags & M_PKTHDR)
- printf("M_PKTHDR ");
- if (mp->m_flags & M_EOR)
- printf("M_EOR ");
- if (mp->m_flags & M_RDONLY)
- printf("M_RDONLY ");
-
- val_hi = BCE_ADDR_HI(mp->m_data);
- val_lo = BCE_ADDR_LO(mp->m_data);
- printf(") m_data = 0x%08X:%08X\n",
- val_hi, val_lo);
+ BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, m_data = %p\n",
+ mp, mp->m_len, mp->m_flags,
+ "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY",
+ mp->m_data);
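+		/* (In the %b format above, the leading "\20" selects hex
+		 *  output; each following "\N<name>" pair names bit N, 1-based.) */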
if (mp->m_flags & M_PKTHDR) {
- BCE_PRINTF("- m_pkthdr: flags = ( ");
- if (mp->m_flags & M_BCAST)
- printf("M_BCAST ");
- if (mp->m_flags & M_MCAST)
- printf("M_MCAST ");
- if (mp->m_flags & M_FRAG)
- printf("M_FRAG ");
- if (mp->m_flags & M_FIRSTFRAG)
- printf("M_FIRSTFRAG ");
- if (mp->m_flags & M_LASTFRAG)
- printf("M_LASTFRAG ");
- if (mp->m_flags & M_VLANTAG)
- printf("M_VLANTAG ");
- if (mp->m_flags & M_PROMISC)
- printf("M_PROMISC ");
- printf(") csum_flags = ( ");
- if (mp->m_pkthdr.csum_flags & CSUM_IP)
- printf("CSUM_IP ");
- if (mp->m_pkthdr.csum_flags & CSUM_TCP)
- printf("CSUM_TCP ");
- if (mp->m_pkthdr.csum_flags & CSUM_UDP)
- printf("CSUM_UDP ");
- if (mp->m_pkthdr.csum_flags & CSUM_IP_FRAGS)
- printf("CSUM_IP_FRAGS ");
- if (mp->m_pkthdr.csum_flags & CSUM_FRAGMENT)
- printf("CSUM_FRAGMENT ");
- if (mp->m_pkthdr.csum_flags & CSUM_TSO)
- printf("CSUM_TSO ");
- if (mp->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
- printf("CSUM_IP_CHECKED ");
- if (mp->m_pkthdr.csum_flags & CSUM_IP_VALID)
- printf("CSUM_IP_VALID ");
- if (mp->m_pkthdr.csum_flags & CSUM_DATA_VALID)
- printf("CSUM_DATA_VALID ");
- printf(")\n");
+ BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, csum_flags = %b\n",
+ mp->m_pkthdr.len, mp->m_flags,
+ "\20\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG"
+ "\16M_LASTFRAG\21M_VLANTAG\22M_PROMISC\23M_NOFREE",
+ mp->m_pkthdr.csum_flags,
+ "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
+ "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
+ "\12CSUM_IP_VALID\13CSUM_DATA_VALID\14CSUM_PSEUDO_HDR");
}
if (mp->m_flags & M_EXT) {
- val_hi = BCE_ADDR_HI(mp->m_ext.ext_buf);
- val_lo = BCE_ADDR_LO(mp->m_ext.ext_buf);
- BCE_PRINTF("- m_ext: vaddr = 0x%08X:%08X, ext_size = %d, type = ",
- val_hi, val_lo, mp->m_ext.ext_size);
+ BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ",
+ mp->m_ext.ext_buf, mp->m_ext.ext_size);
switch (mp->m_ext.ext_type) {
case EXT_CLUSTER: printf("EXT_CLUSTER\n"); break;
case EXT_SFBUF: printf("EXT_SFBUF\n"); break;
@@ -6652,7 +7142,7 @@ bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m)
/* Nothing. */
/****************************************************************************/
static void
-bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
+bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
{
struct mbuf *m;
@@ -6663,7 +7153,7 @@ bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
for (int i = 0; i < count; i++) {
m = sc->tx_mbuf_ptr[chain_prod];
- BCE_PRINTF("txmbuf[%d]\n", chain_prod);
+ BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod);
bce_dump_mbuf(sc, m);
chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod));
}
@@ -6682,7 +7172,7 @@ bce_dump_tx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
/* Nothing. */
/****************************************************************************/
static void
-bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
+bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
{
struct mbuf *m;
@@ -6707,6 +7197,37 @@ bce_dump_rx_mbuf_chain(struct bce_softc *sc, int chain_prod, int count)
/****************************************************************************/
+/* Prints out the mbufs in the mbuf page chain. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
+{
+ struct mbuf *m;
+
+ BCE_PRINTF(
+ "----------------------------"
+ " pg mbuf data "
+ "----------------------------\n");
+
+ for (int i = 0; i < count; i++) {
+ m = sc->pg_mbuf_ptr[chain_prod];
+ BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod);
+ bce_dump_mbuf(sc, m);
+ chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod));
+ }
+
+
+ BCE_PRINTF(
+ "----------------------------"
+ "----------------"
+ "----------------------------\n");
+}
+
+
+/****************************************************************************/
/* Prints out a tx_bd structure. */
/* */
/* Returns: */
@@ -6785,11 +7306,11 @@ bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
/* Index out of range. */
BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
- /* TX Chain page pointer. */
+ /* RX Chain page pointer. */
BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo);
else
- /* Normal tx_bd entry. */
+ /* Normal rx_bd entry. */
BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
"flags = 0x%08X\n", idx,
rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo,
@@ -6798,6 +7319,31 @@ bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
/****************************************************************************/
+/* Prints out a rx_bd structure in the page chain. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
+{
+ if (idx > MAX_PG_BD)
+ /* Index out of range. */
+ BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
+ else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
+ /* Page Chain page pointer. */
+ BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n",
+ idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo);
+ else
+ /* Normal rx_bd entry. */
+ BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, "
+ "flags = 0x%08X\n", idx,
+ pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
+ pgbd->rx_bd_len, pgbd->rx_bd_flags);
+}
+
+
+/****************************************************************************/
/* Prints out a l2_fhdr structure. */
/* */
/* Returns: */
@@ -6806,12 +7352,265 @@ bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
static void
bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
{
- BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%08X, "
- "pkt_len = 0x%04X, vlan = 0x%04x, ip_xsum = 0x%04X, "
+ BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, "
+ "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, "
"tcp_udp_xsum = 0x%04X\n", idx,
- l2fhdr->l2_fhdr_status, l2fhdr->l2_fhdr_pkt_len,
- l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum,
- l2fhdr->l2_fhdr_tcp_udp_xsum);
+ l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB,
+ l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag,
+ l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum);
+}
+
+
+/****************************************************************************/
+/* Prints out the L2 context memory. (Only useful for CID 0 to 15.) */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_dump_ctx(struct bce_softc *sc, u16 cid)
+{
+ if (cid < TX_CID) {
+ BCE_PRINTF(
+ "----------------------------"
+ " CTX Data "
+ "----------------------------\n");
+
+ BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid);
+ BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BDIDX) host rx producer index\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BDIDX));
+ BCE_PRINTF(" 0x%08X - (L2CTX_HOST_BSEQ) host byte sequence\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_BSEQ));
+ BCE_PRINTF(" 0x%08X - (L2CTX_NX_BSEQ) h/w byte sequence\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BSEQ));
+ BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_HI) h/w buffer descriptor address\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_HI));
+ BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDHADDR_LO) h/w buffer descriptor address\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDHADDR_LO));
+ BCE_PRINTF(" 0x%08X - (L2CTX_NX_BDIDX) h/w rx consumer index\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_BDIDX));
+ BCE_PRINTF(" 0x%08X - (L2CTX_HOST_PG_BDIDX) host page producer index\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_HOST_PG_BDIDX));
+ BCE_PRINTF(" 0x%08X - (L2CTX_PG_BUF_SIZE) host rx_bd/page buffer size\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_PG_BUF_SIZE));
+ BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_HI) h/w page chain address\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_HI));
+ BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDHADDR_LO) h/w page chain address\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDHADDR_LO));
+ BCE_PRINTF(" 0x%08X - (L2CTX_NX_PG_BDIDX) h/w page consumer index\n",
+ CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_NX_PG_BDIDX));
+
+ BCE_PRINTF(
+ "----------------------------"
+ "----------------"
+ "----------------------------\n");
+ }
+}
+
+
+/****************************************************************************/
+/* Prints out the FTQ data. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_dump_ftqs(struct bce_softc *sc)
+{
+ u32 cmd, ctl, cur_depth, max_depth, valid_cnt;
+
+ BCE_PRINTF(
+ "----------------------------"
+ " FTQ Data "
+ "----------------------------\n");
+
+ BCE_PRINTF(" FTQ Command Control Depth_Now Max_Depth Valid_Cnt\n");
+ BCE_PRINTF(" ----- ---------- ---------- ---------- ---------- ----------\n");
+
+ /* Setup the generic statistic counters for the FTQ valid count. */
+ REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
+ ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT)));
+ REG_WR(sc, BCE_HC_STAT_GEN_SEL_1,
+ ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT)));
+ REG_WR(sc, BCE_HC_STAT_GEN_SEL_2,
+ ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT)));
+ REG_WR(sc, BCE_HC_STAT_GEN_SEL_3,
+ ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT)));
+
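+	/* In every *_FTQ_CTL register read below, bits 31:22 hold the
+	 * current queue depth and bits 21:12 the maximum depth. */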
+ cmd = REG_RD(sc, 0x23f8); /* RLUP_FTQ_CMD */
+ ctl = REG_RD(sc, 0x23fc); /* RLUP_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
+ BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0xc53f8); /* RXP_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0xc53fc); /* RXP_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
+ BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0xc53b8); /* RXP_CFTQ_CMD */
+ ctl = REG_RD_IND(sc, 0xc53bc); /* RXP_CFTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
+ BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x2b78); /* RV2P_PFTQ_CMD */
+ ctl = REG_RD(sc, 0x2b7c); /* RV2P_PFTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3);
+ BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x2bf8); /* RV2P_MFTQ_CMD */
+ ctl = REG_RD(sc, 0x2bfc); /* RV2P_MFTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4);
+ BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x2bb8); /* RV2P_TFTQ_CMD */
+ ctl = REG_RD(sc, 0x2bbc); /* RV2P_TFTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5);
+ BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x2ff8); /* RDMA_FTQ_CMD */
+ ctl = REG_RD(sc, 0x2ffc); /* RDMA_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6);
+ BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x4ff8); /* TSCH_FTQ_CMD */
+ ctl = REG_RD(sc, 0x4ffc); /* TSCH_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7);
+ BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x53f8); /* TBDR_FTQ_CMD */
+ ctl = REG_RD(sc, 0x53fc); /* TBDR_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8);
+ BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0x453f8); /* TXP_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x453fc); /* TXP_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9);
+ BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x5ff8); /* TDMA_FTQ_CMD */
+ ctl = REG_RD(sc, 0x5ffc); /* TDMA_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10);
+ BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0x853f8); /* TPAT_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x853fc); /* TPAT_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11);
+ BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0x1c03f8); /* TAS_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x1c03fc); /* TAS_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12);
+ BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0x105378); /* COM_COMXQ_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x10537c); /* COM_COMXQ_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13);
+ BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0x1053b8); /* COM_COMTQ_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x1053bc); /* COM_COMTQ_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14);
+ BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0x1053f8); /* COM_COMQ_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x1053fc); /* COM_COMQ_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15);
+ BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ /* Setup the generic statistic counters for the FTQ valid count. */
+ REG_WR(sc, BCE_HC_STAT_GEN_SEL_0,
+ ((BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) |
+ (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT)));
+
+ cmd = REG_RD_IND(sc, 0x1453f8); /* MCP_MCPQ_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x1453fc); /* MCP_MCPQ_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0);
+ BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD_IND(sc, 0x1853f8); /* CP_CPQ_FTQ_CMD */
+ ctl = REG_RD_IND(sc, 0x1853fc); /* CP_CPQ_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1);
+ BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ cmd = REG_RD(sc, 0x43f8); /* CSCH_CH_FTQ_CMD */
+ ctl = REG_RD(sc, 0x43fc); /* CSCH_CH_FTQ_CTL */
+ cur_depth = (ctl & 0xFFC00000) >> 22;
+ max_depth = (ctl & 0x003FF000) >> 12;
+ valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2);
+ BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ cmd, ctl, cur_depth, max_depth, valid_cnt);
+
+ BCE_PRINTF(
+ "----------------------------"
+ "----------------"
+ "----------------------------\n");
}
@@ -6822,7 +7621,7 @@ bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr)
/* Nothing. */
/****************************************************************************/
static void
-bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
+bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
{
struct tx_bd *txbd;
@@ -6849,7 +7648,7 @@ bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
for (int i = 0; i < count; i++) {
txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)];
bce_dump_txbd(sc, tx_prod, txbd);
- tx_prod = TX_CHAIN_IDX(NEXT_TX_BD(tx_prod));
+ tx_prod = NEXT_TX_BD(tx_prod);
}
BCE_PRINTF(
@@ -6866,11 +7665,11 @@ bce_dump_tx_chain(struct bce_softc *sc, int tx_prod, int count)
/* Nothing. */
/****************************************************************************/
static void
-bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
+bce_dump_rx_chain(struct bce_softc *sc, u16 rx_prod, int count)
{
struct rx_bd *rxbd;
- /* First some info about the tx_bd chain structure. */
+ /* First some info about the rx_bd chain structure. */
BCE_PRINTF(
"----------------------------"
" rx_bd chain "
@@ -6893,7 +7692,52 @@ bce_dump_rx_chain(struct bce_softc *sc, int rx_prod, int count)
for (int i = 0; i < count; i++) {
rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)];
bce_dump_rxbd(sc, rx_prod, rxbd);
- rx_prod = RX_CHAIN_IDX(NEXT_RX_BD(rx_prod));
+ rx_prod = RX_CHAIN_IDX(rx_prod + 1);
+ }
+
+ BCE_PRINTF(
+ "----------------------------"
+ "----------------"
+ "----------------------------\n");
+}
+
+
+/****************************************************************************/
+/* Prints out the page chain. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
+{
+ struct rx_bd *pgbd;
+
+ /* First some info about the page chain structure. */
+ BCE_PRINTF(
+ "----------------------------"
+ " page chain "
+ "----------------------------\n");
+
+ BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n",
+ (u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
+
+ BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
+ (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
+
+ BCE_PRINTF("total rx_bd = 0x%08X, max_pg_bd = 0x%08X\n",
+ (u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
+
+ BCE_PRINTF(
+ "----------------------------"
+ " page data "
+ "----------------------------\n");
+
+ /* Now print out the rx_bd's themselves. */
+ for (int i = 0; i < count; i++) {
+ pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)];
+ bce_dump_pgbd(sc, pg_prod, pgbd);
+ pg_prod = PG_CHAIN_IDX(pg_prod + 1);
}
BCE_PRINTF(
@@ -7337,6 +8181,12 @@ bce_dump_driver_state(struct bce_softc *sc)
"0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain virtual address\n",
val_hi, val_lo);
+ val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
+ val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
+ BCE_PRINTF(
+ "0x%08X:%08X - (sc->pg_bd_chain) page chain virtual address\n",
+ val_hi, val_lo);
+
val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
BCE_PRINTF(
@@ -7349,6 +8199,12 @@ bce_dump_driver_state(struct bce_softc *sc)
"0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain virtual address\n",
val_hi, val_lo);
+ val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
+ val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
+ BCE_PRINTF(
+ "0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain virtual address\n",
+ val_hi, val_lo);
+
BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) h/w intrs\n",
sc->interrupts_generated);
@@ -7370,8 +8226,8 @@ bce_dump_driver_state(struct bce_softc *sc)
BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer bseq index\n",
sc->tx_prod_bseq);
- BCE_PRINTF(" 0x%08X - (sc->tx_mbuf_alloc) tx mbufs allocated\n",
- sc->tx_mbuf_alloc);
+ BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx mbufs allocated\n",
+ sc->debug_tx_mbuf_alloc);
BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used tx_bd's\n",
sc->used_tx_bd);
@@ -7388,22 +8244,34 @@ bce_dump_driver_state(struct bce_softc *sc)
BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer bseq index\n",
sc->rx_prod_bseq);
- BCE_PRINTF(" 0x%08X - (sc->rx_mbuf_alloc) rx mbufs allocated\n",
- sc->rx_mbuf_alloc);
+ BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx mbufs allocated\n",
+ sc->debug_rx_mbuf_alloc);
BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free rx_bd's\n",
sc->free_rx_bd);
- BCE_PRINTF("0x%08X/%08X - (sc->rx_low_watermark) rx low watermark\n",
- sc->rx_low_watermark, sc->max_rx_bd);
+ BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer index\n",
+ sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
+
+ BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer index\n",
+ sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
+
+ BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page mbufs allocated\n",
+ sc->debug_pg_mbuf_alloc);
+
+ BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page rx_bd's\n",
+ sc->free_pg_bd);
+
+ BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low watermark\n",
+ sc->pg_low_watermark, sc->max_pg_bd);
BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed) "
"mbuf alloc failures\n",
sc->mbuf_alloc_failed);
- BCE_PRINTF(" 0x%08X - (sc->mbuf_sim_alloc_failed) "
+ BCE_PRINTF(" 0x%08X - (sc->debug_mbuf_sim_alloc_failed) "
"simulated mbuf alloc failures\n",
- sc->mbuf_sim_alloc_failed);
+ sc->debug_mbuf_sim_alloc_failed);
BCE_PRINTF(
"----------------------------"
@@ -7422,7 +8290,7 @@ bce_dump_driver_state(struct bce_softc *sc)
static void
bce_dump_hw_state(struct bce_softc *sc)
{
- u32 val1;
+ u32 val;
BCE_PRINTF(
"----------------------------"
@@ -7431,48 +8299,57 @@ bce_dump_hw_state(struct bce_softc *sc)
BCE_PRINTF("0x%08X - bootcode version\n", sc->bce_fw_ver);
- val1 = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
+ val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS);
BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n",
- val1, BCE_MISC_ENABLE_STATUS_BITS);
+ val, BCE_MISC_ENABLE_STATUS_BITS);
- val1 = REG_RD(sc, BCE_DMA_STATUS);
- BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val1, BCE_DMA_STATUS);
+ val = REG_RD(sc, BCE_DMA_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS);
- val1 = REG_RD(sc, BCE_CTX_STATUS);
- BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val1, BCE_CTX_STATUS);
+ val = REG_RD(sc, BCE_CTX_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS);
- val1 = REG_RD(sc, BCE_EMAC_STATUS);
- BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val1, BCE_EMAC_STATUS);
+ val = REG_RD(sc, BCE_EMAC_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS);
- val1 = REG_RD(sc, BCE_RPM_STATUS);
- BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val1, BCE_RPM_STATUS);
+ val = REG_RD(sc, BCE_RPM_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS);
- val1 = REG_RD(sc, BCE_TBDR_STATUS);
- BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val1, BCE_TBDR_STATUS);
+ val = REG_RD(sc, 0x2004);
+ BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004);
- val1 = REG_RD(sc, BCE_TDMA_STATUS);
- BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val1, BCE_TDMA_STATUS);
+ val = REG_RD(sc, BCE_RV2P_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS);
- val1 = REG_RD(sc, BCE_HC_STATUS);
- BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val1, BCE_HC_STATUS);
+ val = REG_RD(sc, 0x2c04);
+ BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04);
- val1 = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
- BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val1, BCE_TXP_CPU_STATE);
+ val = REG_RD(sc, BCE_TBDR_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS);
- val1 = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
- BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val1, BCE_TPAT_CPU_STATE);
+ val = REG_RD(sc, BCE_TDMA_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS);
- val1 = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
- BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val1, BCE_RXP_CPU_STATE);
+ val = REG_RD(sc, BCE_HC_STATUS);
+ BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS);
+
+ val = REG_RD_IND(sc, BCE_TXP_CPU_STATE);
+ BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE);
+
+ val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE);
+ BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE);
- val1 = REG_RD_IND(sc, BCE_COM_CPU_STATE);
- BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val1, BCE_COM_CPU_STATE);
+ val = REG_RD_IND(sc, BCE_RXP_CPU_STATE);
+ BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE);
- val1 = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
- BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val1, BCE_MCP_CPU_STATE);
+ val = REG_RD_IND(sc, BCE_COM_CPU_STATE);
+ BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE);
- val1 = REG_RD_IND(sc, BCE_CP_CPU_STATE);
- BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val1, BCE_CP_CPU_STATE);
+ val = REG_RD_IND(sc, BCE_MCP_CPU_STATE);
+ BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE);
+
+ val = REG_RD_IND(sc, BCE_CP_CPU_STATE);
+ BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE);
BCE_PRINTF(
"----------------------------"
@@ -7484,10 +8361,11 @@ bce_dump_hw_state(struct bce_softc *sc)
" Register Dump "
"----------------------------\n");
- for (int i = 0x400; i < 0x8000; i += 0x10)
+ for (int i = 0x400; i < 0x8000; i += 0x10) {
BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
i, REG_RD(sc, i), REG_RD(sc, i + 0x4),
- REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
+ REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC));
+ }
BCE_PRINTF(
"----------------------------"
@@ -7672,6 +8550,9 @@ bce_dump_tpat_state(struct bce_softc *sc)
}
+/* ToDo: Add CP and COM processor state dumps. */
+
+
/****************************************************************************/
/* Prints out the driver state and then enters the debugger. */
/* */
@@ -7682,17 +8563,25 @@ static void
bce_breakpoint(struct bce_softc *sc)
{
- /* Unreachable code to shut the compiler up about unused functions. */
+ /*
+ * Unreachable code to silence compiler warnings
+ * about unused functions.
+ */
if (0) {
bce_freeze_controller(sc);
bce_unfreeze_controller(sc);
bce_dump_txbd(sc, 0, NULL);
bce_dump_rxbd(sc, 0, NULL);
+ bce_dump_pgbd(sc, 0, NULL);
bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
- bce_dump_rx_mbuf_chain(sc, 0, sc->max_rx_bd);
+ bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
+ bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
bce_dump_l2fhdr(sc, 0, NULL);
+ bce_dump_ctx(sc, RX_CID);
+ bce_dump_ftqs(sc);
bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
- bce_dump_rx_chain(sc, 0, sc->max_rx_bd);
+ bce_dump_rx_chain(sc, 0, USABLE_RX_BD);
+ bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
bce_dump_status_block(sc);
bce_dump_stats_block(sc);
bce_dump_driver_state(sc);
@@ -7703,14 +8592,9 @@ bce_breakpoint(struct bce_softc *sc)
bce_dump_tpat_state(sc);
}
-/* bce_freeze_controller(sc); */
- bce_dump_driver_state(sc);
bce_dump_status_block(sc);
- bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
- bce_dump_hw_state(sc);
- bce_dump_txp_state(sc);
-/* bce_unfreeze_controller(sc); */
-
+ bce_dump_driver_state(sc);
+
/* Call the debugger. */
breakpoint();