summaryrefslogtreecommitdiffstats
path: root/sys/dev/bce
diff options
context:
space:
mode:
authordavidch <davidch@FreeBSD.org>2011-02-07 23:00:24 +0000
committerdavidch <davidch@FreeBSD.org>2011-02-07 23:00:24 +0000
commit93acd8b57328416a75419b8798763cecaa1bbb29 (patch)
tree4ebd02f5439b510db0a45ab6260bb491b246c2b9 /sys/dev/bce
parentd083a86f6c3210531ca481c804371282bb16a599 (diff)
downloadFreeBSD-src-93acd8b57328416a75419b8798763cecaa1bbb29.zip
FreeBSD-src-93acd8b57328416a75419b8798763cecaa1bbb29.tar.gz
- Added sysctls for header splitting, RX/TX buffer count, interrupt
coalescing, strict RX MTU, verbose output, and shared memory debug. - Added additional debug counters (VLAN tags and split header frames). - Updated debug counters to 64 bit definitions. - Updated l2fhdr bit definitions. - Combined RX buffer sizing into a single function. - Added buffer size and interrupt coalescing settings to adapter info printout. Submitted by: davidch MFC after: 2 weeks
Diffstat (limited to 'sys/dev/bce')
-rw-r--r--sys/dev/bce/if_bce.c1494
-rw-r--r--sys/dev/bce/if_bcereg.h321
2 files changed, 1112 insertions, 703 deletions
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index 3d919d2..34e8271 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -287,48 +287,43 @@ static int bce_shutdown (device_t);
/* BCE Debug Data Structure Dump Routines */
/****************************************************************************/
#ifdef BCE_DEBUG
-static u32 bce_reg_rd (struct bce_softc *, u32);
-static void bce_reg_wr (struct bce_softc *, u32, u32);
-static void bce_reg_wr16 (struct bce_softc *, u32, u16);
-static u32 bce_ctx_rd (struct bce_softc *, u32, u32);
-static void bce_dump_enet (struct bce_softc *, struct mbuf *);
-static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
+static u32 bce_reg_rd (struct bce_softc *, u32);
+static void bce_reg_wr (struct bce_softc *, u32, u32);
+static void bce_reg_wr16 (struct bce_softc *, u32, u16);
+static u32 bce_ctx_rd (struct bce_softc *, u32, u32);
+static void bce_dump_enet (struct bce_softc *, struct mbuf *);
+static void bce_dump_mbuf (struct bce_softc *, struct mbuf *);
static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int);
static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int);
-#ifdef BCE_JUMBO_HDRSPLIT
static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int);
-#endif
-static void bce_dump_txbd (struct bce_softc *,
+static void bce_dump_txbd (struct bce_softc *,
int, struct tx_bd *);
-static void bce_dump_rxbd (struct bce_softc *,
+static void bce_dump_rxbd (struct bce_softc *,
int, struct rx_bd *);
-#ifdef BCE_JUMBO_HDRSPLIT
-static void bce_dump_pgbd (struct bce_softc *,
+static void bce_dump_pgbd (struct bce_softc *,
int, struct rx_bd *);
-#endif
static void bce_dump_l2fhdr (struct bce_softc *,
int, struct l2_fhdr *);
-static void bce_dump_ctx (struct bce_softc *, u16);
-static void bce_dump_ftqs (struct bce_softc *);
+static void bce_dump_ctx (struct bce_softc *, u16);
+static void bce_dump_ftqs (struct bce_softc *);
static void bce_dump_tx_chain (struct bce_softc *, u16, int);
static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int);
-#ifdef BCE_JUMBO_HDRSPLIT
static void bce_dump_pg_chain (struct bce_softc *, u16, int);
-#endif
static void bce_dump_status_block (struct bce_softc *);
static void bce_dump_stats_block (struct bce_softc *);
static void bce_dump_driver_state (struct bce_softc *);
static void bce_dump_hw_state (struct bce_softc *);
+static void bce_dump_shmem_state (struct bce_softc *);
static void bce_dump_mq_regs (struct bce_softc *);
static void bce_dump_bc_state (struct bce_softc *);
static void bce_dump_txp_state (struct bce_softc *, int);
static void bce_dump_rxp_state (struct bce_softc *, int);
-static void bce_dump_tpat_state (struct bce_softc *, int);
+static void bce_dump_tpat_state (struct bce_softc *, int);
static void bce_dump_cp_state (struct bce_softc *, int);
static void bce_dump_com_state (struct bce_softc *, int);
-static void bce_dump_rv2p_state (struct bce_softc *);
-static void bce_breakpoint (struct bce_softc *);
-#endif
+static void bce_dump_rv2p_state (struct bce_softc *);
+static void bce_breakpoint (struct bce_softc *);
+#endif /*BCE_DEBUG */
/****************************************************************************/
@@ -355,15 +350,15 @@ static int sysctl_nvram_write(SYSCTL_HANDLER_ARGS);
/****************************************************************************/
static int bce_acquire_nvram_lock (struct bce_softc *);
static int bce_release_nvram_lock (struct bce_softc *);
-static void bce_enable_nvram_access (struct bce_softc *);
-static void bce_disable_nvram_access (struct bce_softc *);
+static void bce_enable_nvram_access(struct bce_softc *);
+static void bce_disable_nvram_access(struct bce_softc *);
static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32);
-static int bce_init_nvram (struct bce_softc *);
-static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
-static int bce_nvram_test (struct bce_softc *);
+static int bce_init_nvram (struct bce_softc *);
+static int bce_nvram_read (struct bce_softc *, u32, u8 *, int);
+static int bce_nvram_test (struct bce_softc *);
#ifdef BCE_NVRAM_WRITE_SUPPORT
static int bce_enable_nvram_write (struct bce_softc *);
-static void bce_disable_nvram_write (struct bce_softc *);
+static void bce_disable_nvram_write(struct bce_softc *);
static int bce_nvram_erase_page (struct bce_softc *, u32);
static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32);
static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
@@ -372,12 +367,12 @@ static int bce_nvram_write (struct bce_softc *, u32, u8 *, int);
/****************************************************************************/
/* */
/****************************************************************************/
-static void bce_get_media (struct bce_softc *);
-static void bce_init_media (struct bce_softc *);
-static void bce_dma_map_addr (void *,
- bus_dma_segment_t *, int, int);
-static int bce_dma_alloc (device_t);
-static void bce_dma_free (struct bce_softc *);
+static void bce_get_rx_buffer_sizes(struct bce_softc *, int);
+static void bce_get_media (struct bce_softc *);
+static void bce_init_media (struct bce_softc *);
+static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int);
+static int bce_dma_alloc (device_t);
+static void bce_dma_free (struct bce_softc *);
static void bce_release_resources (struct bce_softc *);
/****************************************************************************/
@@ -387,69 +382,67 @@ static int bce_fw_sync (struct bce_softc *, u32);
static void bce_load_rv2p_fw (struct bce_softc *, u32 *, u32, u32);
static void bce_load_cpu_fw (struct bce_softc *,
struct cpu_reg *, struct fw_info *);
-static void bce_start_cpu (struct bce_softc *, struct cpu_reg *);
-static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *);
+static void bce_start_cpu (struct bce_softc *, struct cpu_reg *);
+static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *);
static void bce_start_rxp_cpu (struct bce_softc *);
static void bce_init_rxp_cpu (struct bce_softc *);
static void bce_init_txp_cpu (struct bce_softc *);
static void bce_init_tpat_cpu (struct bce_softc *);
static void bce_init_cp_cpu (struct bce_softc *);
static void bce_init_com_cpu (struct bce_softc *);
-static void bce_init_cpus (struct bce_softc *);
+static void bce_init_cpus (struct bce_softc *);
-static void bce_print_adapter_info (struct bce_softc *);
+static void bce_print_adapter_info (struct bce_softc *);
static void bce_probe_pci_caps (device_t, struct bce_softc *);
-static void bce_stop (struct bce_softc *);
-static int bce_reset (struct bce_softc *, u32);
-static int bce_chipinit (struct bce_softc *);
-static int bce_blockinit (struct bce_softc *);
+static void bce_stop (struct bce_softc *);
+static int bce_reset (struct bce_softc *, u32);
+static int bce_chipinit (struct bce_softc *);
+static int bce_blockinit (struct bce_softc *);
static int bce_init_tx_chain (struct bce_softc *);
static void bce_free_tx_chain (struct bce_softc *);
-static int bce_get_rx_buf (struct bce_softc *,
+static int bce_get_rx_buf (struct bce_softc *,
struct mbuf *, u16 *, u16 *, u32 *);
static int bce_init_rx_chain (struct bce_softc *);
static void bce_fill_rx_chain (struct bce_softc *);
static void bce_free_rx_chain (struct bce_softc *);
-#ifdef BCE_JUMBO_HDRSPLIT
-static int bce_get_pg_buf (struct bce_softc *,
+static int bce_get_pg_buf (struct bce_softc *,
struct mbuf *, u16 *, u16 *);
static int bce_init_pg_chain (struct bce_softc *);
static void bce_fill_pg_chain (struct bce_softc *);
static void bce_free_pg_chain (struct bce_softc *);
-#endif
static struct mbuf *bce_tso_setup (struct bce_softc *,
struct mbuf **, u16 *);
-static int bce_tx_encap (struct bce_softc *, struct mbuf **);
+static int bce_tx_encap (struct bce_softc *, struct mbuf **);
static void bce_start_locked (struct ifnet *);
-static void bce_start (struct ifnet *);
-static int bce_ioctl (struct ifnet *, u_long, caddr_t);
-static void bce_watchdog (struct bce_softc *);
+static void bce_start (struct ifnet *);
+static int bce_ioctl (struct ifnet *, u_long, caddr_t);
+static void bce_watchdog (struct bce_softc *);
static int bce_ifmedia_upd (struct ifnet *);
static int bce_ifmedia_upd_locked (struct ifnet *);
static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *);
static void bce_init_locked (struct bce_softc *);
-static void bce_init (void *);
+static void bce_init (void *);
static void bce_mgmt_init_locked (struct bce_softc *sc);
-static int bce_init_ctx (struct bce_softc *);
+static int bce_init_ctx (struct bce_softc *);
static void bce_get_mac_addr (struct bce_softc *);
static void bce_set_mac_addr (struct bce_softc *);
-static void bce_phy_intr (struct bce_softc *);
+static void bce_phy_intr (struct bce_softc *);
static inline u16 bce_get_hw_rx_cons (struct bce_softc *);
static void bce_rx_intr (struct bce_softc *);
static void bce_tx_intr (struct bce_softc *);
static void bce_disable_intr (struct bce_softc *);
static void bce_enable_intr (struct bce_softc *, int);
-static void bce_intr (void *);
+static void bce_intr (void *);
static void bce_set_rx_mode (struct bce_softc *);
static void bce_stats_update (struct bce_softc *);
-static void bce_tick (void *);
-static void bce_pulse (void *);
+static void bce_tick (void *);
+static void bce_pulse (void *);
static void bce_add_sysctls (struct bce_softc *);
@@ -505,23 +498,148 @@ DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters");
/* Allowable values are TRUE or FALSE */
+static int bce_verbose = TRUE;
+TUNABLE_INT("hw.bce.verbose", &bce_verbose);
+SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0,
+ "Verbose output enable/disable");
+
+/* Allowable values are TRUE or FALSE */
static int bce_tso_enable = TRUE;
TUNABLE_INT("hw.bce.tso_enable", &bce_tso_enable);
SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
-"TSO Enable/Disable");
+ "TSO Enable/Disable");
/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
/* ToDo: Add MSI-X support. */
static int bce_msi_enable = 1;
TUNABLE_INT("hw.bce.msi_enable", &bce_msi_enable);
SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
-"MSI-X|MSI|INTx selector");
+ "MSI-X|MSI|INTx selector");
+
+/* Allowable values are 1, 2, 4, 8. */
+static int bce_rx_pages = DEFAULT_RX_PAGES;
+TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
+SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0,
+ "Receive buffer descriptor pages (1 page = 255 buffer descriptors)");
+
+/* Allowable values are 1, 2, 4, 8. */
+static int bce_tx_pages = DEFAULT_TX_PAGES;
+TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
+SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0,
+ "Transmit buffer descriptor pages (1 page = 255 buffer descriptors)");
+
+/* Allowable values are TRUE or FALSE. */
+static int bce_hdr_split = TRUE;
+TUNABLE_INT("hw.bce.hdr_split", &bce_hdr_split);
+SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0,
+ "Frame header/payload splitting Enable/Disable");
+
+/* Allowable values are TRUE or FALSE. */
+static int bce_strict_rx_mtu = FALSE;
+TUNABLE_INT("hw.bce.strict_rx_mtu", &bce_strict_rx_mtu);
+SYSCTL_UINT(_hw_bce, OID_AUTO, loose_rx_mtu, CTLFLAG_RDTUN,
+ &bce_strict_rx_mtu, 0,
+ "Enable/Disable strict RX frame size checking");
+
+/* Allowable values are 0 ... 100 */
+#ifdef BCE_DEBUG
+/* Generate 1 interrupt for every transmit completion. */
+static int bce_tx_quick_cons_trip_int = 1;
+#else
+/* Generate 1 interrupt for every 20 transmit completions. */
+static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT;
+#endif
+TUNABLE_INT("hw.bce.tx_quick_cons_trip_int", &bce_tx_quick_cons_trip_int);
+SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN,
+ &bce_tx_quick_cons_trip_int, 0,
+ "Transmit BD trip point during interrupts");
-/* ToDo: Add tunable to enable/disable strict MTU handling. */
-/* Currently allows "loose" RX MTU checking (i.e. sets the */
-/* H/W RX MTU to the size of the largest receive buffer, or */
-/* 2048 bytes). This will cause a UNH failure but is more */
-/* desireable from a functional perspective. */
+/* Allowable values are 0 ... 100 */
+/* Generate 1 interrupt for every transmit completion. */
+#ifdef BCE_DEBUG
+static int bce_tx_quick_cons_trip = 1;
+#else
+/* Generate 1 interrupt for every 20 transmit completions. */
+static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
+#endif
+TUNABLE_INT("hw.bce.tx_quick_cons_trip", &bce_tx_quick_cons_trip);
+SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN,
+ &bce_tx_quick_cons_trip, 0,
+ "Transmit BD trip point");
+
+/* Allowable values are 0 ... 100 */
+#ifdef BCE_DEBUG
+/* Generate an interrupt if 0us have elapsed since the last TX completion. */
+static int bce_tx_ticks_int = 0;
+#else
+/* Generate an interrupt if 80us have elapsed since the last TX completion. */
+static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT;
+#endif
+TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
+SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN,
+ &bce_tx_ticks_int, 0, "Transmit ticks count during interrupt");
+
+/* Allowable values are 0 ... 100 */
+#ifdef BCE_DEBUG
+/* Generate an interrupt if 0us have elapsed since the last TX completion. */
+static int bce_tx_ticks = 0;
+#else
+/* Generate an interrupt if 80us have elapsed since the last TX completion. */
+static int bce_tx_ticks = DEFAULT_TX_TICKS;
+#endif
+TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
+SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN,
+ &bce_tx_ticks, 0, "Transmit ticks count");
+
+/* Allowable values are 1 ... 100 */
+#ifdef BCE_DEBUG
+/* Generate 1 interrupt for every received frame. */
+static int bce_rx_quick_cons_trip_int = 1;
+#else
+/* Generate 1 interrupt for every 6 received frames. */
+static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT;
+#endif
+TUNABLE_INT("hw.bce.rx_quick_cons_trip_int", &bce_rx_quick_cons_trip_int);
+SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN,
+ &bce_rx_quick_cons_trip_int, 0,
+ "Receive BD trip point during interrupts");
+
+/* Allowable values are 1 ... 100 */
+#ifdef BCE_DEBUG
+/* Generate 1 interrupt for every received frame. */
+static int bce_rx_quick_cons_trip = 1;
+#else
+/* Generate 1 interrupt for every 6 received frames. */
+static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
+#endif
+TUNABLE_INT("hw.bce.rx_quick_cons_trip", &bce_rx_quick_cons_trip);
+SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN,
+ &bce_rx_quick_cons_trip, 0,
+ "Receive BD trip point");
+
+/* Allowable values are 0 ... 100 */
+#ifdef BCE_DEBUG
+/* Generate an int. if 0us have elapsed since the last received frame. */
+static int bce_rx_ticks_int = 0;
+#else
+/* Generate an int. if 18us have elapsed since the last received frame. */
+static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
+#endif
+TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
+SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN,
+ &bce_rx_ticks_int, 0, "Receive ticks count during interrupt");
+
+/* Allowable values are 0 ... 100 */
+#ifdef BCE_DEBUG
+/* Generate an int. if 0us have elapsed since the last received frame. */
+static int bce_rx_ticks = 0;
+#else
+/* Generate an int. if 18us have elapsed since the last received frame. */
+static int bce_rx_ticks = DEFAULT_RX_TICKS;
+#endif
+TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
+SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN,
+ &bce_rx_ticks, 0, "Receive ticks count");
/****************************************************************************/
@@ -603,7 +721,7 @@ bce_print_adapter_info(struct bce_softc *sc)
DBENTER(BCE_VERBOSE_LOAD);
- if (bootverbose) {
+ if (bce_verbose || bootverbose) {
BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >>
12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
@@ -625,12 +743,14 @@ bce_print_adapter_info(struct bce_softc *sc)
}
/* Firmware version and device features. */
- printf("B/C (%s); Flags (", sc->bce_bc_ver);
+ printf("B/C (%s); Bufs (RX:%d;TX:%d;PG:%d); Flags (",
+ sc->bce_bc_ver, sc->rx_pages, sc->tx_pages,
+ (bce_hdr_split == TRUE ? sc->pg_pages: 0));
- #ifdef BCE_JUMBO_HDRSPLIT
- printf("SPLT");
- i++;
- #endif
+ if (bce_hdr_split == TRUE) {
+ printf("SPLT");
+ i++;
+ }
if (sc->bce_flags & BCE_USING_MSI_FLAG) {
if (i > 0) printf("|");
@@ -653,6 +773,17 @@ bce_print_adapter_info(struct bce_softc *sc)
} else {
printf(")\n");
}
+
+ printf("Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
+ sc->bce_rx_quick_cons_trip_int,
+ sc->bce_rx_quick_cons_trip,
+ sc->bce_rx_ticks_int,
+ sc->bce_rx_ticks,
+ sc->bce_tx_quick_cons_trip_int,
+ sc->bce_tx_quick_cons_trip,
+ sc->bce_tx_ticks_int,
+ sc->bce_tx_ticks);
+
}
DBEXIT(BCE_VERBOSE_LOAD);
@@ -711,6 +842,189 @@ bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
/****************************************************************************/
+/* Load and validate user tunable settings. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_set_tunables(struct bce_softc *sc)
+{
+ /* Set sysctl values for RX page count. */
+ switch (bce_rx_pages) {
+ case 1:
+ /* fall-through */
+ case 2:
+ /* fall-through */
+ case 4:
+ /* fall-through */
+ case 8:
+ sc->rx_pages = bce_rx_pages;
+ break;
+ default:
+ sc->rx_pages = DEFAULT_RX_PAGES;
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.rx_pages! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_rx_pages, DEFAULT_RX_PAGES);
+ }
+
+ /* ToDo: Consider allowing user setting for pg_pages. */
+ sc->pg_pages = min((sc->rx_pages * 4), MAX_PG_PAGES);
+
+ /* Set sysctl values for TX page count. */
+ switch (bce_tx_pages) {
+ case 1:
+ /* fall-through */
+ case 2:
+ /* fall-through */
+ case 4:
+ /* fall-through */
+ case 8:
+ sc->tx_pages = bce_tx_pages;
+ break;
+ default:
+ sc->tx_pages = DEFAULT_TX_PAGES;
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.tx_pages! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_tx_pages, DEFAULT_TX_PAGES);
+ }
+
+ /*
+ * Validate the TX trip point (i.e. the number of
+ * TX completions before a status block update is
+ * generated and an interrupt is asserted.
+ */
+ if (bce_tx_quick_cons_trip_int <= 100) {
+ sc->bce_tx_quick_cons_trip_int =
+ bce_tx_quick_cons_trip_int;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.tx_quick_cons_trip_int! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_tx_quick_cons_trip_int,
+ DEFAULT_TX_QUICK_CONS_TRIP_INT);
+ sc->bce_tx_quick_cons_trip_int =
+ DEFAULT_TX_QUICK_CONS_TRIP_INT;
+ }
+
+ if (bce_tx_quick_cons_trip <= 100) {
+ sc->bce_tx_quick_cons_trip =
+ bce_tx_quick_cons_trip;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.tx_quick_cons_trip! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_tx_quick_cons_trip,
+ DEFAULT_TX_QUICK_CONS_TRIP);
+ sc->bce_tx_quick_cons_trip =
+ DEFAULT_TX_QUICK_CONS_TRIP;
+ }
+
+ /*
+ * Validate the TX ticks count (i.e. the maximum amount
+ * of time to wait after the last TX completion has
+ * occurred before a status block update is generated
+ * and an interrupt is asserted.
+ */
+ if (bce_tx_ticks_int <= 100) {
+ sc->bce_tx_ticks_int =
+ bce_tx_ticks_int;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.tx_ticks_int! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_tx_ticks_int,
+ DEFAULT_TX_TICKS_INT);
+ sc->bce_tx_ticks_int =
+ DEFAULT_TX_TICKS_INT;
+ }
+
+ if (bce_tx_ticks <= 100) {
+ sc->bce_tx_ticks =
+ bce_tx_ticks;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.tx_ticks! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_tx_ticks,
+ DEFAULT_TX_TICKS);
+ sc->bce_tx_ticks =
+ DEFAULT_TX_TICKS;
+ }
+
+ /*
+ * Validate the RX trip point (i.e. the number of
+ * RX frames received before a status block update is
+ * generated and an interrupt is asserted.
+ */
+ if (bce_rx_quick_cons_trip_int <= 100) {
+ sc->bce_rx_quick_cons_trip_int =
+ bce_rx_quick_cons_trip_int;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.rx_quick_cons_trip_int! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_rx_quick_cons_trip_int,
+ DEFAULT_RX_QUICK_CONS_TRIP_INT);
+ sc->bce_rx_quick_cons_trip_int =
+ DEFAULT_RX_QUICK_CONS_TRIP_INT;
+ }
+
+ if (bce_rx_quick_cons_trip <= 100) {
+ sc->bce_rx_quick_cons_trip =
+ bce_rx_quick_cons_trip;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.rx_quick_cons_trip! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_rx_quick_cons_trip,
+ DEFAULT_RX_QUICK_CONS_TRIP);
+ sc->bce_rx_quick_cons_trip =
+ DEFAULT_RX_QUICK_CONS_TRIP;
+ }
+
+ /*
+ * Validate the RX ticks count (i.e. the maximum amount
+ * of time to wait after the last RX frame has been
+ * received before a status block update is generated
+ * and an interrupt is asserted.
+ */
+ if (bce_rx_ticks_int <= 100) {
+ sc->bce_rx_ticks_int = bce_rx_ticks_int;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.rx_ticks_int! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_rx_ticks_int,
+ DEFAULT_RX_TICKS_INT);
+ sc->bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
+ }
+
+ if (bce_rx_ticks <= 100) {
+ sc->bce_rx_ticks = bce_rx_ticks;
+ } else {
+ BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
+ "hw.bce.rx_ticks! Setting default of %d.\n",
+ __FILE__, __LINE__, bce_rx_ticks,
+ DEFAULT_RX_TICKS);
+ sc->bce_rx_ticks = DEFAULT_RX_TICKS;
+ }
+
+ /* Disabling both RX ticks and RX trips will prevent interrupts. */
+ if ((bce_rx_quick_cons_trip == 0) && (bce_rx_ticks == 0)) {
+ BCE_PRINTF("%s(%d): Cannot set both hw.bce.rx_ticks and "
+ "hw.bce.rx_quick_cons_trip to 0. Setting default values.\n",
+ __FILE__, __LINE__);
+ sc->bce_rx_ticks = DEFAULT_RX_TICKS;
+ sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
+ }
+
+ /* Disabling both TX ticks and TX trips will prevent interrupts. */
+ if ((bce_tx_quick_cons_trip == 0) && (bce_tx_ticks == 0)) {
+ BCE_PRINTF("%s(%d): Cannot set both hw.bce.tx_ticks and "
+ "hw.bce.tx_quick_cons_trip to 0. Setting default values.\n",
+ __FILE__, __LINE__);
+ sc->bce_tx_ticks = DEFAULT_TX_TICKS;
+ sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
+ }
+
+}
+
+
+/****************************************************************************/
/* Device attach function. */
/* */
/* Allocates device resources, performs secondary chip identification, */
@@ -739,6 +1053,8 @@ bce_attach(device_t dev)
sc->bce_flags = 0;
sc->bce_phy_flags = 0;
+ bce_set_tunables(sc);
+
pci_enable_busmaster(dev);
/* Allocate PCI memory resources. */
@@ -1028,37 +1344,13 @@ bce_attach(device_t dev)
* values for the RX and TX chains.
*/
-#ifdef BCE_DEBUG
- /* Force more frequent interrupts. */
- sc->bce_tx_quick_cons_trip_int = 1;
- sc->bce_tx_quick_cons_trip = 1;
- sc->bce_tx_ticks_int = 0;
- sc->bce_tx_ticks = 0;
-
- sc->bce_rx_quick_cons_trip_int = 1;
- sc->bce_rx_quick_cons_trip = 1;
- sc->bce_rx_ticks_int = 0;
- sc->bce_rx_ticks = 0;
-#else
- /* Improve throughput at the expense of increased latency. */
- sc->bce_tx_quick_cons_trip_int = 20;
- sc->bce_tx_quick_cons_trip = 20;
- sc->bce_tx_ticks_int = 80;
- sc->bce_tx_ticks = 80;
-
- sc->bce_rx_quick_cons_trip_int = 6;
- sc->bce_rx_quick_cons_trip = 6;
- sc->bce_rx_ticks_int = 18;
- sc->bce_rx_ticks = 18;
-#endif
-
/* Not used for L2. */
- sc->bce_comp_prod_trip_int = 0;
- sc->bce_comp_prod_trip = 0;
- sc->bce_com_ticks_int = 0;
- sc->bce_com_ticks = 0;
- sc->bce_cmd_ticks_int = 0;
- sc->bce_cmd_ticks = 0;
+ sc->bce_comp_prod_trip_int = 0;
+ sc->bce_comp_prod_trip = 0;
+ sc->bce_com_ticks_int = 0;
+ sc->bce_com_ticks = 0;
+ sc->bce_cmd_ticks_int = 0;
+ sc->bce_cmd_ticks = 0;
/* Update statistics once every second. */
sc->bce_stats_ticks = 1000000 & 0xffff00;
@@ -1112,23 +1404,11 @@ bce_attach(device_t dev)
* This may change later if the MTU size is set to
* something other than 1500.
*/
-#ifdef BCE_JUMBO_HDRSPLIT
- sc->rx_bd_mbuf_alloc_size = MHLEN;
- /* Make sure offset is 16 byte aligned for hardware. */
- sc->rx_bd_mbuf_align_pad =
- roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
- sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
- sc->rx_bd_mbuf_align_pad;
- sc->pg_bd_mbuf_alloc_size = MCLBYTES;
-#else
- sc->rx_bd_mbuf_alloc_size = MCLBYTES;
- sc->rx_bd_mbuf_align_pad =
- roundup2(MCLBYTES, 16) - MCLBYTES;
- sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
- sc->rx_bd_mbuf_align_pad;
-#endif
+ bce_get_rx_buffer_sizes(sc,
+ (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN));
- ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD;
+ /* Recalculate our buffer allocation sizes. */
+ ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD_ALLOC;
IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
IFQ_SET_READY(&ifp->if_snd);
@@ -2695,6 +2975,58 @@ bce_nvram_test_exit:
/****************************************************************************/
+/* Calculates the size of the buffers to allocate based on the MTU. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static void
+bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu)
+{
+ DBENTER(BCE_VERBOSE_LOAD);
+
+ /* Use a single allocation type when header splitting enabled. */
+ if (bce_hdr_split == TRUE) {
+ sc->rx_bd_mbuf_alloc_size = MHLEN;
+ /* Make sure offset is 16 byte aligned for hardware. */
+ sc->rx_bd_mbuf_align_pad =
+ roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
+ sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
+ sc->rx_bd_mbuf_align_pad;
+ sc->pg_bd_mbuf_alloc_size = MCLBYTES;
+ } else {
+ if ((mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
+ ETHER_CRC_LEN) > MCLBYTES) {
+ /* Setup for jumbo RX buffer allocations. */
+ sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
+ sc->rx_bd_mbuf_align_pad =
+ roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
+ sc->rx_bd_mbuf_data_len =
+ sc->rx_bd_mbuf_alloc_size -
+ sc->rx_bd_mbuf_align_pad;
+ } else {
+ /* Setup for standard RX buffer allocations. */
+ sc->rx_bd_mbuf_alloc_size = MCLBYTES;
+ sc->rx_bd_mbuf_align_pad =
+ roundup2(MCLBYTES, 16) - MCLBYTES;
+ sc->rx_bd_mbuf_data_len =
+ sc->rx_bd_mbuf_alloc_size -
+ sc->rx_bd_mbuf_align_pad;
+ }
+ }
+
+// DBPRINT(sc, BCE_INFO_LOAD,
+ DBPRINT(sc, BCE_WARN,
+ "%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, "
+ "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
+ sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
+ sc->rx_bd_mbuf_align_pad);
+
+ DBEXIT(BCE_VERBOSE_LOAD);
+
+}
+
+/****************************************************************************/
/* Identifies the current media type of the controller and sets the PHY */
/* address. */
/* */
@@ -2929,7 +3261,7 @@ bce_dma_free(struct bce_softc *sc)
/* Free, unmap and destroy all TX buffer descriptor chain pages. */
- for (i = 0; i < TX_PAGES; i++ ) {
+ for (i = 0; i < sc->tx_pages; i++ ) {
if (sc->tx_bd_chain[i] != NULL) {
bus_dmamem_free(
sc->tx_bd_chain_tag,
@@ -2957,7 +3289,7 @@ bce_dma_free(struct bce_softc *sc)
/* Free, unmap and destroy all RX buffer descriptor chain pages. */
- for (i = 0; i < RX_PAGES; i++ ) {
+ for (i = 0; i < sc->rx_pages; i++ ) {
if (sc->rx_bd_chain[i] != NULL) {
bus_dmamem_free(
sc->rx_bd_chain_tag,
@@ -2984,38 +3316,38 @@ bce_dma_free(struct bce_softc *sc)
}
-#ifdef BCE_JUMBO_HDRSPLIT
/* Free, unmap and destroy all page buffer descriptor chain pages. */
- for (i = 0; i < PG_PAGES; i++ ) {
- if (sc->pg_bd_chain[i] != NULL) {
- bus_dmamem_free(
- sc->pg_bd_chain_tag,
- sc->pg_bd_chain[i],
- sc->pg_bd_chain_map[i]);
- sc->pg_bd_chain[i] = NULL;
- }
+ if (bce_hdr_split == TRUE) {
+ for (i = 0; i < sc->pg_pages; i++ ) {
+ if (sc->pg_bd_chain[i] != NULL) {
+ bus_dmamem_free(
+ sc->pg_bd_chain_tag,
+ sc->pg_bd_chain[i],
+ sc->pg_bd_chain_map[i]);
+ sc->pg_bd_chain[i] = NULL;
+ }
- if (sc->pg_bd_chain_map[i] != NULL) {
- bus_dmamap_unload(
- sc->pg_bd_chain_tag,
- sc->pg_bd_chain_map[i]);
- bus_dmamap_destroy(
- sc->pg_bd_chain_tag,
- sc->pg_bd_chain_map[i]);
- sc->pg_bd_chain_map[i] = NULL;
+ if (sc->pg_bd_chain_map[i] != NULL) {
+ bus_dmamap_unload(
+ sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i]);
+ bus_dmamap_destroy(
+ sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i]);
+ sc->pg_bd_chain_map[i] = NULL;
+ }
}
- }
- /* Destroy the page buffer descriptor tag. */
- if (sc->pg_bd_chain_tag != NULL) {
- bus_dma_tag_destroy(sc->pg_bd_chain_tag);
- sc->pg_bd_chain_tag = NULL;
+ /* Destroy the page buffer descriptor tag. */
+ if (sc->pg_bd_chain_tag != NULL) {
+ bus_dma_tag_destroy(sc->pg_bd_chain_tag);
+ sc->pg_bd_chain_tag = NULL;
+ }
}
-#endif
/* Unload and destroy the TX mbuf maps. */
- for (i = 0; i < TOTAL_TX_BD; i++) {
+ for (i = 0; i < MAX_TX_BD_AVAIL; i++) {
if (sc->tx_mbuf_map[i] != NULL) {
bus_dmamap_unload(sc->tx_mbuf_tag,
sc->tx_mbuf_map[i]);
@@ -3032,7 +3364,7 @@ bce_dma_free(struct bce_softc *sc)
}
/* Unload and destroy the RX mbuf maps. */
- for (i = 0; i < TOTAL_RX_BD; i++) {
+ for (i = 0; i < MAX_RX_BD_AVAIL; i++) {
if (sc->rx_mbuf_map[i] != NULL) {
bus_dmamap_unload(sc->rx_mbuf_tag,
sc->rx_mbuf_map[i]);
@@ -3048,24 +3380,24 @@ bce_dma_free(struct bce_softc *sc)
sc->rx_mbuf_tag = NULL;
}
-#ifdef BCE_JUMBO_HDRSPLIT
/* Unload and destroy the page mbuf maps. */
- for (i = 0; i < TOTAL_PG_BD; i++) {
- if (sc->pg_mbuf_map[i] != NULL) {
- bus_dmamap_unload(sc->pg_mbuf_tag,
- sc->pg_mbuf_map[i]);
- bus_dmamap_destroy(sc->pg_mbuf_tag,
- sc->pg_mbuf_map[i]);
- sc->pg_mbuf_map[i] = NULL;
+ if (bce_hdr_split == TRUE) {
+ for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
+ if (sc->pg_mbuf_map[i] != NULL) {
+ bus_dmamap_unload(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[i]);
+ bus_dmamap_destroy(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[i]);
+ sc->pg_mbuf_map[i] = NULL;
+ }
}
- }
- /* Destroy the page mbuf tag. */
- if (sc->pg_mbuf_tag != NULL) {
- bus_dma_tag_destroy(sc->pg_mbuf_tag);
- sc->pg_mbuf_tag = NULL;
+ /* Destroy the page mbuf tag. */
+ if (sc->pg_mbuf_tag != NULL) {
+ bus_dma_tag_destroy(sc->pg_mbuf_tag);
+ sc->pg_mbuf_tag = NULL;
+ }
}
-#endif
/* Destroy the parent tag */
if (sc->parent_tag != NULL) {
@@ -3308,7 +3640,7 @@ bce_dma_alloc(device_t dev)
goto bce_dma_alloc_exit;
}
- for (i = 0; i < TX_PAGES; i++) {
+ for (i = 0; i < sc->tx_pages; i++) {
if(bus_dmamem_alloc(sc->tx_bd_chain_tag,
(void **)&sc->tx_bd_chain[i],
@@ -3359,7 +3691,7 @@ bce_dma_alloc(device_t dev)
}
/* Create DMA maps for the TX mbufs clusters. */
- for (i = 0; i < TOTAL_TX_BD; i++) {
+ for (i = 0; i < TOTAL_TX_BD_ALLOC; i++) {
if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
&sc->tx_mbuf_map[i])) {
BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA "
@@ -3385,7 +3717,7 @@ bce_dma_alloc(device_t dev)
goto bce_dma_alloc_exit;
}
- for (i = 0; i < RX_PAGES; i++) {
+ for (i = 0; i < sc->rx_pages; i++) {
if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
(void **)&sc->rx_bd_chain[i],
@@ -3417,12 +3749,11 @@ bce_dma_alloc(device_t dev)
/*
* Create a DMA tag for RX mbufs.
*/
-#ifdef BCE_JUMBO_HDRSPLIT
- max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
- MCLBYTES : sc->rx_bd_mbuf_alloc_size);
-#else
- max_size = max_seg_size = MJUM9BYTES;
-#endif
+ if (bce_hdr_split == TRUE)
+ max_size = max_seg_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
+ MCLBYTES : sc->rx_bd_mbuf_alloc_size);
+ else
+ max_size = max_seg_size = MJUM9BYTES;
max_segments = 1;
DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag "
@@ -3441,7 +3772,7 @@ bce_dma_alloc(device_t dev)
}
/* Create DMA maps for the RX mbuf clusters. */
- for (i = 0; i < TOTAL_RX_BD; i++) {
+ for (i = 0; i < TOTAL_RX_BD_ALLOC; i++) {
if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
&sc->rx_mbuf_map[i])) {
BCE_PRINTF("%s(%d): Unable to create RX mbuf "
@@ -3451,78 +3782,77 @@ bce_dma_alloc(device_t dev)
}
}
-#ifdef BCE_JUMBO_HDRSPLIT
- /*
- * Create a DMA tag for the page buffer descriptor chain,
- * allocate and clear the memory, and fetch the physical
- * address of the blocks.
- */
- if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
- BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr,
- NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ,
- 0, NULL, NULL, &sc->pg_bd_chain_tag)) {
- BCE_PRINTF("%s(%d): Could not allocate page descriptor "
- "chain DMA tag!\n", __FILE__, __LINE__);
- rc = ENOMEM;
- goto bce_dma_alloc_exit;
- }
-
- for (i = 0; i < PG_PAGES; i++) {
-
- if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
- (void **)&sc->pg_bd_chain[i],
- BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
- &sc->pg_bd_chain_map[i])) {
- BCE_PRINTF("%s(%d): Could not allocate page "
- "descriptor chain DMA memory!\n",
- __FILE__, __LINE__);
+ if (bce_hdr_split == TRUE) {
+ /*
+ * Create a DMA tag for the page buffer descriptor chain,
+ * allocate and clear the memory, and fetch the physical
+ * address of the blocks.
+ */
+ if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
+ BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr,
+ NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ,
+ 0, NULL, NULL, &sc->pg_bd_chain_tag)) {
+ BCE_PRINTF("%s(%d): Could not allocate page descriptor "
+ "chain DMA tag!\n", __FILE__, __LINE__);
rc = ENOMEM;
goto bce_dma_alloc_exit;
}
- error = bus_dmamap_load(sc->pg_bd_chain_tag,
- sc->pg_bd_chain_map[i], sc->pg_bd_chain[i],
- BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr,
- &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT);
-
- if (error) {
- BCE_PRINTF("%s(%d): Could not map page descriptor "
- "chain DMA memory!\n", __FILE__, __LINE__);
- rc = ENOMEM;
- goto bce_dma_alloc_exit;
- }
+ for (i = 0; i < sc->pg_pages; i++) {
+ if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
+ (void **)&sc->pg_bd_chain[i],
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+ &sc->pg_bd_chain_map[i])) {
+ BCE_PRINTF("%s(%d): Could not allocate page "
+ "descriptor chain DMA memory!\n",
+ __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
- DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = "
- "0x%jX\n", __FUNCTION__, i,
- (uintmax_t) sc->pg_bd_chain_paddr[i]);
- }
+ error = bus_dmamap_load(sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i], sc->pg_bd_chain[i],
+ BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr,
+ &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT);
- /*
- * Create a DMA tag for page mbufs.
- */
- max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
- MCLBYTES : sc->pg_bd_mbuf_alloc_size);
+ if (error) {
+ BCE_PRINTF("%s(%d): Could not map page descriptor "
+ "chain DMA memory!\n", __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
- if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
- sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
- max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
- BCE_PRINTF("%s(%d): Could not allocate page mbuf "
- "DMA tag!\n", __FILE__, __LINE__);
- rc = ENOMEM;
- goto bce_dma_alloc_exit;
- }
+ DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = "
+ "0x%jX\n", __FUNCTION__, i,
+ (uintmax_t) sc->pg_bd_chain_paddr[i]);
+ }
- /* Create DMA maps for the page mbuf clusters. */
- for (i = 0; i < TOTAL_PG_BD; i++) {
- if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
- &sc->pg_mbuf_map[i])) {
- BCE_PRINTF("%s(%d): Unable to create page mbuf "
- "DMA map!\n", __FILE__, __LINE__);
+ /*
+ * Create a DMA tag for page mbufs.
+ */
+ max_size = max_seg_size = ((sc->pg_bd_mbuf_alloc_size < MCLBYTES) ?
+ MCLBYTES : sc->pg_bd_mbuf_alloc_size);
+
+ if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
+ sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
+ max_size, 1, max_seg_size, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
+ BCE_PRINTF("%s(%d): Could not allocate page mbuf "
+ "DMA tag!\n", __FILE__, __LINE__);
rc = ENOMEM;
goto bce_dma_alloc_exit;
}
+
+ /* Create DMA maps for the page mbuf clusters. */
+ for (i = 0; i < TOTAL_PG_BD_ALLOC; i++) {
+ if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
+ &sc->pg_mbuf_map[i])) {
+ BCE_PRINTF("%s(%d): Unable to create page mbuf "
+ "DMA map!\n", __FILE__, __LINE__);
+ rc = ENOMEM;
+ goto bce_dma_alloc_exit;
+ }
+ }
}
-#endif
bce_dma_alloc_exit:
DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
@@ -4597,9 +4927,9 @@ bce_stop(struct bce_softc *sc)
bce_disable_intr(sc);
/* Free RX buffers. */
-#ifdef BCE_JUMBO_HDRSPLIT
- bce_free_pg_chain(sc);
-#endif
+ if (bce_hdr_split == TRUE) {
+ bce_free_pg_chain(sc);
+ }
bce_free_rx_chain(sc);
/* Free TX buffers. */
@@ -5012,10 +5342,10 @@ bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
/* Make sure the inputs are valid. */
- DBRUNIF((*chain_prod > MAX_RX_BD),
+ DBRUNIF((*chain_prod > MAX_RX_BD_ALLOC),
BCE_PRINTF("%s(%d): RX producer out of range: "
"0x%04X > 0x%04X\n", __FILE__, __LINE__,
- *chain_prod, (u16) MAX_RX_BD));
+ *chain_prod, (u16) MAX_RX_BD_ALLOC));
DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
"chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__,
@@ -5038,12 +5368,11 @@ bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
goto bce_get_rx_buf_exit);
/* This is a new mbuf allocation. */
-#ifdef BCE_JUMBO_HDRSPLIT
- MGETHDR(m_new, M_DONTWAIT, MT_DATA);
-#else
- m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
- sc->rx_bd_mbuf_alloc_size);
-#endif
+ if (bce_hdr_split == TRUE)
+ MGETHDR(m_new, M_DONTWAIT, MT_DATA);
+ else
+ m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
+ sc->rx_bd_mbuf_alloc_size);
if (m_new == NULL) {
sc->mbuf_alloc_failed_count++;
@@ -5116,7 +5445,6 @@ bce_get_rx_buf_exit:
}
-#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Encapsulate an mbuf cluster into the page chain. */
/* */
@@ -5139,10 +5467,10 @@ bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
/* Make sure the inputs are valid. */
- DBRUNIF((*prod_idx > MAX_PG_BD),
+ DBRUNIF((*prod_idx > MAX_PG_BD_ALLOC),
BCE_PRINTF("%s(%d): page producer out of range: "
"0x%04X > 0x%04X\n", __FILE__, __LINE__,
- *prod_idx, (u16) MAX_PG_BD));
+ *prod_idx, (u16) MAX_PG_BD_ALLOC));
DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
"chain_prod = 0x%04X\n", __FUNCTION__, *prod, *prod_idx);
@@ -5227,7 +5555,6 @@ bce_get_pg_buf_exit:
return(rc);
}
-#endif /* BCE_JUMBO_HDRSPLIT */
/****************************************************************************/
@@ -5300,7 +5627,7 @@ bce_init_tx_chain(struct bce_softc *sc)
sc->tx_cons = 0;
sc->tx_prod_bseq = 0;
sc->used_tx_bd = 0;
- sc->max_tx_bd = USABLE_TX_BD;
+ sc->max_tx_bd = USABLE_TX_BD_ALLOC;
DBRUN(sc->tx_hi_watermark = 0);
DBRUN(sc->tx_full_count = 0);
@@ -5315,24 +5642,26 @@ bce_init_tx_chain(struct bce_softc *sc)
*/
/* Set the TX next pointer chain entries. */
- for (i = 0; i < TX_PAGES; i++) {
+ for (i = 0; i < sc->tx_pages; i++) {
int j;
txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
/* Check if we've reached the last page. */
- if (i == (TX_PAGES - 1))
+ if (i == (sc->tx_pages - 1))
j = 0;
else
j = i + 1;
- txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
- txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
+ txbd->tx_bd_haddr_hi =
+ htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
+ txbd->tx_bd_haddr_lo =
+ htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
}
bce_init_tx_context(sc);
- DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
+ DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC));
DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
return(rc);
@@ -5353,7 +5682,7 @@ bce_free_tx_chain(struct bce_softc *sc)
DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
- for (i = 0; i < TOTAL_TX_BD; i++) {
+ for (i = 0; i < MAX_TX_BD_AVAIL; i++) {
if (sc->tx_mbuf_ptr[i] != NULL) {
if (sc->tx_mbuf_map[i] != NULL)
bus_dmamap_sync(sc->tx_mbuf_tag,
@@ -5366,7 +5695,7 @@ bce_free_tx_chain(struct bce_softc *sc)
}
/* Clear each TX chain page. */
- for (i = 0; i < TX_PAGES; i++)
+ for (i = 0; i < sc->tx_pages; i++)
bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
sc->used_tx_bd = 0;
@@ -5416,11 +5745,11 @@ bce_init_rx_context(struct bce_softc *sc)
lo_water = 0;
}
- if (lo_water >= USABLE_RX_BD) {
+ if (lo_water >= USABLE_RX_BD_ALLOC) {
lo_water = 0;
}
- hi_water = USABLE_RX_BD / 4;
+ hi_water = USABLE_RX_BD_ALLOC / 4;
if (hi_water <= lo_water) {
lo_water = 0;
@@ -5476,17 +5805,17 @@ bce_init_rx_chain(struct bce_softc *sc)
sc->rx_prod = 0;
sc->rx_cons = 0;
sc->rx_prod_bseq = 0;
- sc->free_rx_bd = USABLE_RX_BD;
- sc->max_rx_bd = USABLE_RX_BD;
+ sc->free_rx_bd = USABLE_RX_BD_ALLOC;
+ sc->max_rx_bd = USABLE_RX_BD_ALLOC;
/* Initialize the RX next pointer chain entries. */
- for (i = 0; i < RX_PAGES; i++) {
+ for (i = 0; i < sc->rx_pages; i++) {
int j;
rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
/* Check if we've reached the last page. */
- if (i == (RX_PAGES - 1))
+ if (i == (sc->rx_pages - 1))
j = 0;
else
j = i + 1;
@@ -5501,16 +5830,17 @@ bce_init_rx_chain(struct bce_softc *sc)
/* Fill up the RX chain. */
bce_fill_rx_chain(sc);
- DBRUN(sc->rx_low_watermark = USABLE_RX_BD);
+ DBRUN(sc->rx_low_watermark = USABLE_RX_BD_ALLOC);
DBRUN(sc->rx_empty_count = 0);
- for (i = 0; i < RX_PAGES; i++) {
+ for (i = 0; i < sc->rx_pages; i++) {
bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
bce_init_rx_context(sc);
- DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD));
+ DBRUNMSG(BCE_EXTREME_RECV,
+ bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC));
DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
BCE_VERBOSE_CTX);
@@ -5584,7 +5914,7 @@ bce_free_rx_chain(struct bce_softc *sc)
DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
/* Free any mbufs still in the RX mbuf chain. */
- for (i = 0; i < TOTAL_RX_BD; i++) {
+ for (i = 0; i < MAX_RX_BD_AVAIL; i++) {
if (sc->rx_mbuf_ptr[i] != NULL) {
if (sc->rx_mbuf_map[i] != NULL)
bus_dmamap_sync(sc->rx_mbuf_tag,
@@ -5597,7 +5927,7 @@ bce_free_rx_chain(struct bce_softc *sc)
}
/* Clear each RX chain page. */
- for (i = 0; i < RX_PAGES; i++)
+ for (i = 0; i < sc->rx_pages; i++)
if (sc->rx_bd_chain[i] != NULL) {
bzero((char *)sc->rx_bd_chain[i],
BCE_RX_CHAIN_PAGE_SZ);
@@ -5614,7 +5944,6 @@ bce_free_rx_chain(struct bce_softc *sc)
}
-#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Allocate memory and initialize the page data structures. */
/* Assumes that bce_init_rx_chain() has not already been called. */
@@ -5635,26 +5964,28 @@ bce_init_pg_chain(struct bce_softc *sc)
/* Initialize the page producer and consumer indices. */
sc->pg_prod = 0;
sc->pg_cons = 0;
- sc->free_pg_bd = USABLE_PG_BD;
- sc->max_pg_bd = USABLE_PG_BD;
+ sc->free_pg_bd = USABLE_PG_BD_ALLOC;
+ sc->max_pg_bd = USABLE_PG_BD_ALLOC;
DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
DBRUN(sc->pg_empty_count = 0);
/* Initialize the page next pointer chain entries. */
- for (i = 0; i < PG_PAGES; i++) {
+ for (i = 0; i < sc->pg_pages; i++) {
int j;
pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
/* Check if we've reached the last page. */
- if (i == (PG_PAGES - 1))
+ if (i == (sc->pg_pages - 1))
j = 0;
else
j = i + 1;
/* Setup the chain page pointers. */
- pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
- pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
+ pgbd->rx_bd_haddr_hi =
+ htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
+ pgbd->rx_bd_haddr_lo =
+ htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
}
/* Setup the MQ BIN mapping for host_pg_bidx. */
@@ -5681,12 +6012,13 @@ bce_init_pg_chain(struct bce_softc *sc)
/* Fill up the page chain. */
bce_fill_pg_chain(sc);
- for (i = 0; i < PG_PAGES; i++) {
+ for (i = 0; i < sc->pg_pages; i++) {
bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
- DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD));
+ DBRUNMSG(BCE_EXTREME_RECV,
+ bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC));
DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
BCE_VERBOSE_CTX);
return(rc);
@@ -5754,7 +6086,7 @@ bce_free_pg_chain(struct bce_softc *sc)
DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
/* Free any mbufs still in the mbuf page chain. */
- for (i = 0; i < TOTAL_PG_BD; i++) {
+ for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
if (sc->pg_mbuf_ptr[i] != NULL) {
if (sc->pg_mbuf_map[i] != NULL)
bus_dmamap_sync(sc->pg_mbuf_tag,
@@ -5767,7 +6099,7 @@ bce_free_pg_chain(struct bce_softc *sc)
}
/* Clear each page chain pages. */
- for (i = 0; i < PG_PAGES; i++)
+ for (i = 0; i < sc->pg_pages; i++)
bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);
sc->free_pg_bd = sc->max_pg_bd;
@@ -5779,7 +6111,6 @@ bce_free_pg_chain(struct bce_softc *sc)
DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}
-#endif /* BCE_JUMBO_HDRSPLIT */
/****************************************************************************/
@@ -5963,10 +6294,8 @@ bce_rx_intr(struct bce_softc *sc)
unsigned int pkt_len;
u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons;
u32 status;
-#ifdef BCE_JUMBO_HDRSPLIT
unsigned int rem_len;
u16 sw_pg_cons, sw_pg_cons_idx;
-#endif
DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR);
DBRUN(sc->interrupts_rx++);
@@ -5975,26 +6304,23 @@ bce_rx_intr(struct bce_softc *sc)
__FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
/* Prepare the RX chain pages to be accessed by the host CPU. */
- for (int i = 0; i < RX_PAGES; i++)
+ for (int i = 0; i < sc->rx_pages; i++)
bus_dmamap_sync(sc->rx_bd_chain_tag,
sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
-#ifdef BCE_JUMBO_HDRSPLIT
/* Prepare the page chain pages to be accessed by the host CPU. */
- for (int i = 0; i < PG_PAGES; i++)
- bus_dmamap_sync(sc->pg_bd_chain_tag,
- sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
-#endif
+ if (bce_hdr_split == TRUE) {
+ for (int i = 0; i < sc->pg_pages; i++)
+ bus_dmamap_sync(sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD);
+ }
/* Get the hardware's view of the RX consumer index. */
hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
/* Get working copies of the driver's view of the consumer indices. */
sw_rx_cons = sc->rx_cons;
-
-#ifdef BCE_JUMBO_HDRSPLIT
sw_pg_cons = sc->pg_cons;
-#endif
/* Update some debug statistics counters */
DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
@@ -6037,7 +6363,7 @@ bce_rx_intr(struct bce_softc *sc)
* with an l2_fhdr structure which provides status
* information about the received frame (including
* VLAN tags and checksum info). The frames are
- * also automatically adjusted to align the IP
+ * also automatically adjusted to word align the IP
* header (i.e. two null bytes are inserted before
* the Ethernet header). As a result the data
* DMA'd by the controller into the mbuf looks
@@ -6066,96 +6392,104 @@ bce_rx_intr(struct bce_softc *sc)
*/
m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN);
-#ifdef BCE_JUMBO_HDRSPLIT
/*
- * Check whether the received frame fits in a single
- * mbuf or not (i.e. packet data + FCS <=
- * sc->rx_bd_mbuf_data_len bytes).
+ * When split header mode is used, an ethernet frame
+ * may be split across the receive chain and the
+ * page chain. If that occurs an mbuf cluster must be
+ * reassembled from the individual mbuf pieces.
*/
- if (pkt_len > m0->m_len) {
+ if (bce_hdr_split == TRUE) {
/*
- * The received frame is larger than a single mbuf.
- * If the frame was a TCP frame then only the TCP
- * header is placed in the mbuf, the remaining
- * payload (including FCS) is placed in the page
- * chain, the SPLIT flag is set, and the header
- * length is placed in the IP checksum field.
- * If the frame is not a TCP frame then the mbuf
- * is filled and the remaining bytes are placed
- * in the page chain.
+ * Check whether the received frame fits in a single
+ * mbuf or not (i.e. packet data + FCS <=
+ * sc->rx_bd_mbuf_data_len bytes).
*/
+ if (pkt_len > m0->m_len) {
+ /*
+ * The received frame is larger than a single mbuf.
+ * If the frame was a TCP frame then only the TCP
+ * header is placed in the mbuf, the remaining
+ * payload (including FCS) is placed in the page
+ * chain, the SPLIT flag is set, and the header
+ * length is placed in the IP checksum field.
+ * If the frame is not a TCP frame then the mbuf
+ * is filled and the remaining bytes are placed
+ * in the page chain.
+ */
- DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large "
- "packet.\n", __FUNCTION__);
-
- /*
- * When the page chain is enabled and the TCP
- * header has been split from the TCP payload,
- * the ip_xsum structure will reflect the length
- * of the TCP header, not the IP checksum. Set
- * the packet length of the mbuf accordingly.
- */
- if (status & L2_FHDR_STATUS_SPLIT)
- m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
-
- rem_len = pkt_len - m0->m_len;
-
- /* Pull mbufs off the page chain for the remaining data. */
- while (rem_len > 0) {
- struct mbuf *m_pg;
-
- sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
-
- /* Remove the mbuf from the page chain. */
- m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
- sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
- DBRUN(sc->debug_pg_mbuf_alloc--);
- sc->free_pg_bd++;
+ DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large "
+ "packet.\n", __FUNCTION__);
+ DBRUN(sc->split_header_frames_rcvd++);
- /* Unmap the page chain mbuf from DMA space. */
- bus_dmamap_sync(sc->pg_mbuf_tag,
- sc->pg_mbuf_map[sw_pg_cons_idx],
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->pg_mbuf_tag,
- sc->pg_mbuf_map[sw_pg_cons_idx]);
-
- /* Adjust the mbuf length. */
- if (rem_len < m_pg->m_len) {
- /* The mbuf chain is complete. */
- m_pg->m_len = rem_len;
- rem_len = 0;
- } else {
- /* More packet data is waiting. */
- rem_len -= m_pg->m_len;
+ /*
+ * When the page chain is enabled and the TCP
+ * header has been split from the TCP payload,
+ * the ip_xsum structure will reflect the length
+ * of the TCP header, not the IP checksum. Set
+ * the packet length of the mbuf accordingly.
+ */
+ if (status & L2_FHDR_STATUS_SPLIT) {
+ m0->m_len = l2fhdr->l2_fhdr_ip_xsum;
+ DBRUN(sc->split_header_tcp_frames_rcvd++);
}
- /* Concatenate the mbuf cluster to the mbuf. */
- m_cat(m0, m_pg);
-
- sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
- }
+ rem_len = pkt_len - m0->m_len;
+
+ /* Pull mbufs off the page chain for any remaining data. */
+ while (rem_len > 0) {
+ struct mbuf *m_pg;
+
+ sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons);
+
+ /* Remove the mbuf from the page chain. */
+ m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx];
+ sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL;
+ DBRUN(sc->debug_pg_mbuf_alloc--);
+ sc->free_pg_bd++;
+
+ /* Unmap the page chain mbuf from DMA space. */
+ bus_dmamap_sync(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[sw_pg_cons_idx],
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->pg_mbuf_tag,
+ sc->pg_mbuf_map[sw_pg_cons_idx]);
+
+ /* Adjust the mbuf length. */
+ if (rem_len < m_pg->m_len) {
+ /* The mbuf chain is complete. */
+ m_pg->m_len = rem_len;
+ rem_len = 0;
+ } else {
+ /* More packet data is waiting. */
+ rem_len -= m_pg->m_len;
+ }
+
+ /* Concatenate the mbuf cluster to the mbuf. */
+ m_cat(m0, m_pg);
+
+ sw_pg_cons = NEXT_PG_BD(sw_pg_cons);
+ }
- /* Set the total packet length. */
- m0->m_pkthdr.len = pkt_len;
+ /* Set the total packet length. */
+ m0->m_pkthdr.len = pkt_len;
- } else {
- /*
- * The received packet is small and fits in a
- * single mbuf (i.e. the l2_fhdr + pad + packet +
- * FCS <= MHLEN). In other words, the packet is
- * 154 bytes or less in size.
- */
+ } else {
+ /*
+ * The received packet is small and fits in a
+ * single mbuf (i.e. the l2_fhdr + pad + packet +
+ * FCS <= MHLEN). In other words, the packet is
+ * 154 bytes or less in size.
+ */
- DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small "
- "packet.\n", __FUNCTION__);
+ DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small "
+ "packet.\n", __FUNCTION__);
+ /* Set the total packet length. */
+ m0->m_pkthdr.len = m0->m_len = pkt_len;
+ }
+ } else
/* Set the total packet length. */
m0->m_pkthdr.len = m0->m_len = pkt_len;
- }
-#else
- /* Set the total packet length. */
- m0->m_pkthdr.len = m0->m_len = pkt_len;
-#endif
/* Remove the trailing Ethernet FCS. */
m_adj(m0, -ETHER_CRC_LEN);
@@ -6164,8 +6498,8 @@ bce_rx_intr(struct bce_softc *sc)
DBRUN(m_sanity(m0, FALSE));
DBRUNIF(((m0->m_len < ETHER_HDR_LEN) |
(m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)),
- BCE_PRINTF("Invalid Ethernet frame size!\n");
- m_print(m0, 128));
+ BCE_PRINTF("Invalid Ethernet frame size!\n");
+ m_print(m0, 128));
DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control),
sc->l2fhdr_error_sim_count++;
@@ -6224,7 +6558,9 @@ bce_rx_intr(struct bce_softc *sc)
/* Attach the VLAN tag. */
if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
+ DBRUN(sc->vlan_tagged_frames_rcvd++);
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
+ DBRUN(sc->vlan_tagged_frames_stripped++);
#if __FreeBSD_version < 700000
VLAN_INPUT_TAG(ifp, m0,
l2fhdr->l2_fhdr_vlan_tag, continue);
@@ -6267,9 +6603,7 @@ bce_rx_int_next_rx:
if (m0) {
/* Make sure we don't lose our place when we release the lock. */
sc->rx_cons = sw_rx_cons;
-#ifdef BCE_JUMBO_HDRSPLIT
sc->pg_cons = sw_pg_cons;
-#endif
BCE_UNLOCK(sc);
(*ifp->if_input)(ifp, m0);
@@ -6277,9 +6611,7 @@ bce_rx_int_next_rx:
/* Recover our place. */
sw_rx_cons = sc->rx_cons;
-#ifdef BCE_JUMBO_HDRSPLIT
sw_pg_cons = sc->pg_cons;
-#endif
}
/* Refresh hw_cons to see if there's new work */
@@ -6287,26 +6619,26 @@ bce_rx_int_next_rx:
hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);
}
-#ifdef BCE_JUMBO_HDRSPLIT
/* No new packets. Refill the page chain. */
- sc->pg_cons = sw_pg_cons;
- bce_fill_pg_chain(sc);
-#endif
+ if (bce_hdr_split == TRUE) {
+ sc->pg_cons = sw_pg_cons;
+ bce_fill_pg_chain(sc);
+ }
/* No new packets. Refill the RX chain. */
sc->rx_cons = sw_rx_cons;
bce_fill_rx_chain(sc);
/* Prepare the page chain pages to be accessed by the NIC. */
- for (int i = 0; i < RX_PAGES; i++)
+ for (int i = 0; i < sc->rx_pages; i++)
bus_dmamap_sync(sc->rx_bd_chain_tag,
sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
-#ifdef BCE_JUMBO_HDRSPLIT
- for (int i = 0; i < PG_PAGES; i++)
- bus_dmamap_sync(sc->pg_bd_chain_tag,
- sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
-#endif
+ if (bce_hdr_split == TRUE) {
+ for (int i = 0; i < sc->pg_pages; i++)
+ bus_dmamap_sync(sc->pg_bd_chain_tag,
+ sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE);
+ }
DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, "
"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
@@ -6376,10 +6708,10 @@ bce_tx_intr(struct bce_softc *sc)
"sw_tx_chain_cons = 0x%04X\n",
__FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons);
- DBRUNIF((sw_tx_chain_cons > MAX_TX_BD),
+ DBRUNIF((sw_tx_chain_cons > MAX_TX_BD_ALLOC),
BCE_PRINTF("%s(%d): TX chain consumer out of range! "
" 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons,
- (int) MAX_TX_BD);
+ (int) MAX_TX_BD_ALLOC);
bce_breakpoint(sc));
DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)]
@@ -6547,19 +6879,26 @@ bce_init_locked(struct bce_softc *sc)
/*
* Calculate and program the hardware Ethernet MTU
- * size. Be generous on the receive if we have room.
+ * size. Be generous on the receive if we have room
+ * and allowed by the user.
*/
-#ifdef BCE_JUMBO_HDRSPLIT
- if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len +
- sc->pg_bd_mbuf_alloc_size))
- ether_mtu = sc->rx_bd_mbuf_data_len +
- sc->pg_bd_mbuf_alloc_size;
-#else
- if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
- ether_mtu = sc->rx_bd_mbuf_data_len;
-#endif
- else
+ if (bce_strict_rx_mtu == TRUE)
ether_mtu = ifp->if_mtu;
+ else {
+ if (bce_hdr_split == TRUE) {
+ if (ifp->if_mtu <= (sc->rx_bd_mbuf_data_len +
+ sc->pg_bd_mbuf_alloc_size))
+ ether_mtu = sc->rx_bd_mbuf_data_len +
+ sc->pg_bd_mbuf_alloc_size;
+ else
+ ether_mtu = ifp->if_mtu;
+ } else {
+ if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len)
+ ether_mtu = sc->rx_bd_mbuf_data_len;
+ else
+ ether_mtu = ifp->if_mtu;
+ }
+ }
ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
@@ -6574,22 +6913,16 @@ bce_init_locked(struct bce_softc *sc)
else
REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
- DBPRINT(sc, BCE_INFO_LOAD,
- "%s(): rx_bd_mbuf_alloc_size = %d, rx_bce_mbuf_data_len = %d, "
- "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
- sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
- sc->rx_bd_mbuf_align_pad);
-
/* Program appropriate promiscuous/multicast filtering. */
bce_set_rx_mode(sc);
-#ifdef BCE_JUMBO_HDRSPLIT
- DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
- __FUNCTION__, sc->pg_bd_mbuf_alloc_size);
+ if (bce_hdr_split == TRUE) {
+ DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_mbuf_alloc_size = %d\n",
+ __FUNCTION__, sc->pg_bd_mbuf_alloc_size);
- /* Init page buffer descriptor chain. */
- bce_init_pg_chain(sc);
-#endif
+ /* Init page buffer descriptor chain. */
+ bce_init_pg_chain(sc);
+ }
/* Init RX buffer descriptor chain. */
bce_init_rx_chain(sc);
@@ -6748,7 +7081,7 @@ bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
return (NULL);
}
- /* IP header length and checksum will be calc'd by hardware */
+ /* Clear IP header length and checksum, will be calc'd by h/w. */
ip = (struct ip *)(m->m_data + sizeof(struct ether_header));
ip_len = ip->ip_len;
ip->ip_len = 0;
@@ -7117,7 +7450,7 @@ bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
struct bce_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
struct mii_data *mii;
- int mask, error = 0, reinit;
+ int mask, error = 0;
DBENTER(BCE_VERBOSE_MISC);
@@ -7138,40 +7471,22 @@ bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
BCE_LOCK(sc);
ifp->if_mtu = ifr->ifr_mtu;
- reinit = 0;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- /*
- * Because allocation size is used in RX
- * buffer allocation, stop controller if
- * it is already running.
- */
- bce_stop(sc);
- reinit = 1;
- }
-#ifdef BCE_JUMBO_HDRSPLIT
- /* No buffer allocation size changes are necessary. */
-#else
- /* Recalculate our buffer allocation sizes. */
- if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
- ETHER_CRC_LEN) > MCLBYTES) {
- sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
- sc->rx_bd_mbuf_align_pad =
- roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
- sc->rx_bd_mbuf_data_len =
- sc->rx_bd_mbuf_alloc_size -
- sc->rx_bd_mbuf_align_pad;
- } else {
- sc->rx_bd_mbuf_alloc_size = MCLBYTES;
- sc->rx_bd_mbuf_align_pad =
- roundup2(MCLBYTES, 16) - MCLBYTES;
- sc->rx_bd_mbuf_data_len =
- sc->rx_bd_mbuf_alloc_size -
- sc->rx_bd_mbuf_align_pad;
- }
-#endif
- if (reinit != 0)
+ if (bce_hdr_split == FALSE) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ /*
+ * Because allocation size is used in RX
+ * buffer allocation, stop controller if
+ * it is already running.
+ */
+ bce_stop(sc);
+ }
+
+ bce_get_rx_buffer_sizes(sc, ifp->if_mtu);
+
bce_init_locked(sc);
+ }
+
BCE_UNLOCK(sc);
break;
@@ -7848,7 +8163,7 @@ bce_pulse(void *xsc)
sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
/* Report whether the bootcode still knows the driver is running. */
- if (bootverbose) {
+ if (bce_verbose || bootverbose) {
if (sc->bce_drv_cardiac_arrest == FALSE) {
if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
sc->bce_drv_cardiac_arrest = TRUE;
@@ -7906,10 +8221,13 @@ bce_tick(void *xsc)
/* Update the statistics from the hardware statistics block. */
bce_stats_update(sc);
+ /*
+ * ToDo: This is a safety measure. Need to re-evaluate
+ * high level processing logic and eliminate this code.
+ */
/* Top off the receive and page chains. */
-#ifdef BCE_JUMBO_HDRSPLIT
- bce_fill_pg_chain(sc);
-#endif
+ if (bce_hdr_split == TRUE)
+ bce_fill_pg_chain(sc);
bce_fill_rx_chain(sc);
/* Check that chip hasn't hung. */
@@ -7932,7 +8250,7 @@ bce_tick(void *xsc)
if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX ||
IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) &&
- bootverbose)
+ (bce_verbose || bootverbose))
BCE_PRINTF("Gigabit link up!\n");
/* Now that link is up, handle any outstanding TX traffic. */
@@ -8084,6 +8402,10 @@ bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
if (result == 1) {
sc = (struct bce_softc *)arg1;
+ struct statistics_block *stats;
+
+ stats = (struct statistics_block *) sc->stats_block;
+ bzero(stats, sizeof(struct statistics_block));
/* Clear the internal H/W statistics counters. */
REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
@@ -8096,7 +8418,7 @@ bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
sc->tso_frames_failed = 0;
sc->rx_empty_count =
sc->tx_full_count = 0;
- sc->rx_low_watermark = USABLE_RX_BD;
+ sc->rx_low_watermark = USABLE_RX_BD_ALLOC;
sc->tx_hi_watermark = 0;
sc->l2fhdr_error_count =
sc->l2fhdr_error_sim_count = 0;
@@ -8106,9 +8428,11 @@ bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
sc->dma_map_addr_tx_failed_count = 0;
sc->mbuf_frag_count = 0;
sc->csum_offload_tcp_udp =
- sc->csum_offload_ip = 0;
+ sc->csum_offload_ip = 0;
sc->vlan_tagged_frames_rcvd =
- sc->vlan_tagged_frames_stripped = 0;
+ sc->vlan_tagged_frames_stripped = 0;
+ sc->split_header_frames_rcvd =
+ sc->split_header_tcp_frames_rcvd = 0;
/* Clear firmware maintained statistics. */
REG_WR_IND(sc, 0x120084, 0);
@@ -8119,6 +8443,35 @@ bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS)
/****************************************************************************/
+/* Allows the shared memory contents to be dumped through the sysctl */
+/* interface. */
+/* */
+/* Returns: */
+/* 0 for success, positive value for failure. */
+/****************************************************************************/
+static int
+bce_sysctl_shmem_state(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int result;
+ struct bce_softc *sc;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ sc = (struct bce_softc *)arg1;
+ bce_dump_shmem_state(sc);
+ }
+
+ return error;
+}
+
+
+/****************************************************************************/
/* Allows the bootcode state to be dumped through the sysctl interface. */
/* */
/* Returns: */
@@ -8167,7 +8520,7 @@ bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS)
if (result == 1) {
sc = (struct bce_softc *)arg1;
- bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD);
+ bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC);
}
return error;
@@ -8195,7 +8548,7 @@ bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS)
if (result == 1) {
sc = (struct bce_softc *)arg1;
- bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
+ bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC);
}
return error;
@@ -8223,14 +8576,13 @@ bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS)
if (result == 1) {
sc = (struct bce_softc *)arg1;
- bce_dump_tx_chain(sc, 0, TOTAL_TX_BD);
+ bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC);
}
return error;
}
-#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Provides a sysctl interface to allow dumping the page chain. */
/* */
@@ -8252,12 +8604,11 @@ bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS)
if (result == 1) {
sc = (struct bce_softc *)arg1;
- bce_dump_pg_chain(sc, 0, TOTAL_PG_BD);
+ bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC);
}
return error;
}
-#endif
/****************************************************************************/
/* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */
@@ -8566,123 +8917,137 @@ bce_add_sysctls(struct bce_softc *sc)
CTLFLAG_RD, &sc->rx_low_watermark,
0, "Lowest level of free rx_bd's");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"rx_empty_count",
CTLFLAG_RD, &sc->rx_empty_count,
- 0, "Number of times the RX chain was empty");
+ "Number of times the RX chain was empty");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"tx_hi_watermark",
CTLFLAG_RD, &sc->tx_hi_watermark,
0, "Highest level of used tx_bd's");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"tx_full_count",
CTLFLAG_RD, &sc->tx_full_count,
- 0, "Number of times the TX chain was full");
+ "Number of times the TX chain was full");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"tso_frames_requested",
CTLFLAG_RD, &sc->tso_frames_requested,
- 0, "Number of TSO frames requested");
+ "Number of TSO frames requested");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"tso_frames_completed",
CTLFLAG_RD, &sc->tso_frames_completed,
- 0, "Number of TSO frames completed");
+ "Number of TSO frames completed");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"tso_frames_failed",
CTLFLAG_RD, &sc->tso_frames_failed,
- 0, "Number of TSO frames failed");
+ "Number of TSO frames failed");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"csum_offload_ip",
CTLFLAG_RD, &sc->csum_offload_ip,
- 0, "Number of IP checksum offload frames");
+ "Number of IP checksum offload frames");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"csum_offload_tcp_udp",
CTLFLAG_RD, &sc->csum_offload_tcp_udp,
- 0, "Number of TCP/UDP checksum offload frames");
+ "Number of TCP/UDP checksum offload frames");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"vlan_tagged_frames_rcvd",
CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd,
- 0, "Number of VLAN tagged frames received");
+ "Number of VLAN tagged frames received");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"vlan_tagged_frames_stripped",
CTLFLAG_RD, &sc->vlan_tagged_frames_stripped,
- 0, "Number of VLAN tagged frames stripped");
+ "Number of VLAN tagged frames stripped");
- SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"interrupts_rx",
CTLFLAG_RD, &sc->interrupts_rx,
- 0, "Number of RX interrupts");
+ "Number of RX interrupts");
- SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"interrupts_tx",
CTLFLAG_RD, &sc->interrupts_tx,
- 0, "Number of TX interrupts");
+ "Number of TX interrupts");
+
+ if (bce_hdr_split == TRUE) {
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
+ "split_header_frames_rcvd",
+ CTLFLAG_RD, &sc->split_header_frames_rcvd,
+ "Number of split header frames received");
+
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
+ "split_header_tcp_frames_rcvd",
+ CTLFLAG_RD, &sc->split_header_tcp_frames_rcvd,
+ "Number of split header TCP frames received");
+ }
+
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD,
(void *)sc, 0,
sysctl_nvram_dump, "S", "");
+
#ifdef BCE_NVRAM_WRITE_SUPPORT
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR,
(void *)sc, 0,
sysctl_nvram_write, "S", "");
#endif
-#endif
+#endif /* BCE_DEBUG */
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHcInOctets",
CTLFLAG_RD, &sc->stat_IfHCInOctets,
"Bytes received");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCInBadOctets",
CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
"Bad bytes received");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCOutOctets",
CTLFLAG_RD, &sc->stat_IfHCOutOctets,
"Bytes sent");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCOutBadOctets",
CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
"Bad bytes sent");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCInUcastPkts",
CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
"Unicast packets received");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCInMulticastPkts",
CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
"Multicast packets received");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCInBroadcastPkts",
CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
"Broadcast packets received");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCOutUcastPkts",
CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
"Unicast packets sent");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCOutMulticastPkts",
CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
"Multicast packets sent");
- SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
+ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO,
"stat_IfHCOutBroadcastPkts",
CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
"Broadcast packets sent");
@@ -8941,6 +9306,11 @@ bce_add_sysctls(struct bce_softc *sc)
bce_sysctl_stats_clear, "I", "Clear statistics block");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
+ "shmem_state", CTLTYPE_INT | CTLFLAG_RW,
+ (void *)sc, 0,
+ bce_sysctl_shmem_state, "I", "Shared memory state information");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"bc_state", CTLTYPE_INT | CTLFLAG_RW,
(void *)sc, 0,
bce_sysctl_bc_state, "I", "Bootcode state information");
@@ -8960,12 +9330,13 @@ bce_add_sysctls(struct bce_softc *sc)
(void *)sc, 0,
bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain");
-#ifdef BCE_JUMBO_HDRSPLIT
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
- "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
- (void *)sc, 0,
- bce_sysctl_dump_pg_chain, "I", "Dump page chain");
-#endif
+ if (bce_hdr_split == TRUE) {
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
+ "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW,
+ (void *)sc, 0,
+ bce_sysctl_dump_pg_chain, "I", "Dump page chain");
+ }
+
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"dump_ctx", CTLTYPE_INT | CTLFLAG_RW,
(void *)sc, 0,
@@ -9269,7 +9640,6 @@ bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
}
-#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Prints out the mbufs in the mbuf page chain. */
/* */
@@ -9299,7 +9669,6 @@ bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count)
"----------------"
"----------------------------\n");
}
-#endif
/****************************************************************************/
@@ -9313,7 +9682,7 @@ bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
{
int i = 0;
- if (idx > MAX_TX_BD)
+ if (idx > MAX_TX_BD_ALLOC)
/* Index out of range. */
BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx);
else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
@@ -9426,7 +9795,7 @@ bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd)
static __attribute__ ((noinline)) void
bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
{
- if (idx > MAX_RX_BD)
+ if (idx > MAX_RX_BD_ALLOC)
/* Index out of range. */
BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx);
else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
@@ -9443,7 +9812,6 @@ bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
}
-#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Prints out a rx_bd structure in the page chain. */
/* */
@@ -9453,7 +9821,7 @@ bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd)
static __attribute__ ((noinline)) void
bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
{
- if (idx > MAX_PG_BD)
+ if (idx > MAX_PG_BD_ALLOC)
/* Index out of range. */
BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx);
else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE)
@@ -9467,7 +9835,6 @@ bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd)
pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo,
pgbd->rx_bd_len, pgbd->rx_bd_flags);
}
-#endif
/****************************************************************************/
@@ -9877,10 +10244,10 @@ bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count)
"----------------------------\n");
BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n",
- (u32) BCM_PAGE_SIZE, (u32) TX_PAGES);
+ (u32) BCM_PAGE_SIZE, (u32) sc->tx_pages);
BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n",
(u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE);
- BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD);
+ BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD_ALLOC);
BCE_PRINTF(
"----------------------------"
@@ -9919,12 +10286,12 @@ bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count)
"----------------------------\n");
BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n",
- (u32) BCM_PAGE_SIZE, (u32) RX_PAGES);
+ (u32) BCM_PAGE_SIZE, (u32) sc->rx_pages);
BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
(u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE);
- BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD);
+ BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD_ALLOC);
BCE_PRINTF(
"----------------------------"
@@ -9945,7 +10312,6 @@ bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count)
}
-#ifdef BCE_JUMBO_HDRSPLIT
/****************************************************************************/
/* Prints out the page chain. */
/* */
@@ -9964,13 +10330,12 @@ bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
"----------------------------\n");
BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n",
- (u32) BCM_PAGE_SIZE, (u32) PG_PAGES);
+ (u32) BCM_PAGE_SIZE, (u32) sc->pg_pages);
BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n",
(u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE);
- BCE_PRINTF("total rx_bd = 0x%08X, max_pg_bd = 0x%08X\n",
- (u32) TOTAL_PG_BD, (u32) MAX_PG_BD);
+ BCE_PRINTF("total pg_bd = 0x%08X\n", (u32) TOTAL_PG_BD_ALLOC);
BCE_PRINTF(
"----------------------------"
@@ -9989,7 +10354,6 @@ bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count)
"----------------"
"----------------------------\n");
}
-#endif
#define BCE_PRINT_RX_CONS(arg) \
@@ -10196,12 +10560,12 @@ bce_dump_driver_state(struct bce_softc *sc)
BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain "
"virtual address\n", val_hi, val_lo);
-#ifdef BCE_JUMBO_HDRSPLIT
- val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
- val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
- BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain "
- "virtual address\n", val_hi, val_lo);
-#endif
+ if (bce_hdr_split == TRUE) {
+ val_hi = BCE_ADDR_HI(sc->pg_bd_chain);
+ val_lo = BCE_ADDR_LO(sc->pg_bd_chain);
+ BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain "
+ "virtual address\n", val_hi, val_lo);
+ }
val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr);
val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr);
@@ -10213,24 +10577,28 @@ bce_dump_driver_state(struct bce_softc *sc)
BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain "
"virtual address\n", val_hi, val_lo);
-#ifdef BCE_JUMBO_HDRSPLIT
- val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
- val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
- BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain "
- "virtual address\n", val_hi, val_lo);
-#endif
+ if (bce_hdr_split == TRUE) {
+ val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr);
+ val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr);
+ BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain "
+ "virtual address\n", val_hi, val_lo);
+ }
- BCE_PRINTF(" 0x%08X - (sc->interrupts_generated) "
- "h/w intrs\n", sc->interrupts_generated);
+ BCE_PRINTF(" 0x%016llX - (sc->interrupts_generated) "
+ "h/w intrs\n",
+ (long long unsigned int) sc->interrupts_generated);
- BCE_PRINTF(" 0x%08X - (sc->interrupts_rx) "
- "rx interrupts handled\n", sc->interrupts_rx);
+ BCE_PRINTF(" 0x%016llX - (sc->interrupts_rx) "
+ "rx interrupts handled\n",
+ (long long unsigned int) sc->interrupts_rx);
- BCE_PRINTF(" 0x%08X - (sc->interrupts_tx) "
- "tx interrupts handled\n", sc->interrupts_tx);
+ BCE_PRINTF(" 0x%016llX - (sc->interrupts_tx) "
+ "tx interrupts handled\n",
+ (long long unsigned int) sc->interrupts_tx);
- BCE_PRINTF(" 0x%08X - (sc->phy_interrupts) "
- "phy interrupts handled\n", sc->phy_interrupts);
+ BCE_PRINTF(" 0x%016llX - (sc->phy_interrupts) "
+ "phy interrupts handled\n",
+ (long long unsigned int) sc->phy_interrupts);
BCE_PRINTF(" 0x%08X - (sc->last_status_idx) "
"status block index\n", sc->last_status_idx);
@@ -10250,8 +10618,8 @@ bce_dump_driver_state(struct bce_softc *sc)
BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used "
"tx_bd's\n", sc->used_tx_bd);
- BCE_PRINTF("0x%08X/%08X - (sc->tx_hi_watermark) tx hi "
- "watermark\n", sc->tx_hi_watermark, sc->max_tx_bd);
+ BCE_PRINTF(" 0x%04X/0x%04X - (sc->tx_hi_watermark)/"
+ "(sc->max_tx_bd)\n", sc->tx_hi_watermark, sc->max_tx_bd);
BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer "
"index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod));
@@ -10262,28 +10630,31 @@ bce_dump_driver_state(struct bce_softc *sc)
BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer "
"byte seq index\n", sc->rx_prod_bseq);
+ BCE_PRINTF(" 0x%04X/0x%04X - (sc->rx_low_watermark)/"
+ "(sc->max_rx_bd)\n", sc->rx_low_watermark, sc->max_rx_bd);
+
BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx "
"mbufs allocated\n", sc->debug_rx_mbuf_alloc);
BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free "
"rx_bd's\n", sc->free_rx_bd);
-#ifdef BCE_JUMBO_HDRSPLIT
- BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer "
- "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
+ if (bce_hdr_split == TRUE) {
+ BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer "
+ "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod));
- BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer "
- "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
+ BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer "
+ "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons));
- BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page "
- "mbufs allocated\n", sc->debug_pg_mbuf_alloc);
+ BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page "
+ "mbufs allocated\n", sc->debug_pg_mbuf_alloc);
+ }
BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page "
"rx_bd's\n", sc->free_pg_bd);
- BCE_PRINTF("0x%08X/%08X - (sc->pg_low_watermark) page low "
- "watermark\n", sc->pg_low_watermark, sc->max_pg_bd);
-#endif
+ BCE_PRINTF(" 0x%04X/0x%04X - (sc->pg_low_watermark)/"
+ "(sc->max_pg_bd)\n", sc->pg_low_watermark, sc->max_pg_bd);
BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) "
"mbuf alloc failures\n", sc->mbuf_alloc_failed_count);
@@ -10414,6 +10785,44 @@ bce_dump_hw_state(struct bce_softc *sc)
/****************************************************************************/
+/* Prints out the contents of shared memory which is used for host driver */
+/* to bootcode firmware communication. */
+/* */
+/* Returns: */
+/* Nothing. */
+/****************************************************************************/
+static __attribute__ ((noinline)) void
+bce_dump_shmem_state(struct bce_softc *sc)
+{
+ BCE_PRINTF(
+ "----------------------------"
+ " Shared Memory State "
+ "----------------------------\n");
+
+ BCE_PRINTF("0x%08X - Shared memory base address\n",
+ sc->bce_shmem_base);
+ BCE_PRINTF("%s - bootcode version\n",
+ sc->bce_bc_ver);
+
+ BCE_PRINTF(
+ "----------------------------"
+ " Shared Mem "
+ "----------------------------\n");
+
+ for (int i = 0x0; i < 0x200; i += 0x10) {
+ BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, bce_shmem_rd(sc, i), bce_shmem_rd(sc, i + 0x4),
+ bce_shmem_rd(sc, i + 0x8), bce_shmem_rd(sc, i + 0xC));
+ }
+
+ BCE_PRINTF(
+ "----------------------------"
+ "----------------"
+ "----------------------------\n");
+}
+
+
+/****************************************************************************/
/* Prints out the mailbox queue registers. */
/* */
/* Returns: */
@@ -10871,13 +11280,15 @@ bce_breakpoint(struct bce_softc *sc)
bce_dump_enet(sc, NULL);
bce_dump_txbd(sc, 0, NULL);
bce_dump_rxbd(sc, 0, NULL);
- bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD);
- bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD);
+ bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD_ALLOC);
+ bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC);
+ bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD_ALLOC);
bce_dump_l2fhdr(sc, 0, NULL);
bce_dump_ctx(sc, RX_CID);
bce_dump_ftqs(sc);
- bce_dump_tx_chain(sc, 0, USABLE_TX_BD);
- bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD);
+ bce_dump_tx_chain(sc, 0, USABLE_TX_BD_ALLOC);
+ bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD_ALLOC);
+ bce_dump_pg_chain(sc, 0, USABLE_PG_BD_ALLOC);
bce_dump_status_block(sc);
bce_dump_stats_block(sc);
bce_dump_driver_state(sc);
@@ -10889,12 +11300,7 @@ bce_breakpoint(struct bce_softc *sc)
bce_dump_cp_state(sc, 0);
bce_dump_com_state(sc, 0);
bce_dump_rv2p_state(sc);
-
-#ifdef BCE_JUMBO_HDRSPLIT
bce_dump_pgbd(sc, 0, NULL);
- bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD);
- bce_dump_pg_chain(sc, 0, USABLE_PG_BD);
-#endif
}
bce_dump_status_block(sc);
diff --git a/sys/dev/bce/if_bcereg.h b/sys/dev/bce/if_bcereg.h
index cedb6b1..4bd5a62 100644
--- a/sys/dev/bce/if_bcereg.h
+++ b/sys/dev/bce/if_bcereg.h
@@ -302,18 +302,18 @@
#define BCE_CP_INTR 0x00000008
#define BCE_CP_UNLOAD 0x00000010
#define BCE_CP_RESET 0x00000020
-#define BCE_CP_PHY 0x00000040
+#define BCE_CP_PHY 0x00000040
#define BCE_CP_NVRAM 0x00000080
-#define BCE_CP_FIRMWARE 0x00000100
-#define BCE_CP_CTX 0x00000200
-#define BCE_CP_REG 0x00000400
+#define BCE_CP_FIRMWARE 0x00000100
+#define BCE_CP_CTX 0x00000200
+#define BCE_CP_REG 0x00000400
#define BCE_CP_MISC 0x00400000
#define BCE_CP_SPECIAL 0x00800000
-#define BCE_CP_ALL 0x00FFFFFF
+#define BCE_CP_ALL 0x00FFFFFF
#define BCE_CP_MASK 0x00FFFFFF
-#define BCE_LEVEL_FATAL 0x00000000
+#define BCE_LEVEL_FATAL 0x00000000
#define BCE_LEVEL_WARN 0x01000000
#define BCE_LEVEL_INFO 0x02000000
#define BCE_LEVEL_VERBOSE 0x03000000
@@ -326,28 +326,28 @@
#define BCE_INFO_LOAD (BCE_CP_LOAD | BCE_LEVEL_INFO)
#define BCE_VERBOSE_LOAD (BCE_CP_LOAD | BCE_LEVEL_VERBOSE)
#define BCE_EXTREME_LOAD (BCE_CP_LOAD | BCE_LEVEL_EXTREME)
-#define BCE_INSANE_LOAD (BCE_CP_LOAD | BCE_LEVEL_INSANE)
+#define BCE_INSANE_LOAD (BCE_CP_LOAD | BCE_LEVEL_INSANE)
#define BCE_WARN_SEND (BCE_CP_SEND | BCE_LEVEL_WARN)
#define BCE_INFO_SEND (BCE_CP_SEND | BCE_LEVEL_INFO)
#define BCE_VERBOSE_SEND (BCE_CP_SEND | BCE_LEVEL_VERBOSE)
#define BCE_EXTREME_SEND (BCE_CP_SEND | BCE_LEVEL_EXTREME)
-#define BCE_INSANE_SEND (BCE_CP_SEND | BCE_LEVEL_INSANE)
+#define BCE_INSANE_SEND (BCE_CP_SEND | BCE_LEVEL_INSANE)
#define BCE_WARN_RECV (BCE_CP_RECV | BCE_LEVEL_WARN)
#define BCE_INFO_RECV (BCE_CP_RECV | BCE_LEVEL_INFO)
#define BCE_VERBOSE_RECV (BCE_CP_RECV | BCE_LEVEL_VERBOSE)
#define BCE_EXTREME_RECV (BCE_CP_RECV | BCE_LEVEL_EXTREME)
-#define BCE_INSANE_RECV (BCE_CP_RECV | BCE_LEVEL_INSANE)
+#define BCE_INSANE_RECV (BCE_CP_RECV | BCE_LEVEL_INSANE)
#define BCE_WARN_INTR (BCE_CP_INTR | BCE_LEVEL_WARN)
#define BCE_INFO_INTR (BCE_CP_INTR | BCE_LEVEL_INFO)
#define BCE_VERBOSE_INTR (BCE_CP_INTR | BCE_LEVEL_VERBOSE)
#define BCE_EXTREME_INTR (BCE_CP_INTR | BCE_LEVEL_EXTREME)
-#define BCE_INSANE_INTR (BCE_CP_INTR | BCE_LEVEL_INSANE)
+#define BCE_INSANE_INTR (BCE_CP_INTR | BCE_LEVEL_INSANE)
-#define BCE_WARN_UNLOAD (BCE_CP_UNLOAD | BCE_LEVEL_WARN)
-#define BCE_INFO_UNLOAD (BCE_CP_UNLOAD | BCE_LEVEL_INFO)
+#define BCE_WARN_UNLOAD (BCE_CP_UNLOAD | BCE_LEVEL_WARN)
+#define BCE_INFO_UNLOAD (BCE_CP_UNLOAD | BCE_LEVEL_INFO)
#define BCE_VERBOSE_UNLOAD (BCE_CP_UNLOAD | BCE_LEVEL_VERBOSE)
#define BCE_EXTREME_UNLOAD (BCE_CP_UNLOAD | BCE_LEVEL_EXTREME)
#define BCE_INSANE_UNLOAD (BCE_CP_UNLOAD | BCE_LEVEL_INSANE)
@@ -360,8 +360,8 @@
#define BCE_WARN_PHY (BCE_CP_PHY | BCE_LEVEL_WARN)
#define BCE_INFO_PHY (BCE_CP_PHY | BCE_LEVEL_INFO)
-#define BCE_VERBOSE_PHY (BCE_CP_PHY | BCE_LEVEL_VERBOSE)
-#define BCE_EXTREME_PHY (BCE_CP_PHY | BCE_LEVEL_EXTREME)
+#define BCE_VERBOSE_PHY (BCE_CP_PHY | BCE_LEVEL_VERBOSE)
+#define BCE_EXTREME_PHY (BCE_CP_PHY | BCE_LEVEL_EXTREME)
#define BCE_INSANE_PHY (BCE_CP_PHY | BCE_LEVEL_INSANE)
#define BCE_WARN_NVRAM (BCE_CP_NVRAM | BCE_LEVEL_WARN)
@@ -372,40 +372,40 @@
#define BCE_WARN_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_WARN)
#define BCE_INFO_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_INFO)
-#define BCE_VERBOSE_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_VERBOSE)
-#define BCE_EXTREME_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_EXTREME)
-#define BCE_INSANE_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_INSANE)
+#define BCE_VERBOSE_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_VERBOSE)
+#define BCE_EXTREME_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_EXTREME)
+#define BCE_INSANE_FIRMWARE (BCE_CP_FIRMWARE | BCE_LEVEL_INSANE)
#define BCE_WARN_CTX (BCE_CP_CTX | BCE_LEVEL_WARN)
#define BCE_INFO_CTX (BCE_CP_CTX | BCE_LEVEL_INFO)
-#define BCE_VERBOSE_CTX (BCE_CP_CTX | BCE_LEVEL_VERBOSE)
-#define BCE_EXTREME_CTX (BCE_CP_CTX | BCE_LEVEL_EXTREME)
+#define BCE_VERBOSE_CTX (BCE_CP_CTX | BCE_LEVEL_VERBOSE)
+#define BCE_EXTREME_CTX (BCE_CP_CTX | BCE_LEVEL_EXTREME)
#define BCE_INSANE_CTX (BCE_CP_CTX | BCE_LEVEL_INSANE)
#define BCE_WARN_REG (BCE_CP_REG | BCE_LEVEL_WARN)
#define BCE_INFO_REG (BCE_CP_REG | BCE_LEVEL_INFO)
-#define BCE_VERBOSE_REG (BCE_CP_REG | BCE_LEVEL_VERBOSE)
-#define BCE_EXTREME_REG (BCE_CP_REG | BCE_LEVEL_EXTREME)
+#define BCE_VERBOSE_REG (BCE_CP_REG | BCE_LEVEL_VERBOSE)
+#define BCE_EXTREME_REG (BCE_CP_REG | BCE_LEVEL_EXTREME)
#define BCE_INSANE_REG (BCE_CP_REG | BCE_LEVEL_INSANE)
#define BCE_WARN_MISC (BCE_CP_MISC | BCE_LEVEL_WARN)
#define BCE_INFO_MISC (BCE_CP_MISC | BCE_LEVEL_INFO)
#define BCE_VERBOSE_MISC (BCE_CP_MISC | BCE_LEVEL_VERBOSE)
#define BCE_EXTREME_MISC (BCE_CP_MISC | BCE_LEVEL_EXTREME)
-#define BCE_INSANE_MISC (BCE_CP_MISC | BCE_LEVEL_INSANE)
+#define BCE_INSANE_MISC (BCE_CP_MISC | BCE_LEVEL_INSANE)
#define BCE_WARN_SPECIAL (BCE_CP_SPECIAL | BCE_LEVEL_WARN)
#define BCE_INFO_SPECIAL (BCE_CP_SPECIAL | BCE_LEVEL_INFO)
-#define BCE_VERBOSE_SPECIAL (BCE_CP_SPECIAL | BCE_LEVEL_VERBOSE)
-#define BCE_EXTREME_SPECIAL (BCE_CP_SPECIAL | BCE_LEVEL_EXTREME)
+#define BCE_VERBOSE_SPECIAL (BCE_CP_SPECIAL | BCE_LEVEL_VERBOSE)
+#define BCE_EXTREME_SPECIAL (BCE_CP_SPECIAL | BCE_LEVEL_EXTREME)
#define BCE_INSANE_SPECIAL (BCE_CP_SPECIAL | BCE_LEVEL_INSANE)
-#define BCE_FATAL (BCE_CP_ALL | BCE_LEVEL_FATAL)
-#define BCE_WARN (BCE_CP_ALL | BCE_LEVEL_WARN)
-#define BCE_INFO (BCE_CP_ALL | BCE_LEVEL_INFO)
+#define BCE_FATAL (BCE_CP_ALL | BCE_LEVEL_FATAL)
+#define BCE_WARN (BCE_CP_ALL | BCE_LEVEL_WARN)
+#define BCE_INFO (BCE_CP_ALL | BCE_LEVEL_INFO)
#define BCE_VERBOSE (BCE_CP_ALL | BCE_LEVEL_VERBOSE)
#define BCE_EXTREME (BCE_CP_ALL | BCE_LEVEL_EXTREME)
-#define BCE_INSANE (BCE_CP_ALL | BCE_LEVEL_INSANE)
+#define BCE_INSANE (BCE_CP_ALL | BCE_LEVEL_INSANE)
#define BCE_CODE_PATH(cp) ((cp & BCE_CP_MASK) & bce_debug)
#define BCE_MSG_LEVEL(lv) \
@@ -1155,20 +1155,19 @@ struct tx_bd {
u32 tx_bd_haddr_lo;
u32 tx_bd_mss_nbytes;
u16 tx_bd_flags;
+#define TX_BD_FLAGS_CONN_FAULT (1<<0)
+#define TX_BD_FLAGS_TCP_UDP_CKSUM (1<<1)
+#define TX_BD_FLAGS_IP_CKSUM (1<<2)
+#define TX_BD_FLAGS_VLAN_TAG (1<<3)
+#define TX_BD_FLAGS_COAL_NOW (1<<4)
+#define TX_BD_FLAGS_DONT_GEN_CRC (1<<5)
+#define TX_BD_FLAGS_END (1<<6)
+#define TX_BD_FLAGS_START (1<<7)
+#define TX_BD_FLAGS_SW_OPTION_WORD (0x1f<<8)
+#define TX_BD_FLAGS_SW_FLAGS (1<<13)
+#define TX_BD_FLAGS_SW_SNAP (1<<14)
+#define TX_BD_FLAGS_SW_LSO (1<<15)
u16 tx_bd_vlan_tag;
- #define TX_BD_FLAGS_CONN_FAULT (1<<0)
- #define TX_BD_FLAGS_TCP_UDP_CKSUM (1<<1)
- #define TX_BD_FLAGS_IP_CKSUM (1<<2)
- #define TX_BD_FLAGS_VLAN_TAG (1<<3)
- #define TX_BD_FLAGS_COAL_NOW (1<<4)
- #define TX_BD_FLAGS_DONT_GEN_CRC (1<<5)
- #define TX_BD_FLAGS_END (1<<6)
- #define TX_BD_FLAGS_START (1<<7)
- #define TX_BD_FLAGS_SW_OPTION_WORD (0x1f<<8)
- #define TX_BD_FLAGS_SW_FLAGS (1<<13)
- #define TX_BD_FLAGS_SW_SNAP (1<<14)
- #define TX_BD_FLAGS_SW_LSO (1<<15)
-
};
@@ -1180,11 +1179,10 @@ struct rx_bd {
u32 rx_bd_haddr_lo;
u32 rx_bd_len;
u32 rx_bd_flags;
- #define RX_BD_FLAGS_NOPUSH (1<<0)
- #define RX_BD_FLAGS_DUMMY (1<<1)
- #define RX_BD_FLAGS_END (1<<2)
- #define RX_BD_FLAGS_START (1<<3)
-
+#define RX_BD_FLAGS_NOPUSH (1<<0)
+#define RX_BD_FLAGS_DUMMY (1<<1)
+#define RX_BD_FLAGS_END (1<<2)
+#define RX_BD_FLAGS_START (1<<3)
};
@@ -1387,6 +1385,7 @@ struct l2_fhdr {
#define L2_FHDR_ERRORS_ALIGNMENT (1<<19)
#define L2_FHDR_ERRORS_TOO_SHORT (1<<20)
#define L2_FHDR_ERRORS_GIANT_FRAME (1<<21)
+ #define L2_FHDR_ERRORS_IPV4_BAD_LEN (1<<22)
#define L2_FHDR_ERRORS_TCP_XSUM (1<<28)
#define L2_FHDR_ERRORS_UDP_XSUM (1<<31)
@@ -1405,36 +1404,39 @@ struct l2_fhdr {
};
#define BCE_L2FHDR_PRINTFB \
- "\20" \
+ "\20" \
"\40UDP_XSUM_ERR" \
- "\37b30" \
- "\36b29" \
+ "\37b30" \
+ "\36b29" \
"\35TCP_XSUM_ERR" \
- "\34b27" \
- "\33b26" \
- "\32b25" \
- "\31b24" \
- "\30b23" \
- "\27b22" \
+ "\34b27" \
+ "\33b26" \
+ "\32b25" \
+ "\31b24" \
+ "\30b23" \
+ "\27IPv4_BAD_LEN" \
"\26GIANT_ERR" \
"\25SHORT_ERR" \
"\24ALIGN_ERR" \
"\23PHY_ERR" \
"\22CRC_ERR" \
- "\21SPLIT" \
- "\20UDP" \
- "\17TCP" \
- "\16IP" \
- "\15b12" \
- "\14b11" \
- "\13b10" \
- "\12b09" \
- "\11RSS" \
- "\10SNAP" \
- "\07VLAN" \
- "\06P4" \
- "\05P3" \
- "\04P2"
+ "\21SPLIT" \
+ "\20UDP" \
+ "\17TCP" \
+ "\16IP" \
+ "\15SORT_b3" \
+ "\14SORT_b2" \
+ "\13SORT_b1" \
+ "\12SORT_b0" \
+ "\11RSS" \
+ "\10SNAP" \
+ "\07VLAN" \
+ "\06P4" \
+ "\05P3" \
+ "\04P2" \
+ "\03RULE_b2" \
+ "\02RULE_b1" \
+ "\01RULE_b0"
/*
@@ -6150,18 +6152,20 @@ struct l2_fhdr {
* Page count must remain a power of 2 for all
* of the math to work correctly.
*/
-#define TX_PAGES 2
-#define TOTAL_TX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct tx_bd))
-#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1)
-#define TOTAL_TX_BD (TOTAL_TX_BD_PER_PAGE * TX_PAGES)
-#define USABLE_TX_BD (USABLE_TX_BD_PER_PAGE * TX_PAGES)
-#define MAX_TX_BD (TOTAL_TX_BD - 1)
+#define DEFAULT_TX_PAGES 2
+#define MAX_TX_PAGES 8
+#define TOTAL_TX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct tx_bd))
+#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1)
+#define MAX_TX_BD_AVAIL (MAX_TX_PAGES * TOTAL_TX_BD_PER_PAGE)
+#define TOTAL_TX_BD_ALLOC (TOTAL_TX_BD_PER_PAGE * sc->tx_pages)
+#define USABLE_TX_BD_ALLOC (USABLE_TX_BD_PER_PAGE * sc->tx_pages)
+#define MAX_TX_BD_ALLOC (TOTAL_TX_BD_ALLOC - 1)
/* Advance to the next tx_bd, skipping any next page pointers. */
#define NEXT_TX_BD(x) (((x) & USABLE_TX_BD_PER_PAGE) == \
(USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1
-#define TX_CHAIN_IDX(x) ((x) & MAX_TX_BD)
+#define TX_CHAIN_IDX(x) ((x) & MAX_TX_BD_ALLOC)
#define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> (BCM_PAGE_BITS - 4))
#define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE)
@@ -6170,45 +6174,46 @@ struct l2_fhdr {
* Page count must remain a power of 2 for all
* of the math to work correctly.
*/
-#define RX_PAGES 2
-#define TOTAL_RX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct rx_bd))
-#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 1)
-#define TOTAL_RX_BD (TOTAL_RX_BD_PER_PAGE * RX_PAGES)
-#define USABLE_RX_BD (USABLE_RX_BD_PER_PAGE * RX_PAGES)
-#define MAX_RX_BD (TOTAL_RX_BD - 1)
+#define DEFAULT_RX_PAGES 2
+#define MAX_RX_PAGES 8
+#define TOTAL_RX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct rx_bd))
+#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 1)
+#define MAX_RX_BD_AVAIL (MAX_RX_PAGES * TOTAL_RX_BD_PER_PAGE)
+#define TOTAL_RX_BD_ALLOC (TOTAL_RX_BD_PER_PAGE * sc->rx_pages)
+#define USABLE_RX_BD_ALLOC (USABLE_RX_BD_PER_PAGE * sc->rx_pages)
+#define MAX_RX_BD_ALLOC (TOTAL_RX_BD_ALLOC - 1)
/* Advance to the next rx_bd, skipping any next page pointers. */
#define NEXT_RX_BD(x) (((x) & USABLE_RX_BD_PER_PAGE) == \
(USABLE_RX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1
-#define RX_CHAIN_IDX(x) ((x) & MAX_RX_BD)
+#define RX_CHAIN_IDX(x) ((x) & MAX_RX_BD_ALLOC)
#define RX_PAGE(x) (((x) & ~USABLE_RX_BD_PER_PAGE) >> (BCM_PAGE_BITS - 4))
#define RX_IDX(x) ((x) & USABLE_RX_BD_PER_PAGE)
-#ifdef BCE_JUMBO_HDRSPLIT
/*
* To accomodate jumbo frames, the page chain should
* be 4 times larger than the receive chain.
*/
-#define PG_PAGES (RX_PAGES * 4)
-#define TOTAL_PG_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct rx_bd))
-#define USABLE_PG_BD_PER_PAGE (TOTAL_PG_BD_PER_PAGE - 1)
-#define TOTAL_PG_BD (TOTAL_PG_BD_PER_PAGE * PG_PAGES)
-#define USABLE_PG_BD (USABLE_PG_BD_PER_PAGE * PG_PAGES)
-#define MAX_PG_BD (TOTAL_PG_BD - 1)
+#define DEFAULT_PG_PAGES (DEFAULT_RX_PAGES * 4)
+#define MAX_PG_PAGES (MAX_RX_PAGES * 4)
+#define TOTAL_PG_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct rx_bd))
+#define USABLE_PG_BD_PER_PAGE (TOTAL_PG_BD_PER_PAGE - 1)
+#define MAX_PG_BD_AVAIL (MAX_PG_PAGES * TOTAL_PG_BD_PER_PAGE)
+#define TOTAL_PG_BD_ALLOC (TOTAL_PG_BD_PER_PAGE * sc->pg_pages)
+#define USABLE_PG_BD_ALLOC (USABLE_PG_BD_PER_PAGE * sc->pg_pages)
+#define MAX_PG_BD_ALLOC (TOTAL_PG_BD_ALLOC - 1)
/* Advance to the next pg_bd, skipping any next page pointers. */
#define NEXT_PG_BD(x) (((x) & USABLE_PG_BD_PER_PAGE) == \
(USABLE_PG_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1
-#define PG_CHAIN_IDX(x) ((x) & MAX_PG_BD)
+#define PG_CHAIN_IDX(x) ((x) & MAX_PG_BD_ALLOC)
#define PG_PAGE(x) (((x) & ~USABLE_PG_BD_PER_PAGE) >> (BCM_PAGE_BITS - 4))
#define PG_IDX(x) ((x) & USABLE_PG_BD_PER_PAGE)
-#endif /* BCE_JUMBO_HDRSPLIT */
-
#define CTX_INIT_RETRY_COUNT 10
/* Context size. */
@@ -6236,6 +6241,15 @@ struct l2_fhdr {
#define TX_CID 16
#define RX_CID 0
+#define DEFAULT_TX_QUICK_CONS_TRIP_INT 20
+#define DEFAULT_TX_QUICK_CONS_TRIP 20
+#define DEFAULT_TX_TICKS_INT 80
+#define DEFAULT_TX_TICKS 80
+#define DEFAULT_RX_QUICK_CONS_TRIP_INT 6
+#define DEFAULT_RX_QUICK_CONS_TRIP 6
+#define DEFAULT_RX_TICKS_INT 18
+#define DEFAULT_RX_TICKS 18
+
/****************************************************************************/
/* BCE Processor Firmwware Load Definitions */
/****************************************************************************/
@@ -6530,26 +6544,27 @@ struct bce_softc
/* The device handle for the MII bus child device. */
device_t bce_miibus;
- /* Driver maintained TX chain pointers and byte counter. */
+ /* Driver maintained RX chain pointers and byte counter. */
u16 rx_prod;
u16 rx_cons;
/* Counts the bytes used in the RX chain. */
u32 rx_prod_bseq;
+
+ /* Driver maintained TX chain pointers and byte counter. */
u16 tx_prod;
u16 tx_cons;
/* Counts the bytes used in the TX chain. */
u32 tx_prod_bseq;
-#ifdef BCE_JUMBO_HDRSPLIT
+ /* Driver maintained PG chain pointers. */
u16 pg_prod;
u16 pg_cons;
-#endif
int bce_link_up;
- struct callout bce_tick_callout;
- struct callout bce_pulse_callout;
+ struct callout bce_tick_callout;
+ struct callout bce_pulse_callout;
/* Ticks until chip reset */
int watchdog_timer;
@@ -6559,10 +6574,7 @@ struct bce_softc
int rx_bd_mbuf_alloc_size;
int rx_bd_mbuf_data_len;
int rx_bd_mbuf_align_pad;
-
-#ifdef BCE_JUMBO_HDRSPLIT
int pg_bd_mbuf_alloc_size;
-#endif
/* Receive mode settings (i.e promiscuous, multicast, etc.). */
u32 rx_mode;
@@ -6571,30 +6583,31 @@ struct bce_softc
bus_dma_tag_t parent_tag;
/* H/W maintained TX buffer descriptor chain structure. */
+ int tx_pages;
bus_dma_tag_t tx_bd_chain_tag;
- bus_dmamap_t tx_bd_chain_map[TX_PAGES];
- struct tx_bd *tx_bd_chain[TX_PAGES];
- bus_addr_t tx_bd_chain_paddr[TX_PAGES];
+ bus_dmamap_t tx_bd_chain_map[MAX_TX_PAGES];
+ struct tx_bd *tx_bd_chain[MAX_TX_PAGES];
+ bus_addr_t tx_bd_chain_paddr[MAX_TX_PAGES];
/* H/W maintained RX buffer descriptor chain structure. */
+ int rx_pages;
bus_dma_tag_t rx_bd_chain_tag;
- bus_dmamap_t rx_bd_chain_map[RX_PAGES];
- struct rx_bd *rx_bd_chain[RX_PAGES];
- bus_addr_t rx_bd_chain_paddr[RX_PAGES];
+ bus_dmamap_t rx_bd_chain_map[MAX_RX_PAGES];
+ struct rx_bd *rx_bd_chain[MAX_RX_PAGES];
+ bus_addr_t rx_bd_chain_paddr[MAX_RX_PAGES];
-#ifdef BCE_JUMBO_HDRSPLIT
/* H/W maintained page buffer descriptor chain structure. */
+ int pg_pages;
bus_dma_tag_t pg_bd_chain_tag;
- bus_dmamap_t pg_bd_chain_map[PG_PAGES];
- struct rx_bd *pg_bd_chain[PG_PAGES];
- bus_addr_t pg_bd_chain_paddr[PG_PAGES];
-#endif
+ bus_dmamap_t pg_bd_chain_map[MAX_PG_PAGES];
+ struct rx_bd *pg_bd_chain[MAX_PG_PAGES];
+ bus_addr_t pg_bd_chain_paddr[MAX_PG_PAGES];
/* H/W maintained status block. */
bus_dma_tag_t status_tag;
bus_dmamap_t status_map;
struct status_block *status_block;
- bus_addr_t status_block_paddr;
+ bus_addr_t status_block_paddr;
/* Driver maintained status block values. */
u16 last_status_idx;
@@ -6605,49 +6618,41 @@ struct bce_softc
bus_dma_tag_t stats_tag;
bus_dmamap_t stats_map;
struct statistics_block *stats_block;
- bus_addr_t stats_block_paddr;
+ bus_addr_t stats_block_paddr;
/* H/W maintained context block. */
- int ctx_pages;
+ int ctx_pages;
bus_dma_tag_t ctx_tag;
/* BCM5709/16 use host memory for context. */
bus_dmamap_t ctx_map[BCE_MAX_CONTEXT];
- void *ctx_block[BCE_MAX_CONTEXT];
- bus_addr_t ctx_paddr[BCE_MAX_CONTEXT];
+ void *ctx_block[BCE_MAX_CONTEXT];
+ bus_addr_t ctx_paddr[BCE_MAX_CONTEXT];
/* Bus tag for RX/TX mbufs. */
bus_dma_tag_t rx_mbuf_tag;
bus_dma_tag_t tx_mbuf_tag;
-
-#ifdef BCE_JUMBO_HDRSPLIT
bus_dma_tag_t pg_mbuf_tag;
-#endif
/* S/W maintained mbuf TX chain structure. */
- bus_dmamap_t tx_mbuf_map[TOTAL_TX_BD];
- struct mbuf *tx_mbuf_ptr[TOTAL_TX_BD];
+ bus_dmamap_t tx_mbuf_map[MAX_TX_BD_AVAIL];
+ struct mbuf *tx_mbuf_ptr[MAX_TX_BD_AVAIL];
/* S/W maintained mbuf RX chain structure. */
- bus_dmamap_t rx_mbuf_map[TOTAL_RX_BD];
- struct mbuf *rx_mbuf_ptr[TOTAL_RX_BD];
+ bus_dmamap_t rx_mbuf_map[MAX_RX_BD_AVAIL];
+ struct mbuf *rx_mbuf_ptr[MAX_RX_BD_AVAIL];
-#ifdef BCE_JUMBO_HDRSPLIT
/* S/W maintained mbuf page chain structure. */
- bus_dmamap_t pg_mbuf_map[TOTAL_PG_BD];
- struct mbuf *pg_mbuf_ptr[TOTAL_PG_BD];
-#endif
+ bus_dmamap_t pg_mbuf_map[MAX_PG_BD_AVAIL];
+ struct mbuf *pg_mbuf_ptr[MAX_PG_BD_AVAIL];
/* Track the number of buffer descriptors in use. */
u16 free_rx_bd;
u16 max_rx_bd;
u16 used_tx_bd;
u16 max_tx_bd;
-
-#ifdef BCE_JUMBO_HDRSPLIT
u16 free_pg_bd;
u16 max_pg_bd;
-#endif
/* Provides access to hardware statistics through sysctl. */
u64 stat_IfHCInOctets;
@@ -6733,63 +6738,61 @@ struct bce_softc
/* Track the number of enqueued mbufs. */
int debug_tx_mbuf_alloc;
int debug_rx_mbuf_alloc;
-
-#ifdef BCE_JUMBO_HDRSPLIT
int debug_pg_mbuf_alloc;
-#endif
/* Track how many and what type of interrupts are generated. */
- u32 interrupts_generated;
- u32 interrupts_handled;
- u32 interrupts_rx;
- u32 interrupts_tx;
- u32 phy_interrupts;
-
- /* Track interrupt time (25MHz clock). */
- u64 rx_intr_time;
- u64 tx_intr_time;
+ u64 interrupts_generated;
+ u64 interrupts_handled;
+ u64 interrupts_rx;
+ u64 interrupts_tx;
+ u64 phy_interrupts;
/* Lowest number of rx_bd's free. */
- u32 rx_low_watermark;
+ u16 rx_low_watermark;
/* Number of times the RX chain was empty. */
- u32 rx_empty_count;
+ u64 rx_empty_count;
-#ifdef BCE_JUMBO_HDRSPLIT
/* Lowest number of pages free. */
- u32 pg_low_watermark;
+ u16 pg_low_watermark;
/* Number of times the page chain was empty. */
- u32 pg_empty_count;
-#endif
+ u64 pg_empty_count;
/* Greatest number of tx_bd's used. */
- u32 tx_hi_watermark;
+ u16 tx_hi_watermark;
/* Number of times the TX chain was full. */
- u32 tx_full_count;
+ u64 tx_full_count;
/* Number of TSO frames requested. */
- u32 tso_frames_requested;
+ u64 tso_frames_requested;
/* Number of TSO frames completed. */
- u32 tso_frames_completed;
+ u64 tso_frames_completed;
/* Number of TSO frames failed. */
- u32 tso_frames_failed;
+ u64 tso_frames_failed;
/* Number of IP checksum offload frames.*/
- u32 csum_offload_ip;
+ u64 csum_offload_ip;
/* Number of TCP/UDP checksum offload frames.*/
- u32 csum_offload_tcp_udp;
+ u64 csum_offload_tcp_udp;
/* Number of VLAN tagged frames received. */
- u32 vlan_tagged_frames_rcvd;
+ u64 vlan_tagged_frames_rcvd;
/* Number of VLAN tagged frames stripped. */
- u32 vlan_tagged_frames_stripped;
-#endif
+ u64 vlan_tagged_frames_stripped;
+
+ /* Number of split header frames received. */
+ u64 split_header_frames_rcvd;
+
+ /* Number of split header TCP frames received. */
+ u64 split_header_tcp_frames_rcvd;
+#endif /* BCE_DEBUG */
+
uint8_t *nvram_buf;
};
OpenPOWER on IntegriCloud