diff options
author | yongari <yongari@FreeBSD.org> | 2009-11-08 01:13:38 +0000 |
---|---|---|
committer | yongari <yongari@FreeBSD.org> | 2009-11-08 01:13:38 +0000 |
commit | 3c96eca4ff6a3f12c615f51fcb286682037fc190 (patch) | |
tree | cbb67c35c27f09388fe2d0f555764d3865f281b3 | |
parent | fd872f39d35343f390ea00f68b30a0a700fb9721 (diff) | |
download | FreeBSD-src-3c96eca4ff6a3f12c615f51fcb286682037fc190.zip FreeBSD-src-3c96eca4ff6a3f12c615f51fcb286682037fc190.tar.gz |
Don't count input errors twice, we always read input errors from
MAC in bge_tick. Previously it showed a larger number of input
errors than actually occurred. I noticed actual input errors were less than 8% even for
64-byte UDP frames generated by netperf.
Since we always access the BGE_RXLP_LOCSTAT_IFIN_DROPS register in
bge_tick, remove the useless code protected by #ifdef notyet.
-rw-r--r-- | sys/dev/bge/if_bge.c | 10 |
1 files changed, 0 insertions, 10 deletions
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c index ba2073e..8308801 100644 --- a/sys/dev/bge/if_bge.c +++ b/sys/dev/bge/if_bge.c @@ -3196,7 +3196,6 @@ bge_rxeof(struct bge_softc *sc) m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); - ifp->if_ierrors++; continue; } if (bge_newbuf_jumbo(sc, rxidx) != 0) { @@ -3209,7 +3208,6 @@ bge_rxeof(struct bge_softc *sc) stdcnt++; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); - ifp->if_ierrors++; continue; } m = sc->bge_cdata.bge_rx_std_chain[rxidx]; @@ -3291,14 +3289,6 @@ bge_rxeof(struct bge_softc *sc) bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); if (jumbocnt) bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); -#ifdef notyet - /* - * This register wraps very quickly under heavy packet drops. - * If you need correct statistics, you can enable this check. - */ - if (BGE_IS_5705_PLUS(sc)) - ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); -#endif return (rx_npkts); } |