Diffstat (limited to 'sys/dev/sk')
-rw-r--r--  sys/dev/sk/if_sk.c      2101
-rw-r--r--  sys/dev/sk/if_skreg.h    138
-rw-r--r--  sys/dev/sk/xmaciireg.h     3
-rw-r--r--  sys/dev/sk/yukonreg.h     41
4 files changed, 1695 insertions, 588 deletions
diff --git a/sys/dev/sk/if_sk.c b/sys/dev/sk/if_sk.c
index a83e9f3..8a5dbf2 100644
--- a/sys/dev/sk/if_sk.c
+++ b/sys/dev/sk/if_sk.c
@@ -87,29 +87,33 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/sockio.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
+#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
-#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
+#include <net/if_vlan_var.h>
-#include <net/bpf.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
-#include <vm/vm.h> /* for vtophys */
-#include <vm/pmap.h> /* for vtophys */
#include <machine/bus.h>
+#include <machine/in_cksum.h>
#include <machine/resource.h>
-#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
@@ -182,18 +186,24 @@ static int skc_probe(device_t);
static int skc_attach(device_t);
static int skc_detach(device_t);
static void skc_shutdown(device_t);
+static int skc_suspend(device_t);
+static int skc_resume(device_t);
static int sk_detach(device_t);
static int sk_probe(device_t);
static int sk_attach(device_t);
static void sk_tick(void *);
+static void sk_yukon_tick(void *);
static void sk_intr(void *);
static void sk_intr_xmac(struct sk_if_softc *);
static void sk_intr_bcom(struct sk_if_softc *);
static void sk_intr_yukon(struct sk_if_softc *);
+static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
+static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
static void sk_rxeof(struct sk_if_softc *);
+static void sk_jumbo_rxeof(struct sk_if_softc *);
static void sk_txeof(struct sk_if_softc *);
-static int sk_encap(struct sk_if_softc *, struct mbuf *,
- u_int32_t *);
+static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
+static int sk_encap(struct sk_if_softc *, struct mbuf **);
static void sk_start(struct ifnet *);
static void sk_start_locked(struct ifnet *);
static int sk_ioctl(struct ifnet *, u_long, caddr_t);
@@ -206,13 +216,17 @@ static void sk_watchdog(struct ifnet *);
static int sk_ifmedia_upd(struct ifnet *);
static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sk_reset(struct sk_softc *);
-static int sk_newbuf(struct sk_if_softc *,
- struct sk_chain *, struct mbuf *);
-static int sk_alloc_jumbo_mem(struct sk_if_softc *);
-static void sk_free_jumbo_mem(struct sk_if_softc *);
+static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
+static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
+static int sk_newbuf(struct sk_if_softc *, int);
+static int sk_jumbo_newbuf(struct sk_if_softc *, int);
+static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+static int sk_dma_alloc(struct sk_if_softc *);
+static void sk_dma_free(struct sk_if_softc *);
static void *sk_jalloc(struct sk_if_softc *);
static void sk_jfree(void *, void *);
static int sk_init_rx_ring(struct sk_if_softc *);
+static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);
static u_int32_t sk_win_read_4(struct sk_softc *, int);
static u_int16_t sk_win_read_2(struct sk_softc *, int);
@@ -240,7 +254,7 @@ static void sk_marv_miibus_statchg(struct sk_if_softc *);
static uint32_t sk_xmchash(const uint8_t *);
static uint32_t sk_gmchash(const uint8_t *);
-static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
+static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
static void sk_setmulti(struct sk_if_softc *);
static void sk_setpromisc(struct sk_if_softc *);
@@ -256,6 +270,18 @@ static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
#endif
/*
+ * It seems that SK-NET GENESIS supports a very simple checksum offload
+ * capability for Tx and I believe it can generate a 0 checksum value for
+ * UDP packets in Tx as the hardware can't differentiate UDP packets from
+ * TCP packets. A 0 checksum value for a UDP packet is invalid, as it
+ * means the sender didn't perform the checksum computation. To be safe,
+ * UDP checksum offload capability is disabled at the moment. Alternatively
+ * we could introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
+ * offload routine.
+ */
+#define SK_CSUM_FEATURES (CSUM_TCP)
+
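A sketch of the LINK0/LINK1 alternative mentioned above (editor's
illustration in the style of hme(4); the placement and exact flag handling
are assumptions, not part of this commit):

    /* Opt in to UDP Tx checksum offload with "ifconfig sk0 link0",
     * accepting the risk of 0x0000 UDP checksums. */
    if ((ifp->if_capenable & IFCAP_HWCSUM) != 0) {
        if ((ifp->if_flags & IFF_LINK0) != 0)
            ifp->if_hwassist = SK_CSUM_FEATURES | CSUM_UDP;
        else
            ifp->if_hwassist = SK_CSUM_FEATURES;
    } else
        ifp->if_hwassist = 0;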
+/*
* Note that we have newbus methods for both the GEnesis controller
* itself and the XMAC(s). The XMACs are children of the GEnesis, and
* the miibus code is a child of the XMACs. We need to do it this way
@@ -268,6 +294,8 @@ static device_method_t skc_methods[] = {
DEVMETHOD(device_probe, skc_probe),
DEVMETHOD(device_attach, skc_attach),
DEVMETHOD(device_detach, skc_detach),
+ DEVMETHOD(device_suspend, skc_suspend),
+ DEVMETHOD(device_resume, skc_resume),
DEVMETHOD(device_shutdown, skc_shutdown),
/* bus interface */
@@ -438,7 +466,8 @@ sk_vpd_readbyte(sc, addr)
sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
for (i = 0; i < SK_TIMEOUT; i++) {
- DELAY(1);
+ /* ASUS LOM takes a very long time to read VPD. */
+ DELAY(100);
if (sk_win_read_2(sc,
SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
break;
@@ -473,6 +502,9 @@ sk_vpd_read(sc)
int pos = 0, i;
struct vpd_res res;
+ /* Check VPD capability */
+ if (sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_CAPID)) != PCIY_VPD)
+ return;
if (sc->sk_vpd_prodname != NULL)
free(sc->sk_vpd_prodname, M_DEVBUF);
if (sc->sk_vpd_readonly != NULL)
@@ -490,8 +522,8 @@ sk_vpd_read(sc)
return;
if (res.vr_id != VPD_RES_ID) {
- printf("skc%d: bad VPD resource id: expected %x got %x\n",
- sc->sk_unit, VPD_RES_ID, res.vr_id);
+ device_printf(sc->sk_dev, "bad VPD resource id: expected %x "
+ "got %x\n", VPD_RES_ID, res.vr_id);
return;
}
@@ -507,8 +539,8 @@ sk_vpd_read(sc)
sk_vpd_read_res(sc, &res, pos);
if (res.vr_id != VPD_RES_READ) {
- printf("skc%d: bad VPD resource id: expected %x got %x\n",
- sc->sk_unit, VPD_RES_READ, res.vr_id);
+ device_printf(sc->sk_dev, "bad VPD resource id: expected %x "
+ "got %x\n", VPD_RES_READ, res.vr_id);
return;
}
@@ -527,19 +559,27 @@ sk_miibus_readreg(dev, phy, reg)
int phy, reg;
{
struct sk_if_softc *sc_if;
+ int v;
sc_if = device_get_softc(dev);
+ SK_IF_MII_LOCK(sc_if);
switch(sc_if->sk_softc->sk_type) {
case SK_GENESIS:
- return(sk_xmac_miibus_readreg(sc_if, phy, reg));
+ v = sk_xmac_miibus_readreg(sc_if, phy, reg);
+ break;
case SK_YUKON:
case SK_YUKON_LITE:
case SK_YUKON_LP:
- return(sk_marv_miibus_readreg(sc_if, phy, reg));
+ v = sk_marv_miibus_readreg(sc_if, phy, reg);
+ break;
+ default:
+ v = 0;
+ break;
}
+ SK_IF_MII_UNLOCK(sc_if);
- return(0);
+ return (v);
}
static int
@@ -548,19 +588,27 @@ sk_miibus_writereg(dev, phy, reg, val)
int phy, reg, val;
{
struct sk_if_softc *sc_if;
+ int v;
sc_if = device_get_softc(dev);
+ SK_IF_MII_LOCK(sc_if);
switch(sc_if->sk_softc->sk_type) {
case SK_GENESIS:
- return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
+ v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
+ break;
case SK_YUKON:
case SK_YUKON_LITE:
case SK_YUKON_LP:
- return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
+ v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
+ break;
+ default:
+ v = 0;
+ break;
}
+ SK_IF_MII_UNLOCK(sc_if);
- return(0);
+ return (v);
}
static void
@@ -571,6 +619,7 @@ sk_miibus_statchg(dev)
sc_if = device_get_softc(dev);
+ SK_IF_MII_LOCK(sc_if);
switch(sc_if->sk_softc->sk_type) {
case SK_GENESIS:
sk_xmac_miibus_statchg(sc_if);
@@ -581,6 +630,7 @@ sk_miibus_statchg(dev)
sk_marv_miibus_statchg(sc_if);
break;
}
+ SK_IF_MII_UNLOCK(sc_if);
return;
}
@@ -595,7 +645,6 @@ sk_xmac_miibus_readreg(sc_if, phy, reg)
if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
return(0);
- SK_IF_LOCK(sc_if);
SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
SK_XM_READ_2(sc_if, XM_PHY_DATA);
if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
@@ -607,15 +656,13 @@ sk_xmac_miibus_readreg(sc_if, phy, reg)
}
if (i == SK_TIMEOUT) {
- printf("sk%d: phy failed to come ready\n",
- sc_if->sk_unit);
- SK_IF_UNLOCK(sc_if);
+ if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
return(0);
}
}
DELAY(1);
i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
- SK_IF_UNLOCK(sc_if);
+
return(i);
}
@@ -626,7 +673,6 @@ sk_xmac_miibus_writereg(sc_if, phy, reg, val)
{
int i;
- SK_IF_LOCK(sc_if);
SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
for (i = 0; i < SK_TIMEOUT; i++) {
if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
@@ -634,9 +680,8 @@ sk_xmac_miibus_writereg(sc_if, phy, reg, val)
}
if (i == SK_TIMEOUT) {
- printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
- SK_IF_UNLOCK(sc_if);
- return(ETIMEDOUT);
+ if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
+ return (ETIMEDOUT);
}
SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
@@ -645,9 +690,8 @@ sk_xmac_miibus_writereg(sc_if, phy, reg, val)
if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
break;
}
- SK_IF_UNLOCK(sc_if);
if (i == SK_TIMEOUT)
- printf("sk%d: phy write timed out\n", sc_if->sk_unit);
+ if_printf(sc_if->sk_ifp, "phy write timed out\n");
return(0);
}
@@ -660,7 +704,6 @@ sk_xmac_miibus_statchg(sc_if)
mii = device_get_softc(sc_if->sk_miibus);
- SK_IF_LOCK(sc_if);
/*
* If this is a GMII PHY, manually set the XMAC's
* duplex mode accordingly.
@@ -672,9 +715,6 @@ sk_xmac_miibus_statchg(sc_if)
SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
}
}
- SK_IF_UNLOCK(sc_if);
-
- return;
}
static int
@@ -691,7 +731,6 @@ sk_marv_miibus_readreg(sc_if, phy, reg)
return(0);
}
- SK_IF_LOCK(sc_if);
SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
@@ -703,14 +742,11 @@ sk_marv_miibus_readreg(sc_if, phy, reg)
}
if (i == SK_TIMEOUT) {
- printf("sk%d: phy failed to come ready\n",
- sc_if->sk_unit);
- SK_IF_UNLOCK(sc_if);
+ if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
return(0);
}
val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
- SK_IF_UNLOCK(sc_if);
return(val);
}
@@ -722,7 +758,6 @@ sk_marv_miibus_writereg(sc_if, phy, reg, val)
{
int i;
- SK_IF_LOCK(sc_if);
SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
@@ -732,7 +767,10 @@ sk_marv_miibus_writereg(sc_if, phy, reg, val)
if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
break;
}
- SK_IF_UNLOCK(sc_if);
+ if (i == SK_TIMEOUT) {
+ if_printf(sc_if->sk_ifp, "phy write timeout\n");
+ return (0);
+ }
return(0);
}
@@ -774,16 +812,16 @@ sk_gmchash(addr)
static void
sk_setfilt(sc_if, addr, slot)
struct sk_if_softc *sc_if;
- caddr_t addr;
+ u_int16_t *addr;
int slot;
{
int base;
base = XM_RXFILT_ENTRY(slot);
- SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
- SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
- SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
+ SK_XM_WRITE_2(sc_if, base, addr[0]);
+ SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
+ SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
return;
}
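A note on the sk_setfilt() signature change above (editor's comment, not
part of the commit):

    /*
     * The old prototype took a caddr_t and read it as halfwords via
     * *(u_int16_t *)(&addr[0]), a cast that can fault on strict-
     * alignment platforms when the address is not 2-byte aligned.
     * Passing a u_int16_t array pushes the alignment burden to the
     * callers, which now bcopy() the Ethernet address into an aligned
     * buffer first (see the maddr[] uses in sk_setmulti() below).
     */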
@@ -797,7 +835,8 @@ sk_setmulti(sc_if)
u_int32_t hashes[2] = { 0, 0 };
int h = 0, i;
struct ifmultiaddr *ifma;
- u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
+ u_int16_t dummy[] = { 0, 0, 0 };
+ u_int16_t maddr[(ETHER_ADDR_LEN+1)/2];
SK_IF_LOCK_ASSERT(sc_if);
@@ -805,7 +844,7 @@ sk_setmulti(sc_if)
switch(sc->sk_type) {
case SK_GENESIS:
for (i = 1; i < XM_RXFILT_MAX; i++)
- sk_setfilt(sc_if, (caddr_t)&dummy, i);
+ sk_setfilt(sc_if, dummy, i);
SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
@@ -836,22 +875,28 @@ sk_setmulti(sc_if)
* use the hash table.
*/
if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
- sk_setfilt(sc_if,
- LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
+ bcopy(LLADDR(
+ (struct sockaddr_dl *)ifma->ifma_addr),
+ maddr, ETHER_ADDR_LEN);
+ sk_setfilt(sc_if, maddr, i);
i++;
continue;
}
switch(sc->sk_type) {
case SK_GENESIS:
- h = sk_xmchash(
- LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
+ bcopy(LLADDR(
+ (struct sockaddr_dl *)ifma->ifma_addr),
+ maddr, ETHER_ADDR_LEN);
+ h = sk_xmchash((const uint8_t *)maddr);
break;
case SK_YUKON:
case SK_YUKON_LITE:
case SK_YUKON_LP:
- h = sk_gmchash(
- LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
+ bcopy(LLADDR(
+ (struct sockaddr_dl *)ifma->ifma_addr),
+ maddr, ETHER_ADDR_LEN);
+ h = sk_gmchash((const uint8_t *)maddr);
break;
}
if (h < 32)
@@ -919,279 +964,240 @@ static int
sk_init_rx_ring(sc_if)
struct sk_if_softc *sc_if;
{
- struct sk_chain_data *cd = &sc_if->sk_cdata;
- struct sk_ring_data *rd = sc_if->sk_rdata;
+ struct sk_ring_data *rd;
+ bus_addr_t addr;
+ u_int32_t csum_start;
int i;
- bzero((char *)rd->sk_rx_ring,
- sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
+ sc_if->sk_cdata.sk_rx_cons = 0;
+ csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 |
+ ETHER_HDR_LEN;
+ rd = &sc_if->sk_rdata;
+ bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
for (i = 0; i < SK_RX_RING_CNT; i++) {
- cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
- if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
- return(ENOBUFS);
- if (i == (SK_RX_RING_CNT - 1)) {
- cd->sk_rx_chain[i].sk_next =
- &cd->sk_rx_chain[0];
- rd->sk_rx_ring[i].sk_next =
- vtophys(&rd->sk_rx_ring[0]);
- } else {
- cd->sk_rx_chain[i].sk_next =
- &cd->sk_rx_chain[i + 1];
- rd->sk_rx_ring[i].sk_next =
- vtophys(&rd->sk_rx_ring[i + 1]);
- }
+ if (sk_newbuf(sc_if, i) != 0)
+ return (ENOBUFS);
+ if (i == (SK_RX_RING_CNT - 1))
+ addr = SK_RX_RING_ADDR(sc_if, 0);
+ else
+ addr = SK_RX_RING_ADDR(sc_if, i + 1);
+ rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
+ rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
}
- sc_if->sk_cdata.sk_rx_prod = 0;
- sc_if->sk_cdata.sk_rx_cons = 0;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
+ sc_if->sk_cdata.sk_rx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return(0);
}
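A worked example of the csum_start encoding above (editor's arithmetic,
assuming a standard 20-byte IP header with no options):

    /*
     * csum_start packs the two Rx checksum start positions into one
     * 32-bit word:
     *   bits 31-16: ETHER_HDR_LEN + sizeof(struct ip) = 14 + 20 = 34,
     *               the start of the TCP/UDP header;
     *   bits 15-0:  ETHER_HDR_LEN = 14, the start of the IP header.
     * So csum_start = (34 << 16) | 14 = 0x0022000e for every
     * descriptor in the ring.
     */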
-static void
-sk_init_tx_ring(sc_if)
+static int
+sk_init_jumbo_rx_ring(sc_if)
struct sk_if_softc *sc_if;
{
- struct sk_chain_data *cd = &sc_if->sk_cdata;
- struct sk_ring_data *rd = sc_if->sk_rdata;
+ struct sk_ring_data *rd;
+ bus_addr_t addr;
+ u_int32_t csum_start;
int i;
- bzero((char *)sc_if->sk_rdata->sk_tx_ring,
- sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
-
- for (i = 0; i < SK_TX_RING_CNT; i++) {
- cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
- if (i == (SK_TX_RING_CNT - 1)) {
- cd->sk_tx_chain[i].sk_next =
- &cd->sk_tx_chain[0];
- rd->sk_tx_ring[i].sk_next =
- vtophys(&rd->sk_tx_ring[0]);
- } else {
- cd->sk_tx_chain[i].sk_next =
- &cd->sk_tx_chain[i + 1];
- rd->sk_tx_ring[i].sk_next =
- vtophys(&rd->sk_tx_ring[i + 1]);
- }
+ sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
+
+ csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
+ ETHER_HDR_LEN;
+ rd = &sc_if->sk_rdata;
+ bzero(rd->sk_jumbo_rx_ring,
+ sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
+ for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
+ if (sk_jumbo_newbuf(sc_if, i) != 0)
+ return (ENOBUFS);
+ if (i == (SK_JUMBO_RX_RING_CNT - 1))
+ addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
+ else
+ addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
+ rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
+ rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
}
- sc_if->sk_cdata.sk_tx_prod = 0;
- sc_if->sk_cdata.sk_tx_cons = 0;
- sc_if->sk_cdata.sk_tx_cnt = 0;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- return;
+ return (0);
}
-static int
-sk_newbuf(sc_if, c, m)
+static void
+sk_init_tx_ring(sc_if)
struct sk_if_softc *sc_if;
- struct sk_chain *c;
- struct mbuf *m;
{
- struct mbuf *m_new = NULL;
- struct sk_rx_desc *r;
-
- if (m == NULL) {
- caddr_t *buf = NULL;
+ struct sk_ring_data *rd;
+ struct sk_txdesc *txd;
+ bus_addr_t addr;
+ int i;
- MGETHDR(m_new, M_DONTWAIT, MT_DATA);
- if (m_new == NULL)
- return(ENOBUFS);
+ STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
+ STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
- /* Allocate the jumbo buffer */
- buf = sk_jalloc(sc_if);
- if (buf == NULL) {
- m_freem(m_new);
-#ifdef SK_VERBOSE
- printf("sk%d: jumbo allocation failed "
- "-- packet dropped!\n", sc_if->sk_unit);
-#endif
- return(ENOBUFS);
- }
+ sc_if->sk_cdata.sk_tx_prod = 0;
+ sc_if->sk_cdata.sk_tx_cons = 0;
+ sc_if->sk_cdata.sk_tx_cnt = 0;
- /* Attach the buffer to the mbuf */
- MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
- (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
- m_new->m_data = (void *)buf;
- m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
- } else {
- /*
- * We're re-using a previously allocated mbuf;
- * be sure to re-init pointers and lengths to
- * default values.
- */
- m_new = m;
- m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
- m_new->m_data = m_new->m_ext.ext_buf;
+ rd = &sc_if->sk_rdata;
+ bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
+ for (i = 0; i < SK_TX_RING_CNT; i++) {
+ if (i == (SK_TX_RING_CNT - 1))
+ addr = SK_TX_RING_ADDR(sc_if, 0);
+ else
+ addr = SK_TX_RING_ADDR(sc_if, i + 1);
+ rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
+ txd = &sc_if->sk_cdata.sk_txdesc[i];
+ STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
}
- /*
- * Adjust alignment so packet payload begins on a
- * longword boundary. Mandatory for Alpha, useful on
- * x86 too.
- */
- m_adj(m_new, ETHER_ALIGN);
-
- r = c->sk_desc;
- c->sk_mbuf = m_new;
- r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
- r->sk_ctl = m_new->m_len | SK_RXSTAT;
-
- return(0);
+ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
+ sc_if->sk_cdata.sk_tx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
-/*
- * Allocate jumbo buffer storage. The SysKonnect adapters support
- * "jumbograms" (9K frames), although SysKonnect doesn't currently
- * use them in their drivers. In order for us to use them, we need
- * large 9K receive buffers, however standard mbuf clusters are only
- * 2048 bytes in size. Consequently, we need to allocate and manage
- * our own jumbo buffer pool. Fortunately, this does not require an
- * excessive amount of additional code.
- */
-static int
-sk_alloc_jumbo_mem(sc_if)
+static __inline void
+sk_discard_rxbuf(sc_if, idx)
struct sk_if_softc *sc_if;
+ int idx;
{
- caddr_t ptr;
- register int i;
- struct sk_jpool_entry *entry;
-
- /* Grab a big chunk o' storage. */
- sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
- M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
-
- if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
- printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
- return(ENOBUFS);
- }
+ struct sk_rx_desc *r;
+ struct sk_rxdesc *rxd;
+ struct mbuf *m;
- mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
- SLIST_INIT(&sc_if->sk_jfree_listhead);
- SLIST_INIT(&sc_if->sk_jinuse_listhead);
-
- /*
- * Now divide it up into 9K pieces and save the addresses
- * in an array.
- */
- ptr = sc_if->sk_cdata.sk_jumbo_buf;
- for (i = 0; i < SK_JSLOTS; i++) {
- sc_if->sk_cdata.sk_jslots[i] = ptr;
- ptr += SK_JLEN;
- entry = malloc(sizeof(struct sk_jpool_entry),
- M_DEVBUF, M_NOWAIT);
- if (entry == NULL) {
- sk_free_jumbo_mem(sc_if);
- sc_if->sk_cdata.sk_jumbo_buf = NULL;
- printf("sk%d: no memory for jumbo "
- "buffer queue!\n", sc_if->sk_unit);
- return(ENOBUFS);
- }
- entry->slot = i;
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
- entry, jpool_entries);
- }
-
- return(0);
+ r = &sc_if->sk_rdata.sk_rx_ring[idx];
+ rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
+ m = rxd->rx_m;
+ r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}
-static void
-sk_free_jumbo_mem(sc_if)
+static __inline void
+sk_discard_jumbo_rxbuf(sc_if, idx)
struct sk_if_softc *sc_if;
+ int idx;
{
- struct sk_jpool_entry *entry;
-
- SK_JLIST_LOCK(sc_if);
-
- /* We cannot release external mbuf storage while in use. */
- if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) {
- printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit);
- SK_JLIST_UNLOCK(sc_if);
- return;
- }
-
- while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
- entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
- SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
- free(entry, M_DEVBUF);
- }
-
- SK_JLIST_UNLOCK(sc_if);
-
- mtx_destroy(&sc_if->sk_jlist_mtx);
-
- contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
+ struct sk_rx_desc *r;
+ struct sk_rxdesc *rxd;
+ struct mbuf *m;
- return;
+ r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
+ rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
+ m = rxd->rx_m;
+ r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}
-/*
- * Allocate a jumbo buffer.
- */
-static void *
-sk_jalloc(sc_if)
+static int
+sk_newbuf(sc_if, idx)
struct sk_if_softc *sc_if;
+ int idx;
{
- struct sk_jpool_entry *entry;
-
- SK_JLIST_LOCK(sc_if);
-
- entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
-
- if (entry == NULL) {
-#ifdef SK_VERBOSE
- printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
-#endif
- SK_JLIST_UNLOCK(sc_if);
- return(NULL);
+ struct sk_rx_desc *r;
+ struct sk_rxdesc *rxd;
+ struct mbuf *m;
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
+ int nsegs;
+
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ return (ENOBUFS);
+ m->m_len = m->m_pkthdr.len = MCLBYTES;
+ m_adj(m, ETHER_ALIGN);
+
+ if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
+ sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
+ m_freem(m);
+ return (ENOBUFS);
}
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
- SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
-
- SK_JLIST_UNLOCK(sc_if);
+ rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
+ if (rxd->rx_m != NULL) {
+ bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
+ }
+ map = rxd->rx_dmamap;
+ rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
+ sc_if->sk_cdata.sk_rx_sparemap = map;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
+ BUS_DMASYNC_PREREAD);
+ rxd->rx_m = m;
+ r = &sc_if->sk_rdata.sk_rx_ring[idx];
+ r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
+ r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
+ r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
- return(sc_if->sk_cdata.sk_jslots[entry->slot]);
+ return (0);
}
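The spare-map rotation in sk_newbuf() above is the usual busdma pattern for
keeping an Rx ring intact under memory pressure; condensed (editor's note,
using this driver's own field names):

    /*
     * The new mbuf is loaded into sk_rx_sparemap first. If the load
     * fails, sk_newbuf() returns before the descriptor is touched, so
     * the previous mbuf and its mapping stay valid and the ring never
     * contains a hole. Only after a successful load are the maps
     * swapped, leaving the old map as the next spare.
     */
    map = rxd->rx_dmamap;
    rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
    sc_if->sk_cdata.sk_rx_sparemap = map;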
-/*
- * Release a jumbo buffer.
- */
-static void
-sk_jfree(buf, args)
- void *buf;
- void *args;
-{
+static int
+sk_jumbo_newbuf(sc_if, idx)
struct sk_if_softc *sc_if;
- int i;
- struct sk_jpool_entry *entry;
-
- /* Extract the softc struct pointer. */
- sc_if = (struct sk_if_softc *)args;
- if (sc_if == NULL)
- panic("sk_jfree: didn't get softc pointer!");
-
- SK_JLIST_LOCK(sc_if);
-
- /* calculate the slot this buffer belongs to */
- i = ((vm_offset_t)buf
- - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
+ int idx;
+{
+ struct sk_rx_desc *r;
+ struct sk_rxdesc *rxd;
+ struct mbuf *m;
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
+ int nsegs;
+ void *buf;
- if ((i < 0) || (i >= SK_JSLOTS))
- panic("sk_jfree: asked to free buffer that we don't manage!");
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (ENOBUFS);
+ buf = sk_jalloc(sc_if);
+ if (buf == NULL) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ /* Attach the buffer to the mbuf */
+ MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0,
+ EXT_NET_DRV);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ m->m_pkthdr.len = m->m_len = SK_JLEN;
+ /*
+ * Adjust alignment so packet payload begins on a
+ * longword boundary. Mandatory for Alpha, useful on
+ * x86 too.
+ */
+ m_adj(m, ETHER_ALIGN);
- entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
- if (entry == NULL)
- panic("sk_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
- if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
- wakeup(sc_if);
+ if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+ rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
+ if (rxd->rx_m != NULL) {
+ bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
+ rxd->rx_dmamap);
+ }
+ map = rxd->rx_dmamap;
+ rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
+ sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
+ BUS_DMASYNC_PREREAD);
+ rxd->rx_m = m;
+ r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
+ r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
+ r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
+ r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
- SK_JLIST_UNLOCK(sc_if);
- return;
+ return (0);
}
/*
@@ -1240,9 +1246,10 @@ sk_ioctl(ifp, command, data)
{
struct sk_if_softc *sc_if = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
- int error = 0;
+ int error, mask;
struct mii_data *mii;
+ error = 0;
switch(command) {
case SIOCSIFMTU:
SK_IF_LOCK(sc_if);
@@ -1272,15 +1279,12 @@ sk_ioctl(ifp, command, data)
}
sc_if->sk_if_flags = ifp->if_flags;
SK_IF_UNLOCK(sc_if);
- error = 0;
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
SK_IF_LOCK(sc_if);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
sk_setmulti(sc_if);
- error = 0;
- }
SK_IF_UNLOCK(sc_if);
break;
case SIOCGIFMEDIA:
@@ -1288,12 +1292,29 @@ sk_ioctl(ifp, command, data)
mii = device_get_softc(sc_if->sk_miibus);
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
break;
+ case SIOCSIFCAP:
+ SK_IF_LOCK(sc_if);
+ if (sc_if->sk_softc->sk_type == SK_GENESIS) {
+ SK_IF_UNLOCK(sc_if);
+ break;
+ }
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ if (mask & IFCAP_HWCSUM) {
+ ifp->if_capenable ^= IFCAP_HWCSUM;
+ if (IFCAP_HWCSUM & ifp->if_capenable &&
+ IFCAP_HWCSUM & ifp->if_capabilities)
+ ifp->if_hwassist = SK_CSUM_FEATURES;
+ else
+ ifp->if_hwassist = 0;
+ }
+ SK_IF_UNLOCK(sc_if);
+ break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
- return(error);
+ return (error);
}
/*
@@ -1336,6 +1357,7 @@ static void
sk_reset(sc)
struct sk_softc *sc;
{
+
CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
if (SK_YUKON_FAMILY(sc->sk_type))
@@ -1378,8 +1400,8 @@ sk_reset(sc)
break;
}
if (bootverbose)
- printf("skc%d: interrupt moderation is %d us\n",
- sc->sk_unit, sc->sk_int_mod);
+ device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
+ sc->sk_int_mod);
sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
sc->sk_int_ticks));
sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
@@ -1439,8 +1461,7 @@ sk_attach(dev)
sc = device_get_softc(device_get_parent(dev));
port = *(int *)device_get_ivars(dev);
- sc_if->sk_dev = dev;
- sc_if->sk_unit = device_get_unit(dev);
+ sc_if->sk_if_dev = dev;
sc_if->sk_port = port;
sc_if->sk_softc = sc;
sc->sk_if[port] = sc_if;
@@ -1449,27 +1470,16 @@ sk_attach(dev)
if (port == SK_PORT_B)
sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
- /* Allocate the descriptor queues. */
- sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
- M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0);
+ callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
- if (sc_if->sk_rdata == NULL) {
- printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
- error = ENOMEM;
- goto fail;
- }
-
- /* Try to allocate memory for jumbo buffers. */
- if (sk_alloc_jumbo_mem(sc_if)) {
- printf("sk%d: jumbo buffer allocation failed\n",
- sc_if->sk_unit);
+ if (sk_dma_alloc(sc_if) != 0) {
error = ENOMEM;
goto fail;
}
ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
- printf("sk%d: can not if_alloc()\n", sc_if->sk_unit);
+ device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
error = ENOSPC;
goto fail;
}
@@ -1478,11 +1488,16 @@ sk_attach(dev)
ifp->if_mtu = ETHERMTU;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
/*
- * The hardware should be ready for VLAN_MTU by default:
- * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
- * YU_SMR_MFL_VLAN is set by this driver in Yukon.
+ * SK_GENESIS has a bug in checksum offload - noted in the Linux driver.
*/
- ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_MTU;
+ if (sc_if->sk_softc->sk_type != SK_GENESIS) {
+ ifp->if_capabilities = IFCAP_HWCSUM;
+ ifp->if_hwassist = SK_CSUM_FEATURES;
+ } else {
+ ifp->if_capabilities = 0;
+ ifp->if_hwassist = 0;
+ }
+ ifp->if_capenable = ifp->if_capabilities;
ifp->if_ioctl = sk_ioctl;
ifp->if_start = sk_start;
ifp->if_watchdog = sk_watchdog;
@@ -1491,8 +1506,6 @@ sk_attach(dev)
ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
IFQ_SET_READY(&ifp->if_snd);
- callout_handle_init(&sc_if->sk_tick_ch);
-
/*
* Get station address for this interface. Note that
* dual port cards actually come with three station
@@ -1502,7 +1515,7 @@ sk_attach(dev)
* are operating in failover mode. Currently we don't
* use this extra address.
*/
- SK_LOCK(sc);
+ SK_IF_LOCK(sc_if);
for (i = 0; i < ETHER_ADDR_LEN; i++)
eaddr[i] =
sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
@@ -1553,20 +1566,34 @@ sk_attach(dev)
sc_if->sk_phyaddr = SK_PHYADDR_MARV;
break;
default:
- printf("skc%d: unsupported PHY type: %d\n",
- sc->sk_unit, sc_if->sk_phytype);
+ device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
+ sc_if->sk_phytype);
error = ENODEV;
- SK_UNLOCK(sc);
+ SK_IF_UNLOCK(sc_if);
goto fail;
}
-
/*
* Call MI attach routine. Can't hold locks when calling into ether_*.
*/
- SK_UNLOCK(sc);
+ SK_IF_UNLOCK(sc_if);
ether_ifattach(ifp, eaddr);
- SK_LOCK(sc);
+ SK_IF_LOCK(sc_if);
+
+ /*
+ * The hardware should be ready for VLAN_MTU by default:
+ * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
+ * YU_SMR_MFL_VLAN is set by this driver in Yukon.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_MTU;
+ ifp->if_capenable |= IFCAP_VLAN_MTU;
+ /*
+ * Tell the upper layer(s) we support long frames.
+ * Must appear after the call to ether_ifattach() because
+ * ether_ifattach() sets ifi_hdrlen to the default value.
+ */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
/*
* Do miibus setup.
@@ -1582,10 +1609,10 @@ sk_attach(dev)
break;
}
- SK_UNLOCK(sc);
+ SK_IF_UNLOCK(sc_if);
if (mii_phy_probe(dev, &sc_if->sk_miibus,
sk_ifmedia_upd, sk_ifmedia_sts)) {
- printf("skc%d: no PHY found!\n", sc_if->sk_unit);
+ device_printf(sc_if->sk_if_dev, "no PHY found!\n");
ether_ifdetach(ifp);
error = ENXIO;
goto fail;
@@ -1610,15 +1637,16 @@ skc_attach(dev)
device_t dev;
{
struct sk_softc *sc;
- int unit, error = 0, rid, *port;
+ int error = 0, rid, *port;
uint8_t skrs;
char *pname, *revstr;
sc = device_get_softc(dev);
- unit = device_get_unit(dev);
+ sc->sk_dev = dev;
mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
- MTX_DEF | MTX_RECURSE);
+ MTX_DEF);
+ mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
/*
* Map control/status registers.
*/
@@ -1628,7 +1656,7 @@ skc_attach(dev)
sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
if (sc->sk_res == NULL) {
- printf("sk%d: couldn't map ports/memory\n", unit);
+ device_printf(dev, "couldn't map ports/memory\n");
error = ENXIO;
goto fail;
}
@@ -1641,8 +1669,8 @@ skc_attach(dev)
/* Bail out if chip is not recognized. */
if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
- printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
- unit, sc->sk_type, sc->sk_rev);
+ device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
+ sc->sk_type, sc->sk_rev);
error = ENXIO;
goto fail;
}
@@ -1653,7 +1681,7 @@ skc_attach(dev)
RF_SHAREABLE | RF_ACTIVE);
if (sc->sk_irq == NULL) {
- printf("skc%d: couldn't map interrupt\n", unit);
+ device_printf(dev, "couldn't map interrupt\n");
error = ENXIO;
goto fail;
}
@@ -1666,13 +1694,13 @@ skc_attach(dev)
/* Pull in device tunables. */
sc->sk_int_mod = SK_IM_DEFAULT;
- error = resource_int_value(device_get_name(dev), unit,
+ error = resource_int_value(device_get_name(dev), device_get_unit(dev),
"int_mod", &sc->sk_int_mod);
if (error == 0) {
if (sc->sk_int_mod < SK_IM_MIN ||
sc->sk_int_mod > SK_IM_MAX) {
- printf("skc%d: int_mod value out of range; "
- "using default: %d\n", unit, SK_IM_DEFAULT);
+ device_printf(dev, "int_mod value out of range; "
+ "using default: %d\n", SK_IM_DEFAULT);
sc->sk_int_mod = SK_IM_DEFAULT;
}
}
@@ -1680,8 +1708,6 @@ skc_attach(dev)
/* Reset the adapter. */
sk_reset(sc);
- sc->sk_unit = unit;
-
/* Read and save vital product data from EEPROM. */
sk_vpd_read(sc);
@@ -1706,8 +1732,7 @@ skc_attach(dev)
sc->sk_rboff = SK_RBOFF_0;
break;
default:
- printf("skc%d: unknown ram size: %d\n",
- sc->sk_unit, skrs);
+ device_printf(dev, "unknown ram size: %d\n", skrs);
error = ENXIO;
goto fail;
}
@@ -1734,8 +1759,8 @@ skc_attach(dev)
sc->sk_pmd = IFM_1000_T;
break;
default:
- printf("skc%d: unknown media type: 0x%x\n",
- sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
+ device_printf(dev, "unknown media type: 0x%x\n",
+ sk_win_read_1(sc, SK_PMDTYPE));
error = ENXIO;
goto fail;
}
@@ -1906,7 +1931,7 @@ skc_attach(dev)
sk_intr, sc, &sc->sk_intrhand);
if (error) {
- printf("skc%d: couldn't set up irq\n", unit);
+ device_printf(dev, "couldn't set up irq\n");
goto fail;
}
@@ -1942,6 +1967,7 @@ sk_detach(dev)
sk_stop(sc_if);
/* Can't hold locks while calling detach */
SK_IF_UNLOCK(sc_if);
+ callout_drain(&sc_if->sk_tick_ch);
ether_ifdetach(ifp);
SK_IF_LOCK(sc_if);
}
@@ -1957,12 +1983,7 @@ sk_detach(dev)
device_delete_child(dev, sc_if->sk_miibus);
*/
bus_generic_detach(dev);
- if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
- sk_free_jumbo_mem(sc_if);
- if (sc_if->sk_rdata != NULL) {
- contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
- M_DEVBUF);
- }
+ sk_dma_free(sc_if);
SK_IF_UNLOCK(sc_if);
return(0);
@@ -2001,61 +2022,709 @@ skc_detach(dev)
if (sc->sk_res)
bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
+ mtx_destroy(&sc->sk_mii_mtx);
mtx_destroy(&sc->sk_mtx);
return(0);
}
+struct sk_dmamap_arg {
+ bus_addr_t sk_busaddr;
+};
+
+static void
+sk_dmamap_cb(arg, segs, nseg, error)
+ void *arg;
+ bus_dma_segment_t *segs;
+ int nseg;
+ int error;
+{
+ struct sk_dmamap_arg *ctx;
+
+ if (error != 0)
+ return;
+
+ ctx = arg;
+ ctx->sk_busaddr = segs[0].ds_addr;
+}
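sk_dmamap_cb() follows the standard busdma callback contract; a brief note
(editor's comment):

    /*
     * Every tag loaded through this callback (the rings and the jumbo
     * block) is created with nsegments = 1, so a successful load
     * always yields exactly one segment and segs[0].ds_addr is the
     * whole answer. When error != 0 the segment array is not valid,
     * hence the early return.
     */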
+
+/*
+ * Allocate jumbo buffer storage. The SysKonnect adapters support
+ * "jumbograms" (9K frames), although SysKonnect doesn't currently
+ * use them in their drivers. In order for us to use them, we need
+ * large 9K receive buffers, however standard mbuf clusters are only
+ * 2048 bytes in size. Consequently, we need to allocate and manage
+ * our own jumbo buffer pool. Fortunately, this does not require an
+ * excessive amount of additional code.
+ */
static int
-sk_encap(sc_if, m_head, txidx)
- struct sk_if_softc *sc_if;
- struct mbuf *m_head;
- u_int32_t *txidx;
+sk_dma_alloc(sc_if)
+ struct sk_if_softc *sc_if;
{
- struct sk_tx_desc *f = NULL;
- struct mbuf *m;
- u_int32_t frag, cur, cnt = 0;
+ struct sk_dmamap_arg ctx;
+ struct sk_txdesc *txd;
+ struct sk_rxdesc *rxd;
+ struct sk_rxdesc *jrxd;
+ u_int8_t *ptr;
+ struct sk_jpool_entry *entry;
+ int error, i;
- SK_IF_LOCK_ASSERT(sc_if);
+ mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
+ SLIST_INIT(&sc_if->sk_jfree_listhead);
+ SLIST_INIT(&sc_if->sk_jinuse_listhead);
- m = m_head;
- cur = frag = *txidx;
+ /* create parent tag */
+ /*
+ * XXX
+ * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
+ * in bus_dma_tag_create(9) as the NIC would support DAC mode.
+ * However bz@ reported that it does not work on amd64 with > 4GB
+ * RAM. Until we have more clues about the breakage, disable DAC mode
+ * by limiting DMA addresses to the 32-bit address space.
+ */
+ error = bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_parent_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to create parent DMA tag\n");
+ goto fail;
+ }
+ /* create tag for Tx ring */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ SK_RING_ALIGN, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ SK_TX_RING_SZ, /* maxsize */
+ 1, /* nsegments */
+ SK_TX_RING_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_tx_ring_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate Tx ring DMA tag\n");
+ goto fail;
+ }
+
+ /* create tag for Rx ring */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ SK_RING_ALIGN, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ SK_RX_RING_SZ, /* maxsize */
+ 1, /* nsegments */
+ SK_RX_RING_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_rx_ring_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate Rx ring DMA tag\n");
+ goto fail;
+ }
+
+ /* create tag for jumbo Rx ring */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ SK_RING_ALIGN, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ SK_JUMBO_RX_RING_SZ, /* maxsize */
+ 1, /* nsegments */
+ SK_JUMBO_RX_RING_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate jumbo Rx ring DMA tag\n");
+ goto fail;
+ }
+
+ /* create tag for jumbo buffer blocks */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ PAGE_SIZE, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ SK_JMEM, /* maxsize */
+ 1, /* nsegments */
+ SK_JMEM, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_jumbo_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate jumbo Rx buffer block DMA tag\n");
+ goto fail;
+ }
+
+ /* create tag for Tx buffers */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES * SK_MAXTXSEGS, /* maxsize */
+ SK_MAXTXSEGS, /* nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_tx_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate Tx DMA tag\n");
+ goto fail;
+ }
+
+ /* create tag for Rx buffers */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, /* maxsize */
+ 1, /* nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_rx_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate Rx DMA tag\n");
+ goto fail;
+ }
+
+ /* create tag for jumbo Rx buffers */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ PAGE_SIZE, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES * SK_MAXRXSEGS, /* maxsize */
+ SK_MAXRXSEGS, /* nsegments */
+ SK_JLEN, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_jumbo_rx_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate jumbo Rx DMA tag\n");
+ goto fail;
+ }
+
+ /* allocate DMA'able memory and load the DMA map for Tx ring */
+ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
+ (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
+ &sc_if->sk_cdata.sk_tx_ring_map);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate DMA'able memory for Tx ring\n");
+ goto fail;
+ }
+
+ ctx.sk_busaddr = 0;
+ error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
+ sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
+ SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to load DMA'able memory for Tx ring\n");
+ goto fail;
+ }
+ sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
+
+ /* allocate DMA'able memory and load the DMA map for Rx ring */
+ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
+ (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
+ &sc_if->sk_cdata.sk_rx_ring_map);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate DMA'able memory for Rx ring\n");
+ goto fail;
+ }
+
+ ctx.sk_busaddr = 0;
+ error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
+ sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
+ SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to load DMA'able memory for Rx ring\n");
+ goto fail;
+ }
+ sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
+
+ /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
+ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
+ BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate DMA'able memory for jumbo Rx ring\n");
+ goto fail;
+ }
+
+ ctx.sk_busaddr = 0;
+ error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map,
+ sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
+ &ctx, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to load DMA'able memory for jumbo Rx ring\n");
+ goto fail;
+ }
+ sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
+
+ /* create DMA maps for Tx buffers */
+ for (i = 0; i < SK_TX_RING_CNT; i++) {
+ txd = &sc_if->sk_cdata.sk_txdesc[i];
+ txd->tx_m = NULL;
+ txd->tx_dmamap = 0;
+ error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
+ &txd->tx_dmamap);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to create Tx dmamap\n");
+ goto fail;
+ }
+ }
+ /* create DMA maps for Rx buffers */
+ if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
+ &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to create spare Rx dmamap\n");
+ goto fail;
+ }
+ for (i = 0; i < SK_RX_RING_CNT; i++) {
+ rxd = &sc_if->sk_cdata.sk_rxdesc[i];
+ rxd->rx_m = NULL;
+ rxd->rx_dmamap = 0;
+ error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
+ &rxd->rx_dmamap);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to create Rx dmamap\n");
+ goto fail;
+ }
+ }
+ /* create DMA maps for jumbo Rx buffers */
+ if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
+ &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to create spare jumbo Rx dmamap\n");
+ goto fail;
+ }
+ for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
+ jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
+ jrxd->rx_m = NULL;
+ jrxd->rx_dmamap = 0;
+ error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
+ &jrxd->rx_dmamap);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to create jumbo Rx dmamap\n");
+ goto fail;
+ }
+ }
+
+ /* allocate DMA'able memory and load the DMA map for jumbo buf */
+ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag,
+ (void **)&sc_if->sk_rdata.sk_jumbo_buf,
+ BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate DMA'able memory for jumbo buf\n");
+ goto fail;
+ }
+
+ ctx.sk_busaddr = 0;
+ error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag,
+ sc_if->sk_cdata.sk_jumbo_map,
+ sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb,
+ &ctx, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to load DMA'able memory for jumbobuf\n");
+ goto fail;
+ }
+ sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr;
/*
- * Start packing the mbufs in this chain into
- * the fragment pointers. Stop when we run out
- * of fragments or hit the end of the mbuf chain.
+ * Now divide it up into 9K pieces and save the addresses
+ * in an array.
*/
- for (m = m_head; m != NULL; m = m->m_next) {
- if (m->m_len != 0) {
- if ((SK_TX_RING_CNT -
- (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
- return(ENOBUFS);
- f = &sc_if->sk_rdata->sk_tx_ring[frag];
- f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
- f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
- if (cnt == 0)
- f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
- else
- f->sk_ctl |= SK_TXCTL_OWN;
- cur = frag;
- SK_INC(frag, SK_TX_RING_CNT);
- cnt++;
+ ptr = sc_if->sk_rdata.sk_jumbo_buf;
+ for (i = 0; i < SK_JSLOTS; i++) {
+ sc_if->sk_cdata.sk_jslots[i] = ptr;
+ ptr += SK_JLEN;
+ entry = malloc(sizeof(struct sk_jpool_entry),
+ M_DEVBUF, M_NOWAIT);
+ if (entry == NULL) {
+ device_printf(sc_if->sk_if_dev,
+ "no memory for jumbo buffers!\n");
+ error = ENOMEM;
+ goto fail;
}
+ entry->slot = i;
+ SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
+ jpool_entries);
}
- if (m != NULL)
- return(ENOBUFS);
+fail:
+ return (error);
+}
- sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
- SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
- sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
- sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
- sc_if->sk_cdata.sk_tx_cnt += cnt;
+static void
+sk_dma_free(sc_if)
+ struct sk_if_softc *sc_if;
+{
+ struct sk_txdesc *txd;
+ struct sk_rxdesc *rxd;
+ struct sk_rxdesc *jrxd;
+ struct sk_jpool_entry *entry;
+ int i;
- *txidx = frag;
+ SK_JLIST_LOCK(sc_if);
+ while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) {
+ device_printf(sc_if->sk_if_dev,
+ "asked to free buffer that is in use!\n");
+ SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
+ jpool_entries);
+ }
- return(0);
+ while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
+ entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
+ SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
+ free(entry, M_DEVBUF);
+ }
+ SK_JLIST_UNLOCK(sc_if);
+
+ /* destroy jumbo buffer block */
+ if (sc_if->sk_cdata.sk_jumbo_map)
+ bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag,
+ sc_if->sk_cdata.sk_jumbo_map);
+
+ if (sc_if->sk_rdata.sk_jumbo_buf) {
+ bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag,
+ sc_if->sk_rdata.sk_jumbo_buf,
+ sc_if->sk_cdata.sk_jumbo_map);
+ sc_if->sk_rdata.sk_jumbo_buf = NULL;
+ sc_if->sk_cdata.sk_jumbo_map = 0;
+ }
+
+ /* Tx ring */
+ if (sc_if->sk_cdata.sk_tx_ring_tag) {
+ if (sc_if->sk_cdata.sk_tx_ring_map)
+ bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
+ sc_if->sk_cdata.sk_tx_ring_map);
+ if (sc_if->sk_cdata.sk_tx_ring_map &&
+ sc_if->sk_rdata.sk_tx_ring)
+ bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
+ sc_if->sk_rdata.sk_tx_ring,
+ sc_if->sk_cdata.sk_tx_ring_map);
+ sc_if->sk_rdata.sk_tx_ring = NULL;
+ sc_if->sk_cdata.sk_tx_ring_map = 0;
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
+ sc_if->sk_cdata.sk_tx_ring_tag = NULL;
+ }
+ /* Rx ring */
+ if (sc_if->sk_cdata.sk_rx_ring_tag) {
+ if (sc_if->sk_cdata.sk_rx_ring_map)
+ bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
+ sc_if->sk_cdata.sk_rx_ring_map);
+ if (sc_if->sk_cdata.sk_rx_ring_map &&
+ sc_if->sk_rdata.sk_rx_ring)
+ bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
+ sc_if->sk_rdata.sk_rx_ring,
+ sc_if->sk_cdata.sk_rx_ring_map);
+ sc_if->sk_rdata.sk_rx_ring = NULL;
+ sc_if->sk_cdata.sk_rx_ring_map = 0;
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
+ sc_if->sk_cdata.sk_rx_ring_tag = NULL;
+ }
+ /* jumbo Rx ring */
+ if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
+ if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
+ bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map);
+ if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
+ sc_if->sk_rdata.sk_jumbo_rx_ring)
+ bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_rdata.sk_jumbo_rx_ring,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map);
+ sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0;
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
+ sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
+ }
+ /* Tx buffers */
+ if (sc_if->sk_cdata.sk_tx_tag) {
+ for (i = 0; i < SK_TX_RING_CNT; i++) {
+ txd = &sc_if->sk_cdata.sk_txdesc[i];
+ if (txd->tx_dmamap) {
+ bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
+ txd->tx_dmamap);
+ txd->tx_dmamap = 0;
+ }
+ }
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
+ sc_if->sk_cdata.sk_tx_tag = NULL;
+ }
+ /* Rx buffers */
+ if (sc_if->sk_cdata.sk_rx_tag) {
+ for (i = 0; i < SK_RX_RING_CNT; i++) {
+ rxd = &sc_if->sk_cdata.sk_rxdesc[i];
+ if (rxd->rx_dmamap) {
+ bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
+ rxd->rx_dmamap);
+ rxd->rx_dmamap = 0;
+ }
+ }
+ if (sc_if->sk_cdata.sk_rx_sparemap) {
+ bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
+ sc_if->sk_cdata.sk_rx_sparemap);
+ sc_if->sk_cdata.sk_rx_sparemap = 0;
+ }
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
+ sc_if->sk_cdata.sk_rx_tag = NULL;
+ }
+ /* jumbo Rx buffers */
+ if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
+ for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
+ jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
+ if (jrxd->rx_dmamap) {
+ bus_dmamap_destroy(
+ sc_if->sk_cdata.sk_jumbo_rx_tag,
+ jrxd->rx_dmamap);
+ jrxd->rx_dmamap = 0;
+ }
+ }
+ if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
+ bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_sparemap);
+ sc_if->sk_cdata.sk_jumbo_rx_sparemap = 0;
+ }
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
+ sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
+ }
+
+ if (sc_if->sk_cdata.sk_parent_tag) {
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
+ sc_if->sk_cdata.sk_parent_tag = NULL;
+ }
+ mtx_destroy(&sc_if->sk_jlist_mtx);
+}
+
+/*
+ * Allocate a jumbo buffer.
+ */
+static void *
+sk_jalloc(sc_if)
+ struct sk_if_softc *sc_if;
+{
+ struct sk_jpool_entry *entry;
+
+ SK_JLIST_LOCK(sc_if);
+
+ entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
+
+ if (entry == NULL) {
+ SK_JLIST_UNLOCK(sc_if);
+ return (NULL);
+ }
+
+ SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
+
+ SK_JLIST_UNLOCK(sc_if);
+
+ return (sc_if->sk_cdata.sk_jslots[entry->slot]);
+}
+
+/*
+ * Release a jumbo buffer.
+ */
+static void
+sk_jfree(buf, args)
+ void *buf;
+ void *args;
+{
+ struct sk_if_softc *sc_if;
+ struct sk_jpool_entry *entry;
+ int i;
+
+ /* Extract the softc struct pointer. */
+ sc_if = (struct sk_if_softc *)args;
+ KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
+
+ SK_JLIST_LOCK(sc_if);
+ /* calculate the slot this buffer belongs to */
+ i = ((vm_offset_t)buf
+ - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN;
+ KASSERT(i >= 0 && i < SK_JSLOTS,
+ ("%s: asked to free buffer that we don't manage!", __func__));
+
+ entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
+ KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
+ entry->slot = i;
+ SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
+ if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
+ wakeup(sc_if);
+
+ SK_JLIST_UNLOCK(sc_if);
+}
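The slot recovery in sk_jfree() above is plain pointer arithmetic over the
contiguous jumbo block (editor's example):

    /*
     * sk_jalloc() only ever hands out sk_jumbo_buf + slot * SK_JLEN,
     * so for a buffer from slot 3 the division
     *   (buf - sk_jumbo_buf) / SK_JLEN = (3 * SK_JLEN) / SK_JLEN = 3
     * recovers the slot index exactly, with no per-buffer bookkeeping.
     */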
+
+static void
+sk_txcksum(ifp, m, f)
+ struct ifnet *ifp;
+ struct mbuf *m;
+ struct sk_tx_desc *f;
+{
+ struct ip *ip;
+ u_int16_t offset;
+ u_int8_t *p;
+
+ offset = sizeof(struct ip) + ETHER_HDR_LEN;
+ for (; m && m->m_len == 0; m = m->m_next)
+ ;
+ if (m == NULL || m->m_len < ETHER_HDR_LEN) {
+ if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
+ /* checksum may be corrupted */
+ goto sendit;
+ }
+ if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
+ if (m->m_len != ETHER_HDR_LEN) {
+ if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
+ __func__);
+ /* checksum may be corrupted */
+ goto sendit;
+ }
+ for (m = m->m_next; m && m->m_len == 0; m = m->m_next)
+ ;
+ if (m == NULL) {
+ offset = sizeof(struct ip) + ETHER_HDR_LEN;
+ /* checksum may be corrupted */
+ goto sendit;
+ }
+ ip = mtod(m, struct ip *);
+ } else {
+ p = mtod(m, u_int8_t *);
+ p += ETHER_HDR_LEN;
+ ip = (struct ip *)p;
+ }
+ offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
+
+sendit:
+ f->sk_csum_startval = 0;
+ f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
+ (offset << 16));
+}
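For a TCP segment with no IP options, the descriptor word built by
sk_txcksum() works out as follows (editor's arithmetic):

    /*
     * offset = (ip->ip_hl << 2) + ETHER_HDR_LEN = 20 + 14 = 34 (0x22),
     * where the hardware starts summing. For CSUM_TCP the stack sets
     * m_pkthdr.csum_data to the checksum field offset inside the TCP
     * header, i.e. 16, so the store position is 34 + 16 = 50 (0x32).
     * The packed descriptor word is (34 << 16) | 50 = 0x00220032.
     */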
+
+static int
+sk_encap(sc_if, m_head)
+ struct sk_if_softc *sc_if;
+ struct mbuf **m_head;
+{
+ struct sk_txdesc *txd;
+ struct sk_tx_desc *f = NULL;
+ struct mbuf *m, *n;
+ bus_dma_segment_t txsegs[SK_MAXTXSEGS];
+ u_int32_t cflags, frag, si, sk_ctl;
+ int error, i, nseg;
+
+ SK_IF_LOCK_ASSERT(sc_if);
+
+ if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
+ return (ENOBUFS);
+
+ m = *m_head;
+ error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
+ txd->tx_dmamap, m, txsegs, &nseg, 0);
+ if (error == EFBIG) {
+ n = m_defrag(m, M_DONTWAIT);
+ if (n == NULL) {
+ m_freem(m);
+ m = NULL;
+ return (ENOMEM);
+ }
+ m = n;
+ error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
+ txd->tx_dmamap, m, txsegs, &nseg, 0);
+ if (error != 0) {
+ m_freem(m);
+ m = NULL;
+ return (error);
+ }
+ } else if (error != 0)
+ return (error);
+ if (nseg == 0) {
+ m_freem(m);
+ m = NULL;
+ return (EIO);
+ }
+ if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
+ bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
+ return (ENOBUFS);
+ }
+
+ if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
+ cflags = SK_OPCODE_CSUM;
+ else
+ cflags = SK_OPCODE_DEFAULT;
+ si = frag = sc_if->sk_cdata.sk_tx_prod;
+ for (i = 0; i < nseg; i++) {
+ f = &sc_if->sk_rdata.sk_tx_ring[frag];
+ f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
+ f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
+ sk_ctl = txsegs[i].ds_len | cflags;
+ if (i == 0) {
+ if (cflags == SK_OPCODE_CSUM)
+ sk_txcksum(sc_if->sk_ifp, m, f);
+ sk_ctl |= SK_TXCTL_FIRSTFRAG;
+ } else
+ sk_ctl |= SK_TXCTL_OWN;
+ f->sk_ctl = htole32(sk_ctl);
+ sc_if->sk_cdata.sk_tx_cnt++;
+ SK_INC(frag, SK_TX_RING_CNT);
+ }
+ sc_if->sk_cdata.sk_tx_prod = frag;
+
+ /* set EOF on the last descriptor */
+ frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
+ f = &sc_if->sk_rdata.sk_tx_ring[frag];
+ f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
+
+ /* turn the first descriptor ownership to NIC */
+ f = &sc_if->sk_rdata.sk_tx_ring[si];
+ f->sk_ctl |= htole32(SK_TXCTL_OWN);
+
+ STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
+ STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
+ txd->tx_m = m;
+
+ /* sync descriptors */
+ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
+ sc_if->sk_cdata.sk_tx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ return (0);
}
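Note the ownership ordering sk_encap() establishes above (editor's summary):

    /*
     * 1. Every fragment except the first is written with SK_TXCTL_OWN
     *    already set.
     * 2. The tail descriptor then gets SK_TXCTL_LASTFRAG and
     *    SK_TXCTL_EOF_INTR.
     * 3. Only as the final step is SK_TXCTL_OWN set on the first
     *    descriptor, so the NIC can never begin DMA on a chain that
     *    is still being assembled.
     */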
static void
@@ -2079,17 +2748,16 @@ sk_start_locked(ifp)
{
struct sk_softc *sc;
struct sk_if_softc *sc_if;
- struct mbuf *m_head = NULL;
- u_int32_t idx;
+ struct mbuf *m_head;
+ int enq;
sc_if = ifp->if_softc;
sc = sc_if->sk_softc;
SK_IF_LOCK_ASSERT(sc_if);
- idx = sc_if->sk_cdata.sk_tx_prod;
-
- while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
+ for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
+ sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
@@ -2099,12 +2767,15 @@ sk_start_locked(ifp)
* don't have room, set the OACTIVE flag and wait
* for the NIC to drain the ring.
*/
- if (sk_encap(sc_if, m_head, &idx)) {
+ if (sk_encap(sc_if, &m_head)) {
+ if (m_head == NULL)
+ break;
IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
+ enq++;
/*
* If there's a BPF listener, bounce a copy of this frame
* to him.
@@ -2112,16 +2783,13 @@ sk_start_locked(ifp)
BPF_MTAP(ifp, m_head);
}
- /* Transmit */
- if (idx != sc_if->sk_cdata.sk_tx_prod) {
- sc_if->sk_cdata.sk_tx_prod = idx;
+ if (enq > 0) {
+ /* Transmit */
CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
/* Set a timeout in case the chip goes out to lunch. */
ifp->if_timer = 5;
}
-
- return;
}
@@ -2133,8 +2801,9 @@ sk_watchdog(ifp)
sc_if = ifp->if_softc;
- printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
SK_IF_LOCK(sc_if);
+ if_printf(sc_if->sk_ifp, "watchdog timeout\n");
+ ifp->if_oerrors++;
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
sk_init_locked(sc_if);
SK_IF_UNLOCK(sc_if);
@@ -2164,6 +2833,152 @@ skc_shutdown(dev)
return;
}
+static int
+skc_suspend(dev)
+ device_t dev;
+{
+ struct sk_softc *sc;
+ struct sk_if_softc *sc_if0, *sc_if1;
+ struct ifnet *ifp0 = NULL, *ifp1 = NULL;
+
+ sc = device_get_softc(dev);
+
+ SK_LOCK(sc);
+
+ sc_if0 = sc->sk_if[SK_PORT_A];
+ sc_if1 = sc->sk_if[SK_PORT_B];
+ if (sc_if0 != NULL)
+ ifp0 = sc_if0->sk_ifp;
+ if (sc_if1 != NULL)
+ ifp1 = sc_if1->sk_ifp;
+ if (ifp0 != NULL)
+ sk_stop(sc_if0);
+ if (ifp1 != NULL)
+ sk_stop(sc_if1);
+ sc->sk_suspended = 1;
+
+ SK_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+skc_resume(dev)
+ device_t dev;
+{
+ struct sk_softc *sc;
+ struct sk_if_softc *sc_if0, *sc_if1;
+ struct ifnet *ifp0 = NULL, *ifp1 = NULL;
+
+ sc = device_get_softc(dev);
+
+ SK_LOCK(sc);
+
+ sc_if0 = sc->sk_if[SK_PORT_A];
+ sc_if1 = sc->sk_if[SK_PORT_B];
+ if (sc_if0 != NULL)
+ ifp0 = sc_if0->sk_ifp;
+ if (sc_if1 != NULL)
+ ifp1 = sc_if1->sk_ifp;
+ if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
+ sk_init_locked(sc_if0);
+ if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
+ sk_init_locked(sc_if1);
+ sc->sk_suspended = 0;
+
+ SK_UNLOCK(sc);
+
+ return (0);
+}
+
+/*
+ * According to the SK-NET GENESIS data sheet the hardware can compute
+ * two Rx checksums at the same time (each checksum start position is
+ * programmed in the Rx descriptors). However, it seems that TCP/UDP
+ * checksumming does not work, at least on my Yukon hardware. I tried
+ * every possible way to get a correct checksum value but couldn't get
+ * one, so TCP/UDP checksum offload is disabled for the moment and only
+ * IP checksum offload is enabled.
+ * As the normal IP header size is 20 bytes I don't expect this to give
+ * a measurable increase in throughput, but it doesn't seem to hurt
+ * performance in my testing either. If there is more detailed
+ * information on the checksum secret of the hardware in question,
+ * please contact yongari@FreeBSD.org to add TCP/UDP checksum offload
+ * support.
+ */
+static __inline void
+sk_rxcksum(ifp, m, csum)
+ struct ifnet *ifp;
+ struct mbuf *m;
+ u_int32_t csum;
+{
+ struct ether_header *eh;
+ struct ip *ip;
+ int32_t hlen, len, pktlen;
+ u_int16_t csum1, csum2, ipcsum;
+
+ pktlen = m->m_pkthdr.len;
+ if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
+ return;
+ eh = mtod(m, struct ether_header *);
+ if (eh->ether_type != htons(ETHERTYPE_IP))
+ return;
+ ip = (struct ip *)(eh + 1);
+ if (ip->ip_v != IPVERSION)
+ return;
+ hlen = ip->ip_hl << 2;
+ pktlen -= sizeof(struct ether_header);
+ if (hlen < sizeof(struct ip))
+ return;
+ if (ntohs(ip->ip_len) < hlen)
+ return;
+ if (ntohs(ip->ip_len) != pktlen)
+ return;
+
+ csum1 = htons(csum & 0xffff);
+ csum2 = htons((csum >> 16) & 0xffff);
+ ipcsum = in_addword(csum1, ~csum2 & 0xffff);
+ /* checksum fixup for IP options */
+ len = hlen - sizeof(struct ip);
+ if (len > 0) {
+ /*
+ * If the second checksum value were correct we could compute the
+ * IP checksum with simple math when IP options are present.
+ * Unfortunately the second checksum value comes out wrong, so we
+ * can't verify the checksum from it (it seems some magic is needed
+ * to get a correct value). A correct second checksum would also
+ * give us the TCP/UDP checksum here, though it would still need
+ * the pseudo header checksum added in software due to hardware
+ * limitations.
+ */
+ return;
+ }
+ m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+ if (ipcsum == 0xffff)
+ m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+}
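The arithmetic above leans on ones-complement algebra: if the hardware's first checksum covers the IP header plus payload and the second covers the payload alone (which is what the early return for IP options suggests the start positions are programmed for), then adding the complement of the second sum to the first subtracts the payload, leaving just the header sum, which is 0xffff for a valid header. A user-space sketch of that identity, under those assumptions and ignoring the driver's byte-order fixups (in_addword() in the kernel performs the same end-around-carry add):

#include <stdint.h>
#include <stdio.h>

static uint16_t
add1c(uint16_t a, uint16_t b)		/* ones-complement addition */
{
	uint32_t s = (uint32_t)a + b;

	return ((s & 0xffff) + (s >> 16));
}

static uint16_t
cksum(const uint16_t *p, int nwords)	/* RFC 1071 style sum */
{
	uint16_t s = 0;

	while (nwords-- > 0)
		s = add1c(s, *p++);
	return (s);
}

int
main(void)
{
	/* Made-up 20-byte IPv4 header (10 words) plus 4 payload words. */
	uint16_t pkt[14] = { 0x4500, 0x001c, 0x1234, 0x4000, 0x4011,
	    0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x0002,
	    0xdead, 0xbeef, 0xcafe, 0xf00d };
	uint16_t csum1, csum2;

	pkt[5] = ~cksum(pkt, 10);	/* fill in a valid header checksum */

	csum1 = cksum(pkt, 14);		/* header + payload (hw csum1) */
	csum2 = cksum(pkt + 10, 4);	/* payload only (hw csum2) */
	/* Adding ~csum2 subtracts the payload, leaving the header sum. */
	printf("header sum = 0x%04x (0xffff means valid)\n",
	    add1c(csum1, (uint16_t)~csum2));
	return (0);
}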
+
+static __inline int
+sk_rxvalid(sc, stat, len)
+ struct sk_softc *sc;
+ u_int32_t stat, len;
+{
+
+ if (sc->sk_type == SK_GENESIS) {
+ if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
+ XM_RXSTAT_BYTES(stat) != len)
+ return (0);
+ } else {
+ if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
+ YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
+ YU_RXSTAT_JABBER)) != 0 ||
+ (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
+ YU_RXSTAT_BYTES(stat) != len)
+ return (0);
+ }
+
+ return (1);
+}
+
static void
sk_rxeof(sc_if)
struct sk_if_softc *sc_if;
@@ -2171,67 +2986,132 @@ sk_rxeof(sc_if)
struct sk_softc *sc;
struct mbuf *m;
struct ifnet *ifp;
- struct sk_chain *cur_rx;
- int total_len = 0;
- int i;
- u_int32_t rxstat;
+ struct sk_rx_desc *cur_rx;
+ struct sk_rxdesc *rxd;
+ int cons, prog;
+ u_int32_t csum, rxstat, sk_ctl;
sc = sc_if->sk_softc;
ifp = sc_if->sk_ifp;
- i = sc_if->sk_cdata.sk_rx_prod;
- cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
-
- SK_LOCK_ASSERT(sc);
- while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
+ SK_IF_LOCK_ASSERT(sc_if);
- cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
- rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
- m = cur_rx->sk_mbuf;
- cur_rx->sk_mbuf = NULL;
- total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
- SK_INC(i, SK_RX_RING_CNT);
+ bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
+ sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
- if (rxstat & XM_RXSTAT_ERRFRAME) {
+ prog = 0;
+ for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
+ prog++, SK_INC(cons, SK_RX_RING_CNT)) {
+ cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
+ sk_ctl = le32toh(cur_rx->sk_ctl);
+ if ((sk_ctl & SK_RXCTL_OWN) != 0)
+ break;
+ rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
+ rxstat = le32toh(cur_rx->sk_xmac_rxstat);
+
+ if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
+ SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
+ SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
+ SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
+ SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
+ sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
ifp->if_ierrors++;
- sk_newbuf(sc_if, cur_rx, m);
+ sk_discard_rxbuf(sc_if, cons);
continue;
}
- /*
- * Try to allocate a new jumbo buffer. If that
- * fails, copy the packet to mbufs and put the
- * jumbo buffer back in the ring so it can be
- * re-used. If allocating mbufs fails, then we
- * have to drop the packet.
- */
- if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
- struct mbuf *m0;
- m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
- ifp, NULL);
- sk_newbuf(sc_if, cur_rx, m);
- if (m0 == NULL) {
- printf("sk%d: no receive buffers "
- "available -- packet dropped!\n",
- sc_if->sk_unit);
- ifp->if_ierrors++;
- continue;
- }
- m = m0;
- } else {
- m->m_pkthdr.rcvif = ifp;
- m->m_pkthdr.len = m->m_len = total_len;
+ m = rxd->rx_m;
+ csum = le32toh(cur_rx->sk_csum);
+ if (sk_newbuf(sc_if, cons) != 0) {
+ ifp->if_iqdrops++;
+ /* reuse old buffer */
+ sk_discard_rxbuf(sc_if, cons);
+ continue;
}
-
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
ifp->if_ipackets++;
- SK_UNLOCK(sc);
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ sk_rxcksum(ifp, m, csum);
+ SK_IF_UNLOCK(sc_if);
(*ifp->if_input)(ifp, m);
- SK_LOCK(sc);
+ SK_IF_LOCK(sc_if);
}
- sc_if->sk_cdata.sk_rx_prod = i;
+ if (prog > 0) {
+ sc_if->sk_cdata.sk_rx_cons = cons;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
+ sc_if->sk_cdata.sk_rx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
+}
- return;
+static void
+sk_jumbo_rxeof(sc_if)
+ struct sk_if_softc *sc_if;
+{
+ struct sk_softc *sc;
+ struct mbuf *m;
+ struct ifnet *ifp;
+ struct sk_rx_desc *cur_rx;
+ struct sk_rxdesc *jrxd;
+ int cons, prog;
+ u_int32_t csum, rxstat, sk_ctl;
+
+ sc = sc_if->sk_softc;
+ ifp = sc_if->sk_ifp;
+
+ SK_IF_LOCK_ASSERT(sc_if);
+
+ bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
+
+ prog = 0;
+ for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
+ prog < SK_JUMBO_RX_RING_CNT;
+ prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
+ cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
+ sk_ctl = le32toh(cur_rx->sk_ctl);
+ if ((sk_ctl & SK_RXCTL_OWN) != 0)
+ break;
+ jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
+ rxstat = le32toh(cur_rx->sk_xmac_rxstat);
+
+ if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
+ SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
+ SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
+ SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
+ SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
+ sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
+ ifp->if_ierrors++;
+ sk_discard_jumbo_rxbuf(sc_if, cons);
+ continue;
+ }
+
+ m = jrxd->rx_m;
+ csum = le32toh(cur_rx->sk_csum);
+ if (sk_jumbo_newbuf(sc_if, cons) != 0) {
+ ifp->if_iqdrops++;
+ /* reuse old buffer */
+ sk_discard_jumbo_rxbuf(sc_if, cons);
+ continue;
+ }
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
+ ifp->if_ipackets++;
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ sk_rxcksum(ifp, m, csum);
+ SK_IF_UNLOCK(sc_if);
+ (*ifp->if_input)(ifp, m);
+ SK_IF_LOCK(sc_if);
+ }
+
+ if (prog > 0) {
+ sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
}
static void
@@ -2239,41 +3119,51 @@ sk_txeof(sc_if)
struct sk_if_softc *sc_if;
{
struct sk_softc *sc;
+ struct sk_txdesc *txd;
struct sk_tx_desc *cur_tx;
struct ifnet *ifp;
- u_int32_t idx;
+ u_int32_t idx, sk_ctl;
sc = sc_if->sk_softc;
ifp = sc_if->sk_ifp;
+ txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
+ if (txd == NULL)
+ return;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
+ sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
/*
* Go through our tx ring and free mbufs for those
* frames that have been sent.
*/
- idx = sc_if->sk_cdata.sk_tx_cons;
- while(idx != sc_if->sk_cdata.sk_tx_prod) {
- cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
- if (cur_tx->sk_ctl & SK_TXCTL_OWN)
+ for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
+ if (sc_if->sk_cdata.sk_tx_cnt <= 0)
+ break;
+ cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
+ sk_ctl = le32toh(cur_tx->sk_ctl);
+ if (sk_ctl & SK_TXCTL_OWN)
break;
- if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
- ifp->if_opackets++;
- if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
- m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
- sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
- }
sc_if->sk_cdata.sk_tx_cnt--;
- SK_INC(idx, SK_TX_RING_CNT);
- }
-
- if (sc_if->sk_cdata.sk_tx_cnt == 0) {
- ifp->if_timer = 0;
- } else /* nudge chip to keep tx ring moving */
- CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
-
- if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
+ if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
+ continue;
+ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
+
+ ifp->if_opackets++;
+ m_freem(txd->tx_m);
+ txd->tx_m = NULL;
+ STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
+ STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
+ txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
+ }
sc_if->sk_cdata.sk_tx_cons = idx;
+ ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
+
+ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
+ sc_if->sk_cdata.sk_tx_ring_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
@@ -2286,18 +3176,14 @@ sk_tick(xsc_if)
int i;
sc_if = xsc_if;
- SK_IF_LOCK(sc_if);
ifp = sc_if->sk_ifp;
mii = device_get_softc(sc_if->sk_miibus);
- if (!(ifp->if_flags & IFF_UP)) {
- SK_IF_UNLOCK(sc_if);
+ if (!(ifp->if_flags & IFF_UP))
return;
- }
if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
sk_intr_bcom(sc_if);
- SK_IF_UNLOCK(sc_if);
return;
}
@@ -2314,8 +3200,7 @@ sk_tick(xsc_if)
}
if (i != 3) {
- sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
- SK_IF_UNLOCK(sc_if);
+ callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
return;
}
@@ -2323,10 +3208,21 @@ sk_tick(xsc_if)
SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
SK_XM_READ_2(sc_if, XM_ISR);
mii_tick(mii);
- untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
+ callout_stop(&sc_if->sk_tick_ch);
+}
- SK_IF_UNLOCK(sc_if);
- return;
+static void
+sk_yukon_tick(xsc_if)
+ void *xsc_if;
+{
+ struct sk_if_softc *sc_if;
+ struct mii_data *mii;
+
+ sc_if = xsc_if;
+ mii = device_get_softc(sc_if->sk_miibus);
+
+ mii_tick(mii);
+ callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
}
static void
@@ -2374,7 +3270,7 @@ sk_intr_bcom(sc_if)
SK_LINKLED_BLINK_OFF);
} else {
mii_tick(mii);
- sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
+ callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
}
}
@@ -2400,11 +3296,11 @@ sk_intr_xmac(sc_if)
if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
if (status & XM_ISR_GP0_SET) {
SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
- sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
+ callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
}
if (status & XM_ISR_AUTONEG_DONE) {
- sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
+ callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
}
}
@@ -2423,11 +3319,19 @@ static void
sk_intr_yukon(sc_if)
struct sk_if_softc *sc_if;
{
- int status;
-
- status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
+ u_int8_t status;
- return;
+ status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
+ /* RX overrun */
+ if ((status & SK_GMAC_INT_RX_OVER) != 0) {
+ SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
+ SK_RFCTL_RX_FIFO_OVER);
+ }
+ /* TX underrun */
+ if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
+ SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
+ SK_TFCTL_TX_FIFO_UNDER);
+ }
}
static void
@@ -2435,12 +3339,16 @@ sk_intr(xsc)
void *xsc;
{
struct sk_softc *sc = xsc;
- struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
+ struct sk_if_softc *sc_if0, *sc_if1;
struct ifnet *ifp0 = NULL, *ifp1 = NULL;
u_int32_t status;
SK_LOCK(sc);
+ status = CSR_READ_4(sc, SK_ISSR);
+ if (status == 0 || status == 0xffffffff || sc->sk_suspended)
+ goto done_locked;
+
sc_if0 = sc->sk_if[SK_PORT_A];
sc_if1 = sc->sk_if[SK_PORT_B];
@@ -2449,19 +3357,22 @@ sk_intr(xsc)
if (sc_if1 != NULL)
ifp1 = sc_if1->sk_ifp;
- for (;;) {
- status = CSR_READ_4(sc, SK_ISSR);
- if (!(status & sc->sk_intrmask))
- break;
-
+ status &= sc->sk_intrmask;
+ if ((status & sc->sk_intrmask) != 0) {
/* Handle receive interrupts first. */
if (status & SK_ISR_RX1_EOF) {
- sk_rxeof(sc_if0);
+ if (ifp0->if_mtu > SK_MAX_FRAMELEN)
+ sk_jumbo_rxeof(sc_if0);
+ else
+ sk_rxeof(sc_if0);
CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
}
if (status & SK_ISR_RX2_EOF) {
- sk_rxeof(sc_if1);
+ if (ifp1->if_mtu > SK_MAX_FRAMELEN)
+ sk_jumbo_rxeof(sc_if1);
+ else
+ sk_rxeof(sc_if1);
CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
}
@@ -2469,13 +3380,11 @@ sk_intr(xsc)
/* Then transmit interrupts. */
if (status & SK_ISR_TX1_S_EOF) {
sk_txeof(sc_if0);
- CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
- SK_TXBMU_CLR_IRQ_EOF);
+ CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
}
if (status & SK_ISR_TX2_S_EOF) {
sk_txeof(sc_if1);
- CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
- SK_TXBMU_CLR_IRQ_EOF);
+ CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
}
/* Then MAC interrupts. */
@@ -2512,9 +3421,8 @@ sk_intr(xsc)
if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
sk_start_locked(ifp1);
+done_locked:
SK_UNLOCK(sc);
-
- return;
}
static void
@@ -2523,12 +3431,15 @@ sk_init_xmac(sc_if)
{
struct sk_softc *sc;
struct ifnet *ifp;
+ u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
struct sk_bcom_hack bhack[] = {
{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
{ 0, 0 } };
+ SK_IF_LOCK_ASSERT(sc_if);
+
sc = sc_if->sk_softc;
ifp = sc_if->sk_ifp;
@@ -2585,12 +3496,10 @@ sk_init_xmac(sc_if)
}
/* Set station address */
- SK_XM_WRITE_2(sc_if, XM_PAR0,
- *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[0]));
- SK_XM_WRITE_2(sc_if, XM_PAR1,
- *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[2]));
- SK_XM_WRITE_2(sc_if, XM_PAR2,
- *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[4]));
+ bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
+ SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
+ SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
+ SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
if (ifp->if_flags & IFF_BROADCAST) {
@@ -2619,13 +3528,12 @@ sk_init_xmac(sc_if)
* case the XMAC will start transferring frames out of the
* RX FIFO as soon as the FIFO threshold is reached.
*/
- SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
- XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
- XM_MODE_RX_INRANGELEN);
-
- if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
+ if (ifp->if_mtu > SK_MAX_FRAMELEN) {
+ SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
+ XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
+ XM_MODE_RX_INRANGELEN);
SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
- else
+ } else
SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
/*
@@ -2686,29 +3594,44 @@ static void
sk_init_yukon(sc_if)
struct sk_if_softc *sc_if;
{
- u_int32_t phy;
+ u_int32_t phy, v;
u_int16_t reg;
struct sk_softc *sc;
struct ifnet *ifp;
int i;
+ SK_IF_LOCK_ASSERT(sc_if);
+
sc = sc_if->sk_softc;
ifp = sc_if->sk_ifp;
if (sc->sk_type == SK_YUKON_LITE &&
sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
- /* Take PHY out of reset. */
- sk_win_write_4(sc, SK_GPIO,
- (sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
+ /*
+ * Workaround code for COMA mode: assert PHY reset.
+ * Otherwise the chip will not correctly come out of
+ * powerdown (coma).
+ */
+ v = sk_win_read_4(sc, SK_GPIO);
+ v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
+ sk_win_write_4(sc, SK_GPIO, v);
}
/* GMAC and GPHY Reset */
SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
DELAY(1000);
- SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
- SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
- DELAY(1000);
+
+ if (sc->sk_type == SK_YUKON_LITE &&
+ sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
+ /*
+ * Workaround code for COMA mode: clear PHY reset.
+ */
+ v = sk_win_read_4(sc, SK_GPIO);
+ v |= SK_GPIO_DIR9;
+ v &= ~SK_GPIO_DAT9;
+ sk_win_write_4(sc, SK_GPIO, v);
+ }
phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
@@ -2753,7 +3676,7 @@ sk_init_yukon(sc_if)
/* serial mode register */
reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
- if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
+ if (ifp->if_mtu > SK_MAX_FRAMELEN)
reg |= YU_SMR_MFL_JUMBO;
SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
@@ -2782,13 +3705,28 @@ sk_init_yukon(sc_if)
SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
+ /* Configure RX MAC FIFO Flush Mask */
+ v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
+ YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
+ YU_RXSTAT_JABBER;
+ SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
+
+ /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
+ if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
+ v = SK_RFCTL_OPERATION_ON;
+ else
+ v = SK_RFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
/* Configure RX MAC FIFO */
SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
- SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
+ SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
+
+ /* Increase flush threshold to 64 bytes */
+ SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
+ SK_RFCTL_FIFO_THRESHOLD + 1);
/* Configure TX MAC FIFO */
SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
- SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
+ SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
/*
@@ -2817,6 +3755,7 @@ sk_init_locked(sc_if)
struct mii_data *mii;
u_int16_t reg;
u_int32_t imr;
+ int error;
SK_IF_LOCK_ASSERT(sc_if);
@@ -2845,6 +3784,25 @@ sk_init_locked(sc_if)
SK_TXLEDCTL_COUNTER_START);
}
+ /*
+ * Configure descriptor poll timer
+ *
+ * The SK-NET GENESIS data sheet notes the possibility of losing the
+ * Start transmit command due to CPU/cache related interim storage
+ * problems under certain conditions. The document recommends a
+ * polling mechanism that sends a Start transmit command regularly to
+ * initiate transfer of ready descriptors. To cope with this issue
+ * sk(4) now enables the descriptor poll timer to initiate descriptor
+ * processing periodically, as defined by SK_DPT_TIMER_MAX. However,
+ * sk(4) still issues SK_TXBMU_TX_START to the Tx BMU to get fast
+ * execution of Tx commands instead of waiting for the next descriptor
+ * polling time.
+ * The same rule may apply to the Rx side too, but it seems that is
+ * not needed at the moment.
+ * Since sk(4) uses descriptor polling only as a last resort, there is
+ * no need to set a polling time smaller than the maximum allowable
+ * one.
+ */
+ SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
+
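For reference, the period that SK_DPT_TIMER_MAX buys works out as the comment in if_skreg.h states: the timer counts down a 24-bit initial value at the 78.125 MHz host clock. A throwaway sketch of the arithmetic, assuming that clock:

#include <stdio.h>

int
main(void)
{
	const double clk_hz = 78.125e6;		/* host clock */
	const unsigned init = 0xffffff;		/* SK_DPT_TIMER_MAX, 24 bits */

	/* 16777215 / 78.125e6 s = ~214.75 ms between forced polls. */
	printf("poll period = %.2f ms\n", init / clk_hz * 1e3);
	return (0);
}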
/* Configure I2C registers */
/* Configure XMAC(s) */
@@ -2893,35 +3851,47 @@ sk_init_locked(sc_if)
/* Configure BMUs */
SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
- SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
- vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
- SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
+ if (ifp->if_mtu > SK_MAX_FRAMELEN) {
+ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
+ SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
+ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
+ SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
+ } else {
+ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
+ SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
+ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
+ SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
+ }
SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
- vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
- SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
+ SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
+ SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
+ SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
/* Init descriptors */
- if (sk_init_rx_ring(sc_if) == ENOBUFS) {
- printf("sk%d: initialization failed: no "
- "memory for rx buffers\n", sc_if->sk_unit);
+ if (ifp->if_mtu > SK_MAX_FRAMELEN)
+ error = sk_init_jumbo_rx_ring(sc_if);
+ else
+ error = sk_init_rx_ring(sc_if);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "initialization failed: no memory for rx buffers\n");
sk_stop(sc_if);
return;
}
sk_init_tx_ring(sc_if);
/* Set interrupt moderation if changed via sysctl. */
- /* SK_LOCK(sc); */
imr = sk_win_read_4(sc, SK_IMTIMERINIT);
if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
sc->sk_int_ticks));
if (bootverbose)
- printf("skc%d: interrupt moderation is %d us\n",
- sc->sk_unit, sc->sk_int_mod);
+ device_printf(sc_if->sk_if_dev,
+ "interrupt moderation is %d us.\n",
+ sc->sk_int_mod);
}
- /* SK_UNLOCK(sc); */
/* Configure interrupt handling */
CSR_READ_4(sc, SK_ISSR);
@@ -2948,13 +3918,29 @@ sk_init_locked(sc_if)
case SK_YUKON_LP:
reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
- reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
+#if 0
+ /* XXX disable 100Mbps and full duplex mode? */
+ reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
+#endif
SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
}
+ /* Activate descriptor polling timer */
+ SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
+ /* start transfer of Tx descriptors */
+ CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
+
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ switch (sc->sk_type) {
+ case SK_YUKON:
+ case SK_YUKON_LITE:
+ case SK_YUKON_LP:
+ callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
+ break;
+ }
+
return;
}
@@ -2964,17 +3950,44 @@ sk_stop(sc_if)
{
int i;
struct sk_softc *sc;
+ struct sk_txdesc *txd;
+ struct sk_rxdesc *rxd;
+ struct sk_rxdesc *jrxd;
struct ifnet *ifp;
+ u_int32_t val;
SK_IF_LOCK_ASSERT(sc_if);
sc = sc_if->sk_softc;
ifp = sc_if->sk_ifp;
- untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
+ callout_stop(&sc_if->sk_tick_ch);
- if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
- u_int32_t val;
+ /* stop Tx descriptor polling timer */
+ SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
+ /* stop transfer of Tx descriptors */
+ CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
+ for (i = 0; i < SK_TIMEOUT; i++) {
+ val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
+ if ((val & SK_TXBMU_TX_STOP) == 0)
+ break;
+ DELAY(1);
+ }
+ if (i == SK_TIMEOUT)
+ device_printf(sc_if->sk_if_dev,
+ "can not stop transfer of Tx descriptor\n");
+ /* stop transfer of Rx descriptors */
+ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
+ for (i = 0; i < SK_TIMEOUT; i++) {
+ val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
+ if ((val & SK_RXBMU_RX_STOP) == 0)
+ break;
+ DELAY(1);
+ }
+ if (i == SK_TIMEOUT)
+ device_printf(sc_if->sk_if_dev,
+ "can not stop transfer of Rx descriptor\n");
+ if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
/* Put PHY back into reset. */
val = sk_win_read_4(sc, SK_GPIO);
if (sc_if->sk_port == SK_PORT_A) {
@@ -3023,16 +4036,36 @@ sk_stop(sc_if)
/* Free RX and TX mbufs still in the queues. */
for (i = 0; i < SK_RX_RING_CNT; i++) {
- if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
- m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
- sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
+ rxd = &sc_if->sk_cdata.sk_rxdesc[i];
+ if (rxd->rx_m != NULL) {
+ bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
+ rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
+ rxd->rx_dmamap);
+ m_freem(rxd->rx_m);
+ rxd->rx_m = NULL;
+ }
+ }
+ for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
+ jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
+ if (jrxd->rx_m != NULL) {
+ bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
+ jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
+ jrxd->rx_dmamap);
+ m_freem(jrxd->rx_m);
+ jrxd->rx_m = NULL;
}
}
-
for (i = 0; i < SK_TX_RING_CNT; i++) {
- if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
- m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
- sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
+ txd = &sc_if->sk_cdata.sk_txdesc[i];
+ if (txd->tx_m != NULL) {
+ bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
+ txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
+ txd->tx_dmamap);
+ m_freem(txd->tx_m);
+ txd->tx_m = NULL;
}
}
diff --git a/sys/dev/sk/if_skreg.h b/sys/dev/sk/if_skreg.h
index a7f045a..8b19523 100644
--- a/sys/dev/sk/if_skreg.h
+++ b/sys/dev/sk/if_skreg.h
@@ -793,13 +793,13 @@
(SK_TXBMU_TRANSFER_SM_UNRESET|SK_TXBMU_DESCWR_SM_UNRESET| \
SK_TXBMU_DESCRD_SM_UNRESET|SK_TXBMU_SUPERVISOR_SM_UNRESET| \
SK_TXBMU_PFI_SM_UNRESET|SK_TXBMU_FIFO_UNRESET| \
- SK_TXBMU_DESC_UNRESET)
+ SK_TXBMU_DESC_UNRESET|SK_TXBMU_POLL_ON)
#define SK_TXBMU_OFFLINE \
(SK_TXBMU_TRANSFER_SM_RESET|SK_TXBMU_DESCWR_SM_RESET| \
SK_TXBMU_DESCRD_SM_RESET|SK_TXBMU_SUPERVISOR_SM_RESET| \
SK_TXBMU_PFI_SM_RESET|SK_TXBMU_FIFO_RESET| \
- SK_TXBMU_DESC_RESET)
+ SK_TXBMU_DESC_RESET|SK_TXBMU_POLL_OFF)
/* Block 16 -- Receive RAMbuffer 1 */
#define SK_RXRB1_START 0x0800
@@ -894,24 +894,31 @@
#define SK_RXMF1_END 0x0C40
#define SK_RXMF1_THRESHOLD 0x0C44
#define SK_RXMF1_CTRL_TEST 0x0C48
+#define SK_RXMF1_FLUSH_MASK 0x0C4C
+#define SK_RXMF1_FLUSH_THRESHOLD 0x0C50
#define SK_RXMF1_WRITE_PTR 0x0C60
#define SK_RXMF1_WRITE_LEVEL 0x0C68
#define SK_RXMF1_READ_PTR 0x0C70
#define SK_RXMF1_READ_LEVEL 0x0C78
+/* Receive MAC FIFO 1 Control/Test */
#define SK_RFCTL_WR_PTR_TST_ON 0x00004000 /* Write pointer test on*/
#define SK_RFCTL_WR_PTR_TST_OFF 0x00002000 /* Write pointer test off */
#define SK_RFCTL_WR_PTR_STEP 0x00001000 /* Write pointer increment */
#define SK_RFCTL_RD_PTR_TST_ON 0x00000400 /* Read pointer test on */
#define SK_RFCTL_RD_PTR_TST_OFF 0x00000200 /* Read pointer test off */
#define SK_RFCTL_RD_PTR_STEP 0x00000100 /* Read pointer increment */
-#define SK_RFCTL_RX_FIFO_OVER 0x00000040 /* Clear IRQ RX FIFO Overrun */
+#define SK_RFCTL_FIFO_FLUSH_OFF 0x00000080 /* RX FIFO Flush mode off */
+#define SK_RFCTL_FIFO_FLUSH_ON 0x00000040 /* RX FIFO Flush mode on */
+#define SK_RFCTL_RX_FIFO_OVER 0x00000020 /* Clear IRQ RX FIFO Overrun */
#define SK_RFCTL_FRAME_RX_DONE 0x00000010 /* Clear IRQ Frame RX Done */
#define SK_RFCTL_OPERATION_ON 0x00000008 /* Operational mode on */
#define SK_RFCTL_OPERATION_OFF 0x00000004 /* Operational mode off */
#define SK_RFCTL_RESET_CLEAR 0x00000002 /* MAC FIFO Reset Clear */
#define SK_RFCTL_RESET_SET 0x00000001 /* MAC FIFO Reset Set */
+#define SK_RFCTL_FIFO_THRESHOLD 0x0a /* flush threshold (default) */
+
/* Block 25 -- RX MAC FIFO 2 registers and LINK_SYNC counter */
#define SK_RXF2_END 0x0C80
#define SK_RXF2_WPTR 0x0C84
@@ -971,7 +978,7 @@
#define SK_TXLED1_CTL 0x0D28
#define SK_TXLED1_TST 0x0D29
-/* Receive MAC FIFO 1 (Yukon Only) */
+/* Transmit MAC FIFO 1 (Yukon Only) */
#define SK_TXMF1_END 0x0D40
#define SK_TXMF1_THRESHOLD 0x0D44
#define SK_TXMF1_CTRL_TEST 0x0D48
@@ -982,6 +989,7 @@
#define SK_TXMF1_RESTART_PTR 0x0D74
#define SK_TXMF1_READ_LEVEL 0x0D78
+/* Transmit MAC FIFO Control/Test */
#define SK_TFCTL_WR_PTR_TST_ON 0x00004000 /* Write pointer test on*/
#define SK_TFCTL_WR_PTR_TST_OFF 0x00002000 /* Write pointer test off */
#define SK_TFCTL_WR_PTR_STEP 0x00001000 /* Write pointer increment */
@@ -1039,6 +1047,8 @@
#define SK_DPT_INIT 0x0e00 /* Initial value 24 bits */
#define SK_DPT_TIMER 0x0e04 /* Mul of 78.12MHz clk (24b) */
+#define SK_DPT_TIMER_MAX 0x00ffffff /* 214.75ms at 78.125MHz */
+
#define SK_DPT_TIMER_CTRL 0x0e08 /* Timer Control 16 bits */
#define SK_DPT_TCTL_STOP 0x0001 /* Stop Timer */
#define SK_DPT_TCTL_START 0x0002 /* Start Timer */
@@ -1054,7 +1064,7 @@
#define SK_GMAC_CTRL 0x0f00 /* GMAC Control Register */
#define SK_GPHY_CTRL 0x0f04 /* GPHY Control Register */
#define SK_GMAC_ISR 0x0f08 /* GMAC Interrupt Source Register */
-#define SK_GMAC_IMR 0x0f08 /* GMAC Interrupt Mask Register */
+#define SK_GMAC_IMR 0x0f0c /* GMAC Interrupt Mask Register */
#define SK_LINK_CTRL 0x0f10 /* Link Control Register (LCR) */
#define SK_WOL_CTRL 0x0f20 /* Wake on LAN Control Register */
#define SK_MAC_ADDR_LOW 0x0f24 /* MAC Address Registers LOW */
@@ -1310,6 +1320,11 @@ struct sk_type {
char *sk_name;
};
+#define SK_ADDR_LO(x) ((u_int64_t) (x) & 0xffffffff)
+#define SK_ADDR_HI(x) ((u_int64_t) (x) >> 32)
+
+#define SK_RING_ALIGN 64
+
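SK_ADDR_LO()/SK_ADDR_HI() split a 64-bit bus address into the two 32-bit halves that the descriptors' sk_data_lo/sk_data_hi fields and the BMU CURADDR registers take. A tiny stand-alone illustration, with a made-up address:

#include <stdint.h>
#include <stdio.h>

#define SK_ADDR_LO(x)	((uint64_t)(x) & 0xffffffff)
#define SK_ADDR_HI(x)	((uint64_t)(x) >> 32)

int
main(void)
{
	uint64_t paddr = 0x00000001deadbe00ULL;	/* made-up bus address */

	/* A descriptor would store these in sk_data_lo/sk_data_hi. */
	printf("lo = 0x%08x, hi = 0x%08x\n",
	    (uint32_t)SK_ADDR_LO(paddr), (uint32_t)SK_ADDR_HI(paddr));
	return (0);
}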
/* RX queue descriptor data structure */
struct sk_rx_desc {
u_int32_t sk_ctl;
@@ -1318,10 +1333,8 @@ struct sk_rx_desc {
u_int32_t sk_data_hi;
u_int32_t sk_xmac_rxstat;
u_int32_t sk_timestamp;
- u_int16_t sk_csum2;
- u_int16_t sk_csum1;
- u_int16_t sk_csum2_start;
- u_int16_t sk_csum1_start;
+ u_int32_t sk_csum;
+ u_int32_t sk_csum_start;
};
#define SK_OPCODE_DEFAULT 0x00550000
@@ -1339,8 +1352,7 @@ struct sk_rx_desc {
#define SK_RXCTL_OWN 0x80000000
#define SK_RXSTAT \
- (SK_OPCODE_DEFAULT|SK_RXCTL_EOF_INTR|SK_RXCTL_LASTFRAG| \
- SK_RXCTL_FIRSTFRAG|SK_RXCTL_OWN)
+ (SK_RXCTL_EOF_INTR|SK_RXCTL_LASTFRAG|SK_RXCTL_FIRSTFRAG|SK_RXCTL_OWN)
struct sk_tx_desc {
u_int32_t sk_ctl;
@@ -1348,10 +1360,8 @@ struct sk_tx_desc {
u_int32_t sk_data_lo;
u_int32_t sk_data_hi;
u_int32_t sk_xmac_txstat;
- u_int16_t sk_rsvd0;
- u_int16_t sk_csum_startval;
- u_int16_t sk_csum_startpos;
- u_int16_t sk_csum_writepos;
+ u_int32_t sk_csum_startval;
+ u_int32_t sk_csum_start;
u_int32_t sk_rsvd1;
};
@@ -1369,11 +1379,14 @@ struct sk_tx_desc {
#define SK_TXSTAT \
(SK_OPCODE_DEFAULT|SK_TXCTL_EOF_INTR|SK_TXCTL_LASTFRAG|SK_TXCTL_OWN)
-#define SK_RXBYTES(x) (x) & 0x0000FFFF;
+#define SK_RXBYTES(x) ((x) & 0x0000FFFF)
#define SK_TXBYTES SK_RXBYTES
#define SK_TX_RING_CNT 512
#define SK_RX_RING_CNT 256
+#define SK_JUMBO_RX_RING_CNT 256
+#define SK_MAXTXSEGS 32
+#define SK_MAXRXSEGS 32
/*
* Jumbo buffer stuff. Note that we must allocate more jumbo
@@ -1385,6 +1398,9 @@ struct sk_tx_desc {
*/
#define SK_JUMBO_FRAMELEN 9018
#define SK_JUMBO_MTU (SK_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define SK_MAX_FRAMELEN \
+ (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN)
+#define SK_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
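With the stock net/ethernet.h values (ETHER_MAX_LEN 1518, ETHER_MIN_LEN 64, ETHER_CRC_LEN 4, ETHER_VLAN_ENCAP_LEN 4) these work out to 1518 and 60 bytes respectively: the VLAN tag allowance exactly offsets the stripped CRC. A quick sketch of the arithmetic under those assumed values:

#include <assert.h>

#define ETHER_MAX_LEN		1518	/* max frame incl. CRC */
#define ETHER_MIN_LEN		64	/* min frame incl. CRC */
#define ETHER_CRC_LEN		4
#define ETHER_VLAN_ENCAP_LEN	4

#define SK_MAX_FRAMELEN \
	(ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN)
#define SK_MIN_FRAMELEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

int
main(void)
{
	assert(SK_MAX_FRAMELEN == 1518);	/* VLAN room offsets CRC */
	assert(SK_MIN_FRAMELEN == 60);		/* runt threshold, sans CRC */
	return (0);
}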
#define SK_JSLOTS ((SK_RX_RING_CNT * 3) / 2)
#define SK_JRAWLEN (SK_JUMBO_FRAMELEN + ETHER_ALIGN)
@@ -1399,32 +1415,73 @@ struct sk_jpool_entry {
SLIST_ENTRY(sk_jpool_entry) jpool_entries;
};
-struct sk_chain {
- void *sk_desc;
- struct mbuf *sk_mbuf;
- struct sk_chain *sk_next;
+struct sk_txdesc {
+ struct mbuf *tx_m;
+ bus_dmamap_t tx_dmamap;
+ STAILQ_ENTRY(sk_txdesc) tx_q;
+};
+
+STAILQ_HEAD(sk_txdq, sk_txdesc);
+
+struct sk_rxdesc {
+ struct mbuf *rx_m;
+ bus_dmamap_t rx_dmamap;
};
struct sk_chain_data {
- struct sk_chain sk_tx_chain[SK_TX_RING_CNT];
- struct sk_chain sk_rx_chain[SK_RX_RING_CNT];
+ bus_dma_tag_t sk_parent_tag;
+ bus_dma_tag_t sk_tx_tag;
+ struct sk_txdesc sk_txdesc[SK_TX_RING_CNT];
+ struct sk_txdq sk_txfreeq;
+ struct sk_txdq sk_txbusyq;
+ bus_dma_tag_t sk_rx_tag;
+ struct sk_rxdesc sk_rxdesc[SK_RX_RING_CNT];
+ bus_dma_tag_t sk_tx_ring_tag;
+ bus_dma_tag_t sk_rx_ring_tag;
+ bus_dmamap_t sk_tx_ring_map;
+ bus_dmamap_t sk_rx_ring_map;
+ bus_dmamap_t sk_rx_sparemap;
+ bus_dma_tag_t sk_jumbo_rx_tag;
+ bus_dma_tag_t sk_jumbo_tag;
+ bus_dmamap_t sk_jumbo_map;
+ bus_dma_tag_t sk_jumbo_mtag;
+ caddr_t sk_jslots[SK_JSLOTS];
+ struct sk_rxdesc sk_jumbo_rxdesc[SK_JUMBO_RX_RING_CNT];
+ bus_dma_tag_t sk_jumbo_rx_ring_tag;
+ bus_dmamap_t sk_jumbo_rx_ring_map;
+ bus_dmamap_t sk_jumbo_rx_sparemap;
int sk_tx_prod;
int sk_tx_cons;
int sk_tx_cnt;
- int sk_rx_prod;
int sk_rx_cons;
- int sk_rx_cnt;
- /* Stick the jumbo mem management stuff here too. */
- caddr_t sk_jslots[SK_JSLOTS];
- void *sk_jumbo_buf;
-
+ int sk_jumbo_rx_cons;
};
struct sk_ring_data {
- struct sk_tx_desc sk_tx_ring[SK_TX_RING_CNT];
- struct sk_rx_desc sk_rx_ring[SK_RX_RING_CNT];
+ struct sk_tx_desc *sk_tx_ring;
+ bus_addr_t sk_tx_ring_paddr;
+ struct sk_rx_desc *sk_rx_ring;
+ bus_addr_t sk_rx_ring_paddr;
+ struct sk_rx_desc *sk_jumbo_rx_ring;
+ bus_addr_t sk_jumbo_rx_ring_paddr;
+ void *sk_jumbo_buf;
+ bus_addr_t sk_jumbo_buf_paddr;
};
+#define SK_TX_RING_ADDR(sc, i) \
+ ((sc)->sk_rdata.sk_tx_ring_paddr + sizeof(struct sk_tx_desc) * (i))
+#define SK_RX_RING_ADDR(sc, i) \
+ ((sc)->sk_rdata.sk_rx_ring_paddr + sizeof(struct sk_rx_desc) * (i))
+#define SK_JUMBO_RX_RING_ADDR(sc, i) \
+ ((sc)->sk_rdata.sk_jumbo_rx_ring_paddr + sizeof(struct sk_rx_desc) * (i))
+
+#define SK_TX_RING_SZ \
+ (sizeof(struct sk_tx_desc) * SK_TX_RING_CNT)
+#define SK_RX_RING_SZ \
+ (sizeof(struct sk_rx_desc) * SK_RX_RING_CNT)
+#define SK_JUMBO_RX_RING_SZ \
+ (sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT)
+
struct sk_bcom_hack {
int reg;
int val;
@@ -1442,7 +1499,7 @@ struct sk_softc {
void *sk_intrhand; /* irq handler handle */
struct resource *sk_irq; /* IRQ resource handle */
struct resource *sk_res; /* I/O or shared mem handle */
- u_int8_t sk_unit; /* controller number */
+ device_t sk_dev;
u_int8_t sk_type;
u_int8_t sk_rev;
u_int8_t spare;
@@ -1455,8 +1512,10 @@ struct sk_softc {
u_int32_t sk_intrmask;
int sk_int_mod;
int sk_int_ticks;
+ int sk_suspended;
struct sk_if_softc *sk_if[2];
device_t sk_devs[2];
+ struct mtx sk_mii_mtx;
struct mtx sk_mtx;
};
@@ -1466,12 +1525,14 @@ struct sk_softc {
#define SK_IF_LOCK(_sc) SK_LOCK((_sc)->sk_softc)
#define SK_IF_UNLOCK(_sc) SK_UNLOCK((_sc)->sk_softc)
#define SK_IF_LOCK_ASSERT(_sc) SK_LOCK_ASSERT((_sc)->sk_softc)
+#define SK_IF_MII_LOCK(_sc) mtx_lock(&(_sc)->sk_softc->sk_mii_mtx)
+#define SK_IF_MII_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_softc->sk_mii_mtx)
/* Softc for each logical interface */
struct sk_if_softc {
struct ifnet *sk_ifp; /* interface info */
device_t sk_miibus;
- u_int8_t sk_unit; /* interface number */
+ device_t sk_if_dev;
u_int8_t sk_port; /* port # on controller */
u_int8_t sk_xmac_rev; /* XMAC chip rev (B2 or C1) */
u_int32_t sk_rx_ramstart;
@@ -1480,12 +1541,10 @@ struct sk_if_softc {
u_int32_t sk_tx_ramend;
int sk_phytype;
int sk_phyaddr;
- device_t sk_dev;
- int sk_cnt;
int sk_link;
- struct callout_handle sk_tick_ch;
+ struct callout sk_tick_ch;
struct sk_chain_data sk_cdata;
- struct sk_ring_data *sk_rdata;
+ struct sk_ring_data sk_rdata;
struct sk_softc *sk_softc; /* parent controller */
int sk_tx_bmu; /* TX BMU register */
int sk_if_flags;
@@ -1497,11 +1556,4 @@ struct sk_if_softc {
#define SK_JLIST_LOCK(_sc) mtx_lock(&(_sc)->sk_jlist_mtx)
#define SK_JLIST_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_jlist_mtx)
-#define SK_MAXUNIT 256
#define SK_TIMEOUT 1000
-#define ETHER_ALIGN 2
-
-#ifdef __alpha__
-#undef vtophys
-#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)va)
-#endif
diff --git a/sys/dev/sk/xmaciireg.h b/sys/dev/sk/xmaciireg.h
index 38cd8cb..604073b 100644
--- a/sys/dev/sk/xmaciireg.h
+++ b/sys/dev/sk/xmaciireg.h
@@ -317,6 +317,9 @@
#define XM_RXSTAT_VLAN_LEV1 0x00010000
#define XM_RXSTAT_VLAN_LEV2 0x00020000
#define XM_RXSTAT_LEN 0xFFFC0000
+#define XM_RXSTAT_LENSHIFT 18
+
+#define XM_RXSTAT_BYTES(x) ((x) >> XM_RXSTAT_LENSHIFT)
/*
* XMAC PHY registers, indirectly accessed through
diff --git a/sys/dev/sk/yukonreg.h b/sys/dev/sk/yukonreg.h
index 51d9f3a..efe71a0 100644
--- a/sys/dev/sk/yukonreg.h
+++ b/sys/dev/sk/yukonreg.h
@@ -22,7 +22,7 @@
#define YU_GPSR_SPEED 0x8000 /* speed 0 - 10Mbps, 1 - 100Mbps */
#define YU_GPSR_DUPLEX 0x4000 /* 0 - half duplex, 1 - full duplex */
-#define YU_GPSR_FCTL_TX 0x2000 /* flow control */
+#define YU_GPSR_FCTL_TX 0x2000 /* Tx flow control, 1 - disabled */
#define YU_GPSR_LINK 0x1000 /* link status (down/up) */
#define YU_GPSR_PAUSE 0x0800 /* flow control enable/disable */
#define YU_GPSR_TX_IN_PROG 0x0400 /* transmit in progress */
@@ -31,25 +31,26 @@
#define YU_GPSR_MII_PHY_STC 0x0020 /* MII PHY status change */
#define YU_GPSR_GIG_SPEED 0x0010 /* Gigabit Speed (0 - use speed bit) */
#define YU_GPSR_PARTITION 0x0008 /* partition mode */
-#define YU_GPSR_FCTL_RX 0x0004 /* flow control enable/disable */
-#define YU_GPSR_PROMS_EN 0x0002 /* promiscuous mode enable/disable */
+#define YU_GPSR_FCTL_RX 0x0004 /* Rx flow control, 1 - disabled */
+#define YU_GPSR_PROMS_EN 0x0002 /* promiscuous mode, 1 - enabled */
/* General Purpose Control Register (GPCR) */
#define YUKON_GPCR 0x0004
-#define YU_GPCR_FCTL_TX 0x2000 /* Transmit flow control 802.3x */
+#define YU_GPCR_FCTL_TX_DIS 0x2000 /* Disable Tx flow control 802.3x */
#define YU_GPCR_TXEN 0x1000 /* Transmit Enable */
#define YU_GPCR_RXEN 0x0800 /* Receive Enable */
-#define YU_GPCR_LPBK 0x0200 /* Loopback Enable */
+#define YU_GPCR_BURSTEN 0x0400 /* Burst Mode Enable */
+#define YU_GPCR_LPBK 0x0200 /* MAC Loopback Enable */
#define YU_GPCR_PAR 0x0100 /* Partition Enable */
-#define YU_GPCR_GIG 0x0080 /* Gigabit Speed */
+#define YU_GPCR_GIG 0x0080 /* Gigabit Speed 1000Mbps */
#define YU_GPCR_FLP 0x0040 /* Force Link Pass */
#define YU_GPCR_DUPLEX 0x0020 /* Duplex Enable */
-#define YU_GPCR_FCTL_RX 0x0010 /* Receive flow control 802.3x */
-#define YU_GPCR_SPEED 0x0008 /* Port Speed */
-#define YU_GPCR_DPLX_EN 0x0004 /* Enable Auto-Update for duplex */
-#define YU_GPCR_FCTL_EN 0x0002 /* Enabel Auto-Update for 802.3x */
-#define YU_GPCR_SPEED_EN 0x0001 /* Enable Auto-Update for speed */
+#define YU_GPCR_FCTL_RX_DIS 0x0010 /* Disable Rx flow control 802.3x */
+#define YU_GPCR_SPEED 0x0008 /* Port Speed 100Mbps */
+#define YU_GPCR_DPLX_DIS 0x0004 /* Disable Auto-Update for duplex */
+#define YU_GPCR_FCTL_DIS 0x0002 /* Disable Auto-Update for 802.3x */
+#define YU_GPCR_SPEED_DIS 0x0001 /* Disable Auto-Update for speed */
/* Transmit Control Register (TCR) */
#define YUKON_TCR 0x0008
@@ -169,3 +170,21 @@
#define YU_PAR_MIB_CLR 0x0020 /* MIB Counters Clear Mode */
#define YU_PAR_LOAD_TSTCNT 0x0010 /* Load count 0xfffffff0 into cntr */
+
+/* Receive status */
+#define YU_RXSTAT_FOFL 0x00000001 /* Rx FIFO overflow */
+#define YU_RXSTAT_CRCERR 0x00000002 /* CRC error */
+#define YU_RXSTAT_FRAGMENT 0x00000008 /* fragment */
+#define YU_RXSTAT_LONGERR 0x00000010 /* too long packet */
+#define YU_RXSTAT_MIIERR 0x00000020 /* MII error */
+#define YU_RXSTAT_BADFC 0x00000040 /* bad flow-control packet */
+#define YU_RXSTAT_GOODFC 0x00000080 /* good flow-control packet */
+#define YU_RXSTAT_RXOK 0x00000100 /* receive OK (Good packet) */
+#define YU_RXSTAT_BROADCAST 0x00000200 /* broadcast packet */
+#define YU_RXSTAT_MULTICAST 0x00000400 /* multicast packet */
+#define YU_RXSTAT_RUNT 0x00000800 /* undersize packet */
+#define YU_RXSTAT_JABBER 0x00001000 /* jabber packet */
+#define YU_RXSTAT_VLAN 0x00002000 /* VLAN packet */
+#define YU_RXSTAT_LENSHIFT 16
+
+#define YU_RXSTAT_BYTES(x) ((x) >> YU_RXSTAT_LENSHIFT)