summaryrefslogtreecommitdiffstats
path: root/sys/dev/sk
diff options
context:
space:
mode:
authoryongari <yongari@FreeBSD.org>2008-02-14 01:10:48 +0000
committeryongari <yongari@FreeBSD.org>2008-02-14 01:10:48 +0000
commit182a7cea0b8a83806ec4790bff5a432131a61e32 (patch)
treef4a20deb5e4da7191067ab0ffe0043fd1e93d603 /sys/dev/sk
parent021108eeb6017989f537321dcd7b581714149497 (diff)
downloadFreeBSD-src-182a7cea0b8a83806ec4790bff5a432131a61e32.zip
FreeBSD-src-182a7cea0b8a83806ec4790bff5a432131a61e32.tar.gz
Nuke the local jumbo allocator and switch to the UMA-backed page allocator for jumbo frames.
o Removed the unneeded jlist lock which was used to manage jumbo buffers.
o Don't reinitialize the hardware if the MTU was not changed.
o Added an additional check for the minimal MTU size.
o Added a new tunable, hw.skc.jumbo_disable, to disable jumbo frame support in the driver. The tunable can be set on systems that do not need jumbo frames; doing so saves (9K * number of Rx descriptors) bytes of kernel memory.
o A jumbo buffer allocation failure is no longer a critical error for the operation of sk(4). If sk(4) encounters an allocation failure, it just disables jumbo frame support and continues to work without user intervention.
With these changes, jumbo frame performance of sk(4) was slightly increased and users should no longer encounter jumbo buffer allocation failures. Previously, sk(4) tried to allocate physically contiguous memory (3388KB for 256 Rx descriptors). Sometimes that amount of contiguous memory was not available on a running system, which in turn resulted in a failure to load the driver. Tested by: Cy Schubert < Cy.Schubert () komquats dot com >
Diffstat (limited to 'sys/dev/sk')
-rw-r--r--  sys/dev/sk/if_sk.c   | 402
-rw-r--r--  sys/dev/sk/if_skreg.h | 35
2 files changed, 142 insertions, 295 deletions
diff --git a/sys/dev/sk/if_sk.c b/sys/dev/sk/if_sk.c
index c51b4d1..9dee683 100644
--- a/sys/dev/sk/if_sk.c
+++ b/sys/dev/sk/if_sk.c
@@ -227,9 +227,9 @@ static int sk_newbuf(struct sk_if_softc *, int);
static int sk_jumbo_newbuf(struct sk_if_softc *, int);
static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int sk_dma_alloc(struct sk_if_softc *);
+static int sk_dma_jumbo_alloc(struct sk_if_softc *);
static void sk_dma_free(struct sk_if_softc *);
-static void *sk_jalloc(struct sk_if_softc *);
-static void sk_jfree(void *, void *);
+static void sk_dma_jumbo_free(struct sk_if_softc *);
static int sk_init_rx_ring(struct sk_if_softc *);
static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);
@@ -263,6 +263,10 @@ static void sk_setpromisc(struct sk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
+/* Tunables. */
+static int jumbo_disable = 0;
+TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
+
/*
* It seems that SK-NET GENESIS supports very simple checksum offload
* capability for Tx and I believe it can generate 0 checksum value for
@@ -1044,24 +1048,15 @@ sk_jumbo_newbuf(sc_if, idx)
bus_dma_segment_t segs[1];
bus_dmamap_t map;
int nsegs;
- void *buf;
- MGETHDR(m, M_DONTWAIT, MT_DATA);
+ m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
- buf = sk_jalloc(sc_if);
- if (buf == NULL) {
- m_freem(m);
- return (ENOBUFS);
- }
- /* Attach the buffer to the mbuf */
- MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, buf, 0,
- EXT_NET_DRV);
if ((m->m_flags & M_EXT) == 0) {
m_freem(m);
return (ENOBUFS);
}
- m->m_pkthdr.len = m->m_len = SK_JLEN;
+ m->m_pkthdr.len = m->m_len = MJUM9BYTES;
/*
* Adjust alignment so packet payload begins on a
* longword boundary. Mandatory for Alpha, useful on
@@ -1149,15 +1144,22 @@ sk_ioctl(ifp, command, data)
error = 0;
switch(command) {
case SIOCSIFMTU:
- SK_IF_LOCK(sc_if);
- if (ifr->ifr_mtu > SK_JUMBO_MTU)
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
error = EINVAL;
- else {
- ifp->if_mtu = ifr->ifr_mtu;
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- sk_init_locked(sc_if);
+ else if (ifp->if_mtu != ifr->ifr_mtu) {
+ if (sc_if->sk_jumbo_disable != 0 &&
+ ifr->ifr_mtu > SK_MAX_FRAMELEN)
+ error = EINVAL;
+ else {
+ SK_IF_LOCK(sc_if);
+ ifp->if_mtu = ifr->ifr_mtu;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sk_init_locked(sc_if);
+ }
+ SK_IF_UNLOCK(sc_if);
+ }
}
- SK_IF_UNLOCK(sc_if);
break;
case SIOCSIFFLAGS:
SK_IF_LOCK(sc_if);
@@ -1374,6 +1376,7 @@ sk_attach(dev)
error = ENOMEM;
goto fail;
}
+ sk_dma_jumbo_alloc(sc_if);
ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
@@ -1855,6 +1858,7 @@ sk_detach(dev)
device_delete_child(dev, sc_if->sk_miibus);
*/
bus_generic_detach(dev);
+ sk_dma_jumbo_free(sc_if);
sk_dma_free(sc_if);
SK_IF_UNLOCK(sc_if);
@@ -1928,15 +1932,8 @@ sk_dma_alloc(sc_if)
struct sk_dmamap_arg ctx;
struct sk_txdesc *txd;
struct sk_rxdesc *rxd;
- struct sk_rxdesc *jrxd;
- u_int8_t *ptr;
- struct sk_jpool_entry *entry;
int error, i;
- mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
- SLIST_INIT(&sc_if->sk_jfree_listhead);
- SLIST_INIT(&sc_if->sk_jinuse_listhead);
-
/* create parent tag */
/*
* XXX
@@ -1963,6 +1960,7 @@ sk_dma_alloc(sc_if)
"failed to create parent DMA tag\n");
goto fail;
}
+
/* create tag for Tx ring */
error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
SK_RING_ALIGN, 0, /* algnmnt, boundary */
@@ -1999,42 +1997,6 @@ sk_dma_alloc(sc_if)
goto fail;
}
- /* create tag for jumbo Rx ring */
- error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
- SK_RING_ALIGN, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- SK_JUMBO_RX_RING_SZ, /* maxsize */
- 1, /* nsegments */
- SK_JUMBO_RX_RING_SZ, /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
- if (error != 0) {
- device_printf(sc_if->sk_if_dev,
- "failed to allocate jumbo Rx ring DMA tag\n");
- goto fail;
- }
-
- /* create tag for jumbo buffer blocks */
- error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
- PAGE_SIZE, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- SK_JMEM, /* maxsize */
- 1, /* nsegments */
- SK_JMEM, /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc_if->sk_cdata.sk_jumbo_tag);
- if (error != 0) {
- device_printf(sc_if->sk_if_dev,
- "failed to allocate jumbo Rx buffer block DMA tag\n");
- goto fail;
- }
-
/* create tag for Tx buffers */
error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1, 0, /* algnmnt, boundary */
@@ -2071,24 +2033,6 @@ sk_dma_alloc(sc_if)
goto fail;
}
- /* create tag for jumbo Rx buffers */
- error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
- PAGE_SIZE, 0, /* algnmnt, boundary */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MCLBYTES * SK_MAXRXSEGS, /* maxsize */
- SK_MAXRXSEGS, /* nsegments */
- SK_JLEN, /* maxsegsize */
- 0, /* flags */
- NULL, NULL, /* lockfunc, lockarg */
- &sc_if->sk_cdata.sk_jumbo_rx_tag);
- if (error != 0) {
- device_printf(sc_if->sk_if_dev,
- "failed to allocate jumbo Rx DMA tag\n");
- goto fail;
- }
-
/* allocate DMA'able memory and load the DMA map for Tx ring */
error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
(void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
@@ -2131,28 +2075,6 @@ sk_dma_alloc(sc_if)
}
sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
- /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
- error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
- (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
- BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
- if (error != 0) {
- device_printf(sc_if->sk_if_dev,
- "failed to allocate DMA'able memory for jumbo Rx ring\n");
- goto fail;
- }
-
- ctx.sk_busaddr = 0;
- error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
- sc_if->sk_cdata.sk_jumbo_rx_ring_map,
- sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
- &ctx, BUS_DMA_NOWAIT);
- if (error != 0) {
- device_printf(sc_if->sk_if_dev,
- "failed to load DMA'able memory for jumbo Rx ring\n");
- goto fail;
- }
- sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
-
/* create DMA maps for Tx buffers */
for (i = 0; i < SK_TX_RING_CNT; i++) {
txd = &sc_if->sk_cdata.sk_txdesc[i];
@@ -2166,6 +2088,7 @@ sk_dma_alloc(sc_if)
goto fail;
}
}
+
/* create DMA maps for Rx buffers */
if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
&sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
@@ -2185,12 +2108,88 @@ sk_dma_alloc(sc_if)
goto fail;
}
}
+
+fail:
+ return (error);
+}
+
+static int
+sk_dma_jumbo_alloc(sc_if)
+ struct sk_if_softc *sc_if;
+{
+ struct sk_dmamap_arg ctx;
+ struct sk_rxdesc *jrxd;
+ int error, i;
+
+ if (jumbo_disable != 0) {
+ device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
+ sc_if->sk_jumbo_disable = 1;
+ return (0);
+ }
+ /* create tag for jumbo Rx ring */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ SK_RING_ALIGN, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ SK_JUMBO_RX_RING_SZ, /* maxsize */
+ 1, /* nsegments */
+ SK_JUMBO_RX_RING_SZ, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate jumbo Rx ring DMA tag\n");
+ goto jumbo_fail;
+ }
+
+ /* create tag for jumbo Rx buffers */
+ error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUM9BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM9BYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc_if->sk_cdata.sk_jumbo_rx_tag);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate jumbo Rx DMA tag\n");
+ goto jumbo_fail;
+ }
+
+ /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
+ error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
+ BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to allocate DMA'able memory for jumbo Rx ring\n");
+ goto jumbo_fail;
+ }
+
+ ctx.sk_busaddr = 0;
+ error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map,
+ sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
+ &ctx, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ device_printf(sc_if->sk_if_dev,
+ "failed to load DMA'able memory for jumbo Rx ring\n");
+ goto jumbo_fail;
+ }
+ sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
+
/* create DMA maps for jumbo Rx buffers */
if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
&sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
device_printf(sc_if->sk_if_dev,
"failed to create spare jumbo Rx dmamap\n");
- goto fail;
+ goto jumbo_fail;
}
for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
@@ -2201,55 +2200,18 @@ sk_dma_alloc(sc_if)
if (error != 0) {
device_printf(sc_if->sk_if_dev,
"failed to create jumbo Rx dmamap\n");
- goto fail;
+ goto jumbo_fail;
}
}
- /* allocate DMA'able memory and load the DMA map for jumbo buf */
- error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag,
- (void **)&sc_if->sk_rdata.sk_jumbo_buf,
- BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map);
- if (error != 0) {
- device_printf(sc_if->sk_if_dev,
- "failed to allocate DMA'able memory for jumbo buf\n");
- goto fail;
- }
-
- ctx.sk_busaddr = 0;
- error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag,
- sc_if->sk_cdata.sk_jumbo_map,
- sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb,
- &ctx, BUS_DMA_NOWAIT);
- if (error != 0) {
- device_printf(sc_if->sk_if_dev,
- "failed to load DMA'able memory for jumbobuf\n");
- goto fail;
- }
- sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr;
-
- /*
- * Now divide it up into 9K pieces and save the addresses
- * in an array.
- */
- ptr = sc_if->sk_rdata.sk_jumbo_buf;
- for (i = 0; i < SK_JSLOTS; i++) {
- sc_if->sk_cdata.sk_jslots[i] = ptr;
- ptr += SK_JLEN;
- entry = malloc(sizeof(struct sk_jpool_entry),
- M_DEVBUF, M_NOWAIT);
- if (entry == NULL) {
- device_printf(sc_if->sk_if_dev,
- "no memory for jumbo buffers!\n");
- error = ENOMEM;
- goto fail;
- }
- entry->slot = i;
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
- jpool_entries);
- }
+ return (0);
-fail:
- return (error);
+jumbo_fail:
+ sk_dma_jumbo_free(sc_if);
+ device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
+ "resource shortage\n");
+ sc_if->sk_jumbo_disable = 1;
+ return (0);
}
static void
@@ -2258,39 +2220,8 @@ sk_dma_free(sc_if)
{
struct sk_txdesc *txd;
struct sk_rxdesc *rxd;
- struct sk_rxdesc *jrxd;
- struct sk_jpool_entry *entry;
int i;
- SK_JLIST_LOCK(sc_if);
- while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) {
- device_printf(sc_if->sk_if_dev,
- "asked to free buffer that is in use!\n");
- SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry,
- jpool_entries);
- }
-
- while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
- entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
- SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
- free(entry, M_DEVBUF);
- }
- SK_JLIST_UNLOCK(sc_if);
-
- /* destroy jumbo buffer block */
- if (sc_if->sk_cdata.sk_jumbo_map)
- bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag,
- sc_if->sk_cdata.sk_jumbo_map);
-
- if (sc_if->sk_rdata.sk_jumbo_buf) {
- bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag,
- sc_if->sk_rdata.sk_jumbo_buf,
- sc_if->sk_cdata.sk_jumbo_map);
- sc_if->sk_rdata.sk_jumbo_buf = NULL;
- sc_if->sk_cdata.sk_jumbo_map = 0;
- }
-
/* Tx ring */
if (sc_if->sk_cdata.sk_tx_ring_tag) {
if (sc_if->sk_cdata.sk_tx_ring_map)
@@ -2321,21 +2252,6 @@ sk_dma_free(sc_if)
bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
sc_if->sk_cdata.sk_rx_ring_tag = NULL;
}
- /* jumbo Rx ring */
- if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
- if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
- bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
- sc_if->sk_cdata.sk_jumbo_rx_ring_map);
- if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
- sc_if->sk_rdata.sk_jumbo_rx_ring)
- bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
- sc_if->sk_rdata.sk_jumbo_rx_ring,
- sc_if->sk_cdata.sk_jumbo_rx_ring_map);
- sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
- sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0;
- bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
- sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
- }
/* Tx buffers */
if (sc_if->sk_cdata.sk_tx_tag) {
for (i = 0; i < SK_TX_RING_CNT; i++) {
@@ -2367,6 +2283,36 @@ sk_dma_free(sc_if)
bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
sc_if->sk_cdata.sk_rx_tag = NULL;
}
+
+ if (sc_if->sk_cdata.sk_parent_tag) {
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
+ sc_if->sk_cdata.sk_parent_tag = NULL;
+ }
+}
+
+static void
+sk_dma_jumbo_free(sc_if)
+ struct sk_if_softc *sc_if;
+{
+ struct sk_rxdesc *jrxd;
+ int i;
+
+ /* jumbo Rx ring */
+ if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
+ if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
+ bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map);
+ if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
+ sc_if->sk_rdata.sk_jumbo_rx_ring)
+ bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
+ sc_if->sk_rdata.sk_jumbo_rx_ring,
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map);
+ sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
+ sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0;
+ bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
+ sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
+ }
+
/* jumbo Rx buffers */
if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
@@ -2386,72 +2332,6 @@ sk_dma_free(sc_if)
bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
}
-
- if (sc_if->sk_cdata.sk_parent_tag) {
- bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
- sc_if->sk_cdata.sk_parent_tag = NULL;
- }
- mtx_destroy(&sc_if->sk_jlist_mtx);
-}
-
-/*
- * Allocate a jumbo buffer.
- */
-static void *
-sk_jalloc(sc_if)
- struct sk_if_softc *sc_if;
-{
- struct sk_jpool_entry *entry;
-
- SK_JLIST_LOCK(sc_if);
-
- entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
-
- if (entry == NULL) {
- SK_JLIST_UNLOCK(sc_if);
- return (NULL);
- }
-
- SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
-
- SK_JLIST_UNLOCK(sc_if);
-
- return (sc_if->sk_cdata.sk_jslots[entry->slot]);
-}
-
-/*
- * Release a jumbo buffer.
- */
-static void
-sk_jfree(buf, args)
- void *buf;
- void *args;
-{
- struct sk_if_softc *sc_if;
- struct sk_jpool_entry *entry;
- int i;
-
- /* Extract the softc struct pointer. */
- sc_if = (struct sk_if_softc *)args;
- KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
-
- SK_JLIST_LOCK(sc_if);
- /* calculate the slot this buffer belongs to */
- i = ((vm_offset_t)buf
- - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN;
- KASSERT(i >= 0 && i < SK_JSLOTS,
- ("%s: asked to free buffer that we don't manage!", __func__));
-
- entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
- KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
- if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
- wakeup(sc_if);
-
- SK_JLIST_UNLOCK(sc_if);
}
static void
diff --git a/sys/dev/sk/if_skreg.h b/sys/dev/sk/if_skreg.h
index e9e55ea..5c75d90 100644
--- a/sys/dev/sk/if_skreg.h
+++ b/sys/dev/sk/if_skreg.h
@@ -1358,34 +1358,12 @@ struct sk_tx_desc {
#define SK_RX_RING_CNT 256
#define SK_JUMBO_RX_RING_CNT 256
#define SK_MAXTXSEGS 32
-#define SK_MAXRXSEGS 32
-/*
- * Jumbo buffer stuff. Note that we must allocate more jumbo
- * buffers than there are descriptors in the receive ring. This
- * is because we don't know how long it will take for a packet
- * to be released after we hand it off to the upper protocol
- * layers. To be safe, we allocate 1.5 times the number of
- * receive descriptors.
- */
#define SK_JUMBO_FRAMELEN 9018
#define SK_JUMBO_MTU (SK_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
#define SK_MAX_FRAMELEN \
(ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN)
#define SK_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
-#define SK_JSLOTS ((SK_RX_RING_CNT * 3) / 2)
-
-#define SK_JRAWLEN (SK_JUMBO_FRAMELEN + ETHER_ALIGN)
-#define SK_JLEN (SK_JRAWLEN + (sizeof(u_int64_t) - \
- (SK_JRAWLEN % sizeof(u_int64_t))))
-#define SK_JPAGESZ PAGE_SIZE
-#define SK_RESID (SK_JPAGESZ - (SK_JLEN * SK_JSLOTS) % SK_JPAGESZ)
-#define SK_JMEM ((SK_JLEN * SK_JSLOTS) + SK_RESID)
-
-struct sk_jpool_entry {
- int slot;
- SLIST_ENTRY(sk_jpool_entry) jpool_entries;
-};
struct sk_txdesc {
struct mbuf *tx_m;
@@ -1414,10 +1392,6 @@ struct sk_chain_data {
bus_dmamap_t sk_rx_ring_map;
bus_dmamap_t sk_rx_sparemap;
bus_dma_tag_t sk_jumbo_rx_tag;
- bus_dma_tag_t sk_jumbo_tag;
- bus_dmamap_t sk_jumbo_map;
- bus_dma_tag_t sk_jumbo_mtag;
- caddr_t sk_jslots[SK_JSLOTS];
struct sk_rxdesc sk_jumbo_rxdesc[SK_JUMBO_RX_RING_CNT];
bus_dma_tag_t sk_jumbo_rx_ring_tag;
bus_dmamap_t sk_jumbo_rx_ring_map;
@@ -1436,8 +1410,6 @@ struct sk_ring_data {
bus_addr_t sk_rx_ring_paddr;
struct sk_rx_desc *sk_jumbo_rx_ring;
bus_addr_t sk_jumbo_rx_ring_paddr;
- void *sk_jumbo_buf;
- bus_addr_t sk_jumbo_buf_paddr;
};
#define SK_TX_RING_ADDR(sc, i) \
@@ -1518,12 +1490,7 @@ struct sk_if_softc {
struct sk_softc *sk_softc; /* parent controller */
int sk_tx_bmu; /* TX BMU register */
int sk_if_flags;
- SLIST_HEAD(__sk_jfreehead, sk_jpool_entry) sk_jfree_listhead;
- SLIST_HEAD(__sk_jinusehead, sk_jpool_entry) sk_jinuse_listhead;
- struct mtx sk_jlist_mtx;
+ int sk_jumbo_disable;
};
-#define SK_JLIST_LOCK(_sc) mtx_lock(&(_sc)->sk_jlist_mtx)
-#define SK_JLIST_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_jlist_mtx)
-
#define SK_TIMEOUT 1000
OpenPOWER on IntegriCloud