-rw-r--r--  sys/dev/hatm/if_hatm.c       | 21
-rw-r--r--  sys/dev/hatm/if_hatm_intr.c  | 35
-rw-r--r--  sys/dev/hatm/if_hatmvar.h    | 19
3 files changed, 56 insertions(+), 19 deletions(-)
diff --git a/sys/dev/hatm/if_hatm.c b/sys/dev/hatm/if_hatm.c
index d2c1222..5295ee0 100644
--- a/sys/dev/hatm/if_hatm.c
+++ b/sys/dev/hatm/if_hatm.c
@@ -307,15 +307,22 @@ hatm_destroy_smbufs(struct hatm_softc *sc)
{
u_int i, b;
struct mbuf_page *pg;
+ struct mbuf_chunk_hdr *h;
if (sc->mbuf_pages != NULL) {
for (i = 0; i < sc->mbuf_npages; i++) {
pg = sc->mbuf_pages[i];
for (b = 0; b < pg->hdr.nchunks; b++) {
- if (MBUF_TST_BIT(pg->hdr.card, b))
+ h = (struct mbuf_chunk_hdr *) ((char *)pg +
+ b * pg->hdr.chunksize + pg->hdr.hdroff);
+ if (h->flags & MBUF_CARD)
if_printf(&sc->ifatm.ifnet,
"%s -- mbuf page=%u card buf %u\n",
__func__, i, b);
+ if (h->flags & MBUF_USED)
+ if_printf(&sc->ifatm.ifnet,
+ "%s -- mbuf page=%u used buf %u\n",
+ __func__, i, b);
}
bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
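The hunk above drops the per-page "card" bitmap test in favour of per-chunk flags: every chunk carries a small header at a fixed offset (pg->hdr.hdroff) from its start, so the header of chunk b is reached directly from the page pointer. A minimal userland sketch of that arithmetic, using simplified stand-ins for the driver structures (the real definitions are in if_hatmvar.h):

#include <sys/types.h>
#include <stdint.h>

/* Simplified stand-in for the real chunk header in if_hatmvar.h. */
struct mbuf_chunk_hdr {
	uint16_t	pageno;		/* index into sc->mbuf_pages[] */
	uint8_t		chunkno;	/* chunk index within the page */
	uint8_t		flags;		/* MBUF_CARD | MBUF_USED */
	u_int		ref_cnt;
};

#define	MBUF_CARD	0x01		/* buffer is on the card */
#define	MBUF_USED	0x02		/* buffer is owned by the stack */

/* Header of chunk 'chunkno': page base + chunk offset + header offset. */
static struct mbuf_chunk_hdr *
chunk_hdr(void *page, u_int chunkno, u_int chunksize, u_int hdroff)
{
	return ((struct mbuf_chunk_hdr *)((char *)page +
	    chunkno * chunksize + hdroff));
}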
@@ -2332,10 +2339,14 @@ hatm_stop(struct hatm_softc *sc)
for (p = 0; p < sc->mbuf_npages; p++) {
pg = sc->mbuf_pages[p];
for (i = 0; i < pg->hdr.nchunks; i++) {
- if (MBUF_TST_BIT(pg->hdr.card, i)) {
- MBUF_CLR_BIT(pg->hdr.card, i);
- ch = (struct mbuf_chunk_hdr *) ((char *)pg +
- i * pg->hdr.chunksize + pg->hdr.hdroff);
+ ch = (struct mbuf_chunk_hdr *) ((char *)pg +
+ i * pg->hdr.chunksize + pg->hdr.hdroff);
+ if (ch->flags & MBUF_CARD) {
+ ch->flags &= ~MBUF_CARD;
+ ch->flags |= MBUF_USED;
+ hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
+ (struct mbufx_free *)((u_char *)ch -
+ pg->hdr.hdroff));
}
}
}
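hatm_stop() now walks every page, reclaims chunks the card still owns and pushes them back on the pool's free list; the new pg->hdr.pool field tells it which entry of sc->mbuf_list[] that is. The free-list node occupies the start of an unused chunk, so it is recovered from the header by stepping back hdroff bytes, the inverse of the arithmetic sketched above. A hedged sketch, reusing the types from the previous sketch and assuming struct mbufx_free is a plain singly linked node (its definition is not part of this diff):

/* Assumed free-list node layout: the link overlays the chunk storage. */
struct mbufx_free {
	struct mbufx_free	*link;
};

/* Inverse of chunk_hdr(): from the chunk header back to the chunk start. */
static struct mbufx_free *
chunk_to_free(struct mbuf_chunk_hdr *ch, u_int hdroff)
{
	return ((struct mbufx_free *)((u_char *)ch - hdroff));
}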
diff --git a/sys/dev/hatm/if_hatm_intr.c b/sys/dev/hatm/if_hatm_intr.c
index f16ba18..14912ce3 100644
--- a/sys/dev/hatm/if_hatm_intr.c
+++ b/sys/dev/hatm/if_hatm_intr.c
@@ -83,13 +83,16 @@ CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);
+CTASSERT(MBUF0_PER_PAGE <= 256);
+CTASSERT(MBUF1_PER_PAGE <= 256);
+
static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);
/*
* Free an external mbuf to a list. We use atomic functions so that
* we don't need a mutex for the list.
*/
-static __inline void
+__inline void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
for (;;) {
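hatm_ext_free() loses its static qualifier so the new reclaim code in if_hatm.c can call it as well (the matching prototype is added to if_hatmvar.h at the end of this diff). The comment explains that the free lists are maintained with atomics instead of a mutex; the body is cut off by the hunk, but the usual pattern is a compare-and-swap push loop, roughly as below. This is an illustration rather than the verbatim driver code, and it assumes the uintptr_t-based signature of FreeBSD's atomic_cmpset_ptr(9) plus the link member from the mbufx_free sketch above.

#include <machine/atomic.h>

/* Lock-free LIFO push: link the node in front of the current head and
 * try to swing the head pointer; retry if another CPU raced us. */
__inline void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
	for (;;) {
		buf->link = *list;
		if (atomic_cmpset_ptr((volatile uintptr_t *)list,
		    (uintptr_t)buf->link, (uintptr_t)buf))
			break;
	}
}

A CAS push like this is naturally ABA-safe; it is the pop side of such a list that needs the extra care.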
@@ -179,7 +182,6 @@ hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
return;
if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
return;
- bzero(pg->hdr.card, sizeof(pg->hdr.card));
err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
if (err != 0) {
@@ -203,6 +205,7 @@ hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
if (group == 0) {
struct mbuf0_chunk *c;
+ pg->hdr.pool = 0;
pg->hdr.nchunks = MBUF0_PER_PAGE;
pg->hdr.chunksize = MBUF0_CHUNK;
pg->hdr.hdroff = sizeof(c->storage);
@@ -210,12 +213,14 @@ hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
c->hdr.pageno = sc->mbuf_npages;
c->hdr.chunkno = i;
+ c->hdr.flags = MBUF_USED;
hatm_ext_free(&sc->mbuf_list[0],
(struct mbufx_free *)c);
}
} else {
struct mbuf1_chunk *c;
+ pg->hdr.pool = 1;
pg->hdr.nchunks = MBUF1_PER_PAGE;
pg->hdr.chunksize = MBUF1_CHUNK;
pg->hdr.hdroff = sizeof(c->storage);
@@ -223,6 +228,7 @@ hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
c->hdr.pageno = sc->mbuf_npages;
c->hdr.chunkno = i;
+ c->hdr.flags = MBUF_USED;
hatm_ext_free(&sc->mbuf_list[1],
(struct mbufx_free *)c);
}
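Each chunk header is stamped with its page index and chunk number when the page is carved up, so later code can get from a chunk back to its page without searching, which he_intr_rbp() and hatm_rx_buffer() below rely on. A sketch of that lookup, assuming the driver's sc->mbuf_pages array of struct mbuf_page pointers:

/* Map a chunk header back to the page it lives in; pageno was recorded
 * at page-allocation time, sc->mbuf_pages is the driver's page table. */
static struct mbuf_page *
chunk_page(struct hatm_softc *sc, const struct mbuf_chunk_hdr *h)
{
	return (sc->mbuf_pages[h->pageno]);
}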
@@ -239,6 +245,9 @@ hatm_mbuf0_free(void *buf, void *args)
struct hatm_softc *sc = args;
struct mbuf0_chunk *c = buf;
+ KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
+ ("freeing unused mbuf %x", c->hdr.flags));
+ c->hdr.flags &= ~MBUF_USED;
hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
}
static void
@@ -247,6 +256,9 @@ hatm_mbuf1_free(void *buf, void *args)
struct hatm_softc *sc = args;
struct mbuf1_chunk *c = buf;
+ KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
+ ("freeing unused mbuf %x", c->hdr.flags));
+ c->hdr.flags &= ~MBUF_USED;
hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
}
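The two m_ext free callbacks now verify, under INVARIANTS, that a chunk being released is marked MBUF_USED and not MBUF_CARD, which catches double frees and frees of buffers still posted to the card, before clearing MBUF_USED and pushing the chunk back on its free list. Since the two functions differ only in the pool index, a hypothetical common helper (not part of this commit) could read:

/* Hypothetical refactoring: shared body of hatm_mbuf0_free() and
 * hatm_mbuf1_free(); 'chunk' is the chunk start, 'hdr' its header,
 * 'pool' selects sc->mbuf_list[0] or sc->mbuf_list[1]. */
static void
hatm_mbufx_free(struct hatm_softc *sc, void *chunk,
    struct mbuf_chunk_hdr *hdr, u_int pool)
{
	KASSERT((hdr->flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
	    ("freeing unused mbuf %x", hdr->flags));
	hdr->flags &= ~MBUF_USED;
	hatm_ext_free(&sc->mbuf_list[pool], (struct mbufx_free *)chunk);
}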
@@ -333,7 +345,7 @@ he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
break;
buf0 = (struct mbuf0_chunk *)cf;
pg = sc->mbuf_pages[buf0->hdr.pageno];
- MBUF_SET_BIT(pg->hdr.card, buf0->hdr.chunkno);
+ buf0->hdr.flags |= MBUF_CARD;
rbp->rbp[rbp->tail].phys = pg->hdr.phys +
buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
rbp->rbp[rbp->tail].handle =
@@ -351,7 +363,7 @@ he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
break;
buf1 = (struct mbuf1_chunk *)cf;
pg = sc->mbuf_pages[buf1->hdr.pageno];
- MBUF_SET_BIT(pg->hdr.card, buf1->hdr.chunkno);
+ buf1->hdr.flags |= MBUF_CARD;
rbp->rbp[rbp->tail].phys = pg->hdr.phys +
buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
rbp->rbp[rbp->tail].handle =
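When he_intr_rbp() replenishes the card's receive buffer pools it now tags each chunk with MBUF_CARD instead of setting a bit in the page bitmap; the DMA address handed to the card is still the page's physical base plus the chunk's offset, as the context lines above show. A sketch of that computation (MBUF0_OFFSET/MBUF1_OFFSET from the real header are treated as a plain parameter here):

#include <sys/types.h>
#include <stdint.h>

/* Bus address the card should DMA into: page DMA base + chunk offset
 * within the page + offset of the data area within the chunk. */
static uint32_t
chunk_bus_addr(uint32_t page_phys, u_int chunkno, u_int chunksize,
    u_int dataoff)
{
	return (page_phys + chunkno * chunksize + dataoff);
}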
@@ -400,7 +412,6 @@ hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
}
MBUF_PARSE_HANDLE(handle, pageno, chunkno);
- MBUF_CLR_BIT(sc->mbuf_pages[pageno]->hdr.card, chunkno);
DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
pageno, chunkno));
@@ -415,6 +426,13 @@ hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
c0->hdr.pageno, pageno));
KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
c0->hdr.chunkno, chunkno));
+ KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
+ pageno, chunkno));
+ KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
+ pageno, chunkno));
+
+ c0->hdr.flags |= MBUF_USED;
+ c0->hdr.flags &= ~MBUF_CARD;
if (m != NULL) {
m->m_ext.ref_cnt = &c0->hdr.ref_cnt;
@@ -432,6 +450,13 @@ hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
c1->hdr.pageno, pageno));
KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
c1->hdr.chunkno, chunkno));
+ KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
+ pageno, chunkno));
+ KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
+ pageno, chunkno));
+
+ c1->hdr.flags |= MBUF_USED;
+ c1->hdr.flags &= ~MBUF_CARD;
if (m != NULL) {
m->m_ext.ref_cnt = &c1->hdr.ref_cnt;
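Taken together, the assertions added in this file spell out an ownership protocol for every chunk: he_intr_rbp() marks a chunk MBUF_CARD when posting it to the adapter, hatm_rx_buffer() insists on MBUF_CARD and not MBUF_USED when the card hands it back and then swaps the flags, and the m_ext free callbacks insist on MBUF_USED alone before clearing it. A small, runnable userland model of those transitions, with plain assert() standing in for KASSERT and the chunk assumed to start with no flags set:

#include <assert.h>
#include <stdint.h>

#define	MBUF_CARD	0x01	/* buffer is on card */
#define	MBUF_USED	0x02	/* buffer is somewhere in the system */

/* he_intr_rbp(): host posts a free chunk to the card's buffer pool. */
static void
give_to_card(uint8_t *flags)
{
	*flags |= MBUF_CARD;
}

/* hatm_rx_buffer(): card returns the chunk with received data. */
static void
take_from_card(uint8_t *flags)
{
	assert(*flags & MBUF_CARD);	/* must have been on the card */
	assert(!(*flags & MBUF_USED));	/* and not owned by the stack */
	*flags |= MBUF_USED;
	*flags &= ~MBUF_CARD;
}

/* hatm_mbuf0_free()/hatm_mbuf1_free(): mbuf layer releases the chunk. */
static void
stack_free(uint8_t *flags)
{
	assert((*flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED);
	*flags &= ~MBUF_USED;
}

int
main(void)
{
	uint8_t flags = 0;	/* model assumption: fresh chunk, no flags */

	give_to_card(&flags);
	take_from_card(&flags);
	stack_free(&flags);
	return (0);
}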
diff --git a/sys/dev/hatm/if_hatmvar.h b/sys/dev/hatm/if_hatmvar.h
index ed1566a..9d451a7 100644
--- a/sys/dev/hatm/if_hatmvar.h
+++ b/sys/dev/hatm/if_hatmvar.h
@@ -239,12 +239,12 @@ SLIST_HEAD(tpd_list, tpd);
/* each allocated page has one of these structures at its very end. */
struct mbuf_page_hdr {
- uint8_t card[32]; /* bitmap for on-card */
uint16_t nchunks; /* chunks on this page */
bus_dmamap_t map; /* the DMA MAP */
uint32_t phys; /* physical base address */
uint32_t hdroff; /* chunk header offset */
uint32_t chunksize; /* chunk size */
+ u_int pool; /* pool number */
};
struct mbuf_page {
char storage[MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)];
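The new pool member records which buffer pool (and thus which sc->mbuf_list[] free list) a page belongs to, so code that only has the page at hand, such as the reclaim loop in hatm_stop(), does not have to infer it. The rest of the layout is unchanged: chunks fill the page from the front and the page header sits in its last bytes, which is where the MBUFx_PER_PAGE macros below come from. A parametric sketch of that count (u_int as in <sys/types.h>):

/* Chunks that fit in one page once the trailing page header is
 * subtracted -- the same formula as the MBUFx_PER_PAGE macros below. */
static u_int
chunks_per_page(u_int alloc_size, u_int hdr_size, u_int chunk_size)
{
	return ((alloc_size - hdr_size) / chunk_size);
}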
@@ -257,10 +257,6 @@ struct mbuf_page {
#define MBUF1_PER_PAGE ((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \
MBUF1_CHUNK)
-#define MBUF_CLR_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] &= ~(1 << ((BIT) % 8)))
-#define MBUF_SET_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] |= (1 << ((BIT) % 8)))
-#define MBUF_TST_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] & (1 << ((BIT) % 8)))
-
/*
* Convert to/from handles
*/
@@ -281,12 +277,15 @@ struct mbuf_page {
#define MBUF_LARGE_FLAG 0x80000000
-/* chunks have the following structure at the end (4 byte) */
+/* chunks have the following structure at the end (8 byte) */
struct mbuf_chunk_hdr {
- uint16_t pageno;
- uint16_t chunkno;
- u_int ref_cnt;
+ uint16_t pageno;
+ uint8_t chunkno;
+ uint8_t flags;
+ u_int ref_cnt;
};
+#define MBUF_CARD 0x01 /* buffer is on card */
+#define MBUF_USED 0x02 /* buffer is somewhere in the system */
#define MBUFX_STORAGE_SIZE(X) (MBUF##X##_CHUNK \
- sizeof(struct mbuf_chunk_hdr))
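Shrinking chunkno to eight bits is what the new CTASSERTs in if_hatm_intr.c guard: no pool may put more than 256 chunks on a page. With a two-byte pageno, two one-byte fields and a four-byte u_int ref_cnt the header packs to the eight bytes the comment now advertises; a hypothetical compile-time check for that (the commit itself only asserts the per-page counts) would be:

/* Hypothetical check of the 8-byte claim above; not part of this diff. */
CTASSERT(sizeof(struct mbuf_chunk_hdr) == 8);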
@@ -628,3 +627,5 @@ void hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid);
void hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid);
void hatm_vcc_closed(struct hatm_softc *sc, u_int cid);
void hatm_load_vc(struct hatm_softc *sc, u_int cid, int reopen);
+
+void hatm_ext_free(struct mbufx_free **, struct mbufx_free *);