summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--sys/alpha/alpha/machdep.c12
-rw-r--r--sys/amd64/amd64/machdep.c12
-rw-r--r--sys/dev/en/midway.c2
-rw-r--r--sys/dev/sk/if_sk.c83
-rw-r--r--sys/dev/sk/if_skreg.h1
-rw-r--r--sys/dev/ti/if_ti.c83
-rw-r--r--sys/dev/ti/if_tireg.h1
-rw-r--r--sys/i386/i386/machdep.c12
-rw-r--r--sys/kern/uipc_mbuf.c84
-rw-r--r--sys/kern/uipc_mbuf2.c2
-rw-r--r--sys/kern/uipc_syscalls.c68
-rw-r--r--sys/netinet6/ipsec.c2
-rw-r--r--sys/netkey/key_debug.c4
-rw-r--r--sys/pc98/i386/machdep.c12
-rw-r--r--sys/pc98/pc98/machdep.c12
-rw-r--r--sys/pci/if_sk.c83
-rw-r--r--sys/pci/if_skreg.h1
-rw-r--r--sys/pci/if_ti.c83
-rw-r--r--sys/pci/if_tireg.h1
-rw-r--r--sys/pci/if_wb.c16
-rw-r--r--sys/sys/mbuf.h177
-rw-r--r--sys/sys/socketvar.h1
-rw-r--r--usr.bin/netstat/mbuf.c8
23 files changed, 292 insertions, 468 deletions
diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c
index 02643ae..e5124d4 100644
--- a/sys/alpha/alpha/machdep.c
+++ b/sys/alpha/alpha/machdep.c
@@ -370,18 +370,16 @@ again:
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
- * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
- * we use the more space efficient malloc in place of kmem_alloc.
+ * Finally, allocate mbuf pool.
*/
{
vm_offset_t mb_map_size;
- mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
+ mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES +
+ (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt);
mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
- mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
- bzero(mclrefcnt, mb_map_size / MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
- mb_map_size);
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl,
+ &maxaddr, mb_map_size);
mb_map->system_map = 1;
}
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 7647877..5e0dcba 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -386,18 +386,16 @@ again:
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
- * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
- * we use the more space efficient malloc in place of kmem_alloc.
+ * Finally, allocate mbuf pool.
*/
{
vm_offset_t mb_map_size;
- mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
+ mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES +
+ (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt);
mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
- mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
- bzero(mclrefcnt, mb_map_size / MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
- mb_map_size);
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl,
+ &maxaddr, mb_map_size);
mb_map->system_map = 1;
}
diff --git a/sys/dev/en/midway.c b/sys/dev/en/midway.c
index 410d2f3..f397065 100644
--- a/sys/dev/en/midway.c
+++ b/sys/dev/en/midway.c
@@ -1833,7 +1833,7 @@ STATIC int en_makeexclusive(sc, mm, prev)
return (0);
}
- if (mclrefcnt[mtocl(m->m_ext.ext_buf)] > 1) {
+ if (MEXT_IS_REF(m)) {
/* make a real copy of the M_EXT mbuf since it is shared */
MGET(new, M_DONTWAIT, MT_DATA);
if (!new) {
diff --git a/sys/dev/sk/if_sk.c b/sys/dev/sk/if_sk.c
index b74f3f1..d45ac26 100644
--- a/sys/dev/sk/if_sk.c
+++ b/sys/dev/sk/if_sk.c
@@ -148,8 +148,7 @@ static int sk_newbuf __P((struct sk_if_softc *,
struct sk_chain *, struct mbuf *));
static int sk_alloc_jumbo_mem __P((struct sk_if_softc *));
static void *sk_jalloc __P((struct sk_if_softc *));
-static void sk_jfree __P((caddr_t, u_int));
-static void sk_jref __P((caddr_t, u_int));
+static void sk_jfree __P((caddr_t, void *));
static int sk_init_rx_ring __P((struct sk_if_softc *));
static void sk_init_tx_ring __P((struct sk_if_softc *));
static u_int32_t sk_win_read_4 __P((struct sk_softc *, int));
@@ -690,12 +689,9 @@ static int sk_newbuf(sc_if, c, m)
}
/* Attach the buffer to the mbuf */
- m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
- m_new->m_flags |= M_EXT;
- m_new->m_ext.ext_size = m_new->m_pkthdr.len =
- m_new->m_len = SK_MCLBYTES;
- m_new->m_ext.ext_free = sk_jfree;
- m_new->m_ext.ext_ref = sk_jref;
+ MEXTADD(m_new, buf, SK_MCLBYTES, sk_jfree, NULL);
+ m_new->m_data = (void *)buf;
+ m_new->m_pkthdr.len = m_new->m_len = SK_MCLBYTES;
} else {
/*
* We're re-using a previously allocated mbuf;
@@ -765,7 +761,6 @@ static int sk_alloc_jumbo_mem(sc_if)
aptr[0] = (u_int64_t *)sc_if;
ptr += sizeof(u_int64_t);
sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
- sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
ptr += SK_MCLBYTES;
entry = malloc(sizeof(struct sk_jpool_entry),
M_DEVBUF, M_NOWAIT);
@@ -803,55 +798,15 @@ static void *sk_jalloc(sc_if)
SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
- sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
}
/*
- * Adjust usage count on a jumbo buffer. In general this doesn't
- * get used much because our jumbo buffers don't get passed around
- * a lot, but it's implemented for correctness.
- */
-static void sk_jref(buf, size)
- caddr_t buf;
- u_int size;
-{
- struct sk_if_softc *sc_if;
- u_int64_t **aptr;
- register int i;
-
- /* Extract the softc struct pointer. */
- aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
- sc_if = (struct sk_if_softc *)(aptr[0]);
-
- if (sc_if == NULL)
- panic("sk_jref: can't find softc pointer!");
-
- if (size != SK_MCLBYTES)
- panic("sk_jref: adjusting refcount of buf of wrong size!");
-
- /* calculate the slot this buffer belongs to */
-
- i = ((vm_offset_t)aptr
- - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
-
- if ((i < 0) || (i >= SK_JSLOTS))
- panic("sk_jref: asked to reference buffer "
- "that we don't manage!");
- else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
- panic("sk_jref: buffer already free!");
- else
- sc_if->sk_cdata.sk_jslots[i].sk_inuse++;
-
- return;
-}
-
-/*
* Release a jumbo buffer.
*/
-static void sk_jfree(buf, size)
+static void sk_jfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
struct sk_if_softc *sc_if;
u_int64_t **aptr;
@@ -865,31 +820,19 @@ static void sk_jfree(buf, size)
if (sc_if == NULL)
panic("sk_jfree: can't find softc pointer!");
- if (size != SK_MCLBYTES)
- panic("sk_jfree: freeing buffer of wrong size!");
-
/* calculate the slot this buffer belongs to */
-
i = ((vm_offset_t)aptr
- (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
if ((i < 0) || (i >= SK_JSLOTS))
panic("sk_jfree: asked to free buffer that we don't manage!");
- else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
- panic("sk_jfree: buffer already free!");
- else {
- sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
- if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
- entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
- if (entry == NULL)
- panic("sk_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
- jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
- entry, jpool_entries);
- }
- }
+
+ entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
+ if (entry == NULL)
+ panic("sk_jfree: buffer not in use!");
+ entry->slot = i;
+ SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
return;
}
diff --git a/sys/dev/sk/if_skreg.h b/sys/dev/sk/if_skreg.h
index af17c1c..24cce27 100644
--- a/sys/dev/sk/if_skreg.h
+++ b/sys/dev/sk/if_skreg.h
@@ -1124,7 +1124,6 @@ struct sk_tx_desc {
struct sk_jslot {
caddr_t sk_buf;
- int sk_inuse;
};
struct sk_jpool_entry {
diff --git a/sys/dev/ti/if_ti.c b/sys/dev/ti/if_ti.c
index 7e1b6f7..56847f4 100644
--- a/sys/dev/ti/if_ti.c
+++ b/sys/dev/ti/if_ti.c
@@ -190,8 +190,7 @@ static void ti_cmd_ext __P((struct ti_softc *, struct ti_cmd_desc *,
static void ti_handle_events __P((struct ti_softc *));
static int ti_alloc_jumbo_mem __P((struct ti_softc *));
static void *ti_jalloc __P((struct ti_softc *));
-static void ti_jfree __P((caddr_t, u_int));
-static void ti_jref __P((caddr_t, u_int));
+static void ti_jfree __P((caddr_t, void *));
static int ti_newbuf_std __P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_mini __P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_jumbo __P((struct ti_softc *, int, struct mbuf *));
@@ -629,7 +628,6 @@ static int ti_alloc_jumbo_mem(sc)
aptr[0] = (u_int64_t *)sc;
ptr += sizeof(u_int64_t);
sc->ti_cdata.ti_jslots[i].ti_buf = ptr;
- sc->ti_cdata.ti_jslots[i].ti_inuse = 0;
ptr += (TI_JLEN - sizeof(u_int64_t));
entry = malloc(sizeof(struct ti_jpool_entry),
M_DEVBUF, M_NOWAIT);
@@ -665,55 +663,15 @@ static void *ti_jalloc(sc)
SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
- sc->ti_cdata.ti_jslots[entry->slot].ti_inuse = 1;
return(sc->ti_cdata.ti_jslots[entry->slot].ti_buf);
}
/*
- * Adjust usage count on a jumbo buffer. In general this doesn't
- * get used much because our jumbo buffers don't get passed around
- * too much, but it's implemented for correctness.
- */
-static void ti_jref(buf, size)
- caddr_t buf;
- u_int size;
-{
- struct ti_softc *sc;
- u_int64_t **aptr;
- register int i;
-
- /* Extract the softc struct pointer. */
- aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
- sc = (struct ti_softc *)(aptr[0]);
-
- if (sc == NULL)
- panic("ti_jref: can't find softc pointer!");
-
- if (size != TI_JUMBO_FRAMELEN)
- panic("ti_jref: adjusting refcount of buf of wrong size!");
-
- /* calculate the slot this buffer belongs to */
-
- i = ((vm_offset_t)aptr
- - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
-
- if ((i < 0) || (i >= TI_JSLOTS))
- panic("ti_jref: asked to reference buffer "
- "that we don't manage!");
- else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
- panic("ti_jref: buffer already free!");
- else
- sc->ti_cdata.ti_jslots[i].ti_inuse++;
-
- return;
-}
-
-/*
* Release a jumbo buffer.
*/
-static void ti_jfree(buf, size)
+static void ti_jfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
struct ti_softc *sc;
u_int64_t **aptr;
@@ -727,31 +685,19 @@ static void ti_jfree(buf, size)
if (sc == NULL)
panic("ti_jfree: can't find softc pointer!");
- if (size != TI_JUMBO_FRAMELEN)
- panic("ti_jfree: freeing buffer of wrong size!");
-
/* calculate the slot this buffer belongs to */
-
i = ((vm_offset_t)aptr
- (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
if ((i < 0) || (i >= TI_JSLOTS))
panic("ti_jfree: asked to free buffer that we don't manage!");
- else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
- panic("ti_jfree: buffer already free!");
- else {
- sc->ti_cdata.ti_jslots[i].ti_inuse--;
- if(sc->ti_cdata.ti_jslots[i].ti_inuse == 0) {
- entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
- if (entry == NULL)
- panic("ti_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead,
- jpool_entries);
- SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
- entry, jpool_entries);
- }
- }
+
+ entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
+ if (entry == NULL)
+ panic("ti_jfree: buffer not in use!");
+ entry->slot = i;
+ SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
return;
}
@@ -877,12 +823,9 @@ static int ti_newbuf_jumbo(sc, i, m)
}
/* Attach the buffer to the mbuf. */
- m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
- m_new->m_flags |= M_EXT;
- m_new->m_len = m_new->m_pkthdr.len =
- m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
- m_new->m_ext.ext_free = ti_jfree;
- m_new->m_ext.ext_ref = ti_jref;
+ m_new->m_data = (void *) buf;
+ m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
+ MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree, NULL);
} else {
m_new = m;
m_new->m_data = m_new->m_ext.ext_buf;
diff --git a/sys/dev/ti/if_tireg.h b/sys/dev/ti/if_tireg.h
index 4712272..6346c67 100644
--- a/sys/dev/ti/if_tireg.h
+++ b/sys/dev/ti/if_tireg.h
@@ -1046,7 +1046,6 @@ struct ti_event_desc {
struct ti_jslot {
caddr_t ti_buf;
- int ti_inuse;
};
/*
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 7647877..5e0dcba 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -386,18 +386,16 @@ again:
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
- * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
- * we use the more space efficient malloc in place of kmem_alloc.
+ * Finally, allocate mbuf pool.
*/
{
vm_offset_t mb_map_size;
- mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
+ mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES +
+ (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt);
mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
- mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
- bzero(mclrefcnt, mb_map_size / MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
- mb_map_size);
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl,
+ &maxaddr, mb_map_size);
mb_map->system_map = 1;
}
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 89ec747..ee0b58c 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -56,11 +56,11 @@ static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
struct mbuf *mbutl;
-char *mclrefcnt;
struct mbstat mbstat;
u_long mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
+union mext_refcnt *mext_refcnt_free;
int max_linkhdr;
int max_protohdr;
int max_hdr;
@@ -95,10 +95,9 @@ TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
static void m_reclaim __P((void));
-/* "number of clusters of pages" */
-#define NCL_INIT 1
-
+#define NCL_INIT 2
#define NMB_INIT 16
+#define REF_INIT (NMBCLUSTERS * 2)
/* ARGSUSED*/
static void
@@ -107,7 +106,10 @@ mbinit(dummy)
{
int s;
- mmbfree = NULL; mclfree = NULL;
+ mmbfree = NULL;
+ mclfree = NULL;
+ mext_refcnt_free = NULL;
+
mbstat.m_msize = MSIZE;
mbstat.m_mclbytes = MCLBYTES;
mbstat.m_minclsize = MINCLSIZE;
@@ -115,6 +117,8 @@ mbinit(dummy)
mbstat.m_mhlen = MHLEN;
s = splimp();
+ if (m_alloc_ref(REF_INIT) == 0)
+ goto bad;
if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
goto bad;
#if MCLBYTES <= PAGE_SIZE
@@ -128,7 +132,49 @@ mbinit(dummy)
splx(s);
return;
bad:
- panic("mbinit");
+ panic("mbinit: failed to initialize mbuf subsystem!");
+}
+
+/*
+ * Allocate at least nmb reference count structs and place them
+ * on the ref cnt free list.
+ * Must be called at splimp.
+ */
+int
+m_alloc_ref(nmb)
+ u_int nmb;
+{
+ caddr_t p;
+ u_int nbytes;
+ int i;
+
+ /*
+ * XXX:
+ * We don't cap the amount of memory that can be used
+ * by the reference counters, like we do for mbufs and
+ * mbuf clusters. The reason is that we don't really expect
+ * to have to be allocating too many of these guys with m_alloc_ref(),
+ * and if we are, we're probably not out of the woods anyway,
+ * so leave this way for now.
+ */
+
+ if (mb_map_full)
+ return (0);
+
+ nbytes = round_page(nmb * sizeof(union mext_refcnt));
+ if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT)) == NULL)
+ return (0);
+ nmb = nbytes / sizeof(union mext_refcnt);
+
+ for (i = 0; i < nmb; i++) {
+ ((union mext_refcnt *)p)->next_ref = mext_refcnt_free;
+ mext_refcnt_free = (union mext_refcnt *)p;
+ p += sizeof(union mext_refcnt);
+ mbstat.m_refree++;
+ }
+ mbstat.m_refcnt += nmb;
+
+ return (1);
}
/*
@@ -363,7 +409,7 @@ m_clalloc_wait(void)
* MGET, but avoid getting into another instance of m_clalloc_wait()
*/
p = NULL;
- MCLALLOC(p, M_DONTWAIT);
+ _MCLALLOC(p, M_DONTWAIT);
s = splimp();
if (p != NULL) { /* We waited and got something... */
@@ -624,13 +670,9 @@ m_copym(m, off0, len, wait)
n->m_len = min(len, m->m_len - off);
if (m->m_flags & M_EXT) {
n->m_data = m->m_data + off;
- if(!m->m_ext.ext_ref)
- mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
- else
- (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
- m->m_ext.ext_size);
n->m_ext = m->m_ext;
n->m_flags |= M_EXT;
+ MEXT_ADD_REF(m);
} else
bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
(unsigned)n->m_len);
@@ -671,13 +713,9 @@ m_copypacket(m, how)
n->m_len = m->m_len;
if (m->m_flags & M_EXT) {
n->m_data = m->m_data;
- if(!m->m_ext.ext_ref)
- mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
- else
- (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
- m->m_ext.ext_size);
n->m_ext = m->m_ext;
n->m_flags |= M_EXT;
+ MEXT_ADD_REF(m);
} else {
bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
}
@@ -694,13 +732,9 @@ m_copypacket(m, how)
n->m_len = m->m_len;
if (m->m_flags & M_EXT) {
n->m_data = m->m_data;
- if(!m->m_ext.ext_ref)
- mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
- else
- (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
- m->m_ext.ext_size);
n->m_ext = m->m_ext;
n->m_flags |= M_EXT;
+ MEXT_ADD_REF(m);
} else {
bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
}
@@ -1042,11 +1076,7 @@ extpacket:
if (m->m_flags & M_EXT) {
n->m_flags |= M_EXT;
n->m_ext = m->m_ext;
- if(!m->m_ext.ext_ref)
- mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
- else
- (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
- m->m_ext.ext_size);
+ MEXT_ADD_REF(m);
m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
n->m_data = m->m_data + len;
} else {
diff --git a/sys/kern/uipc_mbuf2.c b/sys/kern/uipc_mbuf2.c
index b39c002..2e6aa1e 100644
--- a/sys/kern/uipc_mbuf2.c
+++ b/sys/kern/uipc_mbuf2.c
@@ -182,7 +182,7 @@ m_pulldown(m, off, len, offp)
else {
if (n->m_ext.ext_free)
sharedcluster = 1;
- else if (mclrefcnt[mtocl(n->m_ext.ext_buf)] > 1)
+ else if (MEXT_IS_REF(n))
sharedcluster = 1;
else
sharedcluster = 0;
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 5ba2a24..ed9d691 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -72,8 +72,7 @@
static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
static struct sf_buf *sf_buf_alloc(void);
-static void sf_buf_ref(caddr_t addr, u_int size);
-static void sf_buf_free(caddr_t addr, u_int size);
+static void sf_buf_free(caddr_t addr, void *args);
static int sendit __P((struct proc *p, int s, struct msghdr *mp, int flags));
static int recvit __P((struct proc *p, int s, struct msghdr *mp,
@@ -1354,58 +1353,42 @@ sf_buf_alloc()
}
SLIST_REMOVE_HEAD(&sf_freelist, free_list);
splx(s);
- sf->refcnt = 1;
return (sf);
}
#define dtosf(x) (&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT])
-static void
-sf_buf_ref(caddr_t addr, u_int size)
-{
- struct sf_buf *sf;
-
- sf = dtosf(addr);
- if (sf->refcnt == 0)
- panic("sf_buf_ref: referencing a free sf_buf");
- sf->refcnt++;
-}
/*
- * Lose a reference to an sf_buf. When none left, detach mapped page
- * and release resources back to the system.
+ *
+ * Detatch mapped page and release resources back to the system.
*
* Must be called at splimp.
*/
static void
-sf_buf_free(caddr_t addr, u_int size)
+sf_buf_free(caddr_t addr, void *args)
{
struct sf_buf *sf;
struct vm_page *m;
int s;
sf = dtosf(addr);
- if (sf->refcnt == 0)
- panic("sf_buf_free: freeing free sf_buf");
- sf->refcnt--;
- if (sf->refcnt == 0) {
- pmap_qremove((vm_offset_t)addr, 1);
- m = sf->m;
- s = splvm();
- vm_page_unwire(m, 0);
- /*
- * Check for the object going away on us. This can
- * happen since we don't hold a reference to it.
- * If so, we're responsible for freeing the page.
- */
- if (m->wire_count == 0 && m->object == NULL)
- vm_page_free(m);
- splx(s);
- sf->m = NULL;
- SLIST_INSERT_HEAD(&sf_freelist, sf, free_list);
- if (sf_buf_alloc_want) {
- sf_buf_alloc_want = 0;
- wakeup(&sf_freelist);
- }
+ pmap_qremove((vm_offset_t)addr, 1);
+ m = sf->m;
+ s = splvm();
+ vm_page_unwire(m, 0);
+ /*
+ * Check for the object going away on us. This can
+ * happen since we don't hold a reference to it.
+ * If so, we're responsible for freeing the page.
+ */
+ if (m->wire_count == 0 && m->object == NULL)
+ vm_page_free(m);
+ splx(s);
+ sf->m = NULL;
+ SLIST_INSERT_HEAD(&sf_freelist, sf, free_list);
+ if (sf_buf_alloc_want) {
+ sf_buf_alloc_want = 0;
+ wakeup(&sf_freelist);
}
}
@@ -1630,12 +1613,11 @@ retry_lookup:
error = ENOBUFS;
goto done;
}
- m->m_ext.ext_free = sf_buf_free;
- m->m_ext.ext_ref = sf_buf_ref;
- m->m_ext.ext_buf = (void *)sf->kva;
- m->m_ext.ext_size = PAGE_SIZE;
+ /*
+ * Setup external storage for mbuf.
+ */
+ MEXTADD(m, sf->kva, PAGE_SIZE, sf_buf_free, NULL);
m->m_data = (char *) sf->kva + pgoff;
- m->m_flags |= M_EXT;
m->m_pkthdr.len = m->m_len = xfsize;
/*
* Add the buffer to the socket buffer chain.
diff --git a/sys/netinet6/ipsec.c b/sys/netinet6/ipsec.c
index 2e8a67e..87e771f 100644
--- a/sys/netinet6/ipsec.c
+++ b/sys/netinet6/ipsec.c
@@ -3225,7 +3225,7 @@ ipsec_copypkt(m)
*/
if (
n->m_ext.ext_free ||
- mclrefcnt[mtocl(n->m_ext.ext_buf)] > 1
+ MEXT_IS_REF(n)
)
{
int remain, copied;
diff --git a/sys/netkey/key_debug.c b/sys/netkey/key_debug.c
index ebd06c3..3e8b12b 100644
--- a/sys/netkey/key_debug.c
+++ b/sys/netkey/key_debug.c
@@ -646,9 +646,9 @@ kdebug_mbufhdr(m)
if (m->m_flags & M_EXT) {
printf(" m_ext{ ext_buf:%p ext_free:%p "
- "ext_size:%u ext_ref:%p }\n",
+ "ext_size:%u ref_cnt:%p }\n",
m->m_ext.ext_buf, m->m_ext.ext_free,
- m->m_ext.ext_size, m->m_ext.ext_ref);
+ m->m_ext.ext_size, m->m_ext.ref_cnt);
}
return;
diff --git a/sys/pc98/i386/machdep.c b/sys/pc98/i386/machdep.c
index f63a452..5d6472b 100644
--- a/sys/pc98/i386/machdep.c
+++ b/sys/pc98/i386/machdep.c
@@ -399,18 +399,16 @@ again:
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
- * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
- * we use the more space efficient malloc in place of kmem_alloc.
+ * Finally, allocate mbuf pool.
*/
{
vm_offset_t mb_map_size;
- mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
+ mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES +
+ (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt);
mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
- mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
- bzero(mclrefcnt, mb_map_size / MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
- mb_map_size);
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl,
+ &maxaddr, mb_map_size);
mb_map->system_map = 1;
}
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index f63a452..5d6472b 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -399,18 +399,16 @@ again:
(16*(ARG_MAX+(PAGE_SIZE*3))));
/*
- * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
- * we use the more space efficient malloc in place of kmem_alloc.
+ * Finally, allocate mbuf pool.
*/
{
vm_offset_t mb_map_size;
- mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
+ mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES +
+ (nmbclusters + nmbufs / 4) * sizeof(union mext_refcnt);
mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
- mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
- bzero(mclrefcnt, mb_map_size / MCLBYTES);
- mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
- mb_map_size);
+ mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl,
+ &maxaddr, mb_map_size);
mb_map->system_map = 1;
}
diff --git a/sys/pci/if_sk.c b/sys/pci/if_sk.c
index b74f3f1..d45ac26 100644
--- a/sys/pci/if_sk.c
+++ b/sys/pci/if_sk.c
@@ -148,8 +148,7 @@ static int sk_newbuf __P((struct sk_if_softc *,
struct sk_chain *, struct mbuf *));
static int sk_alloc_jumbo_mem __P((struct sk_if_softc *));
static void *sk_jalloc __P((struct sk_if_softc *));
-static void sk_jfree __P((caddr_t, u_int));
-static void sk_jref __P((caddr_t, u_int));
+static void sk_jfree __P((caddr_t, void *));
static int sk_init_rx_ring __P((struct sk_if_softc *));
static void sk_init_tx_ring __P((struct sk_if_softc *));
static u_int32_t sk_win_read_4 __P((struct sk_softc *, int));
@@ -690,12 +689,9 @@ static int sk_newbuf(sc_if, c, m)
}
/* Attach the buffer to the mbuf */
- m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
- m_new->m_flags |= M_EXT;
- m_new->m_ext.ext_size = m_new->m_pkthdr.len =
- m_new->m_len = SK_MCLBYTES;
- m_new->m_ext.ext_free = sk_jfree;
- m_new->m_ext.ext_ref = sk_jref;
+ MEXTADD(m_new, buf, SK_MCLBYTES, sk_jfree, NULL);
+ m_new->m_data = (void *)buf;
+ m_new->m_pkthdr.len = m_new->m_len = SK_MCLBYTES;
} else {
/*
* We're re-using a previously allocated mbuf;
@@ -765,7 +761,6 @@ static int sk_alloc_jumbo_mem(sc_if)
aptr[0] = (u_int64_t *)sc_if;
ptr += sizeof(u_int64_t);
sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
- sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
ptr += SK_MCLBYTES;
entry = malloc(sizeof(struct sk_jpool_entry),
M_DEVBUF, M_NOWAIT);
@@ -803,55 +798,15 @@ static void *sk_jalloc(sc_if)
SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
- sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
}
/*
- * Adjust usage count on a jumbo buffer. In general this doesn't
- * get used much because our jumbo buffers don't get passed around
- * a lot, but it's implemented for correctness.
- */
-static void sk_jref(buf, size)
- caddr_t buf;
- u_int size;
-{
- struct sk_if_softc *sc_if;
- u_int64_t **aptr;
- register int i;
-
- /* Extract the softc struct pointer. */
- aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
- sc_if = (struct sk_if_softc *)(aptr[0]);
-
- if (sc_if == NULL)
- panic("sk_jref: can't find softc pointer!");
-
- if (size != SK_MCLBYTES)
- panic("sk_jref: adjusting refcount of buf of wrong size!");
-
- /* calculate the slot this buffer belongs to */
-
- i = ((vm_offset_t)aptr
- - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
-
- if ((i < 0) || (i >= SK_JSLOTS))
- panic("sk_jref: asked to reference buffer "
- "that we don't manage!");
- else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
- panic("sk_jref: buffer already free!");
- else
- sc_if->sk_cdata.sk_jslots[i].sk_inuse++;
-
- return;
-}
-
-/*
* Release a jumbo buffer.
*/
-static void sk_jfree(buf, size)
+static void sk_jfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
struct sk_if_softc *sc_if;
u_int64_t **aptr;
@@ -865,31 +820,19 @@ static void sk_jfree(buf, size)
if (sc_if == NULL)
panic("sk_jfree: can't find softc pointer!");
- if (size != SK_MCLBYTES)
- panic("sk_jfree: freeing buffer of wrong size!");
-
/* calculate the slot this buffer belongs to */
-
i = ((vm_offset_t)aptr
- (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
if ((i < 0) || (i >= SK_JSLOTS))
panic("sk_jfree: asked to free buffer that we don't manage!");
- else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
- panic("sk_jfree: buffer already free!");
- else {
- sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
- if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
- entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
- if (entry == NULL)
- panic("sk_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
- jpool_entries);
- SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
- entry, jpool_entries);
- }
- }
+
+ entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
+ if (entry == NULL)
+ panic("sk_jfree: buffer not in use!");
+ entry->slot = i;
+ SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
return;
}
diff --git a/sys/pci/if_skreg.h b/sys/pci/if_skreg.h
index af17c1c..24cce27 100644
--- a/sys/pci/if_skreg.h
+++ b/sys/pci/if_skreg.h
@@ -1124,7 +1124,6 @@ struct sk_tx_desc {
struct sk_jslot {
caddr_t sk_buf;
- int sk_inuse;
};
struct sk_jpool_entry {
diff --git a/sys/pci/if_ti.c b/sys/pci/if_ti.c
index 7e1b6f7..56847f4 100644
--- a/sys/pci/if_ti.c
+++ b/sys/pci/if_ti.c
@@ -190,8 +190,7 @@ static void ti_cmd_ext __P((struct ti_softc *, struct ti_cmd_desc *,
static void ti_handle_events __P((struct ti_softc *));
static int ti_alloc_jumbo_mem __P((struct ti_softc *));
static void *ti_jalloc __P((struct ti_softc *));
-static void ti_jfree __P((caddr_t, u_int));
-static void ti_jref __P((caddr_t, u_int));
+static void ti_jfree __P((caddr_t, void *));
static int ti_newbuf_std __P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_mini __P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_jumbo __P((struct ti_softc *, int, struct mbuf *));
@@ -629,7 +628,6 @@ static int ti_alloc_jumbo_mem(sc)
aptr[0] = (u_int64_t *)sc;
ptr += sizeof(u_int64_t);
sc->ti_cdata.ti_jslots[i].ti_buf = ptr;
- sc->ti_cdata.ti_jslots[i].ti_inuse = 0;
ptr += (TI_JLEN - sizeof(u_int64_t));
entry = malloc(sizeof(struct ti_jpool_entry),
M_DEVBUF, M_NOWAIT);
@@ -665,55 +663,15 @@ static void *ti_jalloc(sc)
SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
- sc->ti_cdata.ti_jslots[entry->slot].ti_inuse = 1;
return(sc->ti_cdata.ti_jslots[entry->slot].ti_buf);
}
/*
- * Adjust usage count on a jumbo buffer. In general this doesn't
- * get used much because our jumbo buffers don't get passed around
- * too much, but it's implemented for correctness.
- */
-static void ti_jref(buf, size)
- caddr_t buf;
- u_int size;
-{
- struct ti_softc *sc;
- u_int64_t **aptr;
- register int i;
-
- /* Extract the softc struct pointer. */
- aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
- sc = (struct ti_softc *)(aptr[0]);
-
- if (sc == NULL)
- panic("ti_jref: can't find softc pointer!");
-
- if (size != TI_JUMBO_FRAMELEN)
- panic("ti_jref: adjusting refcount of buf of wrong size!");
-
- /* calculate the slot this buffer belongs to */
-
- i = ((vm_offset_t)aptr
- - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
-
- if ((i < 0) || (i >= TI_JSLOTS))
- panic("ti_jref: asked to reference buffer "
- "that we don't manage!");
- else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
- panic("ti_jref: buffer already free!");
- else
- sc->ti_cdata.ti_jslots[i].ti_inuse++;
-
- return;
-}
-
-/*
* Release a jumbo buffer.
*/
-static void ti_jfree(buf, size)
+static void ti_jfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
struct ti_softc *sc;
u_int64_t **aptr;
@@ -727,31 +685,19 @@ static void ti_jfree(buf, size)
if (sc == NULL)
panic("ti_jfree: can't find softc pointer!");
- if (size != TI_JUMBO_FRAMELEN)
- panic("ti_jfree: freeing buffer of wrong size!");
-
/* calculate the slot this buffer belongs to */
-
i = ((vm_offset_t)aptr
- (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
if ((i < 0) || (i >= TI_JSLOTS))
panic("ti_jfree: asked to free buffer that we don't manage!");
- else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0)
- panic("ti_jfree: buffer already free!");
- else {
- sc->ti_cdata.ti_jslots[i].ti_inuse--;
- if(sc->ti_cdata.ti_jslots[i].ti_inuse == 0) {
- entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
- if (entry == NULL)
- panic("ti_jfree: buffer not in use!");
- entry->slot = i;
- SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead,
- jpool_entries);
- SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
- entry, jpool_entries);
- }
- }
+
+ entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
+ if (entry == NULL)
+ panic("ti_jfree: buffer not in use!");
+ entry->slot = i;
+ SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
return;
}
@@ -877,12 +823,9 @@ static int ti_newbuf_jumbo(sc, i, m)
}
/* Attach the buffer to the mbuf. */
- m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
- m_new->m_flags |= M_EXT;
- m_new->m_len = m_new->m_pkthdr.len =
- m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
- m_new->m_ext.ext_free = ti_jfree;
- m_new->m_ext.ext_ref = ti_jref;
+ m_new->m_data = (void *) buf;
+ m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
+ MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree, NULL);
} else {
m_new = m;
m_new->m_data = m_new->m_ext.ext_buf;
diff --git a/sys/pci/if_tireg.h b/sys/pci/if_tireg.h
index 4712272..6346c67 100644
--- a/sys/pci/if_tireg.h
+++ b/sys/pci/if_tireg.h
@@ -1046,7 +1046,6 @@ struct ti_event_desc {
struct ti_jslot {
caddr_t ti_buf;
- int ti_inuse;
};
/*
diff --git a/sys/pci/if_wb.c b/sys/pci/if_wb.c
index 397ca20..b33c542 100644
--- a/sys/pci/if_wb.c
+++ b/sys/pci/if_wb.c
@@ -147,7 +147,7 @@ static int wb_probe __P((device_t));
static int wb_attach __P((device_t));
static int wb_detach __P((device_t));
-static void wb_bfree __P((caddr_t, u_int));
+static void wb_bfree		__P((caddr_t, void *));
static int wb_newbuf __P((struct wb_softc *,
struct wb_chain_onefrag *,
struct mbuf *));
@@ -1078,9 +1078,9 @@ static int wb_list_rx_init(sc)
return(0);
}
-static void wb_bfree(buf, size)
+static void wb_bfree(buf, args)
caddr_t buf;
- u_int size;
+ void *args;
{
return;
}
@@ -1102,13 +1102,9 @@ static int wb_newbuf(sc, c, m)
"list -- packet dropped!\n", sc->wb_unit);
return(ENOBUFS);
}
-
- m_new->m_data = m_new->m_ext.ext_buf = c->wb_buf;
- m_new->m_flags |= M_EXT;
- m_new->m_ext.ext_size = m_new->m_pkthdr.len =
- m_new->m_len = WB_BUFBYTES;
- m_new->m_ext.ext_free = wb_bfree;
- m_new->m_ext.ext_ref = wb_bfree;
+ m_new->m_data = c->wb_buf;
+ m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES;
+ MEXTADD(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, NULL);
} else {
m_new = m;
m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES;
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index 155e681..033fd7b 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -55,14 +55,9 @@
* Macros for type conversion
* mtod(m, t) - convert mbuf pointer to data pointer of correct type
* dtom(x) - convert data pointer within mbuf to mbuf pointer (XXX)
- * mtocl(x) - convert pointer within cluster to cluster index #
- * cltom(x) - convert cluster # to ptr to beginning of cluster
*/
#define mtod(m, t) ((t)((m)->m_data))
#define dtom(x) ((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
-#define mtocl(x) (((uintptr_t)(x) - (uintptr_t)mbutl) >> MCLSHIFT)
-#define cltom(x) ((caddr_t)((uintptr_t)mbutl + \
- ((uintptr_t)(x) << MCLSHIFT)))
/* header at beginning of each mbuf: */
struct m_hdr {
@@ -90,10 +85,10 @@ struct pkthdr {
struct m_ext {
caddr_t ext_buf; /* start of buffer */
void (*ext_free) /* free routine if not the usual */
- __P((caddr_t, u_int));
+ __P((caddr_t, void *));
+ void *ext_args; /* optional argument pointer */
u_int ext_size; /* size of buffer, for ext_free */
- void (*ext_ref) /* add a reference to the ext object */
- __P((caddr_t, u_int));
+ union mext_refcnt *ref_cnt; /* pointer to ref count info */
};
struct mbuf {
@@ -188,8 +183,10 @@ struct mbuf {
struct mbstat {
u_long m_mbufs; /* mbufs obtained from page pool */
u_long m_clusters; /* clusters obtained from page pool */
- u_long m_spare; /* spare field */
u_long m_clfree; /* free clusters */
+ u_long m_refcnt; /* refcnt structs obtained from page pool */
+ u_long m_refree; /* free refcnt structs */
+ u_long m_spare; /* spare field */
u_long m_drops; /* times failed to find space */
u_long m_wait; /* times waited for space */
u_long m_drain; /* times drained protocols for space */
@@ -227,6 +224,14 @@ union mcluster {
#define MGET_C 2
/*
+ * The m_ext object reference counter structure.
+ */
+union mext_refcnt {
+ union mext_refcnt *next_ref;
+ u_long refcnt;
+};
+
+/*
* Wake up the next instance (if any) of m_mballoc_wait() which is
* waiting for an mbuf to be freed. This should be called at splimp().
*
@@ -265,6 +270,50 @@ union mcluster {
} while (0)
/*
+ * mbuf external reference count management macros:
+ *
+ * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing
+ * the external buffer ext_buf
+ * MEXT_REM_REF(m): remove reference to m_ext object
+ * MEXT_ADD_REF(m): add reference to m_ext object already
+ * referred to by (m)
+ * MEXT_INIT_REF(m): allocate and initialize an external
+ * object reference counter for (m)
+ */
+#define MEXT_IS_REF(m) ((m)->m_ext.ref_cnt->refcnt > 1)
+
+#define MEXT_REM_REF(m) atomic_subtract_long(&((m)->m_ext.ref_cnt->refcnt), 1)
+
+#define MEXT_ADD_REF(m) atomic_add_long(&((m)->m_ext.ref_cnt->refcnt), 1)
+
+#define _MEXT_ALLOC_CNT(m_cnt) MBUFLOCK( \
+ union mext_refcnt *__mcnt; \
+ \
+	if ((mext_refcnt_free == NULL) && (m_alloc_ref(1) == 0))	\
+		panic("mbuf subsystem: out of ref counts!");		\
+	__mcnt = mext_refcnt_free;					\
+	mext_refcnt_free = __mcnt->next_ref;				\
+	__mcnt->next_ref = NULL;					\
+ (m_cnt) = __mcnt; \
+ mbstat.m_refree--; \
+)
+
+#define _MEXT_DEALLOC_CNT(m_cnt) do { \
+ union mext_refcnt *__mcnt = (m_cnt); \
+ \
+ __mcnt->next_ref = mext_refcnt_free; \
+ mext_refcnt_free = __mcnt; \
+ mbstat.m_refree++; \
+} while (0)
+
+#define MEXT_INIT_REF(m) do { \
+ struct mbuf *__mmm = (m); \
+ \
+ _MEXT_ALLOC_CNT(__mmm->m_ext.ref_cnt); \
+ atomic_set_long(&(__mmm->m_ext.ref_cnt->refcnt), 1); \
+} while (0)
+
+/*
* mbuf allocation/deallocation macros:
*
* MGET(struct mbuf *m, int how, int type)
@@ -286,22 +335,20 @@ union mcluster {
if (_mm != NULL) { \
mmbfree = _mm->m_next; \
mbtypes[MT_FREE]--; \
- _mm->m_type = _mtype; \
mbtypes[_mtype]++; \
+ splx(_ms); \
+ _mm->m_type = _mtype; \
_mm->m_next = NULL; \
_mm->m_nextpkt = NULL; \
_mm->m_data = _mm->m_dat; \
_mm->m_flags = 0; \
- (m) = _mm; \
- splx(_ms); \
} else { \
splx(_ms); \
_mm = m_retry(_mhow, _mtype); \
if (_mm == NULL && _mhow == M_WAIT) \
- (m) = m_mballoc_wait(MGET_C, _mtype); \
- else \
- (m) = _mm; \
+ _mm = m_mballoc_wait(MGET_C, _mtype); \
} \
+ (m) = _mm; \
} while (0)
#define MGETHDR(m, how, type) do { \
@@ -316,36 +363,34 @@ union mcluster {
if (_mm != NULL) { \
mmbfree = _mm->m_next; \
mbtypes[MT_FREE]--; \
- _mm->m_type = _mtype; \
mbtypes[_mtype]++; \
+ splx(_ms); \
+ _mm->m_type = _mtype; \
_mm->m_next = NULL; \
_mm->m_nextpkt = NULL; \
_mm->m_data = _mm->m_pktdat; \
_mm->m_flags = M_PKTHDR; \
_mm->m_pkthdr.rcvif = NULL; \
_mm->m_pkthdr.csum_flags = 0; \
- _mm->m_pkthdr.aux = (struct mbuf *)NULL; \
- (m) = _mm; \
- splx(_ms); \
+ _mm->m_pkthdr.aux = NULL; \
} else { \
splx(_ms); \
_mm = m_retryhdr(_mhow, _mtype); \
if (_mm == NULL && _mhow == M_WAIT) \
- (m) = m_mballoc_wait(MGETHDR_C, _mtype); \
- else \
- (m) = _mm; \
+ _mm = m_mballoc_wait(MGETHDR_C, _mtype); \
} \
+ (m) = _mm; \
} while (0)
/*
- * Mbuf cluster macros.
- * MCLALLOC(caddr_t p, int how) allocates an mbuf cluster.
- * MCLGET adds such clusters to a normal mbuf;
- * the flag M_EXT is set upon success.
- * MCLFREE releases a reference to a cluster allocated by MCLALLOC,
- * freeing the cluster if the reference count has reached 0.
+ * mbuf external storage macros:
+ *
+ *	MCLGET allocates and attaches an mbuf cluster to an mbuf
+ *	MEXTADD attaches pre-allocated external storage to an mbuf
+ * MEXTFREE removes reference to external object and frees it if
+ * necessary
*/
-#define MCLALLOC(p, how) do { \
+#define _MCLALLOC(p, how) do { \
caddr_t _mp; \
int _mhow = (how); \
int _ms = splimp(); \
@@ -354,61 +399,70 @@ union mcluster {
(void)m_clalloc(1, _mhow); \
_mp = (caddr_t)mclfree; \
if (_mp != NULL) { \
- mclrefcnt[mtocl(_mp)]++; \
mbstat.m_clfree--; \
mclfree = ((union mcluster *)_mp)->mcl_next; \
- (p) = _mp; \
splx(_ms); \
} else { \
splx(_ms); \
if (_mhow == M_WAIT) \
- (p) = m_clalloc_wait(); \
- else \
- (p) = NULL; \
+ _mp = m_clalloc_wait(); \
} \
-} while (0)
+ (p) = _mp; \
+} while (0)
#define MCLGET(m, how) do { \
struct mbuf *_mm = (m); \
\
- MCLALLOC(_mm->m_ext.ext_buf, (how)); \
+ _MCLALLOC(_mm->m_ext.ext_buf, (how)); \
if (_mm->m_ext.ext_buf != NULL) { \
_mm->m_data = _mm->m_ext.ext_buf; \
_mm->m_flags |= M_EXT; \
_mm->m_ext.ext_free = NULL; \
- _mm->m_ext.ext_ref = NULL; \
+ _mm->m_ext.ext_args = NULL; \
_mm->m_ext.ext_size = MCLBYTES; \
+ MEXT_INIT_REF(_mm); \
} \
} while (0)
-#define MCLFREE1(p) do { \
- union mcluster *_mp = (union mcluster *)(p); \
+#define MEXTADD(m, buf, size, free, args) do { \
+ struct mbuf *_mm = (m); \
\
- KASSERT(mclrefcnt[mtocl(_mp)] > 0, ("freeing free cluster")); \
- if (--mclrefcnt[mtocl(_mp)] == 0) { \
- _mp->mcl_next = mclfree; \
- mclfree = _mp; \
- mbstat.m_clfree++; \
- MCLWAKEUP(); \
- } \
+ _mm->m_flags |= M_EXT; \
+ _mm->m_ext.ext_buf = (caddr_t)(buf); \
+ _mm->m_data = _mm->m_ext.ext_buf; \
+ _mm->m_ext.ext_size = (size); \
+ _mm->m_ext.ext_free = (free); \
+ _mm->m_ext.ext_args = (args); \
+ MEXT_INIT_REF(_mm); \
} while (0)
-#define MCLFREE(p) MBUFLOCK( \
- MCLFREE1(p); \
+#define _MCLFREE(p) MBUFLOCK( \
+ union mcluster *_mp = (union mcluster *)(p); \
+ \
+ _mp->mcl_next = mclfree; \
+ mclfree = _mp; \
+ mbstat.m_clfree++; \
+ MCLWAKEUP(); \
)
-#define MEXTFREE1(m) do { \
- struct mbuf *_mm = (m); \
+#define _MEXTFREE(m) do { \
+ struct mbuf *_mmm = (m); \
\
- if (_mm->m_ext.ext_free != NULL) \
- (*_mm->m_ext.ext_free)(_mm->m_ext.ext_buf, \
- _mm->m_ext.ext_size); \
- else \
- MCLFREE1(_mm->m_ext.ext_buf); \
+ if (MEXT_IS_REF(_mmm)) \
+ MEXT_REM_REF(_mmm); \
+ else if (_mmm->m_ext.ext_free != NULL) { \
+ (*(_mmm->m_ext.ext_free))(_mmm->m_ext.ext_buf, \
+ _mmm->m_ext.ext_args); \
+ _MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt); \
+ } else { \
+ _MCLFREE(_mmm->m_ext.ext_buf); \
+ _MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt); \
+ } \
+ _mmm->m_flags &= ~M_EXT; \
} while (0)
-#define MEXTFREE(m) MBUFLOCK( \
- MEXTFREE1(m); \
+#define MEXTFREE(m) MBUFLOCK( \
+ _MEXTFREE(m); \
)
/*
@@ -420,12 +474,12 @@ union mcluster {
struct mbuf *_mm = (m); \
\
KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \
- mbtypes[_mm->m_type]--; \
if (_mm->m_flags & M_EXT) \
- MEXTFREE1(m); \
- (n) = _mm->m_next; \
+ _MEXTFREE(_mm); \
+ mbtypes[_mm->m_type]--; \
_mm->m_type = MT_FREE; \
mbtypes[MT_FREE]++; \
+ (n) = _mm->m_next; \
_mm->m_next = mmbfree; \
mmbfree = _mm; \
MMBWAKEUP(); \
@@ -540,14 +594,15 @@ extern struct mbstat mbstat;
extern u_long mbtypes[MT_NTYPES]; /* per-type mbuf allocations */
extern int mbuf_wait; /* mbuf sleep time */
extern struct mbuf *mbutl; /* virtual address of mclusters */
-extern char *mclrefcnt; /* cluster reference counts */
extern union mcluster *mclfree;
extern struct mbuf *mmbfree;
+extern union mext_refcnt *mext_refcnt_free;
extern int nmbclusters;
extern int nmbufs;
extern int nsfbufs;
void m_adj __P((struct mbuf *, int));
+int m_alloc_ref __P((u_int));
void m_cat __P((struct mbuf *,struct mbuf *));
int m_clalloc __P((int, int));
caddr_t m_clalloc_wait __P((void));
diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h
index f1134b7..aee3191 100644
--- a/sys/sys/socketvar.h
+++ b/sys/sys/socketvar.h
@@ -272,7 +272,6 @@ struct sockopt {
struct sf_buf {
SLIST_ENTRY(sf_buf) free_list; /* list of free buffer slots */
- int refcnt; /* reference count */
struct vm_page *m; /* currently mapped page */
vm_offset_t kva; /* va of mapping */
};
diff --git a/usr.bin/netstat/mbuf.c b/usr.bin/netstat/mbuf.c
index 6ad3fdb..32c0751 100644
--- a/usr.bin/netstat/mbuf.c
+++ b/usr.bin/netstat/mbuf.c
@@ -176,9 +176,13 @@ mbpr()
printf("%lu/%lu/%u mbuf clusters in use (current/peak/max)\n",
mbstat.m_clusters - mbstat.m_clfree, mbstat.m_clusters,
nmbclusters);
- totmem = mbstat.m_mbufs * MSIZE + mbstat.m_clusters * MCLBYTES;
+ printf("%lu/%lu m_ext reference counters (in use/allocated)\n",
+ mbstat.m_refcnt - mbstat.m_refree, mbstat.m_refcnt);
+ totmem = mbstat.m_mbufs * MSIZE + mbstat.m_clusters * MCLBYTES +
+ mbstat.m_refcnt * sizeof(union mext_refcnt);
totfree = mbstat.m_clfree * MCLBYTES +
- MSIZE * (mbstat.m_mbufs - totmbufs);
+ MSIZE * (mbstat.m_mbufs - totmbufs) + mbstat.m_refree *
+ sizeof(union mext_refcnt);
printf("%u Kbytes allocated to network (%d%% in use)\n",
totmem / 1024, (unsigned) (totmem - totfree) * 100 / totmem);
printf("%lu requests for memory denied\n", mbstat.m_drops);
OpenPOWER on IntegriCloud