path: root/sys/kern/uipc_mbuf.c
author     wollman <wollman@FreeBSD.org>    1996-05-08 19:38:27 +0000
committer  wollman <wollman@FreeBSD.org>    1996-05-08 19:38:27 +0000
commit     577771db625c04242662f7e9ceb22c54f78a77ee (patch)
tree       a64fbe1967604e074495985930a3c00e1bf4d695 /sys/kern/uipc_mbuf.c
parent     2d7d383bb75e7bb82763c67e05af41c00059465d (diff)
Our new-old mbuf allocator. This is actually something of a blast from
the past, since it returns to the old system of allocating mbufs out of a
private area rather than using the kernel malloc(). While this may seem
like a backwards step to some, the new allocator is some 20% faster than
the old one and has much better caching properties.

Written by: John Wroclawski <jtw@lcs.mit.edu>
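For readers unfamiliar with the approach, here is a minimal userspace sketch of the
private free-list pattern described above. It is illustration only, not the kernel code
in the diff below: OBJ_SIZE, pool_grow, and pool_get are hypothetical names standing in
for MSIZE, m_mballoc(), and the MGET path.

/*
 * Sketch of a private fixed-size allocator: carve one large chunk into
 * equal-size objects, thread them onto a singly-linked free list, and
 * make allocation a constant-time pointer pop.
 */
#include <stdlib.h>
#include <stddef.h>

#define OBJ_SIZE 128                    /* stand-in for MSIZE */

struct obj { struct obj *next; };       /* overlay used while an object is free */

static struct obj *freelist;            /* analogous to mmbfree */

/* Grab one chunk, carve it into OBJ_SIZE pieces, push each onto the list. */
static int
pool_grow(size_t nbytes)
{
        char *p = malloc(nbytes);
        size_t i;

        if (p == NULL)
                return (0);
        for (i = 0; i + OBJ_SIZE <= nbytes; i += OBJ_SIZE) {
                struct obj *o = (struct obj *)(p + i);

                o->next = freelist;
                freelist = o;
        }
        return (1);
}

/* Allocation never touches the general-purpose allocator on the hot path. */
static void *
pool_get(void)
{
        struct obj *o = freelist;

        if (o != NULL)
                freelist = o->next;
        return (o);
}

int
main(void)
{
        void *m;

        if (pool_grow(4096) == 0)
                return (1);
        m = pool_get();                 /* pops the most recently pushed object */
        return (m == NULL);
}

Because every object lives in one dedicated region and the hot path is a single pointer
pop, this kind of allocator touches far fewer cache lines than a general-purpose
malloc(), which is the sort of behaviour behind the speedup and caching benefit noted
above.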
Diffstat (limited to 'sys/kern/uipc_mbuf.c')
-rw-r--r--  sys/kern/uipc_mbuf.c | 104
1 file changed, 103 insertions(+), 1 deletion(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 744631a..9c2d315 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
- * $Id: uipc_mbuf.c,v 1.18 1996/05/02 14:20:29 phk Exp $
+ * $Id: uipc_mbuf.c,v 1.19 1996/05/06 17:18:12 phk Exp $
*/
#include <sys/param.h>
@@ -56,6 +56,7 @@ SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
struct mbuf *mbutl;
char *mclrefcnt;
struct mbstat mbstat;
+struct mbuf *mmbfree;
union mcluster *mclfree;
int max_linkhdr;
int max_protohdr;
@@ -71,12 +72,17 @@ mbinit(dummy)
{
int s;
+#define NMB_INIT 16
#if MCLBYTES < 4096
#define NCL_INIT (4096/MCLBYTES)
#else
#define NCL_INIT 1
#endif
+
+ mmbfree = NULL; mclfree = NULL;
s = splimp();
+ if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
+ goto bad;
if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
goto bad;
splx(s);
@@ -86,6 +92,47 @@ bad:
}
/*
+ * Allocate at least nmb mbufs and place on mbuf free list.
+ * Must be called at splimp.
+ */
+/* ARGSUSED */
+int
+m_mballoc(nmb, nowait)
+ register int nmb;
+ int nowait;
+{
+ register caddr_t p;
+ register int i;
+ int nbytes;
+
+ /* Once we run out of map space, it will be impossible to get
+ * any more (nothing is ever freed back to the map) (XXX which
+ * is dumb). (however you are not dead as m_reclaim might
+ * still be able to free a substantial amount of space).
+ */
+ if (mb_map_full)
+ return (0);
+
+ nbytes = round_page(nmb * MSIZE);
+ p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
+ /*
+ * Either the map is now full, or this is nowait and there
+ * are no pages left.
+ */
+ if (p == NULL)
+ return (0);
+
+ nmb = nbytes / MSIZE;
+ for (i = 0; i < nmb; i++) {
+ ((struct mbuf *)p)->m_next = mmbfree;
+ mmbfree = (struct mbuf *)p;
+ p += MSIZE;
+ }
+ mbstat.m_mbufs += nmb;
+ return (1);
+}
+
+/*
* Allocate some number of mbuf clusters
* and place on cluster free list.
* Must be called at splimp.
@@ -355,6 +402,61 @@ nospace:
}
/*
+ * Copy an entire packet, including header (which must be present).
+ * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
+ */
+struct mbuf *
+m_copypacket(m, how)
+ struct mbuf *m;
+ int how;
+{
+ struct mbuf *top, *n, *o;
+
+ MGET(n, how, m->m_type);
+ top = n;
+ if (!n)
+ goto nospace;
+
+ M_COPY_PKTHDR(n, m);
+ n->m_len = m->m_len;
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data;
+ mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
+ n->m_ext = m->m_ext;
+ n->m_flags |= M_EXT;
+ } else {
+ bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
+ }
+
+ m = m->m_next;
+ while (m) {
+ MGET(o, how, m->m_type);
+ if (!o)
+ goto nospace;
+
+ n->m_next = o;
+ n = n->m_next;
+
+ n->m_len = m->m_len;
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data;
+ mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
+ n->m_ext = m->m_ext;
+ n->m_flags |= M_EXT;
+ } else {
+ bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
+ }
+
+ m = m->m_next;
+ }
+ return top;
+nospace:
+ m_freem(top);
+ MCFail++;
+ return 0;
+}
+
+/*
* Copy data from an mbuf chain starting "off" bytes from the beginning,
* continuing for "len" bytes, into the indicated buffer.
*/