summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorwollman <wollman@FreeBSD.org>1997-02-13 19:41:40 +0000
committerwollman <wollman@FreeBSD.org>1997-02-13 19:41:40 +0000
commit1f8c9c194a905535c832a0021936cf4734355267 (patch)
tree38caca2b43ba055ff41147bbfb72f0ca5c96be84
parentcb442e2038f193d705318960fc919a03900c52d3 (diff)
downloadFreeBSD-src-1f8c9c194a905535c832a0021936cf4734355267.zip
FreeBSD-src-1f8c9c194a905535c832a0021936cf4734355267.tar.gz
Provide an alternative mbuf cluster allocator which permits use of
clusters greater than one page in length by calling contigmalloc1(). If the system runs out of clusters at interrupt time, a helper process `mclalloc' performs the allocation instead, to avoid calling contigmalloc at high spl. It is not yet clear to me whether this works.
-rw-r--r--sys/kern/uipc_mbuf.c47
1 files changed, 46 insertions, 1 deletions
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 3f99385..d2cb6c0 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -81,8 +81,14 @@ mbinit(dummy)
s = splimp();
if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
goto bad;
+#if MCLBYTES <= PAGE_SIZE
if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
goto bad;
+#else
+ /* It's OK to call contigmalloc in this context. */
+ if (m_clalloc(16, 0) == 0)
+ goto bad;
+#endif
splx(s);
return;
bad:
@@ -130,6 +136,34 @@ m_mballoc(nmb, nowait)
return (1);
}
+#if MCLBYTES > PAGE_SIZE
+int i_want_my_mcl;
+
+void
+kproc_mclalloc(void)
+{
+ int status;
+
+ while (1) {
+ tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);
+
+ for (; i_want_my_mcl; i_want_my_mcl--) {
+ if (m_clalloc(1, 0) == 0)
+ printf("m_clalloc failed even in process context!\n");
+ }
+ }
+}
+
+static struct proc *mclallocproc;
+static struct kproc_desc mclalloc_kp = {
+ "mclalloc",
+ kproc_mclalloc,
+ &mclallocproc
+};
+SYSINIT_KT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
+ &mclalloc_kp);
+#endif
+
/*
* Allocate some number of mbuf clusters
* and place on cluster free list.
@@ -153,9 +187,21 @@ m_clalloc(ncl, nowait)
if (mb_map_full)
return (0);
+#if MCLBYTES > PAGE_SIZE
+ if (nowait) {
+ i_want_my_mcl += ncl;
+ wakeup(&i_want_my_mcl);
+ p = 0;
+ } else {
+ p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
+ ~0ul, PAGE_SIZE, 0, mb_map);
+ }
+#else
npg = ncl;
p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
nowait ? M_NOWAIT : M_WAITOK);
+ ncl = ncl * PAGE_SIZE / MCLBYTES;
+#endif
/*
* Either the map is now full, or this is nowait and there
* are no pages left.
@@ -163,7 +209,6 @@ m_clalloc(ncl, nowait)
if (p == NULL)
return (0);
- ncl = ncl * PAGE_SIZE / MCLBYTES;
for (i = 0; i < ncl; i++) {
((union mcluster *)p)->mcl_next = mclfree;
mclfree = (union mcluster *)p;
OpenPOWER on IntegriCloud