author     green <green@FreeBSD.org>  1999-12-12 05:52:51 +0000
committer  green <green@FreeBSD.org>  1999-12-12 05:52:51 +0000
commit     56a46611e10209f4a55ba809f58b91f7db2b60b4 (patch)
tree       a18fc22a78afce419dacc6ec6f2b867caf2d0922 /sys/kern/uipc_mbuf.c
parent     8fc09c011cc5df71c6f1ab5510dee8133224bbb4 (diff)
download   FreeBSD-src-56a46611e10209f4a55ba809f58b91f7db2b60b4.zip
           FreeBSD-src-56a46611e10209f4a55ba809f58b91f7db2b60b4.tar.gz
This is Bosko Milekic's mbuf allocation waiting code. Basically, this
means that running out of mbuf space isn't a panic anymore, and code
which runs out of network memory will sleep to wait for it.

Submitted by:	Bosko Milekic <bmilekic@dsuper.net>
Reviewed by:	green, wollman
Diffstat (limited to 'sys/kern/uipc_mbuf.c')
-rw-r--r--  sys/kern/uipc_mbuf.c  160
1 file changed, 138 insertions(+), 22 deletions(-)
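What this means at a call site (a minimal sketch, not part of this commit;
example_get is a hypothetical caller): before the change, an M_WAIT
allocation that found mb_map exhausted ended in panic("Out of mbuf
clusters"); after it, MGET may sleep in m_mballoc_wait() for up to
mbuf_wait clock ticks and can still return NULL.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/mbuf.h>

static int
example_get(struct mbuf **mp)
{
	struct mbuf *m;

	MGET(m, M_WAIT, MT_DATA);	/* may now tsleep(); no more panic */
	if (m == NULL)
		return (ENOBUFS);	/* even M_WAIT can time out */
	*mp = m;
	return (0);
}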
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 1684c92..9650c68 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -48,6 +48,10 @@
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
+#ifdef INVARIANTS
+#include <machine/cpu.h>
+#endif
+
static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
@@ -71,6 +75,8 @@ SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
&max_datalen, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
+ &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
&nmbclusters, 0, "Maximum number of mbuf clusters available");
@@ -132,10 +138,15 @@ m_mballoc(nmb, how)
register int i;
int nbytes;
- /* Once we run out of map space, it will be impossible to get
- * any more (nothing is ever freed back to the map) (XXX which
- * is dumb). (however you are not dead as m_reclaim might
- * still be able to free a substantial amount of space).
+ /*
+ * Once we run out of map space, it will be impossible to get
+ * any more (nothing is ever freed back to the map)
+ * -- however you are not dead as m_reclaim might
+ * still be able to free a substantial amount of space.
+ *
+ * XXX Furthermore, we can also work with "recycled" mbufs (when
+ * we're calling with M_WAIT the sleep procedure will be woken
+ * up when an mbuf is freed. See m_mballoc_wait()).
 */
if (mb_map_full)
return (0);
@@ -164,6 +175,52 @@ m_mballoc(nmb, how)
return (1);
}
+/*
+ * Once the mb_map has been exhausted, if the call to the allocation macros
+ * (or, in some cases, functions) is made with M_WAIT, it is necessary to
+ * rely solely on reclaimed mbufs. Here we wait for an mbuf to be freed for
+ * a designated (mbuf_wait) time.
+ */
+struct mbuf *
+m_mballoc_wait(int caller, int type)
+{
+ struct mbuf *p;
+ int s;
+
+ m_mballoc_wid++;
+ if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
+ m_mballoc_wid--;
+
+ /*
+ * Now that we (think) we've got something, we will redo an
+ * MGET, but avoid getting into another instance of m_mballoc_wait().
+ * XXX: We retry the fetch _even_ if the sleep timed out. This is left
+ * this way, purposely, in the [unlikely] case that an mbuf was
+ * freed but the sleep was not awakened in time.
+ */
+ p = NULL;
+ switch (caller) {
+ case MGET_C:
+ MGET(p, M_DONTWAIT, type);
+ break;
+ case MGETHDR_C:
+ MGETHDR(p, M_DONTWAIT, type);
+ break;
+ default:
+ panic("m_mballoc_wait: invalid caller (%d)", caller);
+ }
+
+ s = splimp();
+ if (p != NULL) { /* We waited and got something... */
+ mbstat.m_wait++;
+ /* Wake up another if we have more free. */
+ if (mmbfree != NULL)
+ m_mballoc_wakeup();
+ }
+ splx(s);
+ return (p);
+}
+
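The accounting above is one-sided on purpose: m_mballoc_wid counts sleepers
on the tsleep() channel, a sleeper subtracts itself only on EWOULDBLOCK, and
on a real wakeup the waker is expected to have done the decrement. The
wakeup side is not in this file's diff (it presumably lives with the free
macros in sys/mbuf.h); a plausible reconstruction of m_mballoc_wakeup(),
with the body assumed rather than taken from this commit:

/* Assumed sketch -- the actual definition is not in this file's diff. */
static void
m_mballoc_wakeup(void)
{
	if (m_mballoc_wid != 0) {
		m_mballoc_wid--;		/* the woken sleeper owns this slot */
		wakeup_one(&m_mballoc_wid);	/* tsleep() channel used above */
	}
}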
#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;
@@ -210,7 +267,8 @@ m_clalloc(ncl, how)
/*
* Once we run out of map space, it will be impossible
* to get any more (nothing is ever freed back to the
- * map).
+ * map). From this point on, we rely solely on freed
+ * mclusters.
*/
if (mb_map_full) {
mbstat.m_drops++;
@@ -253,6 +311,47 @@ m_clalloc(ncl, how)
}
/*
+ * Once the mb_map submap has been exhausted and the allocation is called with
+ * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
+ * sleep for a designated amount of time (mbuf_wait) or until we're woken up
+ * due to sudden mcluster availability.
+ */
+caddr_t
+m_clalloc_wait(void)
+{
+ caddr_t p;
+ int s;
+
+#ifdef __i386__
+ /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
+ KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
+#endif
+
+ /* Sleep until something's available or until we expire. */
+ m_clalloc_wid++;
+ if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
+ m_clalloc_wid--;
+
+ /*
+ * Now that we (think) we've got something, we will redo an
+ * MCLALLOC, but avoid getting into another instance of m_clalloc_wait().
+ */
+ p = NULL;
+ MCLALLOC(p, M_DONTWAIT);
+
+ s = splimp();
+ if (p != NULL) { /* We waited and got something... */
+ mbstat.m_wait++;
+ /* Wake up another if we have more free. */
+ if (mclfree != NULL)
+ m_clalloc_wakeup();
+ }
+
+ splx(s);
+ return (p);
+}
+
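Cluster allocation thus gets the same semantics: with M_WAIT, MCLALLOC and
MCLGET may sleep here and still produce nothing once mbuf_wait ticks expire.
A sketch of the long-standing caller idiom under the new rules (illustrative
only; example_getcl is a hypothetical function):

#include <sys/param.h>
#include <sys/mbuf.h>

static struct mbuf *
example_getcl(void)
{
	struct mbuf *m;

	MGET(m, M_WAIT, MT_DATA);
	if (m == NULL)
		return (NULL);		/* mbuf wait timed out */
	MCLGET(m, M_WAIT);		/* may sleep in m_clalloc_wait() */
	if ((m->m_flags & M_EXT) == 0) {
		m_free(m);		/* waited, timed out: no cluster */
		return (NULL);
	}
	return (m);
}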
+/*
* When MGET fails, ask protocols to free space when short of memory,
* then re-attempt to allocate an mbuf.
*/
@@ -265,19 +364,30 @@ m_retry(i, t)
/*
* Must only do the reclaim if not in an interrupt context.
*/
- if (i == M_WAIT)
+ if (i == M_WAIT) {
+#ifdef __i386__
+ KASSERT(intr_nesting_level == 0,
+ ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
+#endif
m_reclaim();
+ }
+
+ /*
+ * Both m_mballoc_wait and m_retry must be nulled because
+ * when the MGET macro is run from here, we definitely do _not_
+ * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
+ */
+#define m_mballoc_wait(caller,type) (struct mbuf *)0
#define m_retry(i, t) (struct mbuf *)0
MGET(m, i, t);
#undef m_retry
- if (m != NULL) {
+#undef m_mballoc_wait
+
+ if (m != NULL)
mbstat.m_wait++;
- } else {
- if (i == M_DONTWAIT)
- mbstat.m_drops++;
- else
- panic("Out of mbuf clusters");
- }
+ else
+ mbstat.m_drops++;
+
return (m);
}
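The #define/#undef dance above exists because MGET itself expands to a call
to m_retry()/m_mballoc_wait() on failure; shadowing those names for the one
expansion turns the would-be recursive calls into the constant null pointer.
The same preprocessor technique in a standalone, user-level sketch
(hypothetical names, compilable with any C compiler):

#include <stdio.h>

static int fallback(void) { return (-1); }	/* stands in for m_retry() */

/* ALLOC falls back to fallback() on failure, the way MGET falls back
 * to m_retry()/m_mballoc_wait(). */
#define ALLOC(ok)	((ok) ? 42 : fallback())

static int
alloc_once(void)
{
	int v;

	/* Shadow fallback so this one expansion cannot re-enter it... */
#define fallback() 0
	v = ALLOC(0);			/* expands to ((0) ? 42 : 0) */
#undef fallback
	/* ...then restore the real function for everyone else. */
	return (v);
}

int
main(void)
{
	printf("%d\n", alloc_once());	/* prints 0: the nulled fallback */
	return (0);
}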
@@ -293,19 +403,25 @@ m_retryhdr(i, t)
/*
* Must only do the reclaim if not in an interrupt context.
*/
- if (i == M_WAIT)
+ if (i == M_WAIT) {
+#ifdef __i386__
+ KASSERT(intr_nesting_level == 0,
+ ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
+#endif
m_reclaim();
+ }
+
+#define m_mballoc_wait(caller,type) (struct mbuf *)0
#define m_retryhdr(i, t) (struct mbuf *)0
MGETHDR(m, i, t);
#undef m_retryhdr
- if (m != NULL) {
+#undef m_mballoc_wait
+
+ if (m != NULL)
mbstat.m_wait++;
- } else {
- if (i == M_DONTWAIT)
- mbstat.m_drops++;
- else
- panic("Out of mbuf clusters");
- }
+ else
+ mbstat.m_drops++;
+
return (m);
}
@@ -1034,7 +1150,7 @@ void
m_print(const struct mbuf *m)
{
int len;
- const struct mbuf *m2;
+ struct mbuf *m2;
len = m->m_pkthdr.len;
m2 = m;