author:    msmith <msmith@FreeBSD.org>  1999-12-28 06:35:57 +0000
committer: msmith <msmith@FreeBSD.org>  1999-12-28 06:35:57 +0000
commit:    04d30dbb4986de518a295e91749baa37767b3075
tree:      d30b276147b2c47aefcac20e48f7f3f1acce9d7b /sys/kern/uipc_mbuf.c
parent:    cbc12a3b7e07b686c758541786b9f0ee44af2769
Actively limit the allocation of mbufs to NMBUFS/nmbufs and mbuf clusters
to NMBCLUSTERS/nmbclusters/kern.ipc.nmbclusters.

Add a read-only sysctl kern.ipc.nmbufs matching kern.ipc.nmbclusters.

Submitted by:	Bosko Milekic <bmilekic@dsuper.net>
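As context (an editorial note, not part of the commit): with this change in
place, both limits are visible from userland through sysctl(3). A minimal
sketch, assuming a FreeBSD system that carries this commit:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		int nmbufs, nmbclusters;
		size_t len;

		/* kern.ipc.nmbufs is the read-only node added by this commit. */
		len = sizeof(nmbufs);
		if (sysctlbyname("kern.ipc.nmbufs", &nmbufs, &len, NULL, 0) == -1) {
			perror("kern.ipc.nmbufs");
			return (1);
		}
		len = sizeof(nmbclusters);
		if (sysctlbyname("kern.ipc.nmbclusters", &nmbclusters, &len,
		    NULL, 0) == -1) {
			perror("kern.ipc.nmbclusters");
			return (1);
		}
		printf("nmbufs = %d, nmbclusters = %d\n", nmbufs, nmbclusters);
		return (0);
	}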
Diffstat (limited to 'sys/kern/uipc_mbuf.c')
-rw-r--r--  sys/kern/uipc_mbuf.c  |  24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 8c3c4bf..cc38c40 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -81,12 +81,14 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
 	   &mbuf_wait, 0, "");
 SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
-	   &nmbclusters, 0, "Maximum number of mbuf clusters avaliable");
+	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
+SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
+	   "Maximum number of mbufs available");
 
 #ifndef NMBCLUSTERS
 #define NMBCLUSTERS	(512 + MAXUSERS * 16)
 #endif
 TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
-TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);	/* XXX fixup? */
+TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
 
 static void m_reclaim __P((void));
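For a sense of scale (an editorial note, assuming the era's GENERIC default
of maxusers 32): NMBCLUSTERS works out to 512 + 32 * 16 = 1024 clusters, so
nmbufs defaults to 4 * 1024 = 4096 mbufs. Both ceilings can be raised at boot
via the kern.ipc.nmbclusters and kern.ipc.nmbufs loader tunables declared
above.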
@@ -141,6 +143,14 @@ m_mballoc(nmb, how)
 	int nbytes;
 
 	/*
+	 * If we've hit the mbuf limit, stop allocating from mb_map,
+	 * (or trying to) in order to avoid dipping into the section of
+	 * mb_map which we've "reserved" for clusters.
+	 */
+	if ((nmb + mbstat.m_mbufs) > nmbufs)
+		return (0);
+
+	/*
 	 * Once we run out of map space, it will be impossible to get
 	 * any more (nothing is ever freed back to the map)
 	 * -- however you are not dead as m_reclaim might
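The new test is a plain admission check against a counter the allocator
already keeps (mbstat.m_mbufs). A self-contained sketch of the same pattern,
using illustrative names (pool_cur, pool_limit, and pool_alloc are not kernel
identifiers):

	#include <stdio.h>
	#include <stdlib.h>

	static int pool_cur;		/* objects handed out; cf. mbstat.m_mbufs */
	static int pool_limit = 4096;	/* configured ceiling; cf. nmbufs */

	/*
	 * Refuse any request that would push the pool past its ceiling,
	 * mirroring the (nmb + mbstat.m_mbufs) > nmbufs test above.
	 */
	static void *
	pool_alloc(int n)
	{
		if (pool_cur + n > pool_limit)
			return (NULL);	/* caller copes, as with m_mballoc() == 0 */
		pool_cur += n;
		return (malloc((size_t)n * 256));
	}

	int
	main(void)
	{
		void *p = pool_alloc(16);

		printf("allocation %s, pool_cur = %d\n",
		    p != NULL ? "granted" : "refused", pool_cur);
		free(p);
		return (0);
	}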
@@ -267,6 +277,16 @@ m_clalloc(ncl, how)
 	int npg;
 
 	/*
+	 * If we've hit the mcluster number limit, stop allocating from
+	 * mb_map, (or trying to) in order to avoid dipping into the section
+	 * of mb_map which we've "reserved" for mbufs.
+	 */
+	if ((ncl + mbstat.m_clusters) > nmbclusters) {
+		mbstat.m_drops++;
+		return (0);
+	}
+
+	/*
 	 * Once we run out of map space, it will be impossible
 	 * to get any more (nothing is ever freed back to the
 	 * map). From this point on, we solely rely on freed
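Note the asymmetry between the two checks: the cluster path records a refused
request as a drop (mbstat.m_drops++) before returning 0, while the mbuf path
earlier returns 0 without touching that statistic. In both functions a zero
return signals allocation failure to the caller, which, as the surrounding
comments note, can still fall back on m_reclaim rather than failing outright.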