author    bmilekic <bmilekic@FreeBSD.org>    2001-03-17 23:23:24 +0000
committer bmilekic <bmilekic@FreeBSD.org>    2001-03-17 23:23:24 +0000
commit    d2fde0df5de4b649915862faeff13a10ba49cbef (patch)
tree      bc990b88bc88e5501502eb63c77dbba65177ec2e /sys/kern/uipc_mbuf.c
parent    bc374be5ebfdc725a8c6a49af3fad14df5dc1d2b (diff)
Fix a couple of things in the internal mbuf allocation interface:
- Make sure that m_mballoc() really doesn't allow over nmbufs mbufs to be
  allocated from mb_map. In the case where the nmbufs-reserved space is not
  an exact multiple of PAGE_SIZE (which it should be, but anyway...), we hold
  nmbufs as an absolute maximum which need not ever be reached.
- Clean up m_clalloc(); make it more consistent in the sense that the first
  argument `ncl' really means "the number of clusters ensured to be
  allocated" and not "the number of pages worth of clusters to be allocated,"
  as was previously the case. This also makes it consistent with m_mballoc()
  as well as the comment that precedes it.

Reviewed by:	jlemon
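For illustration, here is a minimal userland sketch of the rounding
arithmetic the patched m_mballoc() now performs up front. The PAGE_SIZE and
MSIZE values below are hypothetical stand-ins for the kernel constants, and
round_page() is reimplemented locally rather than taken from the VM headers:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel constants. */
    #define PAGE_SIZE	4096	/* bytes per VM page */
    #define MSIZE	256	/* bytes per mbuf */

    /* Local reimplementation of the kernel's round_page(): round a byte
     * count up to the next multiple of PAGE_SIZE (a power of two). */
    #define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int
    main(void)
    {
    	int nmb = 17;	/* caller asks for 17 mbufs */
    	int nbytes;

    	/* As in the patched m_mballoc(): round the request up to whole
    	 * pages first and recompute the mbuf count, so the nmbufs limit
    	 * check sees the number of mbufs kmem_malloc() will really
    	 * produce, instead of discovering it after the allocation. */
    	nbytes = round_page(nmb * MSIZE);
    	nmb = nbytes / MSIZE;

    	printf("17 mbufs requested -> %d bytes -> %d mbufs\n", nbytes, nmb);
    	return (0);
    }

With 4 KB pages and 256-byte mbufs, a request for 17 mbufs rounds up to 8192
bytes, i.e. 32 mbufs, which is the count the limit check must account for.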
Diffstat (limited to 'sys/kern/uipc_mbuf.c')
 sys/kern/uipc_mbuf.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index f825c4d..b5569cd 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -237,6 +237,9 @@ m_mballoc(int nmb, int how)
 	int i;
 	int nbytes;
 
+	nbytes = round_page(nmb * MSIZE);
+	nmb = nbytes / MSIZE;
+
 	/*
 	 * If we've hit the mbuf limit, stop allocating from mb_map.
 	 * Also, once we run out of map space, it will be impossible to
@@ -253,8 +256,6 @@ m_mballoc(int nmb, int how)
 		return (0);
 	}
 
-	nbytes = round_page(nmb * MSIZE);
-
 	mtx_unlock(&mmbfree.m_mtx);
 	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
 	if (p == NULL && how == M_TRYWAIT) {
@@ -270,8 +271,6 @@ m_mballoc(int nmb, int how)
 	if (p == NULL)
 		return (0);
 
-	nmb = nbytes / MSIZE;
-
 	/*
 	 * We don't let go of the mutex in order to avoid a race.
 	 * It is up to the caller to let go of the mutex when done
@@ -361,7 +360,10 @@ m_clalloc(int ncl, int how)
 {
 	caddr_t p;
 	int i;
-	int npg;
+	int npg_sz;
+
+	npg_sz = round_page(ncl * MCLBYTES);
+	ncl = npg_sz / MCLBYTES;
 
 	/*
 	 * If the map is now full (nothing will ever be freed to it).
@@ -373,11 +375,9 @@ m_clalloc(int ncl, int how)
 		return (0);
 	}
 
-	npg = ncl;
 	mtx_unlock(&mclfree.m_mtx);
-	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
+	p = (caddr_t)kmem_malloc(mb_map, npg_sz,
 	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
-	ncl = ncl * PAGE_SIZE / MCLBYTES;
 	mtx_lock(&mclfree.m_mtx);
 
 	/*
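A similar sketch, again with hypothetical stand-in constants, contrasts the
two meanings of m_clalloc()'s first argument `ncl' before and after this
change (pages worth of clusters vs. number of clusters ensured):

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel constants. */
    #define PAGE_SIZE	4096	/* bytes per VM page */
    #define MCLBYTES	2048	/* bytes per mbuf cluster */

    #define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int
    main(void)
    {
    	int ncl = 3;	/* caller wants at least 3 clusters */
    	int npg_sz;

    	/* New meaning: `ncl' is the number of clusters ensured to be
    	 * allocated; the byte total is rounded up to whole pages and
    	 * the cluster count recomputed, as m_mballoc() already does. */
    	npg_sz = round_page(ncl * MCLBYTES);
    	printf("new: ncl=3 -> %d bytes -> %d clusters\n",
    	    npg_sz, npg_sz / MCLBYTES);

    	/* Old meaning: `ncl' was treated as a page count (ctob(npg)
    	 * bytes), so the same call mapped 3 whole pages and yielded
    	 * ncl * PAGE_SIZE / MCLBYTES clusters. */
    	printf("old: ncl=3 -> %d bytes -> %d clusters\n",
    	    ncl * PAGE_SIZE, ncl * PAGE_SIZE / MCLBYTES);
    	return (0);
    }

Under these assumptions the new code maps 8192 bytes and guarantees 4
clusters, while the old code mapped 12288 bytes and produced 6, twice what
the caller asked for.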