author     bmilekic <bmilekic@FreeBSD.org>  2001-03-24 23:47:52 +0000
committer  bmilekic <bmilekic@FreeBSD.org>  2001-03-24 23:47:52 +0000
commit     ab0c3d9c1a6d8d4b0ff2011b208ca24b5e45e83d (patch)
tree       1e8309d56e2a292ab526ff5e1f2bd5ca6e862bed
parent     11442775804b9679c2c761846072b171d2507840 (diff)
Move the atomic increment of mbstat.m_drops into the MGET(HDR) and
MCLGET macros to avoid incrementing the drop count twice. Otherwise,
in some cases, we could increment m_drops once in m_mballoc(), for
example, and again in m_mballoc_wait() if the wait fails.
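
In outline, the bug and the fix look like the sketch below. The names
(alloc_direct(), alloc_wait(), get_obj(), drops) are hypothetical
stand-ins, not the FreeBSD code: the point is simply that when both the
direct allocator and its wait path count their own failure, one failed
allocation can be counted twice, so the increment belongs at the single
outermost entry point (here, the macros).

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_long drops;               /* analogue of mbstat.m_drops */

/* Hypothetical direct allocator: simulate "map full", as m_mballoc()
 * does when it returns 0. */
static void *
alloc_direct(void)
{
        return (NULL);
}

/* Hypothetical wait path: simulate the wait failing too, as in
 * m_mballoc_wait(). */
static void *
alloc_wait(void)
{
        return (NULL);
}

/* Analogue of the MGET/MGETHDR/MCLGET macros after this commit: the
 * drop is counted here, once, whichever inner path failed. Before the
 * commit, both inner paths bumped the counter themselves, so a single
 * failed get could be counted twice. */
static void *
get_obj(void)
{
        void *p = alloc_direct();

        if (p == NULL)
                p = alloc_wait();
        if (p == NULL)
                atomic_fetch_add(&drops, 1);
        return (p);
}

int
main(void)
{
        (void)get_obj();
        printf("drops = %ld\n", atomic_load(&drops));   /* 1, not 2 */
        return (0);
}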
-rw-r--r--  sys/kern/uipc_mbuf.c  24
-rw-r--r--  sys/sys/mbuf.h        11
2 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index b5569cd..5a23f67 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -245,16 +245,8 @@ m_mballoc(int nmb, int how)
* Also, once we run out of map space, it will be impossible to
* get any more (nothing is ever freed back to the map).
*/
- if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
- /*
- * Needs to be atomic as we may be incrementing it
- * while holding another mutex, like mclfree. In other
- * words, m_drops is not reserved solely for mbufs,
- * but is also available for clusters.
- */
- atomic_add_long(&mbstat.m_drops, 1);
+ if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs))
return (0);
- }
mtx_unlock(&mmbfree.m_mtx);
p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
@@ -343,8 +335,7 @@ m_mballoc_wait(void)
atomic_add_long(&mbstat.m_wait, 1);
if (mmbfree.m_head != NULL)
MBWAKEUP(m_mballoc_wid);
- } else
- atomic_add_long(&mbstat.m_drops, 1);
+ }
return (p);
}
@@ -370,10 +361,8 @@ m_clalloc(int ncl, int how)
* If we've hit the mcluster number limit, stop allocating from
* mb_map.
*/
- if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
- atomic_add_long(&mbstat.m_drops, 1);
+ if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters))
return (0);
- }
mtx_unlock(&mclfree.m_mtx);
p = (caddr_t)kmem_malloc(mb_map, npg_sz,
@@ -384,10 +373,8 @@ m_clalloc(int ncl, int how)
* Either the map is now full, or `how' is M_DONTWAIT and there
* are no pages left.
*/
- if (p == NULL) {
- atomic_add_long(&mbstat.m_drops, 1);
+ if (p == NULL)
return (0);
- }
/*
* We don't let go of the mutex in order to avoid a race.
@@ -429,8 +416,7 @@ m_clalloc_wait(void)
atomic_add_long(&mbstat.m_wait, 1);
if (mclfree.m_head != NULL)
MBWAKEUP(m_clalloc_wid);
- } else
- atomic_add_long(&mbstat.m_drops, 1);
+ }
return (p);
}
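
The deleted comment above explains why these increments use
atomic_add_long(): m_drops is shared by the mbuf and cluster paths,
which run under different mutexes (mmbfree's and mclfree's), so no
single lock protects the counter. A rough userland C11 analogue of
that reasoning, with hypothetical names, showing how a plain long
loses increments where an atomic does not:

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static long        plain_drops;    /* unprotected: increments can be lost */
static atomic_long atomic_drops;   /* like atomic_add_long(&mbstat.m_drops, 1) */

/* Stand-in for the mbuf path and the cluster path, each of which may
 * bump the shared counter while holding its own, different lock. */
static void *
path(void *arg)
{
        (void)arg;
        for (int i = 0; i < 1000000; i++) {
                plain_drops++;                    /* racy read-modify-write */
                atomic_fetch_add(&atomic_drops, 1);
        }
        return (NULL);
}

int
main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, path, NULL);
        pthread_create(&t2, NULL, path, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* atomic_drops is exactly 2000000; plain_drops is usually less. */
        printf("plain %ld atomic %ld\n", plain_drops,
            atomic_load(&atomic_drops));
        return (0);
}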
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index 8fbf7b4..2471f7c 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -377,8 +377,10 @@ struct mcntfree_lst {
mbtypes[_mtype]++; \
mtx_unlock(&mmbfree.m_mtx); \
_MGET_SETUP(_mm, _mtype); \
- } else \
+ } else { \
mtx_unlock(&mmbfree.m_mtx); \
+ atomic_add_long(&mbstat.m_drops, 1); \
+ } \
(m) = _mm; \
} while (0)
@@ -404,8 +406,10 @@ struct mcntfree_lst {
mbtypes[_mtype]++; \
mtx_unlock(&mmbfree.m_mtx); \
_MGETHDR_SETUP(_mm, _mtype); \
- } else \
+ } else { \
mtx_unlock(&mmbfree.m_mtx); \
+ atomic_add_long(&mbstat.m_drops, 1); \
+ } \
(m) = _mm; \
} while (0)
@@ -453,7 +457,8 @@ struct mcntfree_lst {
_mm->m_ext.ext_size = MCLBYTES; \
_mm->m_ext.ext_type = EXT_CLUSTER; \
} \
- } \
+ } else \
+ atomic_add_long(&mbstat.m_drops, 1); \
} while (0)
#define MEXTADD(m, buf, size, free, args, flags, type) do { \
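
Note why the mbuf.h hunks add braces around the else arms: once the
branch grows from one statement to two, an unbraced else in a
backslash-continued macro silently leaves the second statement outside
the conditional. A contrived sketch of the bug the braces prevent
(COUNT_FAIL_BAD and COUNT_FAIL_GOOD are hypothetical, not FreeBSD
macros):

#include <stdio.h>

static int drops;

/* BROKEN: without braces, only puts() is in the else arm; drops++
 * runs unconditionally, even on success. */
#define COUNT_FAIL_BAD(ok) do {                 \
        if (ok)                                 \
                puts("allocated");              \
        else                                    \
                puts("failed");                 \
                drops++;                        \
} while (0)

/* FIXED: braces keep both statements inside the else, as in the
 * MGET/MGETHDR change above. */
#define COUNT_FAIL_GOOD(ok) do {                \
        if (ok)                                 \
                puts("allocated");              \
        else {                                  \
                puts("failed");                 \
                drops++;                        \
        }                                       \
} while (0)

int
main(void)
{
        drops = 0;
        COUNT_FAIL_BAD(1);      /* succeeds, yet drops becomes 1 */
        printf("bad macro:  drops = %d\n", drops);

        drops = 0;
        COUNT_FAIL_GOOD(1);     /* succeeds, drops stays 0 */
        printf("good macro: drops = %d\n", drops);
        return (0);
}

The do { ... } while (0) wrapper in these macros serves the same
purpose one level up: it makes the whole macro body behave as a single
statement after an unbraced if or else at the call site.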