author     bmilekic <bmilekic@FreeBSD.org>  2001-01-16 01:53:13 +0000
committer  bmilekic <bmilekic@FreeBSD.org>  2001-01-16 01:53:13 +0000
commit     3650624f860c7421853deb5ba07cd75ea8b4128a (patch)
tree       353164b25a0588e4b31024b05c340c8b9dd86fc2
parent     20a8a23d2b99a3392aeb543a6d96167366f82e2a (diff)
Add some KASSERTs, valid only when WITNESS is defined, to verify that the
mbuf allocation routines are being called safely. Since we drop our
relevant mbuf mutex and acquire Giant before we call kmem_malloc(), we
have to make sure that this does not pave the way for a fatal lock order
reversal. Check that either Giant is already held (in which case it's
safe to grab it again and recurse on it) or, if Giant is not held, that
no other locks are held before we try to acquire Giant. Similarly, add a
KASSERT, valid in the WITNESS case, in m_reclaim() to nail callers who
end up in m_reclaim() while holding a lock.

Pointed out by:  jhb
 sys/kern/uipc_mbuf.c | 43
 1 file changed, 39 insertions(+), 4 deletions(-)
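
For reference, the shape these assertions police is the brief lock handoff
around kmem_malloc(): drop the subsystem's freelist mutex, take Giant for
the VM call, then reacquire the freelist mutex on the way out. Below is a
condensed sketch of that pattern, not the committed code; alloc_pages_sketch
and subsys_mtx are illustrative stand-ins for the real allocators and their
freelist mutexes (mcntfree.m_mtx, mmbfree.m_mtx, mclfree.m_mtx), while all
other identifiers appear in the diff itself:

	/*
	 * Condensed sketch of the handoff pattern; assumes the usual
	 * kernel headers (<sys/param.h>, <sys/mutex.h>, <vm/vm_kern.h>).
	 */
	static caddr_t
	alloc_pages_sketch(struct mtx *subsys_mtx, u_long nbytes, int how)
	{
		caddr_t p;

		mtx_exit(subsys_mtx, MTX_DEF);	/* drop our own mutex first */
	#ifdef WITNESS
		/*
		 * Taking Giant is safe only if we already own it (recursing
		 * on it is fine) or if we hold no locks at all; otherwise we
		 * could deadlock against a thread that acquires Giant before
		 * subsys_mtx.
		 */
		KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,
		    ("alloc_pages_sketch: Giant must be owned or no locks held"));
	#endif
		mtx_enter(&Giant, MTX_DEF);	/* kmem_malloc() needs Giant */
		p = (caddr_t)kmem_malloc(mb_map, nbytes,
		    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
		mtx_exit(&Giant, MTX_DEF);
		mtx_enter(subsys_mtx, MTX_DEF);	/* hold it again going out */
		return (p);
	}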
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 4afacb2..9c24370 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -205,12 +205,22 @@ m_alloc_ref(nmb, how)
nbytes = round_page(nmb * sizeof(union mext_refcnt));
mtx_exit(&mcntfree.m_mtx, MTX_DEF);
+#ifdef WITNESS
+ /*
+ * XXX: Make sure we don't create lock order problems.
+ * XXX: We'll grab Giant, but for that to be OK, make sure
+ * XXX: that either Giant is already held OR make sure that
+ * XXX: no other locks are held coming in.
+ * XXX: Revisit once most of the net stuff gets locks added.
+ */
+ KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,
+ ("m_alloc_ref: Giant must be owned or no locks held"));
+#endif
mtx_enter(&Giant, MTX_DEF);
if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
M_WAITOK : M_NOWAIT)) == NULL) {
mtx_exit(&Giant, MTX_DEF);
- mtx_enter(&mcntfree.m_mtx, MTX_DEF); /* XXX: We must be holding
-    it going out. */
+ mtx_enter(&mcntfree.m_mtx, MTX_DEF);
return (0);
}
mtx_exit(&Giant, MTX_DEF);
@@ -264,9 +274,18 @@ m_mballoc(nmb, how)
nbytes = round_page(nmb * MSIZE);
- /* XXX: The letting go of the mmbfree lock here may eventually
-    be moved to only be done for M_TRYWAIT calls to kmem_malloc() */
mtx_exit(&mmbfree.m_mtx, MTX_DEF);
+#ifdef WITNESS
+ /*
+ * XXX: Make sure we don't create lock order problems.
+ * XXX: We'll grab Giant, but for that to be OK, make sure
+ * XXX: that either Giant is already held OR make sure that
+ * XXX: no other locks are held coming in.
+ * XXX: Revisit once most of the net stuff gets locks added.
+ */
+ KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,
+ ("m_mballoc: Giant must be owned or no locks held"));
+#endif
mtx_enter(&Giant, MTX_DEF);
p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
if (p == 0 && how == M_TRYWAIT) {
@@ -390,6 +409,17 @@ m_clalloc(ncl, how)
npg = ncl;
mtx_exit(&mclfree.m_mtx, MTX_DEF);
+#ifdef WITNESS
+ /*
+ * XXX: Make sure we don't create lock order problems.
+ * XXX: We'll grab Giant, but for that to be OK, make sure
+ * XXX: that either Giant is already held OR make sure that
+ * XXX: no other locks are held coming in.
+ * XXX: Revisit once most of the net stuff gets locks added.
+ */
+ KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0,
+ ("m_clalloc: Giant must be owned or no locks held"));
+#endif
mtx_enter(&Giant, MTX_DEF);
p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
@@ -467,6 +497,11 @@ m_reclaim()
register struct domain *dp;
register struct protosw *pr;
+#ifdef WITNESS
+ KASSERT(witness_list(CURPROC) == 0,
+ ("m_reclaim called with locks held"));
+#endif
+
for (dp = domains; dp; dp = dp->dom_next)
for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
if (pr->pr_drain)
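
The m_reclaim() assertion targets callers rather than m_reclaim() itself:
the pr_drain hooks it walks may take their own locks, so entering with
anything held invites the same kind of lock order reversal. A hypothetical
caller that would trip the check (foo_mtx is illustrative, not from the
tree):

	mtx_enter(&foo_mtx, MTX_DEF);
	m_reclaim();	/* KASSERT fires: witness_list(CURPROC) != 0 */
	mtx_exit(&foo_mtx, MTX_DEF);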