commit ffebbf61bcbfe9454ef61afb4985012a73ad52e0
tree   d95cc09f997768fe9fb30935549a17ce07c84879
parent 2eda4f26f636e106d6bb1c4381d8d45e76755fa7
Author: bmilekic <bmilekic@FreeBSD.org>
Date:   2002-07-24 15:11:23 +0000
Move m_freem() from uipc_mbuf.c to subr_mbuf.c so it can take advantage
of the inlines, like its cousin, m_free().  Also, make a small (first
step?) optimisation of m_freem() to use the MBP_PERSIST{,ENT} interface
to hold the cache lock across frees when possible.  Right now this can
only be done easily across at most one mbuf + one cluster free; the
comment in the code explains why.  Some basic tests revealed a 5-10%
overall improvement.  Some of the results can be found here:
http://people.freebsd.org/~bmilekic/code/measure.txt
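
For readers unfamiliar with the MBP_PERSIST{,ENT} handshake, here is a
minimal userland sketch of the idea, assuming a single mutex-protected
free list; the names (cache_free, CF_*) are invented for illustration
and are simplified from the real mb_free()/cchnum interface in
subr_mbuf.c:

#include <pthread.h>

#define CF_NONE       0    /* take and drop the lock as usual */
#define CF_PERSIST    1    /* return with the lock still held */
#define CF_PERSISTENT 2    /* caller already holds the lock */

struct cache {
        pthread_mutex_t lock;
        void *freelist;            /* head of an intrusive free list */
};

/*
 * Return an object to its cache.  With CF_PERSIST the lock is kept
 * held on return, so an immediately following free can pass
 * CF_PERSISTENT and skip one lock/unlock round trip -- the same trick
 * the new m_freem() below plays for an mbuf+cluster pair.
 */
static void
cache_free(struct cache *c, void *obj, int how)
{
        if (how != CF_PERSISTENT)
                pthread_mutex_lock(&c->lock);
        *(void **)obj = c->freelist;   /* object's first word links it */
        c->freelist = obj;
        if (how != CF_PERSIST)
                pthread_mutex_unlock(&c->lock);
}

static void
free_pair(struct cache *c, void *clust, void *m)
{
        /* Free the cluster, then the mbuf, paying for the lock once. */
        cache_free(c, clust, CF_PERSIST);
        cache_free(c, m, CF_PERSISTENT);
}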
Diffstat (limited to 'sys/kern/subr_mbuf.c')
 sys/kern/subr_mbuf.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/sys/kern/subr_mbuf.c b/sys/kern/subr_mbuf.c
index 7b2ed05..34ecb81 100644
--- a/sys/kern/subr_mbuf.c
+++ b/sys/kern/subr_mbuf.c
@@ -1352,6 +1352,54 @@ m_free(struct mbuf *mb)
}
/*
+ * Free an entire chain of mbufs and associated external buffers, if
+ * applicable. Right now, we only optimize a little so that the cache
+ * lock may be held across a single mbuf+cluster free. Hopefully,
+ * we'll eventually be holding the lock across more than merely two
+ * consecutive frees but right now this is hard to implement because of
+ * things like _mext_dealloc_ref (may do a free()) and atomic ops in the
+ * loop, as well as the fact that we may recurse on m_freem() in
+ * m_pkthdr.aux != NULL cases.
+ *
+ * - mb: the mbuf chain to free.
+ */
+void
+m_freem(struct mbuf *mb)
+{
+ struct mbuf *m;
+ int cchnum;
+ short persist;
+
+ while (mb != NULL) {
+ /* XXX: This check is bogus... please fix (see KAME). */
+ if ((mb->m_flags & M_PKTHDR) != 0 && mb->m_pkthdr.aux) {
+ m_freem(mb->m_pkthdr.aux);
+ mb->m_pkthdr.aux = NULL;
+ }
+ persist = 0;
+ m = mb;
+ mb = mb->m_next;
+ if ((m->m_flags & M_EXT) != 0) {
+ MEXT_REM_REF(m);
+ if (atomic_cmpset_int(m->m_ext.ref_cnt, 0, 1)) {
+ _mext_dealloc_ref(m);
+ if (m->m_ext.ext_type == EXT_CLUSTER) {
+ mb_free(&mb_list_clust,
+ (caddr_t)m->m_ext.ext_buf,
+ MT_NOTMBUF, MBP_PERSIST, &cchnum);
+ persist = MBP_PERSISTENT;
+ } else {
+ (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
+ m->m_ext.ext_args);
+ persist = 0;
+ }
+ }
+ }
+ mb_free(&mb_list_mbuf, m, m->m_type, persist, &cchnum);
+ }
+}
+
+/*
* Fetch an mbuf with a cluster attached to it. If one of the
* allocations fails, the entire allocation fails. This routine is
* the preferred way of fetching both the mbuf and cluster together,
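
A usage note (not part of the commit): callers free whole chains
exactly as before; only the internals changed.  A minimal hypothetical
consumer, relying only on the m_freem() shown above being NULL-safe
(its loop tests mb != NULL):

#include <sys/param.h>
#include <sys/mbuf.h>

/*
 * Hypothetical driver helper: drop an entire packet.  m_freem()
 * walks the m_next chain, releasing each mbuf and, where M_EXT is
 * set, its attached external buffer -- now holding the cache lock
 * across the mbuf+cluster pair when it can.
 */
static void
drop_packet(struct mbuf *m)
{
        m_freem(m);             /* safe even when m is NULL */
}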