author     bmilekic <bmilekic@FreeBSD.org>    2004-06-10 00:04:27 +0000
committer  bmilekic <bmilekic@FreeBSD.org>    2004-06-10 00:04:27 +0000
commit     bfeac9f9f9688ca640857e10822d01721474591c (patch)
tree       cf0c4150052fc3fb0da94db193b0300ccbc69e0b /sys
parent     6f5439900b28bd5cb607da87c0273309e64a4963 (diff)
Plug a race where upon free this scenario could occur:

    (time grows downward)

    thread 1      thread 2
    ------------|------------
    dec ref_cnt |
                | dec ref_cnt  <-- ref_cnt now zero
    cmpset      |
    free all    |
    return      |
                |
    alloc again,|
    reuse prev  |
    ref_cnt     |
                |
                | cmpset, read
                | already freed
                | ref_cnt
    ------------|------------

This should fix that by performing a single atomic compare-and-set that
decrements the ref_cnt only if it has not changed since the earlier read;
otherwise the code loops and re-reads. This forces an ordering on the
decrements, so that the thread which truly performed the LAST decrement
is the one that frees. This is how atomic-instruction-based refcounting
should probably be handled.

Submitted by: Julian Elischer
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/uipc_mbuf.c | 45
1 file changed, 30 insertions(+), 15 deletions(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 9fb5c4a..2c8af1f 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -221,23 +221,38 @@ m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
 void
 mb_free_ext(struct mbuf *m)
 {
+	u_int cnt;
 
-	MEXT_REM_REF(m);
-	if (atomic_cmpset_int(m->m_ext.ref_cnt, 0, 1)) {
-		if (m->m_ext.ext_type == EXT_PACKET) {
-			uma_zfree(zone_pack, m);
-			return;
-		} else if (m->m_ext.ext_type == EXT_CLUSTER) {
-			uma_zfree(zone_clust, m->m_ext.ext_buf);
-			m->m_ext.ext_buf = NULL;
-		} else {
-			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
-			    m->m_ext.ext_args);
-			if (m->m_ext.ext_type != EXT_EXTREF)
-				free(m->m_ext.ref_cnt, M_MBUF);
+	/*
+	 * This is tricky.  We need to make sure to decrement the
+	 * refcount in a safe way but to also clean up if we're the
+	 * last reference.  This method seems to do it without race.
+	 */
+	do {
+		cnt = *(m->m_ext.ref_cnt);
+		if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
+			if (cnt == 1) {
+				/*
+				 * Do the free, should be safe.
+				 */
+				if (m->m_ext.ext_type == EXT_PACKET) {
+					uma_zfree(zone_pack, m);
+					break;
+				} else if (m->m_ext.ext_type == EXT_CLUSTER) {
+					uma_zfree(zone_clust, m->m_ext.ext_buf);
+					m->m_ext.ext_buf = NULL;
+				} else {
+					(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
+					    m->m_ext.ext_args);
+					if (m->m_ext.ext_type != EXT_EXTREF)
+						free(m->m_ext.ref_cnt, M_MBUF);
+				}
+				uma_zfree(zone_mbuf, m);
+			}
+			/* Decrement (and potentially free) done, safely. */
+			break;
 		}
-	}
-	uma_zfree(zone_mbuf, m);
+	} while (1);
 }
 
 /*