summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  sys/kern/uipc_mbuf.c  10
-rw-r--r--  sys/sys/mbuf.h         2
2 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 1f6069e..5e65572 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -199,7 +199,7 @@ m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
mb->m_ext.ext_buf);
else if (type == EXT_EXTREF)
- ref_cnt = mb->m_ext.ref_cnt;
+ ref_cnt = __DEVOLATILE(u_int *, mb->m_ext.ref_cnt);
mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
if (mb->m_ext.ref_cnt != NULL) {
@@ -234,12 +234,9 @@ mb_free_ext(struct mbuf *m)
* This is tricky. We need to make sure to decrement the
* refcount in a safe way but to also clean up if we're the
* last reference. This method seems to do it without race.
- * The volatile cast is required to emit the proper load
- * instructions. Otherwise gcc will optimize the read outside
- * of the while loop.
*/
while (dofree == 0) {
- cnt = *(volatile u_int *)(m->m_ext.ref_cnt);
+ cnt = *(m->m_ext.ref_cnt);
if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
if (cnt == 1)
dofree = 1;
@@ -262,7 +259,8 @@ mb_free_ext(struct mbuf *m)
m->m_ext.ext_args);
if (m->m_ext.ext_type != EXT_EXTREF) {
if (m->m_ext.ref_cnt != NULL)
- free(m->m_ext.ref_cnt, M_MBUF);
+ free(__DEVOLATILE(u_int *,
+ m->m_ext.ref_cnt), M_MBUF);
m->m_ext.ref_cnt = NULL;
}
m->m_ext.ext_buf = NULL;
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index 1621fa7..0703916 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -122,7 +122,7 @@ struct m_ext {
(void *, void *);
void *ext_args; /* optional argument pointer */
u_int ext_size; /* size of buffer, for ext_free */
- u_int *ref_cnt; /* pointer to ref count info */
+ volatile u_int *ref_cnt; /* pointer to ref count info */
int ext_type; /* type of external storage */
};
OpenPOWER on IntegriCloud