author     dwhite <dwhite@FreeBSD.org>	2005-03-03 02:41:37 +0000
committer  dwhite <dwhite@FreeBSD.org>	2005-03-03 02:41:37 +0000
commit     ac2dd8b794f18323edf3ffedbc2801a1c96ec278 (patch)
tree       7d32629c23e471639588b4aaeb9f116a6e1e6b22
parent     3ec33a490c4e69e6bb5eb620245695e740311bb9 (diff)
Insert volatile cast to discourage gcc from optimizing the read outside
of the while loop.

Suggested by:	alc
MFC after:	1 day
-rw-r--r--	sys/kern/uipc_mbuf.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 4d832a8..1f6069e 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -234,9 +234,12 @@ mb_free_ext(struct mbuf *m)
* This is tricky. We need to make sure to decrement the
* refcount in a safe way but to also clean up if we're the
* last reference. This method seems to do it without race.
+ * The volatile cast is required to emit the proper load
+ * instructions. Otherwise gcc will optimize the read outside
+ * of the while loop.
*/
while (dofree == 0) {
- cnt = *(m->m_ext.ref_cnt);
+ cnt = *(volatile u_int *)(m->m_ext.ref_cnt);
if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
if (cnt == 1)
dofree = 1;
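
For reference, below is a minimal userland sketch of the read-then-CAS
refcount drop that this change hardens. The struct object and
object_release() names are illustrative only, not from the FreeBSD
source, and the gcc builtin __sync_bool_compare_and_swap() stands in
for the kernel's atomic_cmpset_int().

	#include <stdio.h>
	#include <stdlib.h>

	struct object {
		unsigned int	*ref_cnt;	/* shared reference count */
		void		*payload;	/* data guarded by the count */
	};

	static void
	object_release(struct object *obj)
	{
		unsigned int cnt;
		int dofree = 0;

		while (dofree == 0) {
			/*
			 * The volatile cast forces a fresh load on every
			 * iteration; otherwise the compiler may hoist the
			 * read out of the loop and retry the CAS with a
			 * stale value.
			 */
			cnt = *(volatile unsigned int *)obj->ref_cnt;
			if (__sync_bool_compare_and_swap(obj->ref_cnt, cnt,
			    cnt - 1)) {
				if (cnt == 1)
					dofree = 1;	/* last reference */
				break;
			}
			/* CAS lost a race; reload the count and retry. */
		}
		if (dofree) {
			free(obj->payload);
			free(obj->ref_cnt);
			free(obj);
		}
	}

	int
	main(void)
	{
		struct object *obj = malloc(sizeof(*obj));

		obj->ref_cnt = malloc(sizeof(*obj->ref_cnt));
		*obj->ref_cnt = 1;	/* single owner */
		obj->payload = malloc(16);
		object_release(obj);	/* last reference: frees everything */
		printf("object released\n");
		return (0);
	}

The volatile cast matters because the CAS can fail when another CPU
changes the count between the load and the cmpset; on retry the count
must be re-read from memory, and without volatile the compiler is free
to keep the first load in a register and spin on the stale value.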