summaryrefslogtreecommitdiffstats
path: root/contrib/libcxxrt/guard.cc
diff options
context:
space:
mode:
author: dim <dim@FreeBSD.org> 2015-02-01 02:29:58 +0000
committer: dim <dim@FreeBSD.org> 2015-02-01 02:29:58 +0000
commit2ccbfd1b8e87d9e16738583f1cb19868ea5ad479 (patch)
treea0f122f4b76eb51437a7dd525aaf7fc88d4f887f /contrib/libcxxrt/guard.cc
parent111a0cf771f31b0fdae5d6e0e7cbd3277e702172 (diff)
downloadFreeBSD-src-2ccbfd1b8e87d9e16738583f1cb19868ea5ad479.zip
FreeBSD-src-2ccbfd1b8e87d9e16738583f1cb19868ea5ad479.tar.gz
Revert r278017 (MFC of r273381 and r276417) and r278018 (MFC of r273434)
until I can figure out why it leads to link failures in some scenarios.
Diffstat (limited to 'contrib/libcxxrt/guard.cc')
-rw-r--r-- contrib/libcxxrt/guard.cc | 105
1 file changed, 32 insertions(+), 73 deletions(-)
diff --git a/contrib/libcxxrt/guard.cc b/contrib/libcxxrt/guard.cc
index f1c4c69..f0c26ab 100644
--- a/contrib/libcxxrt/guard.cc
+++ b/contrib/libcxxrt/guard.cc
@@ -70,45 +70,18 @@
#ifdef __arm__
// ARM ABI - 32-bit guards.
typedef uint32_t guard_t;
-typedef uint32_t guard_lock_t;
-static const uint32_t LOCKED = static_cast<guard_t>(1) << 31;
+static const uint32_t LOCKED = ((guard_t)1) << 31;
static const uint32_t INITIALISED = 1;
-#define LOCK_PART(guard) (guard)
-#define INIT_PART(guard) (guard)
-#elif defined(_LP64)
+#else
typedef uint64_t guard_t;
-typedef uint64_t guard_lock_t;
# if defined(__LITTLE_ENDIAN__)
-static const guard_t LOCKED = static_cast<guard_t>(1) << 63;
+static const guard_t LOCKED = ((guard_t)1) << 63;
static const guard_t INITIALISED = 1;
# else
static const guard_t LOCKED = 1;
-static const guard_t INITIALISED = static_cast<guard_t>(1) << 56;
-# endif
-#define LOCK_PART(guard) (guard)
-#define INIT_PART(guard) (guard)
-#else
-typedef uint32_t guard_lock_t;
-# if defined(__LITTLE_ENDIAN__)
-typedef struct {
- uint32_t init_half;
- uint32_t lock_half;
-} guard_t;
-static const uint32_t LOCKED = static_cast<guard_lock_t>(1) << 31;
-static const uint32_t INITIALISED = 1;
-# else
-typedef struct {
- uint32_t init_half;
- uint32_t lock_half;
-} guard_t;
-_Static_assert(sizeof(guard_t) == sizeof(uint64_t), "");
-static const uint32_t LOCKED = 1;
-static const uint32_t INITIALISED = static_cast<guard_lock_t>(1) << 24;
+static const guard_t INITIALISED = ((guard_t)1) << 56;
# endif
-#define LOCK_PART(guard) (&(guard)->lock_half)
-#define INIT_PART(guard) (&(guard)->init_half)
#endif
-static const guard_lock_t INITIAL = 0;
/**
* Acquires a lock on a guard, returning 0 if the object has already been
@@ -117,49 +90,42 @@ static const guard_lock_t INITIAL = 0;
*/
extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object)
{
- guard_lock_t old;
// Not an atomic read, doesn't establish a happens-before relationship, but
// if one is already established and we end up seeing an initialised state
// then it's a fast path, otherwise we'll do something more expensive than
// this test anyway...
- if (INITIALISED == *INIT_PART(guard_object))
- return 0;
+ if ((INITIALISED == *guard_object)) { return 0; }
// Spin trying to do the initialisation
- for (;;)
+ while (1)
{
// Loop trying to move the value of the guard from 0 (not
// locked, not initialised) to the locked-uninitialised
// position.
- old = __sync_val_compare_and_swap(LOCK_PART(guard_object),
- INITIAL, LOCKED);
- if (old == INITIAL) {
- // Lock obtained. If lock and init bit are
- // in separate words, check for init race.
- if (INIT_PART(guard_object) == LOCK_PART(guard_object))
+ switch (__sync_val_compare_and_swap(guard_object, 0, LOCKED))
+ {
+ // If the old value was 0, we succeeded, so continue
+ // initialising
+ case 0:
return 1;
- if (INITIALISED != *INIT_PART(guard_object))
- return 1;
-
- // No need for a memory barrier here,
- // see first comment.
- *LOCK_PART(guard_object) = INITIAL;
- return 0;
+ // If this was already initialised, return and let the caller skip
+ // initialising it again.
+ case INITIALISED:
+ return 0;
+ // If it is locked by another thread, relinquish the CPU and try
+ // again later.
+ case LOCKED:
+ case LOCKED | INITIALISED:
+ sched_yield();
+ break;
+ // If it is some other value, then something has gone badly wrong.
+ // Give up.
+ default:
+ fprintf(stderr, "Invalid state detected attempting to lock static initialiser.\n");
+ abort();
}
- // If lock and init bit are in the same word, check again
- // if we are done.
- if (INIT_PART(guard_object) == LOCK_PART(guard_object) &&
- old == INITIALISED)
- return 0;
-
- assert(old == LOCKED);
- // Another thread holds the lock.
- // If lock and init bit are in different words, check
- // if we are done before yielding and looping.
- if (INIT_PART(guard_object) != LOCK_PART(guard_object) &&
- INITIALISED == *INIT_PART(guard_object))
- return 0;
- sched_yield();
}
+ //__builtin_unreachable();
+ return 0;
}
/**
@@ -169,8 +135,7 @@ extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object)
extern "C" void __cxa_guard_abort(volatile guard_t *guard_object)
{
__attribute__((unused))
- bool reset = __sync_bool_compare_and_swap(LOCK_PART(guard_object),
- LOCKED, INITIAL);
+ bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, 0);
assert(reset);
}
/**
@@ -179,15 +144,9 @@ extern "C" void __cxa_guard_abort(volatile guard_t *guard_object)
*/
extern "C" void __cxa_guard_release(volatile guard_t *guard_object)
{
- guard_lock_t old;
- if (INIT_PART(guard_object) == LOCK_PART(guard_object))
- old = LOCKED;
- else
- old = INITIAL;
__attribute__((unused))
- bool reset = __sync_bool_compare_and_swap(INIT_PART(guard_object),
- old, INITIALISED);
+ bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, INITIALISED);
assert(reset);
- if (INIT_PART(guard_object) != LOCK_PART(guard_object))
- *LOCK_PART(guard_object) = INITIAL;
}
+
+
OpenPOWER on IntegriCloud