Diffstat (limited to 'contrib/libc++/src/memory.cpp')

 contrib/libc++/src/memory.cpp | 51 +++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/contrib/libc++/src/memory.cpp b/contrib/libc++/src/memory.cpp
index 15e947f..8569faf 100644
--- a/contrib/libc++/src/memory.cpp
+++ b/contrib/libc++/src/memory.cpp
@@ -96,7 +96,35 @@ __shared_weak_count::__release_shared() _NOEXCEPT
void
__shared_weak_count::__release_weak() _NOEXCEPT
{
- if (decrement(__shared_weak_owners_) == -1)
+ // NOTE: The acquire load here is an optimization of the very
+ // common case where a shared pointer is being destructed while
+ // having no other contended references.
+ //
+ // BENEFIT: We avoid expensive atomic read-modify-writes such
+ // as XADD and STREX in a common case. Those instructions are
+ // slow and do nasty things to caches.
+ //
+ // IS THIS SAFE? Yes. During weak destruction, if we see that
+ // we are the last reference, we know that no one else is
+ // accessing us. If someone were accessing us, they would be
+ // doing so while the last shared_ptr / weak_ptr was being
+ // destructed, and that is undefined behavior anyway.
+ //
+ // If we see anything other than 0, then we have possible
+ // contention and need to use an atomic read-modify-write
+ // primitive.
+ //
+ // The same argument doesn't apply to increment, where it is
+ // legal (though inadvisable) to share shared_ptr references
+ // between threads and have them all get copied at once. It
+ // also doesn't apply to __release_shared, because an
+ // outstanding weak_ptr::lock() could read / modify the shared
+ // count.
+ if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
+ {
+ // no need to do this store, because we are about
+ // to destroy everything.
+ //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
+ __on_zero_shared_weak();
+ }
+ else if (decrement(__shared_weak_owners_) == -1)
__on_zero_shared_weak();
}
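
The fast path above can be sketched with standard atomics. The following is a minimal standalone illustration, not libc++'s actual code: it uses std::atomic in place of the internal __libcpp_atomic_* helpers, and ref_counted, owners, and destroy() are invented names. As in libc++, a stored count of 0 means exactly one outstanding reference.

    #include <atomic>
    #include <cstdio>

    struct ref_counted
    {
        // As with __shared_weak_owners_, a stored value of 0 means
        // exactly one outstanding reference (count is "owners - 1").
        std::atomic<long> owners{0};

        void release()
        {
            // Fast path: an acquire load is far cheaper than a
            // lock-prefixed XADD (x86) or an LL/SC loop (ARM).
            // Seeing 0 means we hold the last reference, so no other
            // thread may legally touch the object and the
            // read-modify-write can be skipped entirely.
            if (owners.load(std::memory_order_acquire) == 0)
            {
                destroy();
                return;
            }
            // Slow path: possible contention, decrement atomically.
            // fetch_sub returns the previous value, so 0 means some
            // other thread released between our load and this point.
            if (owners.fetch_sub(1, std::memory_order_acq_rel) == 0)
                destroy();
        }

        void destroy()
        {
            std::puts("last reference dropped");
            delete this;
        }
    };

    int main()
    {
        ref_counted* r = new ref_counted; // one reference, owners == 0
        r->release();                     // takes the fast path
    }

The acquire ordering matters: it synchronizes with the release decrement performed by whichever thread dropped the second-to-last reference, so that thread's writes to the object are visible here before destruction.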
@@ -126,8 +154,8 @@ __shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
-static const std::size_t __sp_mut_count = 16;
-static __libcpp_mutex_t mut_back_imp[__sp_mut_count] =
+_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
+_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
_LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
_LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
@@ -135,8 +163,6 @@ static __libcpp_mutex_t mut_back_imp[__sp_mut_count] =
_LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};
-static mutex* mut_back = reinterpret_cast<std::mutex*>(mut_back_imp);
-
_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
: __lx(p)
{
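
_LIBCPP_SAFE_STATIC marks these globals as requiring constant initialization, so the mutex table is ready before any dynamic initializer can touch a shared_ptr; on supporting compilers the macro expands to __attribute__((__require_constant_initialization__)). A sketch of the same guarantee using standard C++20 constinit (the pool name is illustrative):

    #include <cstdlib>
    #include <mutex>

    // C++20 constinit demands what _LIBCPP_SAFE_STATIC asks for:
    // the variable must be constant-initialized, or the program is
    // ill-formed. std::mutex has a constexpr default constructor,
    // so a mutex table qualifies and is usable before main() runs.
    constinit std::mutex pool[16];

    // constinit int bad = std::rand(); // error: dynamic initializer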
@@ -145,13 +171,13 @@ _LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
void
__sp_mut::lock() _NOEXCEPT
{
- mutex& m = *static_cast<mutex*>(__lx);
+ auto m = static_cast<__libcpp_mutex_t*>(__lx);
unsigned count = 0;
- while (!m.try_lock())
+ while (__libcpp_mutex_trylock(m) != 0)
{
if (++count > 16)
{
- m.lock();
+ __libcpp_mutex_lock(m);
break;
}
this_thread::yield();
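
The rewritten lock() keeps the original adaptive strategy: spin on trylock a bounded number of times, yielding between attempts, then fall back to a blocking lock. A standalone sketch of that loop over std::mutex (spin_lock_adaptive is an invented name; the bound of 16 is taken from the code above):

    #include <mutex>
    #include <thread>

    // Spin briefly on try_lock before blocking: when the critical
    // section is short this avoids the syscall and context switch of
    // a contended lock(), while the cap keeps us from burning CPU
    // when the holder is slow to release.
    void spin_lock_adaptive(std::mutex& m)
    {
        unsigned count = 0;
        while (!m.try_lock())
        {
            if (++count > 16)
            {
                m.lock(); // give up spinning and block
                break;
            }
            std::this_thread::yield(); // let the holder make progress
        }
    }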
@@ -161,13 +187,13 @@ __sp_mut::lock() _NOEXCEPT
void
__sp_mut::unlock() _NOEXCEPT
{
- static_cast<mutex*>(__lx)->unlock();
+ __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}
__sp_mut&
__get_sp_mut(const void* p)
{
- static __sp_mut muts[__sp_mut_count]
+ static __sp_mut muts[__sp_mut_count]
{
&mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
&mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
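
These initializers point each __sp_mut at one slot of the 16-entry mutex pool. __get_sp_mut then maps an object's address onto a slot, so atomic operations on unrelated shared_ptr control blocks rarely contend for the same lock (the return statement that performs the mapping lies outside this hunk). A sketch of that striping, hashing the pointer and masking it into a power-of-two pool; stripe_for is an invented name:

    #include <cstddef>
    #include <functional>
    #include <mutex>

    static const std::size_t pool_size = 16; // keep a power of two
    static std::mutex pool[pool_size];

    // Map an address onto one mutex in the pool. Distinct objects
    // usually land on distinct stripes, so locking one control
    // block does not serialize operations on unrelated ones.
    std::mutex& stripe_for(const void* p)
    {
        return pool[std::hash<const void*>()(p) & (pool_size - 1)];
    }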
@@ -194,11 +220,12 @@ undeclare_no_pointers(char*, size_t)
{
}
-pointer_safety
-get_pointer_safety() _NOEXCEPT
+#if !defined(_LIBCPP_ABI_POINTER_SAFETY_ENUM_TYPE)
+pointer_safety get_pointer_safety() _NOEXCEPT
{
return pointer_safety::relaxed;
}
+#endif
void*
__undeclare_reachable(void* p)