Diffstat (limited to 'lib/tsan/rtl/tsan_mutex.cc')
-rw-r--r-- | lib/tsan/rtl/tsan_mutex.cc | 259
1 file changed, 259 insertions, 0 deletions
diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc
new file mode 100644
index 0000000..1a70f8f
--- /dev/null
+++ b/lib/tsan/rtl/tsan_mutex.cc
@@ -0,0 +1,259 @@
+//===-- tsan_mutex.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_mutex.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
+// Readers have preference, so they can possibly starve writers.
+
+// The table fixes what mutexes can be locked under what mutexes.
+// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
+// then the Report mutex can be locked while under the Threads mutex.
+// The leaf mutexes can be locked under any other mutexes.
+// Recursive locking is not supported.
+const MutexType MutexTypeLeaf = (MutexType)-1;
+static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
+  /*0 MutexTypeInvalid*/     {},
+  /*1 MutexTypeTrace*/       {MutexTypeLeaf},
+  /*2 MutexTypeThreads*/     {MutexTypeReport},
+  /*3 MutexTypeReport*/      {},
+  /*4 MutexTypeSyncVar*/     {},
+  /*5 MutexTypeSyncTab*/     {MutexTypeSyncVar},
+  /*6 MutexTypeSlab*/        {MutexTypeLeaf},
+  /*7 MutexTypeAnnotations*/ {},
+  /*8 MutexTypeAtExit*/      {MutexTypeSyncTab},
+};
+
+static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+
+void InitializeMutex() {
+  // Build the "can lock" adjacency matrix.
+  // If [i][j] == true, then one can lock mutex j while under mutex i.
+  const int N = MutexTypeCount;
+  int cnt[N] = {};
+  bool leaf[N] = {};
+  for (int i = 1; i < N; i++) {
+    for (int j = 0; j < N; j++) {
+      int z = CanLockTab[i][j];
+      if (z == MutexTypeInvalid)
+        continue;
+      if (z == MutexTypeLeaf) {
+        CHECK(!leaf[i]);
+        leaf[i] = true;
+        continue;
+      }
+      CHECK(!CanLockAdj[i][z]);
+      CanLockAdj[i][z] = true;
+      cnt[i]++;
+    }
+  }
+  for (int i = 0; i < N; i++) {
+    CHECK(!leaf[i] || cnt[i] == 0);
+  }
+  // Add leaf mutexes.
+  for (int i = 0; i < N; i++) {
+    if (!leaf[i])
+      continue;
+    for (int j = 0; j < N; j++) {
+      if (i == j || leaf[j] || j == MutexTypeInvalid)
+        continue;
+      CHECK(!CanLockAdj[j][i]);
+      CanLockAdj[j][i] = true;
+    }
+  }
+  // Build the transitive closure.
+  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
+  for (int i = 0; i < N; i++) {
+    for (int j = 0; j < N; j++) {
+      CanLockAdj2[i][j] = CanLockAdj[i][j];
+    }
+  }
+  for (int k = 0; k < N; k++) {
+    for (int i = 0; i < N; i++) {
+      for (int j = 0; j < N; j++) {
+        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
+          CanLockAdj2[i][j] = true;
+        }
+      }
+    }
+  }
+#if 0
+  TsanPrintf("Can lock graph:\n");
+  for (int i = 0; i < N; i++) {
+    for (int j = 0; j < N; j++) {
+      TsanPrintf("%d ", CanLockAdj[i][j]);
+    }
+    TsanPrintf("\n");
+  }
+  TsanPrintf("Can lock graph closure:\n");
+  for (int i = 0; i < N; i++) {
+    for (int j = 0; j < N; j++) {
+      TsanPrintf("%d ", CanLockAdj2[i][j]);
+    }
+    TsanPrintf("\n");
+  }
+#endif
+  // Verify that the graph is acyclic.
+  for (int i = 0; i < N; i++) {
+    if (CanLockAdj2[i][i]) {
+      TsanPrintf("Mutex %d participates in a cycle\n", i);
+      Die();
+    }
+  }
+}
+
+DeadlockDetector::DeadlockDetector() {
+  // Rely on zero initialization because some mutexes can be locked before ctor.
+}
+
+void DeadlockDetector::Lock(MutexType t) {
+  // TsanPrintf("LOCK %d @%zu\n", t, seq_ + 1);
+  u64 max_seq = 0;
+  u64 max_idx = MutexTypeInvalid;
+  for (int i = 0; i != MutexTypeCount; i++) {
+    if (locked_[i] == 0)
+      continue;
+    CHECK_NE(locked_[i], max_seq);
+    if (max_seq < locked_[i]) {
+      max_seq = locked_[i];
+      max_idx = i;
+    }
+  }
+  locked_[t] = ++seq_;
+  if (max_idx == MutexTypeInvalid)
+    return;
+  // TsanPrintf("  last %d @%zu\n", max_idx, max_seq);
+  if (!CanLockAdj[max_idx][t]) {
+    TsanPrintf("ThreadSanitizer: internal deadlock detected\n");
+    TsanPrintf("ThreadSanitizer: can't lock %d while under %zu\n",
+               t, (uptr)max_idx);
+    Die();
+  }
+}
+
+void DeadlockDetector::Unlock(MutexType t) {
+  // TsanPrintf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
+  CHECK(locked_[t]);
+  locked_[t] = 0;
+}
+
+const uptr kUnlocked = 0;
+const uptr kWriteLock = 1;
+const uptr kReadLock = 2;
+
+class Backoff {
+ public:
+  Backoff()
+    : iter_() {
+  }
+
+  bool Do() {
+    if (iter_++ < kActiveSpinIters)
+      proc_yield(kActiveSpinCnt);
+    else
+      internal_sched_yield();
+    return true;
+  }
+
+  u64 Contention() const {
+    u64 active = iter_ % kActiveSpinIters;
+    u64 passive = iter_ - active;
+    return active + 10 * passive;
+  }
+
+ private:
+  int iter_;
+  static const int kActiveSpinIters = 10;
+  static const int kActiveSpinCnt = 20;
+};
+
+Mutex::Mutex(MutexType type, StatType stat_type) {
+  CHECK_GT(type, MutexTypeInvalid);
+  CHECK_LT(type, MutexTypeCount);
+#if TSAN_DEBUG
+  type_ = type;
+#endif
+#if TSAN_COLLECT_STATS
+  stat_type_ = stat_type;
+#endif
+  atomic_store(&state_, kUnlocked, memory_order_relaxed);
+}
+
+Mutex::~Mutex() {
+  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+}
+
+void Mutex::Lock() {
+#if TSAN_DEBUG && !TSAN_GO
+  cur_thread()->deadlock_detector.Lock(type_);
+#endif
+  uptr cmp = kUnlocked;
+  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+                                     memory_order_acquire))
+    return;
+  for (Backoff backoff; backoff.Do();) {
+    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
+      cmp = kUnlocked;
+      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+                                       memory_order_acquire)) {
+#if TSAN_COLLECT_STATS
+        StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+        return;
+      }
+    }
+  }
+}
+
+void Mutex::Unlock() {
+  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+  (void)prev;
+  DCHECK_NE(prev & kWriteLock, 0);
+#if TSAN_DEBUG && !TSAN_GO
+  cur_thread()->deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::ReadLock() {
+#if TSAN_DEBUG && !TSAN_GO
+  cur_thread()->deadlock_detector.Lock(type_);
+#endif
+  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+  if ((prev & kWriteLock) == 0)
+    return;
+  for (Backoff backoff; backoff.Do();) {
+    prev = atomic_load(&state_, memory_order_acquire);
+    if ((prev & kWriteLock) == 0) {
+#if TSAN_COLLECT_STATS
+      StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+      return;
+    }
+  }
+}
+
+void Mutex::ReadUnlock() {
+  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+  (void)prev;
+  DCHECK_EQ(prev & kWriteLock, 0);
+  DCHECK_GT(prev & ~kWriteLock, 0);
+#if TSAN_DEBUG && !TSAN_GO
+  cur_thread()->deadlock_detector.Unlock(type_);
+#endif
+}
+
+}  // namespace __tsan
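
The deadlock detection in this patch is order-based: CanLockTab declares which mutex kind may be acquired while holding which, InitializeMutex() takes the transitive closure of that relation and aborts if it contains a cycle, and DeadlockDetector::Lock() checks each new acquisition against the most recently acquired mutex. The following is a minimal standalone sketch of that technique, not the TSan API; the names (kKinds, can_lock, CheckLockOrder) and the small 3-kind table are hypothetical, chosen only to show the closure and the runtime check in isolation.

#include <cstdio>
#include <cstdlib>

// Hypothetical lock kinds, analogous to MutexType in the runtime.
enum { kThreads = 0, kReport = 1, kTrace = 2, kKinds = 3 };

// can_lock[i][j] == true means lock j may be acquired while holding lock i.
static bool can_lock[kKinds][kKinds] = {
  /* kThreads */ {false, true,  true },
  /* kReport  */ {false, false, true },
  /* kTrace   */ {false, false, false},
};

// Floyd-Warshall-style transitive closure, then a check for self-edges:
// a self-edge after closure means the declared order permits a cycle.
static void BuildClosureAndCheckAcyclic() {
  for (int k = 0; k < kKinds; k++)
    for (int i = 0; i < kKinds; i++)
      for (int j = 0; j < kKinds; j++)
        if (can_lock[i][k] && can_lock[k][j])
          can_lock[i][j] = true;
  for (int i = 0; i < kKinds; i++)
    if (can_lock[i][i]) {
      std::fprintf(stderr, "lock kind %d participates in a cycle\n", i);
      std::abort();
    }
}

// Runtime check: is it legal to acquire `requested` while `held` is held?
static void CheckLockOrder(int held, int requested) {
  if (!can_lock[held][requested]) {
    std::fprintf(stderr, "internal deadlock: can't lock %d under %d\n",
                 requested, held);
    std::abort();
  }
}

int main() {
  BuildClosureAndCheckAcyclic();
  CheckLockOrder(kThreads, kReport);     // allowed by the table
  // CheckLockOrder(kReport, kThreads);  // not in the closure: would abort
  return 0;
}

The real detector generalizes this by tracking a sequence number per held mutex (locked_[], seq_) so it can find the most recently acquired one, rather than taking the held kind as an argument.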
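The reader-writer spin-mutex itself packs its entire state into one atomic word: bit 0 (kWriteLock) is the writer flag, and each reader adds kReadLock (2), so the reader count lives in the remaining bits. Below is a simplified sketch of that encoding using std::atomic instead of the sanitizer atomics and omitting the Backoff policy and stats; it illustrates the state layout and memory orderings, and is not a drop-in replacement for the class in the patch.

#include <atomic>
#include <cstdint>

class RWSpinLockSketch {
 public:
  void Lock() {
    // Writer: wait until the word is fully unlocked (no writer, no readers),
    // then claim the writer bit with acquire semantics.
    uintptr_t cmp = kUnlocked;
    while (!state_.compare_exchange_weak(cmp, kWriteLock,
                                         std::memory_order_acquire))
      cmp = kUnlocked;  // compare_exchange overwrote cmp with current state
  }

  void Unlock() {
    // Clearing the writer bit releases the critical section.
    state_.fetch_sub(kWriteLock, std::memory_order_release);
  }

  void ReadLock() {
    // Reader: optimistically register by adding 2, then spin until any
    // writer present at registration time has left.
    uintptr_t prev = state_.fetch_add(kReadLock, std::memory_order_acquire);
    while (prev & kWriteLock)
      prev = state_.load(std::memory_order_acquire);
  }

  void ReadUnlock() {
    state_.fetch_sub(kReadLock, std::memory_order_release);
  }

 private:
  static constexpr uintptr_t kUnlocked = 0;
  static constexpr uintptr_t kWriteLock = 1;
  static constexpr uintptr_t kReadLock = 2;
  std::atomic<uintptr_t> state_{0};
};

Because readers register before checking the writer bit, a waiting writer sees a nonzero word and keeps spinning, which is why readers have preference and can starve writers, as the comment at the top of the file notes. A production version would replace the bare spin loops with an adaptive policy like the Backoff class in the patch (active proc_yield spins first, then sched_yield).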