Diffstat (limited to 'contrib/compiler-rt/lib/tsan/rtl')
53 files changed, 13636 insertions, 0 deletions
diff --git a/contrib/compiler-rt/lib/tsan/rtl/Makefile.old b/contrib/compiler-rt/lib/tsan/rtl/Makefile.old new file mode 100644 index 0000000..79c761c --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/Makefile.old @@ -0,0 +1,62 @@ +CXXFLAGS = -std=c++11 -fPIE -g -Wall -Werror -fno-builtin -msse3 -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG) +CLANG=clang +ifeq ($(DEBUG), 0) + CXXFLAGS += -O3 +endif + +# For interception. FIXME: move interception one level higher. +INTERCEPTION=../../interception +COMMON=../../sanitizer_common +INCLUDES= -I../.. -I../../../include +EXTRA_CXXFLAGS=-fno-exceptions -fno-rtti +NO_SYSROOT=--sysroot=. +CXXFLAGS+=$(EXTRA_CXXFLAGS) +CXXFLAGS+=$(CFLAGS) +ifeq ($(DEBUG), 0) + CXXFLAGS+=-fomit-frame-pointer +ifeq ($(CXX), g++) + CXXFLAGS+=-Wframe-larger-than=512 +endif # CXX=g++ +endif # DEBUG=0 + +ifeq ($(CXX), $(CLANG)++) + # Global constructors are banned. + CXXFLAGS+=-Wglobal-constructors +endif + + + +all: libtsan.a + +LIBTSAN_HEADERS=$(wildcard *.h) \ + $(wildcard $(INTERCEPTION)/*.h) \ + $(wildcard $(COMMON)/*.h) +LIBTSAN_SRC=$(wildcard *.cc) +LIBTSAN_ASM_SRC=$(wildcard *.S) +INTERCEPTION_SRC=$(wildcard $(INTERCEPTION)/*.cc) +COMMON_SRC=$(wildcard $(COMMON)/*.cc) + +LIBTSAN_OBJ=$(patsubst %.cc,%.o,$(LIBTSAN_SRC)) \ + $(patsubst %.S,%.o,$(LIBTSAN_ASM_SRC)) \ + $(patsubst $(INTERCEPTION)/%.cc,%.o,$(INTERCEPTION_SRC)) \ + $(patsubst $(COMMON)/%.cc,%.o,$(COMMON_SRC)) + +%_linux.o: %_linux.cc Makefile.old $(LIBTSAN_HEADERS) + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< +%.o: %.cc Makefile.old $(LIBTSAN_HEADERS) + $(CXX) $(CXXFLAGS) $(INCLUDES) $(NO_SYSROOT) -c $< +%.o: $(INTERCEPTION)/%.cc Makefile.old $(LIBTSAN_HEADERS) + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@ +%.o: $(COMMON)/%.cc Makefile.old $(LIBTSAN_HEADERS) + $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@ +%.o: %.S + $(CXX) $(INCLUDES) -o $@ -c $< + +libtsan.a: $(LIBTSAN_OBJ) + ar ru $@ $(LIBTSAN_OBJ) + +libtsan_dummy.a: tsan_dummy_rtl.o + ar ru $@ $< + +clean: + rm -f *.o *.a diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan.syms.extra b/contrib/compiler-rt/lib/tsan/rtl/tsan.syms.extra new file mode 100644 index 0000000..49ed6b4 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan.syms.extra @@ -0,0 +1,14 @@ +__tsan_init +__tsan_read* +__tsan_write* +__tsan_vptr* +__tsan_func* +__tsan_atomic* +__tsan_java* +__tsan_unaligned* +__tsan_release +__tsan_acquire +Annotate* +WTFAnnotate* +RunningOnValgrind +ValgrindSlowdown diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_clock.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_clock.cc new file mode 100644 index 0000000..f2b39a1 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_clock.cc @@ -0,0 +1,429 @@ +//===-- tsan_clock.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#include "tsan_clock.h" +#include "tsan_rtl.h" +#include "sanitizer_common/sanitizer_placement_new.h" + +// SyncClock and ThreadClock implement vector clocks for sync variables +// (mutexes, atomic variables, file descriptors, etc) and threads, respectively. +// ThreadClock contains fixed-size vector clock for maximum number of threads. 
+// SyncClock contains a growable vector clock for the currently necessary
+// number of threads.
+// Together they implement a very simple model of operations, namely:
+//
+// void ThreadClock::acquire(const SyncClock *src) {
+//   for (int i = 0; i < kMaxThreads; i++)
+//     clock[i] = max(clock[i], src->clock[i]);
+// }
+//
+// void ThreadClock::release(SyncClock *dst) const {
+//   for (int i = 0; i < kMaxThreads; i++)
+//     dst->clock[i] = max(dst->clock[i], clock[i]);
+// }
+//
+// void ThreadClock::ReleaseStore(SyncClock *dst) const {
+//   for (int i = 0; i < kMaxThreads; i++)
+//     dst->clock[i] = clock[i];
+// }
+//
+// void ThreadClock::acq_rel(SyncClock *dst) {
+//   acquire(dst);
+//   release(dst);
+// }
+//
+// Conformance to this model is extensively verified in tsan_clock_test.cc.
+// However, the implementation is significantly more complex. The complexity
+// makes it possible to implement important classes of use cases in O(1)
+// instead of O(N).
+//
+// The use cases are:
+// 1. Singleton/once atomic that has a single release-store operation followed
+//    by zillions of acquire-loads (the acquire-load is O(1)).
+// 2. Thread-local mutex (both lock and unlock can be O(1)).
+// 3. Leaf mutex (unlock is O(1)).
+// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
+// 5. An atomic with a single writer (writes can be O(1)).
+// The implementation dynamically adapts to the workload. So if an atomic is
+// in a read-only phase, these reads will be O(1); if it later switches to a
+// read/write phase, the implementation will correctly handle that by
+// switching to O(N).
+//
+// Thread-safety note: all const operations on SyncClock's are conducted under
+// a shared lock; all non-const operations on SyncClock's are conducted under
+// an exclusive lock; ThreadClock's are private to respective threads and so
+// do not need any protection.
+//
+// Description of ThreadClock state:
+// clk_ - fixed size vector clock.
+// nclk_ - effective size of the vector clock (the rest is zeros).
+// tid_ - index of the thread associated with the clock ("current thread").
+// last_acquire_ - current thread time when it acquired something from
+//   other threads.
+//
+// Description of SyncClock state:
+// clk_ - variable size vector clock, low kClkBits hold the timestamp,
+//   the remaining bits hold the "acquired" flag (the actual value is the
+//   thread's reuse counter);
+//   if acquired == thr->reused_, then the respective thread has already
+//   acquired this clock (except possibly dirty_tids_).
+// dirty_tids_ - holds up to two indices in the vector clock that other
+//   threads need to acquire regardless of the "acquired" flag value;
+// release_store_tid_ - denotes that the clock state is a result of a
+//   release-store operation by the thread with release_store_tid_ index.
+// release_store_reused_ - reuse count of release_store_tid_.

+// We don't have ThreadState in these methods, so this is an ugly hack that
+// works only in C++.
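// A condensed sketch of the acquire() fast path described above; merge() is
// shorthand for "clk_[t] = max(clk_[t], src->elem(t).epoch)", not a real
// function:
//
//   if (src->elem(tid_).reused == reused_) {     // we acquired src before
//     for (unsigned i = 0; i < kDirtyTids; i++)  // at most 2 dirty entries
//       merge(src->dirty_tids_[i]);              // O(1) instead of O(N)
//   }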
+#ifndef SANITIZER_GO +# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ) +#else +# define CPP_STAT_INC(typ) (void)0 +#endif + +namespace __tsan { + +const unsigned kInvalidTid = (unsigned)-1; + +ThreadClock::ThreadClock(unsigned tid, unsigned reused) + : tid_(tid) + , reused_(reused + 1) { // 0 has special meaning + CHECK_LT(tid, kMaxTidInClock); + CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits); + nclk_ = tid_ + 1; + last_acquire_ = 0; + internal_memset(clk_, 0, sizeof(clk_)); + clk_[tid_].reused = reused_; +} + +void ThreadClock::acquire(ClockCache *c, const SyncClock *src) { + DCHECK(nclk_ <= kMaxTid); + DCHECK(src->size_ <= kMaxTid); + CPP_STAT_INC(StatClockAcquire); + + // Check if it's empty -> no need to do anything. + const uptr nclk = src->size_; + if (nclk == 0) { + CPP_STAT_INC(StatClockAcquireEmpty); + return; + } + + // Check if we've already acquired src after the last release operation on src + bool acquired = false; + if (nclk > tid_) { + CPP_STAT_INC(StatClockAcquireLarge); + if (src->elem(tid_).reused == reused_) { + CPP_STAT_INC(StatClockAcquireRepeat); + for (unsigned i = 0; i < kDirtyTids; i++) { + unsigned tid = src->dirty_tids_[i]; + if (tid != kInvalidTid) { + u64 epoch = src->elem(tid).epoch; + if (clk_[tid].epoch < epoch) { + clk_[tid].epoch = epoch; + acquired = true; + } + } + } + if (acquired) { + CPP_STAT_INC(StatClockAcquiredSomething); + last_acquire_ = clk_[tid_].epoch; + } + return; + } + } + + // O(N) acquire. + CPP_STAT_INC(StatClockAcquireFull); + nclk_ = max(nclk_, nclk); + for (uptr i = 0; i < nclk; i++) { + u64 epoch = src->elem(i).epoch; + if (clk_[i].epoch < epoch) { + clk_[i].epoch = epoch; + acquired = true; + } + } + + // Remember that this thread has acquired this clock. + if (nclk > tid_) + src->elem(tid_).reused = reused_; + + if (acquired) { + CPP_STAT_INC(StatClockAcquiredSomething); + last_acquire_ = clk_[tid_].epoch; + } +} + +void ThreadClock::release(ClockCache *c, SyncClock *dst) const { + DCHECK_LE(nclk_, kMaxTid); + DCHECK_LE(dst->size_, kMaxTid); + + if (dst->size_ == 0) { + // ReleaseStore will correctly set release_store_tid_, + // which can be important for future operations. + ReleaseStore(c, dst); + return; + } + + CPP_STAT_INC(StatClockRelease); + // Check if we need to resize dst. + if (dst->size_ < nclk_) + dst->Resize(c, nclk_); + + // Check if we had not acquired anything from other threads + // since the last release on dst. If so, we need to update + // only dst->elem(tid_). + if (dst->elem(tid_).epoch > last_acquire_) { + UpdateCurrentThread(dst); + if (dst->release_store_tid_ != tid_ || + dst->release_store_reused_ != reused_) + dst->release_store_tid_ = kInvalidTid; + return; + } + + // O(N) release. + CPP_STAT_INC(StatClockReleaseFull); + // First, remember whether we've acquired dst. + bool acquired = IsAlreadyAcquired(dst); + if (acquired) + CPP_STAT_INC(StatClockReleaseAcquired); + // Update dst->clk_. + for (uptr i = 0; i < nclk_; i++) { + ClockElem &ce = dst->elem(i); + ce.epoch = max(ce.epoch, clk_[i].epoch); + ce.reused = 0; + } + // Clear 'acquired' flag in the remaining elements. + if (nclk_ < dst->size_) + CPP_STAT_INC(StatClockReleaseClearTail); + for (uptr i = nclk_; i < dst->size_; i++) + dst->elem(i).reused = 0; + for (unsigned i = 0; i < kDirtyTids; i++) + dst->dirty_tids_[i] = kInvalidTid; + dst->release_store_tid_ = kInvalidTid; + dst->release_store_reused_ = 0; + // If we've acquired dst, remember this fact, + // so that we don't need to acquire it on next acquire. 
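// (This stamp is what the fast path in acquire() checks: a thread that finds
// its own reused_ value in elem(tid_) can skip the O(N) merge next time.)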
+  if (acquired)
+    dst->elem(tid_).reused = reused_;
+}
+
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
+  DCHECK(nclk_ <= kMaxTid);
+  DCHECK(dst->size_ <= kMaxTid);
+  CPP_STAT_INC(StatClockStore);
+
+  // Check if we need to resize dst.
+  if (dst->size_ < nclk_)
+    dst->Resize(c, nclk_);
+
+  if (dst->release_store_tid_ == tid_ &&
+      dst->release_store_reused_ == reused_ &&
+      dst->elem(tid_).epoch > last_acquire_) {
+    CPP_STAT_INC(StatClockStoreFast);
+    UpdateCurrentThread(dst);
+    return;
+  }
+
+  // O(N) release-store.
+  CPP_STAT_INC(StatClockStoreFull);
+  for (uptr i = 0; i < nclk_; i++) {
+    ClockElem &ce = dst->elem(i);
+    ce.epoch = clk_[i].epoch;
+    ce.reused = 0;
+  }
+  // Clear the tail of dst->clk_.
+  if (nclk_ < dst->size_) {
+    for (uptr i = nclk_; i < dst->size_; i++) {
+      ClockElem &ce = dst->elem(i);
+      ce.epoch = 0;
+      ce.reused = 0;
+    }
+    CPP_STAT_INC(StatClockStoreTail);
+  }
+  for (unsigned i = 0; i < kDirtyTids; i++)
+    dst->dirty_tids_[i] = kInvalidTid;
+  dst->release_store_tid_ = tid_;
+  dst->release_store_reused_ = reused_;
+  // Remember that we don't need to acquire it in the future.
+  dst->elem(tid_).reused = reused_;
+}
+
+void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
+  CPP_STAT_INC(StatClockAcquireRelease);
+  acquire(c, dst);
+  ReleaseStore(c, dst);
+}
+
+// Updates only the single element related to the current thread in dst->clk_.
+void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
+  // Update the thread's time, but preserve the 'acquired' flag.
+  dst->elem(tid_).epoch = clk_[tid_].epoch;
+
+  for (unsigned i = 0; i < kDirtyTids; i++) {
+    if (dst->dirty_tids_[i] == tid_) {
+      CPP_STAT_INC(StatClockReleaseFast1);
+      return;
+    }
+    if (dst->dirty_tids_[i] == kInvalidTid) {
+      CPP_STAT_INC(StatClockReleaseFast2);
+      dst->dirty_tids_[i] = tid_;
+      return;
+    }
+  }
+  // Reset all 'acquired' flags, O(N).
+  CPP_STAT_INC(StatClockReleaseSlow);
+  for (uptr i = 0; i < dst->size_; i++)
+    dst->elem(i).reused = 0;
+  for (unsigned i = 0; i < kDirtyTids; i++)
+    dst->dirty_tids_[i] = kInvalidTid;
+}
+
+// Checks whether the current thread has already acquired src.
+bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
+  if (src->elem(tid_).reused != reused_)
+    return false;
+  for (unsigned i = 0; i < kDirtyTids; i++) {
+    unsigned tid = src->dirty_tids_[i];
+    if (tid != kInvalidTid) {
+      if (clk_[tid].epoch < src->elem(tid).epoch)
+        return false;
+    }
+  }
+  return true;
+}
+
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+  CPP_STAT_INC(StatClockReleaseResize);
+  if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
+      RoundUpTo(size_, ClockBlock::kClockCount)) {
+    // Growing within the same block.
+    // Memory is already allocated, just increase the size.
+    size_ = nclk;
+    return;
+  }
+  if (nclk <= ClockBlock::kClockCount) {
+    // Grow from 0 to a one-level table.
+    CHECK_EQ(size_, 0);
+    CHECK_EQ(tab_, 0);
+    CHECK_EQ(tab_idx_, 0);
+    size_ = nclk;
+    tab_idx_ = ctx->clock_alloc.Alloc(c);
+    tab_ = ctx->clock_alloc.Map(tab_idx_);
+    internal_memset(tab_, 0, sizeof(*tab_));
+    return;
+  }
+  // Growing a two-level table.
+  if (size_ == 0) {
+    // Allocate the first-level table.
+    tab_idx_ = ctx->clock_alloc.Alloc(c);
+    tab_ = ctx->clock_alloc.Map(tab_idx_);
+    internal_memset(tab_, 0, sizeof(*tab_));
+  } else if (size_ <= ClockBlock::kClockCount) {
+    // Transform the one-level table into a two-level table.
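// (Resulting layout: tab_ becomes a first-level block whose table[] entries
// index second-level ClockBlocks of kClockCount elements each; the old
// one-level block is reused as the first second-level block, table[0].)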
+ u32 old = tab_idx_; + tab_idx_ = ctx->clock_alloc.Alloc(c); + tab_ = ctx->clock_alloc.Map(tab_idx_); + internal_memset(tab_, 0, sizeof(*tab_)); + tab_->table[0] = old; + } + // At this point we have first level table allocated. + // Add second level tables as necessary. + for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount); + i < nclk; i += ClockBlock::kClockCount) { + u32 idx = ctx->clock_alloc.Alloc(c); + ClockBlock *cb = ctx->clock_alloc.Map(idx); + internal_memset(cb, 0, sizeof(*cb)); + CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0); + tab_->table[i/ClockBlock::kClockCount] = idx; + } + size_ = nclk; +} + +// Sets a single element in the vector clock. +// This function is called only from weird places like AcquireGlobal. +void ThreadClock::set(unsigned tid, u64 v) { + DCHECK_LT(tid, kMaxTid); + DCHECK_GE(v, clk_[tid].epoch); + clk_[tid].epoch = v; + if (nclk_ <= tid) + nclk_ = tid + 1; + last_acquire_ = clk_[tid_].epoch; +} + +void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) { + printf("clock=["); + for (uptr i = 0; i < nclk_; i++) + printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch); + printf("] reused=["); + for (uptr i = 0; i < nclk_; i++) + printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused); + printf("] tid=%u/%u last_acq=%llu", + tid_, reused_, last_acquire_); +} + +SyncClock::SyncClock() + : release_store_tid_(kInvalidTid) + , release_store_reused_() + , tab_() + , tab_idx_() + , size_() { + for (uptr i = 0; i < kDirtyTids; i++) + dirty_tids_[i] = kInvalidTid; +} + +SyncClock::~SyncClock() { + // Reset must be called before dtor. + CHECK_EQ(size_, 0); + CHECK_EQ(tab_, 0); + CHECK_EQ(tab_idx_, 0); +} + +void SyncClock::Reset(ClockCache *c) { + if (size_ == 0) { + // nothing + } else if (size_ <= ClockBlock::kClockCount) { + // One-level table. + ctx->clock_alloc.Free(c, tab_idx_); + } else { + // Two-level table. + for (uptr i = 0; i < size_; i += ClockBlock::kClockCount) + ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]); + ctx->clock_alloc.Free(c, tab_idx_); + } + tab_ = 0; + tab_idx_ = 0; + size_ = 0; + release_store_tid_ = kInvalidTid; + release_store_reused_ = 0; + for (uptr i = 0; i < kDirtyTids; i++) + dirty_tids_[i] = kInvalidTid; +} + +ClockElem &SyncClock::elem(unsigned tid) const { + DCHECK_LT(tid, size_); + if (size_ <= ClockBlock::kClockCount) + return tab_->clock[tid]; + u32 idx = tab_->table[tid / ClockBlock::kClockCount]; + ClockBlock *cb = ctx->clock_alloc.Map(idx); + return cb->clock[tid % ClockBlock::kClockCount]; +} + +void SyncClock::DebugDump(int(*printf)(const char *s, ...)) { + printf("clock=["); + for (uptr i = 0; i < size_; i++) + printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch); + printf("] reused=["); + for (uptr i = 0; i < size_; i++) + printf("%s%llu", i == 0 ? "" : ",", elem(i).reused); + printf("] release_store_tid=%d/%d dirty_tids=%d/%d", + release_store_tid_, release_store_reused_, + dirty_tids_[0], dirty_tids_[1]); +} +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_clock.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_clock.h new file mode 100644 index 0000000..4e352cb --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_clock.h @@ -0,0 +1,129 @@ +//===-- tsan_clock.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_CLOCK_H +#define TSAN_CLOCK_H + +#include "tsan_defs.h" +#include "tsan_dense_alloc.h" + +namespace __tsan { + +struct ClockElem { + u64 epoch : kClkBits; + u64 reused : 64 - kClkBits; +}; + +struct ClockBlock { + static const uptr kSize = 512; + static const uptr kTableSize = kSize / sizeof(u32); + static const uptr kClockCount = kSize / sizeof(ClockElem); + + union { + u32 table[kTableSize]; + ClockElem clock[kClockCount]; + }; + + ClockBlock() { + } +}; + +typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc; +typedef DenseSlabAllocCache ClockCache; + +// The clock that lives in sync variables (mutexes, atomics, etc). +class SyncClock { + public: + SyncClock(); + ~SyncClock(); + + uptr size() const { + return size_; + } + + u64 get(unsigned tid) const { + return elem(tid).epoch; + } + + void Resize(ClockCache *c, uptr nclk); + void Reset(ClockCache *c); + + void DebugDump(int(*printf)(const char *s, ...)); + + private: + friend struct ThreadClock; + static const uptr kDirtyTids = 2; + + unsigned release_store_tid_; + unsigned release_store_reused_; + unsigned dirty_tids_[kDirtyTids]; + // tab_ contains indirect pointer to a 512b block using DenseSlabAlloc. + // If size_ <= 64, then tab_ points to an array with 64 ClockElem's. + // Otherwise, tab_ points to an array with 128 u32 elements, + // each pointing to the second-level 512b block with 64 ClockElem's. + ClockBlock *tab_; + u32 tab_idx_; + u32 size_; + + ClockElem &elem(unsigned tid) const; +}; + +// The clock that lives in threads. +struct ThreadClock { + public: + typedef DenseSlabAllocCache Cache; + + explicit ThreadClock(unsigned tid, unsigned reused = 0); + + u64 get(unsigned tid) const { + DCHECK_LT(tid, kMaxTidInClock); + return clk_[tid].epoch; + } + + void set(unsigned tid, u64 v); + + void set(u64 v) { + DCHECK_GE(v, clk_[tid_].epoch); + clk_[tid_].epoch = v; + } + + void tick() { + clk_[tid_].epoch++; + } + + uptr size() const { + return nclk_; + } + + void acquire(ClockCache *c, const SyncClock *src); + void release(ClockCache *c, SyncClock *dst) const; + void acq_rel(ClockCache *c, SyncClock *dst); + void ReleaseStore(ClockCache *c, SyncClock *dst) const; + + void DebugReset(); + void DebugDump(int(*printf)(const char *s, ...)); + + private: + static const uptr kDirtyTids = SyncClock::kDirtyTids; + const unsigned tid_; + const unsigned reused_; + u64 last_acquire_; + uptr nclk_; + ClockElem clk_[kMaxTidInClock]; + + bool IsAlreadyAcquired(const SyncClock *src) const; + void UpdateCurrentThread(SyncClock *dst) const; +}; + +} // namespace __tsan + +#endif // TSAN_CLOCK_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_defs.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_defs.h new file mode 100644 index 0000000..7ed3796 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_defs.h @@ -0,0 +1,188 @@ +//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+// +//===----------------------------------------------------------------------===// + +#ifndef TSAN_DEFS_H +#define TSAN_DEFS_H + +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "tsan_stat.h" + +#ifndef TSAN_DEBUG +#define TSAN_DEBUG 0 +#endif // TSAN_DEBUG + +namespace __tsan { + +#ifdef SANITIZER_GO +const bool kGoMode = true; +const bool kCppMode = false; +const char *const kTsanOptionsEnv = "GORACE"; +// Go linker does not support weak symbols. +#define CPP_WEAK +#else +const bool kGoMode = false; +const bool kCppMode = true; +const char *const kTsanOptionsEnv = "TSAN_OPTIONS"; +#define CPP_WEAK WEAK +#endif + +const int kTidBits = 13; +const unsigned kMaxTid = 1 << kTidBits; +const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit. +const int kClkBits = 42; +const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1; +const uptr kShadowStackSize = 64 * 1024; + +#ifdef TSAN_SHADOW_COUNT +# if TSAN_SHADOW_COUNT == 2 \ + || TSAN_SHADOW_COUNT == 4 || TSAN_SHADOW_COUNT == 8 +const uptr kShadowCnt = TSAN_SHADOW_COUNT; +# else +# error "TSAN_SHADOW_COUNT must be one of 2,4,8" +# endif +#else +// Count of shadow values in a shadow cell. +#define TSAN_SHADOW_COUNT 4 +const uptr kShadowCnt = 4; +#endif + +// That many user bytes are mapped onto a single shadow cell. +const uptr kShadowCell = 8; + +// Size of a single shadow value (u64). +const uptr kShadowSize = 8; + +// Shadow memory is kShadowMultiplier times larger than user memory. +const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell; + +// That many user bytes are mapped onto a single meta shadow cell. +// Must be less or equal to minimal memory allocator alignment. +const uptr kMetaShadowCell = 8; + +// Size of a single meta shadow value (u32). +const uptr kMetaShadowSize = 4; + +#if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY +const bool kCollectHistory = false; +#else +const bool kCollectHistory = true; +#endif + +#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS +const bool kCollectStats = true; +#else +const bool kCollectStats = false; +#endif + +// The following "build consistency" machinery ensures that all source files +// are built in the same configuration. Inconsistent builds lead to +// hard to debug crashes. +#if TSAN_DEBUG +void build_consistency_debug(); +#else +void build_consistency_release(); +#endif + +#if TSAN_COLLECT_STATS +void build_consistency_stats(); +#else +void build_consistency_nostats(); +#endif + +#if TSAN_SHADOW_COUNT == 1 +void build_consistency_shadow1(); +#elif TSAN_SHADOW_COUNT == 2 +void build_consistency_shadow2(); +#elif TSAN_SHADOW_COUNT == 4 +void build_consistency_shadow4(); +#else +void build_consistency_shadow8(); +#endif + +static inline void USED build_consistency() { +#if TSAN_DEBUG + build_consistency_debug(); +#else + build_consistency_release(); +#endif +#if TSAN_COLLECT_STATS + build_consistency_stats(); +#else + build_consistency_nostats(); +#endif +#if TSAN_SHADOW_COUNT == 1 + build_consistency_shadow1(); +#elif TSAN_SHADOW_COUNT == 2 + build_consistency_shadow2(); +#elif TSAN_SHADOW_COUNT == 4 + build_consistency_shadow4(); +#else + build_consistency_shadow8(); +#endif +} + +template<typename T> +T min(T a, T b) { + return a < b ? a : b; +} + +template<typename T> +T max(T a, T b) { + return a > b ? 
a : b;
+}
+
+template<typename T>
+T RoundUp(T p, u64 align) {
+  DCHECK_EQ(align & (align - 1), 0);
+  return (T)(((u64)p + align - 1) & ~(align - 1));
+}
+
+template<typename T>
+T RoundDown(T p, u64 align) {
+  DCHECK_EQ(align & (align - 1), 0);
+  return (T)((u64)p & ~(align - 1));
+}
+
+// Zeroizes the high part, returns the 'bits' least significant bits.
+template<typename T>
+T GetLsb(T v, int bits) {
+  return (T)((u64)v & ((1ull << bits) - 1));
+}
+
+struct MD5Hash {
+  u64 hash[2];
+  bool operator==(const MD5Hash &other) const;
+};
+
+MD5Hash md5_hash(const void *data, uptr size);
+
+struct ThreadState;
+class ThreadContext;
+struct Context;
+struct ReportStack;
+class ReportDesc;
+class RegionAlloc;
+
+// Descriptor of user's memory block.
+struct MBlock {
+  u64 siz;
+  u32 stk;
+  u16 tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+}  // namespace __tsan
+
+#endif  // TSAN_DEFS_H
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
new file mode 100644
index 0000000..a1cf84b
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
@@ -0,0 +1,137 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides functionality to map
+// an index onto the real pointer. The index is u32, that is, 2 times smaller
+// than uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+  static const uptr kSize = 128;
+  typedef u32 IndexT;
+  uptr pos;
+  IndexT cache[kSize];
+  template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+  typedef DenseSlabAllocCache Cache;
+  typedef typename Cache::IndexT IndexT;
+
+  DenseSlabAlloc() {
+    // Check that kL1Size and kL2Size are sane.
+    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+    // Check that it makes sense to use the dense alloc.
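// (Free objects double as freelist nodes: the next free index is stored in
// the first bytes of a free object, so T must be at least as large as
// IndexT.)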
+ CHECK_GE(sizeof(T), sizeof(IndexT)); + internal_memset(map_, 0, sizeof(map_)); + freelist_ = 0; + fillpos_ = 0; + } + + ~DenseSlabAlloc() { + for (uptr i = 0; i < kL1Size; i++) { + if (map_[i] != 0) + UnmapOrDie(map_[i], kL2Size * sizeof(T)); + } + } + + IndexT Alloc(Cache *c) { + if (c->pos == 0) + Refill(c); + return c->cache[--c->pos]; + } + + void Free(Cache *c, IndexT idx) { + DCHECK_NE(idx, 0); + if (c->pos == Cache::kSize) + Drain(c); + c->cache[c->pos++] = idx; + } + + T *Map(IndexT idx) { + DCHECK_NE(idx, 0); + DCHECK_LE(idx, kL1Size * kL2Size); + return &map_[idx / kL2Size][idx % kL2Size]; + } + + void FlushCache(Cache *c) { + SpinMutexLock lock(&mtx_); + while (c->pos) { + IndexT idx = c->cache[--c->pos]; + *(IndexT*)Map(idx) = freelist_; + freelist_ = idx; + } + } + + void InitCache(Cache *c) { + c->pos = 0; + internal_memset(c->cache, 0, sizeof(c->cache)); + } + + private: + T *map_[kL1Size]; + SpinMutex mtx_; + IndexT freelist_; + uptr fillpos_; + + void Refill(Cache *c) { + SpinMutexLock lock(&mtx_); + if (freelist_ == 0) { + if (fillpos_ == kL1Size) { + Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n"); + Die(); + } + T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator"); + // Reserve 0 as invalid index. + IndexT start = fillpos_ == 0 ? 1 : 0; + for (IndexT i = start; i < kL2Size; i++) { + new(batch + i) T(); + *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size; + } + *(IndexT*)(batch + kL2Size - 1) = 0; + freelist_ = fillpos_ * kL2Size + start; + map_[fillpos_++] = batch; + } + for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) { + IndexT idx = freelist_; + c->cache[c->pos++] = idx; + freelist_ = *(IndexT*)Map(idx); + } + } + + void Drain(Cache *c) { + SpinMutexLock lock(&mtx_); + for (uptr i = 0; i < Cache::kSize / 2; i++) { + IndexT idx = c->cache[--c->pos]; + *(IndexT*)Map(idx) = freelist_; + freelist_ = idx; + } + } +}; + +} // namespace __tsan + +#endif // TSAN_DENSE_ALLOC_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_fd.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_fd.cc new file mode 100644 index 0000000..d18502f --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_fd.cc @@ -0,0 +1,298 @@ +//===-- tsan_fd.cc --------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include "tsan_fd.h" +#include "tsan_rtl.h" +#include <sanitizer_common/sanitizer_atomic.h> + +namespace __tsan { + +const int kTableSizeL1 = 1024; +const int kTableSizeL2 = 1024; +const int kTableSize = kTableSizeL1 * kTableSizeL2; + +struct FdSync { + atomic_uint64_t rc; +}; + +struct FdDesc { + FdSync *sync; + int creation_tid; + u32 creation_stack; +}; + +struct FdContext { + atomic_uintptr_t tab[kTableSizeL1]; + // Addresses used for synchronization. + FdSync globsync; + FdSync filesync; + FdSync socksync; + u64 connectsync; +}; + +static FdContext fdctx; + +static bool bogusfd(int fd) { + // Apparently a bogus fd value. 
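// (The two-level table below covers kTableSizeL1 * kTableSizeL2 = 2^20
// descriptors; an fd outside [0, kTableSize) is simply not tracked. The
// lookup is: desc = &level1[fd / kTableSizeL2][fd % kTableSizeL2].)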
+  return fd < 0 || fd >= kTableSize;
+}
+
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+  FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
+      false);
+  atomic_store(&s->rc, 1, memory_order_relaxed);
+  return s;
+}
+
+static FdSync *ref(FdSync *s) {
+  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
+    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
+  return s;
+}
+
+static void unref(ThreadState *thr, uptr pc, FdSync *s) {
+  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
+    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
+      CHECK_NE(s, &fdctx.globsync);
+      CHECK_NE(s, &fdctx.filesync);
+      CHECK_NE(s, &fdctx.socksync);
+      user_free(thr, pc, s, false);
+    }
+  }
+}
+
+static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
+  CHECK_GE(fd, 0);
+  CHECK_LT(fd, kTableSize);
+  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
+  uptr l1 = atomic_load(pl1, memory_order_consume);
+  if (l1 == 0) {
+    uptr size = kTableSizeL2 * sizeof(FdDesc);
+    // We need this to reside in user memory to properly catch races on it.
+    void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
+    internal_memset(p, 0, size);
+    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
+    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
+      l1 = (uptr)p;
+    else
+      user_free(thr, pc, p, false);
+  }
+  return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
+}
+
+// s must be already ref'ed.
+static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
+  FdDesc *d = fddesc(thr, pc, fd);
+  // As a matter of fact, we don't intercept all close calls.
+  // See e.g. libc __res_iclose().
+  if (d->sync) {
+    unref(thr, pc, d->sync);
+    d->sync = 0;
+  }
+  if (flags()->io_sync == 0) {
+    unref(thr, pc, s);
+  } else if (flags()->io_sync == 1) {
+    d->sync = s;
+  } else if (flags()->io_sync == 2) {
+    unref(thr, pc, s);
+    d->sync = &fdctx.globsync;
+  }
+  d->creation_tid = thr->tid;
+  d->creation_stack = CurrentStackId(thr, pc);
+  // To catch races between fd usage and open.
+  MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+}
+
+void FdInit() {
+  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
+  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
+  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
+}
+
+void FdOnFork(ThreadState *thr, uptr pc) {
+  // On fork() we need to reset all fd's, because the child is going to
+  // close all of them, and that will cause races between previous
+  // read/write and the close.
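// (MemoryResetRange below clears the shadow of every allocated FdDesc, so
// the child's close() calls do not appear to race with the parent's earlier
// reads and writes recorded on those descriptors.)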
+  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+    if (tab == 0)
+      break;
+    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
+      FdDesc *d = &tab[l2];
+      MemoryResetRange(thr, pc, (uptr)d, 8);
+    }
+  }
+}
+
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+    if (tab == 0)
+      break;
+    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
+      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
+      FdDesc *d = &tab[l2];
+      *fd = l1 * kTableSizeL1 + l2;
+      *tid = d->creation_tid;
+      *stack = d->creation_stack;
+      return true;
+    }
+  }
+  return false;
+}
+
+void FdAcquire(ThreadState *thr, uptr pc, int fd) {
+  if (bogusfd(fd))
+    return;
+  FdDesc *d = fddesc(thr, pc, fd);
+  FdSync *s = d->sync;
+  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
+  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  if (s)
+    Acquire(thr, pc, (uptr)s);
+}
+
+void FdRelease(ThreadState *thr, uptr pc, int fd) {
+  if (bogusfd(fd))
+    return;
+  FdDesc *d = fddesc(thr, pc, fd);
+  FdSync *s = d->sync;
+  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
+  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  if (s)
+    Release(thr, pc, (uptr)s);
+}
+
+void FdAccess(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
+  if (bogusfd(fd))
+    return;
+  FdDesc *d = fddesc(thr, pc, fd);
+  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+}
+
+void FdClose(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
+  if (bogusfd(fd))
+    return;
+  FdDesc *d = fddesc(thr, pc, fd);
+  // To catch races between fd usage and close.
+  MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+  // We need to clear it, because if we do not intercept any call out there
+  // that creates an fd, we will hit false positives.
+  MemoryResetRange(thr, pc, (uptr)d, 8);
+  unref(thr, pc, d->sync);
+  d->sync = 0;
+  d->creation_tid = 0;
+  d->creation_stack = 0;
+}
+
+void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
+  if (bogusfd(fd))
+    return;
+  init(thr, pc, fd, &fdctx.filesync);
+}
+
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
+  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
+  if (bogusfd(oldfd) || bogusfd(newfd))
+    return;
+  // Ignore the case when the user dups a not-yet-connected socket.
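// (After the dup, newfd shares oldfd's FdSync object, so a release on one
// descriptor is observed by an acquire on the other.)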
+ FdDesc *od = fddesc(thr, pc, oldfd); + MemoryRead(thr, pc, (uptr)od, kSizeLog8); + FdClose(thr, pc, newfd); + init(thr, pc, newfd, ref(od->sync)); +} + +void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) { + DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd); + FdSync *s = allocsync(thr, pc); + init(thr, pc, rfd, ref(s)); + init(thr, pc, wfd, ref(s)); + unref(thr, pc, s); +} + +void FdEventCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd); + if (bogusfd(fd)) + return; + init(thr, pc, fd, allocsync(thr, pc)); +} + +void FdSignalCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd); + if (bogusfd(fd)) + return; + init(thr, pc, fd, 0); +} + +void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd); + if (bogusfd(fd)) + return; + init(thr, pc, fd, 0); +} + +void FdPollCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd); + if (bogusfd(fd)) + return; + init(thr, pc, fd, allocsync(thr, pc)); +} + +void FdSocketCreate(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd); + if (bogusfd(fd)) + return; + // It can be a UDP socket. + init(thr, pc, fd, &fdctx.socksync); +} + +void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) { + DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd); + if (bogusfd(fd)) + return; + // Synchronize connect->accept. + Acquire(thr, pc, (uptr)&fdctx.connectsync); + init(thr, pc, newfd, &fdctx.socksync); +} + +void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd); + if (bogusfd(fd)) + return; + // Synchronize connect->accept. + Release(thr, pc, (uptr)&fdctx.connectsync); +} + +void FdSocketConnect(ThreadState *thr, uptr pc, int fd) { + DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd); + if (bogusfd(fd)) + return; + init(thr, pc, fd, &fdctx.socksync); +} + +uptr File2addr(const char *path) { + (void)path; + static u64 addr; + return (uptr)&addr; +} + +uptr Dir2addr(const char *path) { + (void)path; + static u64 addr; + return (uptr)&addr; +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_fd.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_fd.h new file mode 100644 index 0000000..75c616d --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_fd.h @@ -0,0 +1,65 @@ +//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// This file handles synchronization via IO. +// People use IO for synchronization along the lines of: +// +// int X; +// int client_socket; // initialized elsewhere +// int server_socket; // initialized elsewhere +// +// Thread 1: +// X = 42; +// send(client_socket, ...); +// +// Thread 2: +// if (recv(server_socket, ...) > 0) +// assert(X == 42); +// +// This file determines the scope of the file descriptor (pipe, socket, +// all local files, etc) and executes acquire and release operations on +// the scope as necessary. Some scopes are very fine grained (e.g. 
pipe
+// operations synchronize only with operations on the same pipe), while
+// others are coarse-grained (e.g. all operations on local files synchronize
+// with each other).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FD_H
+#define TSAN_FD_H
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void FdInit();
+void FdAcquire(ThreadState *thr, uptr pc, int fd);
+void FdRelease(ThreadState *thr, uptr pc, int fd);
+void FdAccess(ThreadState *thr, uptr pc, int fd);
+void FdClose(ThreadState *thr, uptr pc, int fd);
+void FdFileCreate(ThreadState *thr, uptr pc, int fd);
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd);
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
+void FdEventCreate(ThreadState *thr, uptr pc, int fd);
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+void FdOnFork(ThreadState *thr, uptr pc);
+
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
+
+}  // namespace __tsan
+
+#endif  // TSAN_FD_H
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.cc
new file mode 100644
index 0000000..5dc331f
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.cc
@@ -0,0 +1,133 @@
+//===-- tsan_flags.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+Flags *flags() {
+  return &ctx->flags;
+}
+
+// Can be overridden in the frontend.
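// For example, an embedding application can bake in its own defaults by
// defining this function (the flag values here are purely illustrative):
//
//   extern "C" const char *__tsan_default_options() {
//     return "halt_on_error=1 history_size=3";
//   }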
+#ifdef TSAN_EXTERNAL_HOOKS +extern "C" const char* __tsan_default_options(); +#else +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +const char *WEAK __tsan_default_options() { + return ""; +} +#endif + +static void ParseFlags(Flags *f, const char *env) { + ParseFlag(env, &f->enable_annotations, "enable_annotations", ""); + ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks", ""); + ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses", ""); + ParseFlag(env, &f->report_bugs, "report_bugs", ""); + ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks", ""); + ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked", ""); + ParseFlag(env, &f->report_mutex_bugs, "report_mutex_bugs", ""); + ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe", ""); + ParseFlag(env, &f->report_atomic_races, "report_atomic_races", ""); + ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics", ""); + ParseFlag(env, &f->print_benign, "print_benign", ""); + ParseFlag(env, &f->exitcode, "exitcode", ""); + ParseFlag(env, &f->halt_on_error, "halt_on_error", ""); + ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms", ""); + ParseFlag(env, &f->profile_memory, "profile_memory", ""); + ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms", ""); + ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms", ""); + ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb", ""); + ParseFlag(env, &f->stop_on_start, "stop_on_start", ""); + ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind", ""); + ParseFlag(env, &f->history_size, "history_size", ""); + ParseFlag(env, &f->io_sync, "io_sync", ""); + ParseFlag(env, &f->die_after_fork, "die_after_fork", ""); + + // DDFlags + ParseFlag(env, &f->second_deadlock_stack, "second_deadlock_stack", ""); +} + +void InitializeFlags(Flags *f, const char *env) { + internal_memset(f, 0, sizeof(*f)); + + // Default values. + f->enable_annotations = true; + f->suppress_equal_stacks = true; + f->suppress_equal_addresses = true; + f->report_bugs = true; + f->report_thread_leaks = true; + f->report_destroy_locked = true; + f->report_mutex_bugs = true; + f->report_signal_unsafe = true; + f->report_atomic_races = true; + f->force_seq_cst_atomics = false; + f->print_benign = false; + f->exitcode = 66; + f->halt_on_error = false; + f->atexit_sleep_ms = 1000; + f->profile_memory = ""; + f->flush_memory_ms = 0; + f->flush_symbolizer_ms = 5000; + f->memory_limit_mb = 0; + f->stop_on_start = false; + f->running_on_valgrind = false; + f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go. + f->io_sync = 1; + f->die_after_fork = true; + + // DDFlags + f->second_deadlock_stack = false; + + CommonFlags *cf = common_flags(); + SetCommonFlagsDefaults(cf); + // Override some common flags defaults. + cf->allow_addr2line = true; + cf->detect_deadlocks = true; + cf->print_suppressions = false; + cf->stack_trace_format = " #%n %f %S %M"; + + // Let a frontend override. + ParseFlags(f, __tsan_default_options()); + ParseCommonFlagsFromString(cf, __tsan_default_options()); + // Override from command line. + ParseFlags(f, env); + ParseCommonFlagsFromString(cf, env); + + // Sanity check. 
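// (By this point the flags have been layered: built-in defaults, then
// __tsan_default_options(), then the environment; the checks below disable
// or reject combinations that make no sense together.)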
+ if (!f->report_bugs) { + f->report_thread_leaks = false; + f->report_destroy_locked = false; + f->report_signal_unsafe = false; + } + + if (cf->help) PrintFlagDescriptions(); + + if (f->history_size < 0 || f->history_size > 7) { + Printf("ThreadSanitizer: incorrect value for history_size" + " (must be [0..7])\n"); + Die(); + } + + if (f->io_sync < 0 || f->io_sync > 2) { + Printf("ThreadSanitizer: incorrect value for io_sync" + " (must be [0..2])\n"); + Die(); + } +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.h new file mode 100644 index 0000000..621ca13 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_flags.h @@ -0,0 +1,88 @@ +//===-- tsan_flags.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// NOTE: This file may be included into user code. +//===----------------------------------------------------------------------===// + +#ifndef TSAN_FLAGS_H +#define TSAN_FLAGS_H + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_deadlock_detector_interface.h" + +namespace __tsan { + +struct Flags : DDFlags { + // Enable dynamic annotations, otherwise they are no-ops. + bool enable_annotations; + // Suppress a race report if we've already output another race report + // with the same stack. + bool suppress_equal_stacks; + // Suppress a race report if we've already output another race report + // on the same address. + bool suppress_equal_addresses; + // Turns off bug reporting entirely (useful for benchmarking). + bool report_bugs; + // Report thread leaks at exit? + bool report_thread_leaks; + // Report destruction of a locked mutex? + bool report_destroy_locked; + // Report incorrect usages of mutexes and mutex annotations? + bool report_mutex_bugs; + // Report violations of async signal-safety + // (e.g. malloc() call from a signal handler). + bool report_signal_unsafe; + // Report races between atomic and plain memory accesses. + bool report_atomic_races; + // If set, all atomics are effectively sequentially consistent (seq_cst), + // regardless of what user actually specified. + bool force_seq_cst_atomics; + // Print matched "benign" races at exit. + bool print_benign; + // Override exit status if something was reported. + int exitcode; + // Exit after first reported error. + bool halt_on_error; + // Sleep in main thread before exiting for that many ms + // (useful to catch "at exit" races). + int atexit_sleep_ms; + // If set, periodically write memory profile to that file. + const char *profile_memory; + // Flush shadow memory every X ms. + int flush_memory_ms; + // Flush symbolizer caches every X ms. + int flush_symbolizer_ms; + // Resident memory limit in MB to aim at. + // If the process consumes more memory, then TSan will flush shadow memory. + int memory_limit_mb; + // Stops on start until __tsan_resume() is called (for debugging). + bool stop_on_start; + // Controls whether RunningOnValgrind() returns true or false. + bool running_on_valgrind; + // Per-thread history size, controls how many previous memory accesses + // are remembered per thread. Possible values are [0..7]. + // history_size=0 amounts to 32K memory accesses. 
Each next value doubles + // the amount of memory accesses, up to history_size=7 that amounts to + // 4M memory accesses. The default value is 2 (128K memory accesses). + int history_size; + // Controls level of synchronization implied by IO operations. + // 0 - no synchronization + // 1 - reasonable level of synchronization (write->read) + // 2 - global synchronization of all IO operations + int io_sync; + // Die after multi-threaded fork if the child creates new threads. + bool die_after_fork; +}; + +Flags *flags(); +void InitializeFlags(Flags *flags, const char *env); +} // namespace __tsan + +#endif // TSAN_FLAGS_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cc new file mode 100644 index 0000000..cdb90d2 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cc @@ -0,0 +1,47 @@ +//===-- tsan_ignoreset.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#include "tsan_ignoreset.h" + +namespace __tsan { + +const uptr IgnoreSet::kMaxSize; + +IgnoreSet::IgnoreSet() + : size_() { +} + +void IgnoreSet::Add(u32 stack_id) { + if (size_ == kMaxSize) + return; + for (uptr i = 0; i < size_; i++) { + if (stacks_[i] == stack_id) + return; + } + stacks_[size_++] = stack_id; +} + +void IgnoreSet::Reset() { + size_ = 0; +} + +uptr IgnoreSet::Size() const { + return size_; +} + +u32 IgnoreSet::At(uptr i) const { + CHECK_LT(i, size_); + CHECK_LE(size_, kMaxSize); + return stacks_[i]; +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h new file mode 100644 index 0000000..e747d81 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h @@ -0,0 +1,38 @@ +//===-- tsan_ignoreset.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// IgnoreSet holds a set of stack traces where ignores were enabled. 
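// (The set is bounded and deduplicated: Add() ignores stacks that are
// already present and silently drops new ones once kMaxSize entries are
// used, so it is a best-effort diagnostic rather than an exact record.)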
+//===----------------------------------------------------------------------===// +#ifndef TSAN_IGNORESET_H +#define TSAN_IGNORESET_H + +#include "tsan_defs.h" + +namespace __tsan { + +class IgnoreSet { + public: + static const uptr kMaxSize = 16; + + IgnoreSet(); + void Add(u32 stack_id); + void Reset(); + uptr Size() const; + u32 At(uptr i) const; + + private: + uptr size_; + u32 stacks_[kMaxSize]; +}; + +} // namespace __tsan + +#endif // TSAN_IGNORESET_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc new file mode 100644 index 0000000..5bede0e --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interceptors.cc @@ -0,0 +1,2587 @@ +//===-- tsan_interceptors.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// FIXME: move as many interceptors as possible into +// sanitizer_common/sanitizer_common_interceptors.inc +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_platform_limits_posix.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "interception/interception.h" +#include "tsan_interface.h" +#include "tsan_platform.h" +#include "tsan_suppressions.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" +#include "tsan_fd.h" + +using namespace __tsan; // NOLINT + +#if SANITIZER_FREEBSD +#define __errno_location __error +#define __libc_malloc __malloc +#define __libc_realloc __realloc +#define __libc_calloc __calloc +#define __libc_free __free +#define stdout __stdoutp +#define stderr __stderrp +#endif + +const int kSigCount = 65; + +struct my_siginfo_t { + // The size is determined by looking at sizeof of real siginfo_t on linux. + u64 opaque[128 / sizeof(u64)]; +}; + +struct ucontext_t { + // The size is determined by looking at sizeof of real ucontext_t on linux. + u64 opaque[936 / sizeof(u64) + 1]; +}; + +extern "C" int pthread_attr_init(void *attr); +extern "C" int pthread_attr_destroy(void *attr); +DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *) +extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize); +extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v)); +extern "C" int pthread_setspecific(unsigned key, const void *v); +DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *) +extern "C" int pthread_yield(); +extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set, + __sanitizer_sigset_t *oldset); +// REAL(sigfillset) defined in common interceptors. 
+DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set) +DECLARE_REAL(int, fflush, __sanitizer_FILE *fp) +extern "C" void *pthread_self(); +extern "C" void _exit(int status); +extern "C" int *__errno_location(); +extern "C" int fileno_unlocked(void *stream); +extern "C" void *__libc_malloc(uptr size); +extern "C" void *__libc_calloc(uptr size, uptr n); +extern "C" void *__libc_realloc(void *ptr, uptr size); +extern "C" void __libc_free(void *ptr); +#if !SANITIZER_FREEBSD +extern "C" int mallopt(int param, int value); +#endif +extern __sanitizer_FILE *stdout, *stderr; +const int PTHREAD_MUTEX_RECURSIVE = 1; +const int PTHREAD_MUTEX_RECURSIVE_NP = 1; +const int EINVAL = 22; +const int EBUSY = 16; +const int EOWNERDEAD = 130; +const int EPOLL_CTL_ADD = 1; +const int SIGILL = 4; +const int SIGABRT = 6; +const int SIGFPE = 8; +const int SIGSEGV = 11; +const int SIGPIPE = 13; +const int SIGTERM = 15; +const int SIGBUS = 7; +const int SIGSYS = 31; +void *const MAP_FAILED = (void*)-1; +const int PTHREAD_BARRIER_SERIAL_THREAD = -1; +const int MAP_FIXED = 0x10; +typedef long long_t; // NOLINT + +// From /usr/include/unistd.h +# define F_ULOCK 0 /* Unlock a previously locked region. */ +# define F_LOCK 1 /* Lock a region for exclusive use. */ +# define F_TLOCK 2 /* Test and lock a region for exclusive use. */ +# define F_TEST 3 /* Test a region for other processes locks. */ + +typedef void (*sighandler_t)(int sig); + +#define errno (*__errno_location()) + +struct sigaction_t { + union { + sighandler_t sa_handler; + void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx); + }; +#if SANITIZER_FREEBSD + int sa_flags; + __sanitizer_sigset_t sa_mask; +#else + __sanitizer_sigset_t sa_mask; + int sa_flags; + void (*sa_restorer)(); +#endif +}; + +const sighandler_t SIG_DFL = (sighandler_t)0; +const sighandler_t SIG_IGN = (sighandler_t)1; +const sighandler_t SIG_ERR = (sighandler_t)-1; +const int SA_SIGINFO = 4; +const int SIG_SETMASK = 2; + +namespace std { +struct nothrow_t {}; +} // namespace std + +static sigaction_t sigactions[kSigCount]; + +namespace __tsan { +struct SignalDesc { + bool armed; + bool sigaction; + my_siginfo_t siginfo; + ucontext_t ctx; +}; + +struct SignalContext { + int int_signal_send; + atomic_uintptr_t in_blocking_func; + atomic_uintptr_t have_pending_signals; + SignalDesc pending_signals[kSigCount]; +}; + +// The object is 64-byte aligned, because we want hot data to be located in +// a single cache line if possible (it's accessed in every interceptor). 
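// (A raw aligned buffer plus a cast is used instead of a global LibIgnore
// object because global constructors are banned in the runtime; the object
// is set up explicitly via InitializeLibIgnore() during startup.)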
+static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)]; +static LibIgnore *libignore() { + return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]); +} + +void InitializeLibIgnore() { + libignore()->Init(*SuppressionContext::Get()); + libignore()->OnLibraryLoaded(0); +} + +} // namespace __tsan + +static SignalContext *SigCtx(ThreadState *thr) { + SignalContext *ctx = (SignalContext*)thr->signal_ctx; + if (ctx == 0 && !thr->is_dead) { + ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext"); + MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx)); + thr->signal_ctx = ctx; + } + return ctx; +} + +static unsigned g_thread_finalize_key; + +class ScopedInterceptor { + public: + ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc); + ~ScopedInterceptor(); + private: + ThreadState *const thr_; + const uptr pc_; + bool in_ignored_lib_; +}; + +ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname, + uptr pc) + : thr_(thr) + , pc_(pc) + , in_ignored_lib_(false) { + if (!thr_->ignore_interceptors) { + Initialize(thr); + FuncEntry(thr, pc); + } + DPrintf("#%d: intercept %s()\n", thr_->tid, fname); + if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) { + in_ignored_lib_ = true; + thr_->in_ignored_lib = true; + ThreadIgnoreBegin(thr_, pc_); + } +} + +ScopedInterceptor::~ScopedInterceptor() { + if (in_ignored_lib_) { + thr_->in_ignored_lib = false; + ThreadIgnoreEnd(thr_, pc_); + } + if (!thr_->ignore_interceptors) { + ProcessPendingSignals(thr_); + FuncExit(thr_); + CheckNoLocks(thr_); + } +} + +#define SCOPED_INTERCEPTOR_RAW(func, ...) \ + ThreadState *thr = cur_thread(); \ + const uptr caller_pc = GET_CALLER_PC(); \ + ScopedInterceptor si(thr, #func, caller_pc); \ + const uptr pc = StackTrace::GetCurrentPc(); \ + (void)pc; \ +/**/ + +#define SCOPED_TSAN_INTERCEPTOR(func, ...) \ + SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ + if (REAL(func) == 0) { \ + Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \ + Die(); \ + } \ + if (thr->ignore_interceptors || thr->in_ignored_lib) \ + return REAL(func)(__VA_ARGS__); \ +/**/ + +#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__) +#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func) +#if SANITIZER_FREEBSD +# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) +#else +# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver) +#endif + +#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name)) + +struct BlockingCall { + explicit BlockingCall(ThreadState *thr) + : thr(thr) + , ctx(SigCtx(thr)) { + for (;;) { + atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed); + if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0) + break; + atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed); + ProcessPendingSignals(thr); + } + // When we are in a "blocking call", we process signals asynchronously + // (right when they arrive). In this context we do not expect to be + // executing any user/runtime code. The known interceptor sequence when + // this is not true is: pthread_join -> munmap(stack). It's fine + // to ignore munmap in this case -- we handle stack shadow separately. 
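// (BLOCK_REAL(name) expands to (BlockingCall(thr), REAL(name)), so the
// temporary constructed here lives for the duration of the real call:
// pending signals are drained first, and interceptors stay ignored until
// the call returns and the destructor runs.)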
+ thr->ignore_interceptors++; + } + + ~BlockingCall() { + thr->ignore_interceptors--; + atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed); + } + + ThreadState *thr; + SignalContext *ctx; +}; + +TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) { + SCOPED_TSAN_INTERCEPTOR(sleep, sec); + unsigned res = BLOCK_REAL(sleep)(sec); + AfterSleep(thr, pc); + return res; +} + +TSAN_INTERCEPTOR(int, usleep, long_t usec) { + SCOPED_TSAN_INTERCEPTOR(usleep, usec); + int res = BLOCK_REAL(usleep)(usec); + AfterSleep(thr, pc); + return res; +} + +TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) { + SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem); + int res = BLOCK_REAL(nanosleep)(req, rem); + AfterSleep(thr, pc); + return res; +} + +// The sole reason tsan wraps atexit callbacks is to establish synchronization +// between callback setup and callback execution. +struct AtExitCtx { + void (*f)(); + void *arg; +}; + +static void at_exit_wrapper(void *arg) { + ThreadState *thr = cur_thread(); + uptr pc = 0; + Acquire(thr, pc, (uptr)arg); + AtExitCtx *ctx = (AtExitCtx*)arg; + ((void(*)(void *arg))ctx->f)(ctx->arg); + __libc_free(ctx); +} + +static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), + void *arg, void *dso); + +TSAN_INTERCEPTOR(int, atexit, void (*f)()) { + if (cur_thread()->in_symbolizer) + return 0; + // We want to setup the atexit callback even if we are in ignored lib + // or after fork. + SCOPED_INTERCEPTOR_RAW(atexit, f); + return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0); +} + +TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) { + if (cur_thread()->in_symbolizer) + return 0; + SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso); + return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso); +} + +static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), + void *arg, void *dso) { + AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx)); + ctx->f = f; + ctx->arg = arg; + Release(thr, pc, (uptr)ctx); + // Memory allocation in __cxa_atexit will race with free during exit, + // because we do not see synchronization around atexit callback list. + ThreadIgnoreBegin(thr, pc); + int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso); + ThreadIgnoreEnd(thr, pc); + return res; +} + +static void on_exit_wrapper(int status, void *arg) { + ThreadState *thr = cur_thread(); + uptr pc = 0; + Acquire(thr, pc, (uptr)arg); + AtExitCtx *ctx = (AtExitCtx*)arg; + ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg); + __libc_free(ctx); +} + +TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) { + if (cur_thread()->in_symbolizer) + return 0; + SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg); + AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx)); + ctx->f = (void(*)())f; + ctx->arg = arg; + Release(thr, pc, (uptr)ctx); + // Memory allocation in __cxa_atexit will race with free during exit, + // because we do not see synchronization around atexit callback list. + ThreadIgnoreBegin(thr, pc); + int res = REAL(on_exit)(on_exit_wrapper, ctx); + ThreadIgnoreEnd(thr, pc); + return res; +} + +// Cleanup old bufs. 
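+// A buf whose saved sp is at or below the sp of a newer setjmp belongs to a
+// frame that has already returned (the stack grows down), so it can never be
+// a valid longjmp target again. Dead entries are removed by swapping in the
+// last element; i is decremented to re-examine the swapped-in slot.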
+static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) { + for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) { + JmpBuf *buf = &thr->jmp_bufs[i]; + if (buf->sp <= sp) { + uptr sz = thr->jmp_bufs.Size(); + thr->jmp_bufs[i] = thr->jmp_bufs[sz - 1]; + thr->jmp_bufs.PopBack(); + i--; + } + } +} + +static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) { + if (thr->shadow_stack_pos == 0) // called from libc guts during bootstrap + return; + // Cleanup old bufs. + JmpBufGarbageCollect(thr, sp); + // Remember the buf. + JmpBuf *buf = thr->jmp_bufs.PushBack(); + buf->sp = sp; + buf->mangled_sp = mangled_sp; + buf->shadow_stack_pos = thr->shadow_stack_pos; + SignalContext *sctx = SigCtx(thr); + buf->int_signal_send = sctx ? sctx->int_signal_send : 0; + buf->in_blocking_func = sctx ? + atomic_load(&sctx->in_blocking_func, memory_order_relaxed) : + false; + buf->in_signal_handler = atomic_load(&thr->in_signal_handler, + memory_order_relaxed); +} + +static void LongJmp(ThreadState *thr, uptr *env) { +#if SANITIZER_FREEBSD + uptr mangled_sp = env[2]; +#else + uptr mangled_sp = env[6]; +#endif // SANITIZER_FREEBSD + // Find the saved buf by mangled_sp. + for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) { + JmpBuf *buf = &thr->jmp_bufs[i]; + if (buf->mangled_sp == mangled_sp) { + CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos); + // Unwind the stack. + while (thr->shadow_stack_pos > buf->shadow_stack_pos) + FuncExit(thr); + SignalContext *sctx = SigCtx(thr); + if (sctx) { + sctx->int_signal_send = buf->int_signal_send; + atomic_store(&sctx->in_blocking_func, buf->in_blocking_func, + memory_order_relaxed); + } + atomic_store(&thr->in_signal_handler, buf->in_signal_handler, + memory_order_relaxed); + JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp + return; + } + } + Printf("ThreadSanitizer: can't find longjmp buf\n"); + CHECK(0); +} + +// FIXME: put everything below into a common extern "C" block? +extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) { + SetJmp(cur_thread(), sp, mangled_sp); +} + +// Not called. Merely to satisfy TSAN_INTERCEPT(). +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +int __interceptor_setjmp(void *env); +extern "C" int __interceptor_setjmp(void *env) { + CHECK(0); + return 0; +} + +// FIXME: any reason to have a separate declaration? 
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE +int __interceptor__setjmp(void *env); +extern "C" int __interceptor__setjmp(void *env) { + CHECK(0); + return 0; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +int __interceptor_sigsetjmp(void *env); +extern "C" int __interceptor_sigsetjmp(void *env) { + CHECK(0); + return 0; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +int __interceptor___sigsetjmp(void *env); +extern "C" int __interceptor___sigsetjmp(void *env) { + CHECK(0); + return 0; +} + +extern "C" int setjmp(void *env); +extern "C" int _setjmp(void *env); +extern "C" int sigsetjmp(void *env); +extern "C" int __sigsetjmp(void *env); +DEFINE_REAL(int, setjmp, void *env) +DEFINE_REAL(int, _setjmp, void *env) +DEFINE_REAL(int, sigsetjmp, void *env) +DEFINE_REAL(int, __sigsetjmp, void *env) + +TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) { + { + SCOPED_TSAN_INTERCEPTOR(longjmp, env, val); + } + LongJmp(cur_thread(), env); + REAL(longjmp)(env, val); +} + +TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) { + { + SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val); + } + LongJmp(cur_thread(), env); + REAL(siglongjmp)(env, val); +} + +TSAN_INTERCEPTOR(void*, malloc, uptr size) { + if (cur_thread()->in_symbolizer) + return __libc_malloc(size); + void *p = 0; + { + SCOPED_INTERCEPTOR_RAW(malloc, size); + p = user_alloc(thr, pc, size); + } + invoke_malloc_hook(p, size); + return p; +} + +TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) { + SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz); + return user_alloc(thr, pc, sz, align); +} + +TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) { + if (cur_thread()->in_symbolizer) + return __libc_calloc(size, n); + if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n)) + return AllocatorReturnNull(); + void *p = 0; + { + SCOPED_INTERCEPTOR_RAW(calloc, size, n); + p = user_alloc(thr, pc, n * size); + if (p) + internal_memset(p, 0, n * size); + } + invoke_malloc_hook(p, n * size); + return p; +} + +TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) { + if (cur_thread()->in_symbolizer) + return __libc_realloc(p, size); + if (p) + invoke_free_hook(p); + { + SCOPED_INTERCEPTOR_RAW(realloc, p, size); + p = user_realloc(thr, pc, p, size); + } + invoke_malloc_hook(p, size); + return p; +} + +TSAN_INTERCEPTOR(void, free, void *p) { + if (p == 0) + return; + if (cur_thread()->in_symbolizer) + return __libc_free(p); + invoke_free_hook(p); + SCOPED_INTERCEPTOR_RAW(free, p); + user_free(thr, pc, p); +} + +TSAN_INTERCEPTOR(void, cfree, void *p) { + if (p == 0) + return; + if (cur_thread()->in_symbolizer) + return __libc_free(p); + invoke_free_hook(p); + SCOPED_INTERCEPTOR_RAW(cfree, p); + user_free(thr, pc, p); +} + +TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) { + SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p); + return user_alloc_usable_size(p); +} + +#define OPERATOR_NEW_BODY(mangled_name) \ + if (cur_thread()->in_symbolizer) \ + return __libc_malloc(size); \ + void *p = 0; \ + { \ + SCOPED_INTERCEPTOR_RAW(mangled_name, size); \ + p = user_alloc(thr, pc, size); \ + } \ + invoke_malloc_hook(p, size); \ + return p; + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new(__sanitizer::uptr size); +void *operator new(__sanitizer::uptr size) { + OPERATOR_NEW_BODY(_Znwm); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new[](__sanitizer::uptr size); +void *operator new[](__sanitizer::uptr size) { + OPERATOR_NEW_BODY(_Znam); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new(__sanitizer::uptr size, std::nothrow_t const&); +void 
*operator new(__sanitizer::uptr size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new[](__sanitizer::uptr size, std::nothrow_t const&); +void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t); +} + +#define OPERATOR_DELETE_BODY(mangled_name) \ + if (ptr == 0) return; \ + if (cur_thread()->in_symbolizer) \ + return __libc_free(ptr); \ + invoke_free_hook(ptr); \ + SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \ + user_free(thr, pc, ptr); + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete(void *ptr) throw(); +void operator delete(void *ptr) throw() { + OPERATOR_DELETE_BODY(_ZdlPv); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete[](void *ptr) throw(); +void operator delete[](void *ptr) throw() { + OPERATOR_DELETE_BODY(_ZdaPv); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&); +void operator delete(void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&); +void operator delete[](void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t); +} + +TSAN_INTERCEPTOR(uptr, strlen, const char *s) { + SCOPED_TSAN_INTERCEPTOR(strlen, s); + uptr len = internal_strlen(s); + MemoryAccessRange(thr, pc, (uptr)s, len + 1, false); + return len; +} + +TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) { + SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size); + MemoryAccessRange(thr, pc, (uptr)dst, size, true); + return internal_memset(dst, v, size); +} + +TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) { + SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size); + MemoryAccessRange(thr, pc, (uptr)dst, size, true); + MemoryAccessRange(thr, pc, (uptr)src, size, false); + return internal_memcpy(dst, src, size); +} + +TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) { + SCOPED_TSAN_INTERCEPTOR(memcmp, s1, s2, n); + int res = 0; + uptr len = 0; + for (; len < n; len++) { + if ((res = ((const unsigned char *)s1)[len] - + ((const unsigned char *)s2)[len])) + break; + } + MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false); + MemoryAccessRange(thr, pc, (uptr)s2, len < n ? len + 1 : n, false); + return res; +} + +TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) { + SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n); + MemoryAccessRange(thr, pc, (uptr)dst, n, true); + MemoryAccessRange(thr, pc, (uptr)src, n, false); + return REAL(memmove)(dst, src, n); +} + +TSAN_INTERCEPTOR(char*, strchr, char *s, int c) { + SCOPED_TSAN_INTERCEPTOR(strchr, s, c); + char *res = REAL(strchr)(s, c); + uptr len = res ? 
(char*)res - (char*)s + 1 : internal_strlen(s) + 1; + MemoryAccessRange(thr, pc, (uptr)s, len, false); + return res; +} + +TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) { + SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c); + char *res = REAL(strchrnul)(s, c); + uptr len = (char*)res - (char*)s + 1; + MemoryAccessRange(thr, pc, (uptr)s, len, false); + return res; +} + +TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) { + SCOPED_TSAN_INTERCEPTOR(strrchr, s, c); + MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s) + 1, false); + return REAL(strrchr)(s, c); +} + +TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT + SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT + uptr srclen = internal_strlen(src); + MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true); + MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false); + return REAL(strcpy)(dst, src); // NOLINT +} + +TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) { + SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n); + uptr srclen = internal_strnlen(src, n); + MemoryAccessRange(thr, pc, (uptr)dst, n, true); + MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false); + return REAL(strncpy)(dst, src, n); +} + +TSAN_INTERCEPTOR(const char*, strstr, const char *s1, const char *s2) { + SCOPED_TSAN_INTERCEPTOR(strstr, s1, s2); + const char *res = REAL(strstr)(s1, s2); + uptr len1 = internal_strlen(s1); + uptr len2 = internal_strlen(s2); + MemoryAccessRange(thr, pc, (uptr)s1, len1 + 1, false); + MemoryAccessRange(thr, pc, (uptr)s2, len2 + 1, false); + return res; +} + +TSAN_INTERCEPTOR(char*, strdup, const char *str) { + SCOPED_TSAN_INTERCEPTOR(strdup, str); + // strdup will call malloc, so no instrumentation is required here. + return REAL(strdup)(str); +} + +static bool fix_mmap_addr(void **addr, long_t sz, int flags) { + if (*addr) { + if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) { + if (flags & MAP_FIXED) { + errno = EINVAL; + return false; + } else { + *addr = 0; + } + } + } + return true; +} + +TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot, + int flags, int fd, unsigned off) { + SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off); + if (!fix_mmap_addr(&addr, sz, flags)) + return MAP_FAILED; + void *res = REAL(mmap)(addr, sz, prot, flags, fd, off); + if (res != MAP_FAILED) { + if (fd > 0) + FdAccess(thr, pc, fd); + MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); + } + return res; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot, + int flags, int fd, u64 off) { + SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off); + if (!fix_mmap_addr(&addr, sz, flags)) + return MAP_FAILED; + void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off); + if (res != MAP_FAILED) { + if (fd > 0) + FdAccess(thr, pc, fd); + MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); + } + return res; +} +#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64) +#else +#define TSAN_MAYBE_INTERCEPT_MMAP64 +#endif + +TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) { + SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz); + DontNeedShadowFor((uptr)addr, sz); + int res = REAL(munmap)(addr, sz); + return res; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) { + SCOPED_INTERCEPTOR_RAW(memalign, align, sz); + return user_alloc(thr, pc, sz, align); +} +#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign) +#else +#define TSAN_MAYBE_INTERCEPT_MEMALIGN +#endif + +TSAN_INTERCEPTOR(void*, aligned_alloc, uptr 
align, uptr sz) { + SCOPED_INTERCEPTOR_RAW(memalign, align, sz); + return user_alloc(thr, pc, sz, align); +} + +TSAN_INTERCEPTOR(void*, valloc, uptr sz) { + SCOPED_INTERCEPTOR_RAW(valloc, sz); + return user_alloc(thr, pc, sz, GetPageSizeCached()); +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) { + SCOPED_INTERCEPTOR_RAW(pvalloc, sz); + sz = RoundUp(sz, GetPageSizeCached()); + return user_alloc(thr, pc, sz, GetPageSizeCached()); +} +#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc) +#else +#define TSAN_MAYBE_INTERCEPT_PVALLOC +#endif + +TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) { + SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz); + *memptr = user_alloc(thr, pc, sz, align); + return 0; +} + +// Used in thread-safe function static initialization. +extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) { + SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g); + for (;;) { + u32 cmp = atomic_load(g, memory_order_acquire); + if (cmp == 0) { + if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed)) + return 1; + } else if (cmp == 1) { + Acquire(thr, pc, (uptr)g); + return 0; + } else { + internal_sched_yield(); + } + } +} + +extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_release(atomic_uint32_t *g) { + SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g); + Release(thr, pc, (uptr)g); + atomic_store(g, 1, memory_order_release); +} + +extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_abort(atomic_uint32_t *g) { + SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g); + atomic_store(g, 0, memory_order_relaxed); +} + +static void thread_finalize(void *v) { + uptr iter = (uptr)v; + if (iter > 1) { + if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) { + Printf("ThreadSanitizer: failed to set thread key\n"); + Die(); + } + return; + } + { + ThreadState *thr = cur_thread(); + ThreadFinish(thr); + SignalContext *sctx = thr->signal_ctx; + if (sctx) { + thr->signal_ctx = 0; + UnmapOrDie(sctx, sizeof(*sctx)); + } + } +} + + +struct ThreadParam { + void* (*callback)(void *arg); + void *param; + atomic_uintptr_t tid; +}; + +extern "C" void *__tsan_thread_start_func(void *arg) { + ThreadParam *p = (ThreadParam*)arg; + void* (*callback)(void *arg) = p->callback; + void *param = p->param; + int tid = 0; + { + ThreadState *thr = cur_thread(); + // Thread-local state is not initialized yet. + ScopedIgnoreInterceptors ignore; + ThreadIgnoreBegin(thr, 0); + if (pthread_setspecific(g_thread_finalize_key, + (void *)kPthreadDestructorIterations)) { + Printf("ThreadSanitizer: failed to set thread key\n"); + Die(); + } + ThreadIgnoreEnd(thr, 0); + while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) + pthread_yield(); + atomic_store(&p->tid, 0, memory_order_release); + ThreadStart(thr, tid, GetTid()); + } + void *res = callback(param); + // Prevent the callback from being tail called, + // it mixes up stack traces. + volatile int foo = 42; + foo++; + return res; +} + +TSAN_INTERCEPTOR(int, pthread_create, + void *th, void *attr, void *(*callback)(void*), void * param) { + SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param); + if (ctx->after_multithreaded_fork) { + if (flags()->die_after_fork) { + Report("ThreadSanitizer: starting new threads after multi-threaded " + "fork is not supported. Dying (set die_after_fork=0 to override)\n"); + Die(); + } else { + VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded " + "fork is not supported (pid %d). 
Continuing because of "
+          "die_after_fork=0, but you are on your own\n", internal_getpid());
+    }
+  }
+  __sanitizer_pthread_attr_t myattr;
+  if (attr == 0) {
+    pthread_attr_init(&myattr);
+    attr = &myattr;
+  }
+  int detached = 0;
+  REAL(pthread_attr_getdetachstate)(attr, &detached);
+  AdjustStackSize(attr);
+
+  ThreadParam p;
+  p.callback = callback;
+  p.param = param;
+  atomic_store(&p.tid, 0, memory_order_relaxed);
+  int res = -1;
+  {
+    // Otherwise we see false positives in pthread stack manipulation.
+    ScopedIgnoreInterceptors ignore;
+    ThreadIgnoreBegin(thr, pc);
+    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+    ThreadIgnoreEnd(thr, pc);
+  }
+  if (res == 0) {
+    int tid = ThreadCreate(thr, pc, *(uptr*)th, detached);
+    CHECK_NE(tid, 0);
+    atomic_store(&p.tid, tid, memory_order_release);
+    while (atomic_load(&p.tid, memory_order_acquire) != 0)
+      pthread_yield();
+  }
+  if (attr == &myattr)
+    pthread_attr_destroy(&myattr);
+  return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+  int tid = ThreadTid(thr, pc, (uptr)th);
+  ThreadIgnoreBegin(thr, pc);
+  int res = BLOCK_REAL(pthread_join)(th, ret);
+  ThreadIgnoreEnd(thr, pc);
+  if (res == 0) {
+    ThreadJoin(thr, pc, tid);
+  }
+  return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+  SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
+  int tid = ThreadTid(thr, pc, (uptr)th);
+  int res = REAL(pthread_detach)(th);
+  if (res == 0) {
+    ThreadDetach(thr, pc, tid);
+  }
+  return res;
+}
+
+// Problem:
+// The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2),
+// and pthread_cond_t has a different size in the two versions.
+// If we call the new REAL functions on an old pthread_cond_t, they will
+// corrupt memory past the end of the object (the old cond is smaller).
+// If we call the old REAL functions on a new pthread_cond_t, we lose some
+// functionality (e.g. the old functions do not support waiting against
+// CLOCK_REALTIME).
+// Proper handling would require having 2 versions of the interceptors as
+// well, but that is messy and, in particular, requires linker scripts when
+// the sanitizer runtime is linked into a shared library.
+// Instead we assume there are no dynamic libraries built against the old
+// pthread (2.2.5 dates back to 2002), and provide the legacy_pthread_cond
+// flag that allows working with old libraries (but this mode does not
+// support some features, e.g. pthread_condattr_getpshared).
+static void *init_cond(void *c, bool force = false) {
+  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+  // So we allocate additional memory on the side large enough to hold
+  // any pthread_cond_t object. Always call the new REAL functions, but pass
+  // the aux object to them.
+  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
+  // the first word of pthread_cond_t to zero.
+  // All of this is relevant only on linux.
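+  // Concretely, the first word of the user's pthread_cond_t is treated as an
+  // atomic pointer: zero means "not initialized yet", otherwise it points to
+  // a heap-allocated condvar of the full (new) size, installed exactly once
+  // with a compare-exchange so that concurrent initializers agree.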
+ if (!common_flags()->legacy_pthread_cond) + return c; + atomic_uintptr_t *p = (atomic_uintptr_t*)c; + uptr cond = atomic_load(p, memory_order_acquire); + if (!force && cond != 0) + return (void*)cond; + void *newcond = WRAP(malloc)(pthread_cond_t_sz); + internal_memset(newcond, 0, pthread_cond_t_sz); + if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond, + memory_order_acq_rel)) + return newcond; + WRAP(free)(newcond); + return (void*)cond; +} + +struct CondMutexUnlockCtx { + ThreadState *thr; + uptr pc; + void *m; +}; + +static void cond_mutex_unlock(CondMutexUnlockCtx *arg) { + MutexLock(arg->thr, arg->pc, (uptr)arg->m); +} + +INTERCEPTOR(int, pthread_cond_init, void *c, void *a) { + void *cond = init_cond(c, true); + SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a); + MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true); + return REAL(pthread_cond_init)(cond, a); +} + +INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) { + void *cond = init_cond(c); + SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m); + MutexUnlock(thr, pc, (uptr)m); + MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); + CondMutexUnlockCtx arg = {thr, pc, m}; + // This ensures that we handle mutex lock even in case of pthread_cancel. + // See test/tsan/cond_cancel.cc. + int res = call_pthread_cancel_with_cleanup( + (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait), + cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg); + if (res == errno_EOWNERDEAD) + MutexRepair(thr, pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); + return res; +} + +INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) { + void *cond = init_cond(c); + SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime); + MutexUnlock(thr, pc, (uptr)m); + MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); + CondMutexUnlockCtx arg = {thr, pc, m}; + // This ensures that we handle mutex lock even in case of pthread_cancel. + // See test/tsan/cond_cancel.cc. + int res = call_pthread_cancel_with_cleanup( + REAL(pthread_cond_timedwait), cond, m, abstime, + (void(*)(void *arg))cond_mutex_unlock, &arg); + if (res == errno_EOWNERDEAD) + MutexRepair(thr, pc, (uptr)m); + MutexLock(thr, pc, (uptr)m); + return res; +} + +INTERCEPTOR(int, pthread_cond_signal, void *c) { + void *cond = init_cond(c); + SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond); + MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); + return REAL(pthread_cond_signal)(cond); +} + +INTERCEPTOR(int, pthread_cond_broadcast, void *c) { + void *cond = init_cond(c); + SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond); + MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); + return REAL(pthread_cond_broadcast)(cond); +} + +INTERCEPTOR(int, pthread_cond_destroy, void *c) { + void *cond = init_cond(c); + SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond); + MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true); + int res = REAL(pthread_cond_destroy)(cond); + if (common_flags()->legacy_pthread_cond) { + // Free our aux cond and zero the pointer to not leave dangling pointers. 
+ WRAP(free)(cond); + atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) { + SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a); + int res = REAL(pthread_mutex_init)(m, a); + if (res == 0) { + bool recursive = false; + if (a) { + int type = 0; + if (REAL(pthread_mutexattr_gettype)(a, &type) == 0) + recursive = (type == PTHREAD_MUTEX_RECURSIVE + || type == PTHREAD_MUTEX_RECURSIVE_NP); + } + MutexCreate(thr, pc, (uptr)m, false, recursive, false); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m); + int res = REAL(pthread_mutex_destroy)(m); + if (res == 0 || res == EBUSY) { + MutexDestroy(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m); + int res = REAL(pthread_mutex_trylock)(m); + if (res == EOWNERDEAD) + MutexRepair(thr, pc, (uptr)m); + if (res == 0 || res == EOWNERDEAD) + MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true); + return res; +} + +TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) { + SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime); + int res = REAL(pthread_mutex_timedlock)(m, abstime); + if (res == 0) { + MutexLock(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) { + SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared); + int res = REAL(pthread_spin_init)(m, pshared); + if (res == 0) { + MutexCreate(thr, pc, (uptr)m, false, false, false); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m); + int res = REAL(pthread_spin_destroy)(m); + if (res == 0) { + MutexDestroy(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m); + int res = REAL(pthread_spin_lock)(m); + if (res == 0) { + MutexLock(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m); + int res = REAL(pthread_spin_trylock)(m); + if (res == 0) { + MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m); + MutexUnlock(thr, pc, (uptr)m); + int res = REAL(pthread_spin_unlock)(m); + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a); + int res = REAL(pthread_rwlock_init)(m, a); + if (res == 0) { + MutexCreate(thr, pc, (uptr)m, true, false, false); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m); + int res = REAL(pthread_rwlock_destroy)(m); + if (res == 0) { + MutexDestroy(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m); + int res = REAL(pthread_rwlock_rdlock)(m); + if (res == 0) { + MutexReadLock(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m); + int res = REAL(pthread_rwlock_tryrdlock)(m); + if (res == 0) { + MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true); + } + return res; +} + 
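+// The mutex wrappers above and below follow one ordering rule: lock-side
+// events (MutexLock/MutexReadLock) are recorded only after the real call
+// succeeds, while unlock-side events are recorded just before the real call,
+// while the lock is still logically held. A hypothetical client, to
+// illustrate what this models (not part of this patch):
+//   pthread_rwlock_rdlock(&rw);  // -> MutexReadLock; readers run in parallel
+//   int v = shared_counter;      // read under the reader lock: no report
+//   pthread_rwlock_unlock(&rw);  // -> MutexReadOrWriteUnlock
+// A write to shared_counter would need wrlock, which maps to the exclusive
+// MutexLock.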
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime); + int res = REAL(pthread_rwlock_timedrdlock)(m, abstime); + if (res == 0) { + MutexReadLock(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m); + int res = REAL(pthread_rwlock_wrlock)(m); + if (res == 0) { + MutexLock(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m); + int res = REAL(pthread_rwlock_trywrlock)(m); + if (res == 0) { + MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime); + int res = REAL(pthread_rwlock_timedwrlock)(m, abstime); + if (res == 0) { + MutexLock(thr, pc, (uptr)m); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) { + SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m); + MutexReadOrWriteUnlock(thr, pc, (uptr)m); + int res = REAL(pthread_rwlock_unlock)(m); + return res; +} + +TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) { + SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count); + MemoryWrite(thr, pc, (uptr)b, kSizeLog1); + int res = REAL(pthread_barrier_init)(b, a, count); + return res; +} + +TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) { + SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b); + MemoryWrite(thr, pc, (uptr)b, kSizeLog1); + int res = REAL(pthread_barrier_destroy)(b); + return res; +} + +TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) { + SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b); + Release(thr, pc, (uptr)b); + MemoryRead(thr, pc, (uptr)b, kSizeLog1); + int res = REAL(pthread_barrier_wait)(b); + MemoryRead(thr, pc, (uptr)b, kSizeLog1); + if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) { + Acquire(thr, pc, (uptr)b); + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { + SCOPED_INTERCEPTOR_RAW(pthread_once, o, f); + if (o == 0 || f == 0) + return EINVAL; + atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o); + u32 v = atomic_load(a, memory_order_acquire); + if (v == 0 && atomic_compare_exchange_strong(a, &v, 1, + memory_order_relaxed)) { + (*f)(); + if (!thr->in_ignored_lib) + Release(thr, pc, (uptr)o); + atomic_store(a, 2, memory_order_release); + } else { + while (v != 2) { + pthread_yield(); + v = atomic_load(a, memory_order_acquire); + } + if (!thr->in_ignored_lib) + Acquire(thr, pc, (uptr)o); + } + return 0; +} + +TSAN_INTERCEPTOR(int, sem_init, void *s, int pshared, unsigned value) { + SCOPED_TSAN_INTERCEPTOR(sem_init, s, pshared, value); + int res = REAL(sem_init)(s, pshared, value); + return res; +} + +TSAN_INTERCEPTOR(int, sem_destroy, void *s) { + SCOPED_TSAN_INTERCEPTOR(sem_destroy, s); + int res = REAL(sem_destroy)(s); + return res; +} + +TSAN_INTERCEPTOR(int, sem_wait, void *s) { + SCOPED_TSAN_INTERCEPTOR(sem_wait, s); + int res = BLOCK_REAL(sem_wait)(s); + if (res == 0) { + Acquire(thr, pc, (uptr)s); + } + return res; +} + +TSAN_INTERCEPTOR(int, sem_trywait, void *s) { + SCOPED_TSAN_INTERCEPTOR(sem_trywait, s); + int res = BLOCK_REAL(sem_trywait)(s); + if (res == 0) { + Acquire(thr, pc, (uptr)s); + } + return res; +} + +TSAN_INTERCEPTOR(int, sem_timedwait, void *s, void *abstime) { + 
SCOPED_TSAN_INTERCEPTOR(sem_timedwait, s, abstime); + int res = BLOCK_REAL(sem_timedwait)(s, abstime); + if (res == 0) { + Acquire(thr, pc, (uptr)s); + } + return res; +} + +TSAN_INTERCEPTOR(int, sem_post, void *s) { + SCOPED_TSAN_INTERCEPTOR(sem_post, s); + Release(thr, pc, (uptr)s); + int res = REAL(sem_post)(s); + return res; +} + +TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) { + SCOPED_TSAN_INTERCEPTOR(sem_getvalue, s, sval); + int res = REAL(sem_getvalue)(s, sval); + if (res == 0) { + Acquire(thr, pc, (uptr)s); + } + return res; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf); + return REAL(__xstat)(version, path, buf); +} +#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat) +#else +#define TSAN_MAYBE_INTERCEPT___XSTAT +#endif + +TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) { +#if SANITIZER_FREEBSD + SCOPED_TSAN_INTERCEPTOR(stat, path, buf); + return REAL(stat)(path, buf); +#else + SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf); + return REAL(__xstat)(0, path, buf); +#endif +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf); + return REAL(__xstat64)(version, path, buf); +} +#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64) +#else +#define TSAN_MAYBE_INTERCEPT___XSTAT64 +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf); + return REAL(__xstat64)(0, path, buf); +} +#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64) +#else +#define TSAN_MAYBE_INTERCEPT_STAT64 +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf); + return REAL(__lxstat)(version, path, buf); +} +#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat) +#else +#define TSAN_MAYBE_INTERCEPT___LXSTAT +#endif + +TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) { +#if SANITIZER_FREEBSD + SCOPED_TSAN_INTERCEPTOR(lstat, path, buf); + return REAL(lstat)(path, buf); +#else + SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf); + return REAL(__lxstat)(0, path, buf); +#endif +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf); + return REAL(__lxstat64)(version, path, buf); +} +#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64) +#else +#define TSAN_MAYBE_INTERCEPT___LXSTAT64 +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf); + return REAL(__lxstat64)(0, path, buf); +} +#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64) +#else +#define TSAN_MAYBE_INTERCEPT_LSTAT64 +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat)(version, fd, buf); +} +#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat) +#else +#define TSAN_MAYBE_INTERCEPT___FXSTAT +#endif + +TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) { +#if SANITIZER_FREEBSD + SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(fstat)(fd, buf); +#else + 
SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat)(0, fd, buf); +#endif +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat64)(version, fd, buf); +} +#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64) +#else +#define TSAN_MAYBE_INTERCEPT___FXSTAT64 +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat64)(0, fd, buf); +} +#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64) +#else +#define TSAN_MAYBE_INTERCEPT_FSTAT64 +#endif + +TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) { + SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode); + int fd = REAL(open)(name, flags, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) { + SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode); + int fd = REAL(open64)(name, flags, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64) +#else +#define TSAN_MAYBE_INTERCEPT_OPEN64 +#endif + +TSAN_INTERCEPTOR(int, creat, const char *name, int mode) { + SCOPED_TSAN_INTERCEPTOR(creat, name, mode); + int fd = REAL(creat)(name, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) { + SCOPED_TSAN_INTERCEPTOR(creat64, name, mode); + int fd = REAL(creat64)(name, mode); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64) +#else +#define TSAN_MAYBE_INTERCEPT_CREAT64 +#endif + +TSAN_INTERCEPTOR(int, dup, int oldfd) { + SCOPED_TSAN_INTERCEPTOR(dup, oldfd); + int newfd = REAL(dup)(oldfd); + if (oldfd >= 0 && newfd >= 0 && newfd != oldfd) + FdDup(thr, pc, oldfd, newfd); + return newfd; +} + +TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) { + SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd); + int newfd2 = REAL(dup2)(oldfd, newfd); + if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd) + FdDup(thr, pc, oldfd, newfd2); + return newfd2; +} + +TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) { + SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags); + int newfd2 = REAL(dup3)(oldfd, newfd, flags); + if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd) + FdDup(thr, pc, oldfd, newfd2); + return newfd2; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) { + SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags); + int fd = REAL(eventfd)(initval, flags); + if (fd >= 0) + FdEventCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd) +#else +#define TSAN_MAYBE_INTERCEPT_EVENTFD +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) { + SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags); + if (fd >= 0) + FdClose(thr, pc, fd); + fd = REAL(signalfd)(fd, mask, flags); + if (fd >= 0) + FdSignalCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd) +#else +#define TSAN_MAYBE_INTERCEPT_SIGNALFD +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, inotify_init, int fake) { + 
SCOPED_TSAN_INTERCEPTOR(inotify_init, fake); + int fd = REAL(inotify_init)(fake); + if (fd >= 0) + FdInotifyCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init) +#else +#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, inotify_init1, int flags) { + SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags); + int fd = REAL(inotify_init1)(flags); + if (fd >= 0) + FdInotifyCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1) +#else +#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 +#endif + +TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) { + SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol); + int fd = REAL(socket)(domain, type, protocol); + if (fd >= 0) + FdSocketCreate(thr, pc, fd); + return fd; +} + +TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) { + SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd); + int res = REAL(socketpair)(domain, type, protocol, fd); + if (res == 0 && fd[0] >= 0 && fd[1] >= 0) + FdPipeCreate(thr, pc, fd[0], fd[1]); + return res; +} + +TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) { + SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen); + FdSocketConnecting(thr, pc, fd); + int res = REAL(connect)(fd, addr, addrlen); + if (res == 0 && fd >= 0) + FdSocketConnect(thr, pc, fd); + return res; +} + +TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) { + SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen); + int res = REAL(bind)(fd, addr, addrlen); + if (fd > 0 && res == 0) + FdAccess(thr, pc, fd); + return res; +} + +TSAN_INTERCEPTOR(int, listen, int fd, int backlog) { + SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog); + int res = REAL(listen)(fd, backlog); + if (fd > 0 && res == 0) + FdAccess(thr, pc, fd); + return res; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, epoll_create, int size) { + SCOPED_TSAN_INTERCEPTOR(epoll_create, size); + int fd = REAL(epoll_create)(size); + if (fd >= 0) + FdPollCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create) +#else +#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE +#endif + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, epoll_create1, int flags) { + SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags); + int fd = REAL(epoll_create1)(flags); + if (fd >= 0) + FdPollCreate(thr, pc, fd); + return fd; +} +#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1) +#else +#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 +#endif + +TSAN_INTERCEPTOR(int, close, int fd) { + SCOPED_TSAN_INTERCEPTOR(close, fd); + if (fd >= 0) + FdClose(thr, pc, fd); + return REAL(close)(fd); +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(int, __close, int fd) { + SCOPED_TSAN_INTERCEPTOR(__close, fd); + if (fd >= 0) + FdClose(thr, pc, fd); + return REAL(__close)(fd); +} +#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close) +#else +#define TSAN_MAYBE_INTERCEPT___CLOSE +#endif + +// glibc guts +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) { + SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr); + int fds[64]; + int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds)); + for (int i = 0; i < cnt; i++) { + if (fds[i] > 0) + FdClose(thr, pc, fds[i]); + } + REAL(__res_iclose)(state, free_addr); +} +#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose) +#else +#define 
TSAN_MAYBE_INTERCEPT___RES_ICLOSE +#endif + +TSAN_INTERCEPTOR(int, pipe, int *pipefd) { + SCOPED_TSAN_INTERCEPTOR(pipe, pipefd); + int res = REAL(pipe)(pipefd); + if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) + FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); + return res; +} + +TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) { + SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags); + int res = REAL(pipe2)(pipefd, flags); + if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) + FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); + return res; +} + +TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) { + SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags); + if (fd >= 0) { + FdAccess(thr, pc, fd); + FdRelease(thr, pc, fd); + } + int res = REAL(send)(fd, buf, len, flags); + return res; +} + +TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) { + SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags); + if (fd >= 0) { + FdAccess(thr, pc, fd); + FdRelease(thr, pc, fd); + } + int res = REAL(sendmsg)(fd, msg, flags); + return res; +} + +TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) { + SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags); + if (fd >= 0) + FdAccess(thr, pc, fd); + int res = REAL(recv)(fd, buf, len, flags); + if (res >= 0 && fd >= 0) { + FdAcquire(thr, pc, fd); + } + return res; +} + +TSAN_INTERCEPTOR(int, unlink, char *path) { + SCOPED_TSAN_INTERCEPTOR(unlink, path); + Release(thr, pc, File2addr(path)); + int res = REAL(unlink)(path); + return res; +} + +TSAN_INTERCEPTOR(void*, tmpfile, int fake) { + SCOPED_TSAN_INTERCEPTOR(tmpfile, fake); + void *res = REAL(tmpfile)(fake); + if (res) { + int fd = fileno_unlocked(res); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + } + return res; +} + +#if !SANITIZER_FREEBSD +TSAN_INTERCEPTOR(void*, tmpfile64, int fake) { + SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake); + void *res = REAL(tmpfile64)(fake); + if (res) { + int fd = fileno_unlocked(res); + if (fd >= 0) + FdFileCreate(thr, pc, fd); + } + return res; +} +#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64) +#else +#define TSAN_MAYBE_INTERCEPT_TMPFILE64 +#endif + +TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) { + // libc file streams can call user-supplied functions, see fopencookie. + { + SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f); + MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true); + } + return REAL(fread)(ptr, size, nmemb, f); +} + +TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) { + // libc file streams can call user-supplied functions, see fopencookie. + { + SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f); + MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false); + } + return REAL(fwrite)(p, size, nmemb, f); +} + +static void FlushStreams() { + // Flushing all the streams here may freeze the process if a child thread is + // performing file stream operations at the same time. 
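+  // Hence flush only stdout and stderr explicitly rather than flushing
+  // every open stream with fflush(NULL).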
+  REAL(fflush)(stdout);
+  REAL(fflush)(stderr);
+}
+
+TSAN_INTERCEPTOR(void, abort, int fake) {
+  SCOPED_TSAN_INTERCEPTOR(abort, fake);
+  FlushStreams();
+  REAL(abort)(fake);
+}
+
+TSAN_INTERCEPTOR(int, puts, const char *s) {
+  SCOPED_TSAN_INTERCEPTOR(puts, s);
+  MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
+  return REAL(puts)(s);
+}
+
+TSAN_INTERCEPTOR(int, rmdir, char *path) {
+  SCOPED_TSAN_INTERCEPTOR(rmdir, path);
+  Release(thr, pc, Dir2addr(path));
+  int res = REAL(rmdir)(path);
+  return res;
+}
+
+TSAN_INTERCEPTOR(void*, opendir, char *path) {
+  SCOPED_TSAN_INTERCEPTOR(opendir, path);
+  void *res = REAL(opendir)(path);
+  if (res != 0)
+    Acquire(thr, pc, Dir2addr(path));
+  return res;
+}
+
+#if !SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
+  SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
+  if (epfd >= 0)
+    FdAccess(thr, pc, epfd);
+  if (epfd >= 0 && fd >= 0)
+    FdAccess(thr, pc, fd);
+  if (op == EPOLL_CTL_ADD && epfd >= 0)
+    FdRelease(thr, pc, epfd);
+  int res = REAL(epoll_ctl)(epfd, op, fd, ev);
+  return res;
+}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
+#endif
+
+#if !SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
+  SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
+  if (epfd >= 0)
+    FdAccess(thr, pc, epfd);
+  int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
+  if (res > 0 && epfd >= 0)
+    FdAcquire(thr, pc, epfd);
+  return res;
+}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
+
+namespace __tsan {
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+    bool sigact, int sig, my_siginfo_t *info, void *uctx) {
+  if (acquire)
+    Acquire(thr, 0, (uptr)&sigactions[sig]);
+  // Ensure that the handler does not spoil errno.
+  const int saved_errno = errno;
+  errno = 99;
+  // Need to remember pc before the call, because the handler can reset it.
+  uptr pc = sigact ?
+      (uptr)sigactions[sig].sa_sigaction :
+      (uptr)sigactions[sig].sa_handler;
+  pc += 1;  // return address is expected, OutputReport() will undo this
+  if (sigact)
+    sigactions[sig].sa_sigaction(sig, info, uctx);
+  else
+    sigactions[sig].sa_handler(sig);
+  // We do not detect errno spoiling for SIGTERM,
+  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
+  // and tsan would report a false positive in that case.
+  // It's difficult to properly detect this situation (reraise),
+  // because in the async signal processing case (when the handler is called
+  // directly from rtl_generic_sighandler) we have not yet received the
+  // reraised signal; and it looks too fragile to intercept all ways to
+  // reraise a signal.
+  if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+    VarSizeStackTrace stack;
+    ObtainCurrentStack(thr, pc, &stack);
+    ThreadRegistryLock l(ctx->thread_registry);
+    ScopedReport rep(ReportTypeErrnoInSignal);
+    if (!IsFiredSuppression(ctx, rep, stack)) {
+      rep.AddStack(stack, true);
+      OutputReport(thr, rep);
+    }
+  }
+  errno = saved_errno;
+}
+
+void ProcessPendingSignals(ThreadState *thr) {
+  SignalContext *sctx = SigCtx(thr);
+  if (sctx == 0 ||
+      atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+    return;
+  atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
+  atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+  // These are too big for stack.
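+  // (glibc's sigset_t is 1024 bits, i.e. 128 bytes each; THREADLOCAL keeps
+  // the two buffers off the stack of the signal-processing path without
+  // introducing sharing between threads.)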
+  static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
+  REAL(sigfillset)(&emptyset);
+  pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
+  for (int sig = 0; sig < kSigCount; sig++) {
+    SignalDesc *signal = &sctx->pending_signals[sig];
+    if (signal->armed) {
+      signal->armed = false;
+      if (sigactions[sig].sa_handler != SIG_DFL
+          && sigactions[sig].sa_handler != SIG_IGN) {
+        CallUserSignalHandler(thr, false, true, signal->sigaction,
+            sig, &signal->siginfo, &signal->ctx);
+      }
+    }
+  }
+  pthread_sigmask(SIG_SETMASK, &oldset, 0);
+  atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+}
+
+}  // namespace __tsan
+
+static bool is_sync_signal(SignalContext *sctx, int sig) {
+  return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+      sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+      // If we are sending a signal to ourselves, we must process it now.
+      (sctx && sig == sctx->int_signal_send);
+}
+
+void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
+    my_siginfo_t *info, void *ctx) {
+  ThreadState *thr = cur_thread();
+  SignalContext *sctx = SigCtx(thr);
+  if (sig < 0 || sig >= kSigCount) {
+    VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+    return;
+  }
+  // Don't mess with synchronous signals.
+  const bool sync = is_sync_signal(sctx, sig);
+  if (sync ||
+      // If we are in a blocking function, we can safely process the signal
+      // now (but check whether we are in a recursive interceptor,
+      // i.e. pthread_join()->munmap()).
+      (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+    atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+    if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
+      // We ignore interceptors in blocking functions;
+      // temporarily enable them again while calling the user handler.
+      int const i = thr->ignore_interceptors;
+      thr->ignore_interceptors = 0;
+      atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+      CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
+      thr->ignore_interceptors = i;
+      atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+    } else {
+      // Be very conservative about when we do acquire in this case.
+      // It's unsafe to do acquire in async handlers, because ThreadState
+      // can be in an inconsistent state.
+      // SIGSYS looks relatively safe -- it's synchronous and can actually
+      // need some global state.
+ bool acq = (sig == SIGSYS); + CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx); + } + atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); + return; + } + + if (sctx == 0) + return; + SignalDesc *signal = &sctx->pending_signals[sig]; + if (signal->armed == false) { + signal->armed = true; + signal->sigaction = sigact; + if (info) + internal_memcpy(&signal->siginfo, info, sizeof(*info)); + if (ctx) + internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx)); + atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed); + } +} + +static void rtl_sighandler(int sig) { + rtl_generic_sighandler(false, sig, 0, 0); +} + +static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) { + rtl_generic_sighandler(true, sig, info, ctx); +} + +TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) { + SCOPED_TSAN_INTERCEPTOR(sigaction, sig, act, old); + if (old) + internal_memcpy(old, &sigactions[sig], sizeof(*old)); + if (act == 0) + return 0; + internal_memcpy(&sigactions[sig], act, sizeof(*act)); + sigaction_t newact; + internal_memcpy(&newact, act, sizeof(newact)); + REAL(sigfillset)(&newact.sa_mask); + if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) { + if (newact.sa_flags & SA_SIGINFO) + newact.sa_sigaction = rtl_sigaction; + else + newact.sa_handler = rtl_sighandler; + } + ReleaseStore(thr, pc, (uptr)&sigactions[sig]); + int res = REAL(sigaction)(sig, &newact, 0); + return res; +} + +TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) { + sigaction_t act; + act.sa_handler = h; + REAL(memset)(&act.sa_mask, -1, sizeof(act.sa_mask)); + act.sa_flags = 0; + sigaction_t old; + int res = sigaction(sig, &act, &old); + if (res) + return SIG_ERR; + return old.sa_handler; +} + +TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) { + SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask); + return REAL(sigsuspend)(mask); +} + +TSAN_INTERCEPTOR(int, raise, int sig) { + SCOPED_TSAN_INTERCEPTOR(raise, sig); + SignalContext *sctx = SigCtx(thr); + CHECK_NE(sctx, 0); + int prev = sctx->int_signal_send; + sctx->int_signal_send = sig; + int res = REAL(raise)(sig); + CHECK_EQ(sctx->int_signal_send, sig); + sctx->int_signal_send = prev; + return res; +} + +TSAN_INTERCEPTOR(int, kill, int pid, int sig) { + SCOPED_TSAN_INTERCEPTOR(kill, pid, sig); + SignalContext *sctx = SigCtx(thr); + CHECK_NE(sctx, 0); + int prev = sctx->int_signal_send; + if (pid == (int)internal_getpid()) { + sctx->int_signal_send = sig; + } + int res = REAL(kill)(pid, sig); + if (pid == (int)internal_getpid()) { + CHECK_EQ(sctx->int_signal_send, sig); + sctx->int_signal_send = prev; + } + return res; +} + +TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) { + SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig); + SignalContext *sctx = SigCtx(thr); + CHECK_NE(sctx, 0); + int prev = sctx->int_signal_send; + if (tid == pthread_self()) { + sctx->int_signal_send = sig; + } + int res = REAL(pthread_kill)(tid, sig); + if (tid == pthread_self()) { + CHECK_EQ(sctx->int_signal_send, sig); + sctx->int_signal_send = prev; + } + return res; +} + +TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) { + SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz); + // It's intercepted merely to process pending signals. 
+  return REAL(gettimeofday)(tv, tz);
+}
+
+TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
+    void *hints, void *rv) {
+  SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
+  // We miss atomic synchronization in getaddrinfo,
+  // and can report a false race between malloc and free
+  // inside of getaddrinfo. So ignore memory accesses.
+  ThreadIgnoreBegin(thr, pc);
+  int res = REAL(getaddrinfo)(node, service, hints, rv);
+  ThreadIgnoreEnd(thr, pc);
+  return res;
+}
+
+TSAN_INTERCEPTOR(int, fork, int fake) {
+  if (cur_thread()->in_symbolizer)
+    return REAL(fork)(fake);
+  SCOPED_INTERCEPTOR_RAW(fork, fake);
+  ForkBefore(thr, pc);
+  int pid = REAL(fork)(fake);
+  if (pid == 0) {
+    // child
+    ForkChildAfter(thr, pc);
+    FdOnFork(thr, pc);
+  } else if (pid > 0) {
+    // parent
+    ForkParentAfter(thr, pc);
+  } else {
+    // error
+    ForkParentAfter(thr, pc);
+  }
+  return pid;
+}
+
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+  // Some programs (e.g. openjdk) call close for all file descriptors
+  // in the child process. Under tsan this leads to false positives: the
+  // address space is shared, so the parent process also thinks that
+  // the descriptors are closed (while they are actually not), and the
+  // synchronization on them is missed.
+  // Strictly speaking this is undefined behavior, because a vfork child is
+  // not allowed to call any functions other than exec/exit. But this is what
+  // openjdk does, so we want to handle it.
+  // We could disable interceptors in the child process. But it's not possible
+  // to simply intercept and wrap vfork, because a vfork child is not allowed
+  // to return from the function that calls vfork, and that's exactly what
+  // we would do. So this would require some assembly trickery as well.
+  // Instead we simply turn vfork into fork.
+  return WRAP(fork)(fake);
+}
+
+static int OnExit(ThreadState *thr) {
+  int status = Finalize(thr);
+  FlushStreams();
+  return status;
+}
+
+struct TsanInterceptorContext {
+  ThreadState *thr;
+  const uptr caller_pc;
+  const uptr pc;
+};
+
+static void HandleRecvmsg(ThreadState *thr, uptr pc,
+    __sanitizer_msghdr *msg) {
+  int fds[64];
+  int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
+  for (int i = 0; i < cnt; i++)
+    FdEventCreate(thr, pc, fds[i]);
+}
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+// Causes interceptor recursion (getaddrinfo() and fopen())
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+// These interceptors do not seem to be strictly necessary for tsan,
+// but we see cases where they consume 70% of execution time.
+// Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
+// First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
+// function "writes to" the buffer. Then, the same memory is "written to"
+// twice, first as buf and then as pwbufp (both of them refer to the same
+// addresses).
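+// The #undef's below therefore disable the getpw*/fgetpw* interceptors
+// before the common interceptor definitions are pulled in.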
+#undef SANITIZER_INTERCEPT_GETPWENT +#undef SANITIZER_INTERCEPT_GETPWENT_R +#undef SANITIZER_INTERCEPT_FGETPWENT +#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS +#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS + +#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) + +#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ + MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \ + ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \ + true) + +#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ + MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \ + ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \ + false) + +#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ + SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \ + TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \ + ctx = (void *)&_ctx; \ + (void) ctx; + +#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \ + SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ + TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \ + ctx = (void *)&_ctx; \ + (void) ctx; + +#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \ + Acquire(thr, pc, File2addr(path)); \ + if (file) { \ + int fd = fileno_unlocked(file); \ + if (fd >= 0) FdFileCreate(thr, pc, fd); \ + } + +#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \ + if (file) { \ + int fd = fileno_unlocked(file); \ + if (fd >= 0) FdClose(thr, pc, fd); \ + } + +#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) \ + libignore()->OnLibraryLoaded(filename) + +#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \ + libignore()->OnLibraryUnloaded() + +#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ + FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd) + +#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ + FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd) + +#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \ + FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd) + +#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ + FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd) + +#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ + ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name) + +#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ + __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name) + +#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name) + +#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \ + OnExit(((TsanInterceptorContext *) ctx)->thr) + +#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \ + MutexLock(((TsanInterceptorContext *)ctx)->thr, \ + ((TsanInterceptorContext *)ctx)->pc, (uptr)m) + +#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \ + MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \ + ((TsanInterceptorContext *)ctx)->pc, (uptr)m) + +#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \ + MutexRepair(((TsanInterceptorContext *)ctx)->thr, \ + ((TsanInterceptorContext *)ctx)->pc, (uptr)m) + +#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \ + HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \ + ((TsanInterceptorContext *)ctx)->pc, msg) + +#include "sanitizer_common/sanitizer_common_interceptors.inc" + +#define TSAN_SYSCALL() \ + ThreadState *thr = cur_thread(); \ + if (thr->ignore_interceptors) \ + return; \ + ScopedSyscall scoped_syscall(thr) \ +/**/ + +struct ScopedSyscall { + ThreadState *thr; + + explicit ScopedSyscall(ThreadState *thr) + : thr(thr) { + Initialize(thr); + } + + ~ScopedSyscall() { + ProcessPendingSignals(thr); + } +}; + +static void syscall_access_range(uptr pc, uptr 
p, uptr s, bool write) { + TSAN_SYSCALL(); + MemoryAccessRange(thr, pc, p, s, write); +} + +static void syscall_acquire(uptr pc, uptr addr) { + TSAN_SYSCALL(); + Acquire(thr, pc, addr); + DPrintf("syscall_acquire(%p)\n", addr); +} + +static void syscall_release(uptr pc, uptr addr) { + TSAN_SYSCALL(); + DPrintf("syscall_release(%p)\n", addr); + Release(thr, pc, addr); +} + +static void syscall_fd_close(uptr pc, int fd) { + TSAN_SYSCALL(); + FdClose(thr, pc, fd); +} + +static USED void syscall_fd_acquire(uptr pc, int fd) { + TSAN_SYSCALL(); + FdAcquire(thr, pc, fd); + DPrintf("syscall_fd_acquire(%p)\n", fd); +} + +static USED void syscall_fd_release(uptr pc, int fd) { + TSAN_SYSCALL(); + DPrintf("syscall_fd_release(%p)\n", fd); + FdRelease(thr, pc, fd); +} + +static void syscall_pre_fork(uptr pc) { + TSAN_SYSCALL(); + ForkBefore(thr, pc); +} + +static void syscall_post_fork(uptr pc, int pid) { + TSAN_SYSCALL(); + if (pid == 0) { + // child + ForkChildAfter(thr, pc); + FdOnFork(thr, pc); + } else if (pid > 0) { + // parent + ForkParentAfter(thr, pc); + } else { + // error + ForkParentAfter(thr, pc); + } +} + +#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \ + syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false) + +#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \ + syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true) + +#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ + do { \ + (void)(p); \ + (void)(s); \ + } while (false) + +#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \ + do { \ + (void)(p); \ + (void)(s); \ + } while (false) + +#define COMMON_SYSCALL_ACQUIRE(addr) \ + syscall_acquire(GET_CALLER_PC(), (uptr)(addr)) + +#define COMMON_SYSCALL_RELEASE(addr) \ + syscall_release(GET_CALLER_PC(), (uptr)(addr)) + +#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd) + +#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd) + +#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd) + +#define COMMON_SYSCALL_PRE_FORK() \ + syscall_pre_fork(GET_CALLER_PC()) + +#define COMMON_SYSCALL_POST_FORK(res) \ + syscall_post_fork(GET_CALLER_PC(), res) + +#include "sanitizer_common/sanitizer_common_syscalls.inc" + +namespace __tsan { + +static void finalize(void *arg) { + ThreadState *thr = cur_thread(); + int status = Finalize(thr); + // Make sure the output is not lost. + FlushStreams(); + if (status) + REAL(_exit)(status); +} + +static void unreachable() { + Report("FATAL: ThreadSanitizer: unreachable called\n"); + Die(); +} + +void InitializeInterceptors() { + // We need to setup it early, because functions like dlsym() can call it. + REAL(memset) = internal_memset; + REAL(memcpy) = internal_memcpy; + REAL(memcmp) = internal_memcmp; + + // Instruct libc malloc to consume less memory. +#if !SANITIZER_FREEBSD + mallopt(1, 0); // M_MXFAST + mallopt(-3, 32*1024); // M_MMAP_THRESHOLD +#endif + + InitializeCommonInterceptors(); + + // We can not use TSAN_INTERCEPT to get setjmp addr, + // because it does &setjmp and setjmp is not present in some versions of libc. 
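+  // (Sketch of the difference, under the assumption that TSAN_INTERCEPT
+  // ultimately evaluates &name to locate the function:
+  //   TSAN_INTERCEPT(setjmp);                 // needs &setjmp; fails on
+  //                                           // libcs where setjmp is only
+  //                                           // a macro
+  //   GetRealFunctionAddress("setjmp", ...);  // resolves by name at runtime)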
+ using __interception::GetRealFunctionAddress; + GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0); + GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0); + GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0); + GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0); + + TSAN_INTERCEPT(longjmp); + TSAN_INTERCEPT(siglongjmp); + + TSAN_INTERCEPT(malloc); + TSAN_INTERCEPT(__libc_memalign); + TSAN_INTERCEPT(calloc); + TSAN_INTERCEPT(realloc); + TSAN_INTERCEPT(free); + TSAN_INTERCEPT(cfree); + TSAN_INTERCEPT(mmap); + TSAN_MAYBE_INTERCEPT_MMAP64; + TSAN_INTERCEPT(munmap); + TSAN_MAYBE_INTERCEPT_MEMALIGN; + TSAN_INTERCEPT(valloc); + TSAN_MAYBE_INTERCEPT_PVALLOC; + TSAN_INTERCEPT(posix_memalign); + + TSAN_INTERCEPT(strlen); + TSAN_INTERCEPT(memset); + TSAN_INTERCEPT(memcpy); + TSAN_INTERCEPT(memmove); + TSAN_INTERCEPT(memcmp); + TSAN_INTERCEPT(strchr); + TSAN_INTERCEPT(strchrnul); + TSAN_INTERCEPT(strrchr); + TSAN_INTERCEPT(strcpy); // NOLINT + TSAN_INTERCEPT(strncpy); + TSAN_INTERCEPT(strstr); + TSAN_INTERCEPT(strdup); + + TSAN_INTERCEPT(pthread_create); + TSAN_INTERCEPT(pthread_join); + TSAN_INTERCEPT(pthread_detach); + + TSAN_INTERCEPT_VER(pthread_cond_init, "GLIBC_2.3.2"); + TSAN_INTERCEPT_VER(pthread_cond_signal, "GLIBC_2.3.2"); + TSAN_INTERCEPT_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); + TSAN_INTERCEPT_VER(pthread_cond_wait, "GLIBC_2.3.2"); + TSAN_INTERCEPT_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); + TSAN_INTERCEPT_VER(pthread_cond_destroy, "GLIBC_2.3.2"); + + TSAN_INTERCEPT(pthread_mutex_init); + TSAN_INTERCEPT(pthread_mutex_destroy); + TSAN_INTERCEPT(pthread_mutex_trylock); + TSAN_INTERCEPT(pthread_mutex_timedlock); + + TSAN_INTERCEPT(pthread_spin_init); + TSAN_INTERCEPT(pthread_spin_destroy); + TSAN_INTERCEPT(pthread_spin_lock); + TSAN_INTERCEPT(pthread_spin_trylock); + TSAN_INTERCEPT(pthread_spin_unlock); + + TSAN_INTERCEPT(pthread_rwlock_init); + TSAN_INTERCEPT(pthread_rwlock_destroy); + TSAN_INTERCEPT(pthread_rwlock_rdlock); + TSAN_INTERCEPT(pthread_rwlock_tryrdlock); + TSAN_INTERCEPT(pthread_rwlock_timedrdlock); + TSAN_INTERCEPT(pthread_rwlock_wrlock); + TSAN_INTERCEPT(pthread_rwlock_trywrlock); + TSAN_INTERCEPT(pthread_rwlock_timedwrlock); + TSAN_INTERCEPT(pthread_rwlock_unlock); + + TSAN_INTERCEPT(pthread_barrier_init); + TSAN_INTERCEPT(pthread_barrier_destroy); + TSAN_INTERCEPT(pthread_barrier_wait); + + TSAN_INTERCEPT(pthread_once); + + TSAN_INTERCEPT(sem_init); + TSAN_INTERCEPT(sem_destroy); + TSAN_INTERCEPT(sem_wait); + TSAN_INTERCEPT(sem_trywait); + TSAN_INTERCEPT(sem_timedwait); + TSAN_INTERCEPT(sem_post); + TSAN_INTERCEPT(sem_getvalue); + + TSAN_INTERCEPT(stat); + TSAN_MAYBE_INTERCEPT___XSTAT; + TSAN_MAYBE_INTERCEPT_STAT64; + TSAN_MAYBE_INTERCEPT___XSTAT64; + TSAN_INTERCEPT(lstat); + TSAN_MAYBE_INTERCEPT___LXSTAT; + TSAN_MAYBE_INTERCEPT_LSTAT64; + TSAN_MAYBE_INTERCEPT___LXSTAT64; + TSAN_INTERCEPT(fstat); + TSAN_MAYBE_INTERCEPT___FXSTAT; + TSAN_MAYBE_INTERCEPT_FSTAT64; + TSAN_MAYBE_INTERCEPT___FXSTAT64; + TSAN_INTERCEPT(open); + TSAN_MAYBE_INTERCEPT_OPEN64; + TSAN_INTERCEPT(creat); + TSAN_MAYBE_INTERCEPT_CREAT64; + TSAN_INTERCEPT(dup); + TSAN_INTERCEPT(dup2); + TSAN_INTERCEPT(dup3); + TSAN_MAYBE_INTERCEPT_EVENTFD; + TSAN_MAYBE_INTERCEPT_SIGNALFD; + TSAN_MAYBE_INTERCEPT_INOTIFY_INIT; + TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1; + TSAN_INTERCEPT(socket); + TSAN_INTERCEPT(socketpair); + TSAN_INTERCEPT(connect); + TSAN_INTERCEPT(bind); + TSAN_INTERCEPT(listen); + TSAN_MAYBE_INTERCEPT_EPOLL_CREATE; + 
TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1; + TSAN_INTERCEPT(close); + TSAN_MAYBE_INTERCEPT___CLOSE; + TSAN_MAYBE_INTERCEPT___RES_ICLOSE; + TSAN_INTERCEPT(pipe); + TSAN_INTERCEPT(pipe2); + + TSAN_INTERCEPT(send); + TSAN_INTERCEPT(sendmsg); + TSAN_INTERCEPT(recv); + + TSAN_INTERCEPT(unlink); + TSAN_INTERCEPT(tmpfile); + TSAN_MAYBE_INTERCEPT_TMPFILE64; + TSAN_INTERCEPT(fread); + TSAN_INTERCEPT(fwrite); + TSAN_INTERCEPT(abort); + TSAN_INTERCEPT(puts); + TSAN_INTERCEPT(rmdir); + TSAN_INTERCEPT(opendir); + + TSAN_MAYBE_INTERCEPT_EPOLL_CTL; + TSAN_MAYBE_INTERCEPT_EPOLL_WAIT; + + TSAN_INTERCEPT(sigaction); + TSAN_INTERCEPT(signal); + TSAN_INTERCEPT(sigsuspend); + TSAN_INTERCEPT(raise); + TSAN_INTERCEPT(kill); + TSAN_INTERCEPT(pthread_kill); + TSAN_INTERCEPT(sleep); + TSAN_INTERCEPT(usleep); + TSAN_INTERCEPT(nanosleep); + TSAN_INTERCEPT(gettimeofday); + TSAN_INTERCEPT(getaddrinfo); + + TSAN_INTERCEPT(fork); + TSAN_INTERCEPT(vfork); + TSAN_INTERCEPT(on_exit); + TSAN_INTERCEPT(__cxa_atexit); + TSAN_INTERCEPT(_exit); + + // Need to setup it, because interceptors check that the function is resolved. + // But atexit is emitted directly into the module, so can't be resolved. + REAL(atexit) = (int(*)(void(*)()))unreachable; + if (REAL(__cxa_atexit)(&finalize, 0, 0)) { + Printf("ThreadSanitizer: failed to setup atexit callback\n"); + Die(); + } + + if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { + Printf("ThreadSanitizer: failed to create thread key\n"); + Die(); + } + + FdInit(); +} + +void *internal_start_thread(void(*func)(void *arg), void *arg) { + // Start the thread with signals blocked, otherwise it can steal user signals. + __sanitizer_sigset_t set, old; + internal_sigfillset(&set); + internal_sigprocmask(SIG_SETMASK, &set, &old); + void *th; + REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg); + internal_sigprocmask(SIG_SETMASK, &old, 0); + return th; +} + +void internal_join_thread(void *th) { + REAL(pthread_join)(th, 0); +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.cc new file mode 100644 index 0000000..9de3808 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.cc @@ -0,0 +1,98 @@ +//===-- tsan_interface.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+// +//===----------------------------------------------------------------------===// + +#include "tsan_interface.h" +#include "tsan_interface_ann.h" +#include "tsan_rtl.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +#define CALLERPC ((uptr)__builtin_return_address(0)) + +using namespace __tsan; // NOLINT + +typedef u16 uint16_t; +typedef u32 uint32_t; +typedef u64 uint64_t; + +void __tsan_init() { + Initialize(cur_thread()); +} + +void __tsan_read16(void *addr) { + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); + MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8); +} + +void __tsan_write16(void *addr) { + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8); +} + +u16 __tsan_unaligned_read2(const uu16 *addr) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false); + return *addr; +} + +u32 __tsan_unaligned_read4(const uu32 *addr) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false); + return *addr; +} + +u64 __tsan_unaligned_read8(const uu64 *addr) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false); + return *addr; +} + +void __tsan_unaligned_write2(uu16 *addr, u16 v) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false); + *addr = v; +} + +void __tsan_unaligned_write4(uu32 *addr, u32 v) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false); + *addr = v; +} + +void __tsan_unaligned_write8(uu64 *addr, u64 v) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false); + *addr = v; +} + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +uint16_t __sanitizer_unaligned_load16(void *addr) + ALIAS("__tsan_unaligned_read2"); +SANITIZER_INTERFACE_ATTRIBUTE +uint32_t __sanitizer_unaligned_load32(void *addr) + ALIAS("__tsan_unaligned_read4"); +SANITIZER_INTERFACE_ATTRIBUTE +uint64_t __sanitizer_unaligned_load64(void *addr) + ALIAS("__tsan_unaligned_read8"); +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store16(void *addr, uint16_t v) + ALIAS("__tsan_unaligned_write2"); +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store32(void *addr, uint32_t v) + ALIAS("__tsan_unaligned_write4"); +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store64(void *addr, uint64_t v) + ALIAS("__tsan_unaligned_write8"); +} + +void __tsan_acquire(void *addr) { + Acquire(cur_thread(), CALLERPC, (uptr)addr); +} + +void __tsan_release(void *addr) { + Release(cur_thread(), CALLERPC, (uptr)addr); +} diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.h new file mode 100644 index 0000000..7045069 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface.h @@ -0,0 +1,67 @@ +//===-- tsan_interface.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// The functions declared in this header will be inserted by the instrumentation +// module. +// This header can be included by the instrumented program or by TSan tests. 
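+// For example (illustrative only; exact codegen is compiler-dependent), for
+// a 4-byte copy `x = y;` between instrumented globals the compiler emits
+// roughly:
+//   __tsan_read4(&y);
+//   int tmp = y;
+//   __tsan_write4(&x);
+//   x = tmp;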
+//===----------------------------------------------------------------------===// +#ifndef TSAN_INTERFACE_H +#define TSAN_INTERFACE_H + +#include <sanitizer_common/sanitizer_internal_defs.h> + +// This header should NOT include any other headers. +// All functions in this header are extern "C" and start with __tsan_. + +#ifdef __cplusplus +extern "C" { +#endif + +// This function should be called at the very beginning of the process, +// before any instrumented code is executed and before any call to malloc. +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init(); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16(void *addr); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr); + +SANITIZER_INTERFACE_ATTRIBUTE u16 __tsan_unaligned_read2(const uu16 *addr); +SANITIZER_INTERFACE_ATTRIBUTE u32 __tsan_unaligned_read4(const uu32 *addr); +SANITIZER_INTERFACE_ATTRIBUTE u64 __tsan_unaligned_read8(const uu64 *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(uu16 *addr, u16 v); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(uu32 *addr, u32 v); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(uu64 *addr, u64 v); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p); +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_vptr_update(void **vptr_p, void *new_val); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit(); + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_read_range(void *addr, unsigned long size); // NOLINT +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_write_range(void *addr, unsigned long size); // NOLINT + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // TSAN_INTERFACE_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc new file mode 100644 index 0000000..fd3c846 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cc @@ -0,0 +1,459 @@ +//===-- tsan_interface_ann.cc ---------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+// +//===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "tsan_interface_ann.h" +#include "tsan_mutex.h" +#include "tsan_report.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" +#include "tsan_flags.h" +#include "tsan_platform.h" +#include "tsan_vector.h" + +#define CALLERPC ((uptr)__builtin_return_address(0)) + +using namespace __tsan; // NOLINT + +namespace __tsan { + +class ScopedAnnotation { + public: + ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l, + uptr pc) + : thr_(thr) { + FuncEntry(thr_, pc); + DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l); + } + + ~ScopedAnnotation() { + FuncExit(thr_); + CheckNoLocks(thr_); + } + private: + ThreadState *const thr_; +}; + +#define SCOPED_ANNOTATION(typ) \ + if (!flags()->enable_annotations) \ + return; \ + ThreadState *thr = cur_thread(); \ + const uptr caller_pc = (uptr)__builtin_return_address(0); \ + StatInc(thr, StatAnnotation); \ + StatInc(thr, Stat##typ); \ + ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \ + const uptr pc = StackTrace::GetCurrentPc(); \ + (void)pc; \ +/**/ + +static const int kMaxDescLen = 128; + +struct ExpectRace { + ExpectRace *next; + ExpectRace *prev; + int hitcount; + int addcount; + uptr addr; + uptr size; + char *file; + int line; + char desc[kMaxDescLen]; +}; + +struct DynamicAnnContext { + Mutex mtx; + ExpectRace expect; + ExpectRace benign; + + DynamicAnnContext() + : mtx(MutexTypeAnnotations, StatMtxAnnotations) { + } +}; + +static DynamicAnnContext *dyn_ann_ctx; +static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64); + +static void AddExpectRace(ExpectRace *list, + char *f, int l, uptr addr, uptr size, char *desc) { + ExpectRace *race = list->next; + for (; race != list; race = race->next) { + if (race->addr == addr && race->size == size) { + race->addcount++; + return; + } + } + race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace)); + race->addr = addr; + race->size = size; + race->file = f; + race->line = l; + race->desc[0] = 0; + race->hitcount = 0; + race->addcount = 1; + if (desc) { + int i = 0; + for (; i < kMaxDescLen - 1 && desc[i]; i++) + race->desc[i] = desc[i]; + race->desc[i] = 0; + } + race->prev = list; + race->next = list->next; + race->next->prev = race; + list->next = race; +} + +static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) { + for (ExpectRace *race = list->next; race != list; race = race->next) { + uptr maxbegin = max(race->addr, addr); + uptr minend = min(race->addr + race->size, addr + size); + if (maxbegin < minend) + return race; + } + return 0; +} + +static bool CheckContains(ExpectRace *list, uptr addr, uptr size) { + ExpectRace *race = FindRace(list, addr, size); + if (race == 0) + return false; + DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n", + race->desc, race->addr, (int)race->size, race->file, race->line); + race->hitcount++; + return true; +} + +static void InitList(ExpectRace *list) { + list->next = list; + list->prev = list; +} + +void InitializeDynamicAnnotations() { + dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext; + InitList(&dyn_ann_ctx->expect); + InitList(&dyn_ann_ctx->benign); +} + +bool IsExpectedReport(uptr addr, uptr size) { + Lock lock(&dyn_ann_ctx->mtx); + if 
(CheckContains(&dyn_ann_ctx->expect, addr, size))
+    return true;
+  if (CheckContains(&dyn_ann_ctx->benign, addr, size))
+    return true;
+  return false;
+}
+
+static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
+    int *unique_count, int *hit_count, int ExpectRace::*counter) {
+  ExpectRace *list = &dyn_ann_ctx->benign;
+  for (ExpectRace *race = list->next; race != list; race = race->next) {
+    (*unique_count)++;
+    if (race->*counter == 0)
+      continue;
+    (*hit_count) += race->*counter;
+    uptr i = 0;
+    for (; i < matched->Size(); i++) {
+      ExpectRace *race0 = &(*matched)[i];
+      if (race->line == race0->line
+          && internal_strcmp(race->file, race0->file) == 0
+          && internal_strcmp(race->desc, race0->desc) == 0) {
+        race0->*counter += race->*counter;
+        break;
+      }
+    }
+    if (i == matched->Size())
+      matched->PushBack(*race);
+  }
+}
+
+void PrintMatchedBenignRaces() {
+  Lock lock(&dyn_ann_ctx->mtx);
+  int unique_count = 0;
+  int hit_count = 0;
+  int add_count = 0;
+  Vector<ExpectRace> hit_matched(MBlockScopedBuf);
+  CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
+      &ExpectRace::hitcount);
+  Vector<ExpectRace> add_matched(MBlockScopedBuf);
+  CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
+      &ExpectRace::addcount);
+  if (hit_matched.Size()) {
+    Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
+        hit_count, (int)internal_getpid());
+    for (uptr i = 0; i < hit_matched.Size(); i++) {
+      Printf("%d %s:%d %s\n",
+          hit_matched[i].hitcount, hit_matched[i].file,
+          hit_matched[i].line, hit_matched[i].desc);
+    }
+  }
+  if (add_matched.Size()) {
+    Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
+           " (pid=%d):\n",
+        add_count, unique_count, (int)internal_getpid());
+    for (uptr i = 0; i < add_matched.Size(); i++) {
+      Printf("%d %s:%d %s\n",
+          add_matched[i].addcount, add_matched[i].file,
+          add_matched[i].line, add_matched[i].desc);
+    }
+  }
+}
+
+static void ReportMissedExpectedRace(ExpectRace *race) {
+  Printf("==================\n");
+  Printf("WARNING: ThreadSanitizer: missed expected data race\n");
+  Printf("  %s addr=%zx %s:%d\n",
+      race->desc, race->addr, race->file, race->line);
+  Printf("==================\n");
+}
+}  // namespace __tsan
+
+using namespace __tsan;  // NOLINT
+
+extern "C" {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
+  SCOPED_ANNOTATION(AnnotateHappensBefore);
+  Release(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
+  SCOPED_ANNOTATION(AnnotateHappensAfter);
+  Acquire(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
+  SCOPED_ANNOTATION(AnnotateCondVarSignal);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+  SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+  SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+                                             uptr lock) {
+  SCOPED_ANNOTATION(AnnotateCondVarWait);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
+  SCOPED_ANNOTATION(AnnotateRWLockCreate);
+  MutexCreate(thr, pc, m, true, true, false);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+  SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
+  MutexCreate(thr, pc, m, true, true, true);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
+
SCOPED_ANNOTATION(AnnotateRWLockDestroy); + MutexDestroy(thr, pc, m); +} + +void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m, + uptr is_w) { + SCOPED_ANNOTATION(AnnotateRWLockAcquired); + if (is_w) + MutexLock(thr, pc, m); + else + MutexReadLock(thr, pc, m); +} + +void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m, + uptr is_w) { + SCOPED_ANNOTATION(AnnotateRWLockReleased); + if (is_w) + MutexUnlock(thr, pc, m); + else + MutexReadUnlock(thr, pc, m); +} + +void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) { + SCOPED_ANNOTATION(AnnotateTraceMemory); +} + +void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) { + SCOPED_ANNOTATION(AnnotateFlushState); +} + +void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem, + uptr size) { + SCOPED_ANNOTATION(AnnotateNewMemory); +} + +void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) { + SCOPED_ANNOTATION(AnnotateNoOp); +} + +void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) { + SCOPED_ANNOTATION(AnnotateFlushExpectedRaces); + Lock lock(&dyn_ann_ctx->mtx); + while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) { + ExpectRace *race = dyn_ann_ctx->expect.next; + if (race->hitcount == 0) { + ctx->nmissed_expected++; + ReportMissedExpectedRace(race); + } + race->prev->next = race->next; + race->next->prev = race->prev; + internal_free(race); + } +} + +void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection( + char *f, int l, int enable) { + SCOPED_ANNOTATION(AnnotateEnableRaceDetection); + // FIXME: Reconsider this functionality later. It may be irrelevant. +} + +void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar( + char *f, int l, uptr mu) { + SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar); +} + +void INTERFACE_ATTRIBUTE AnnotatePCQGet( + char *f, int l, uptr pcq) { + SCOPED_ANNOTATION(AnnotatePCQGet); +} + +void INTERFACE_ATTRIBUTE AnnotatePCQPut( + char *f, int l, uptr pcq) { + SCOPED_ANNOTATION(AnnotatePCQPut); +} + +void INTERFACE_ATTRIBUTE AnnotatePCQDestroy( + char *f, int l, uptr pcq) { + SCOPED_ANNOTATION(AnnotatePCQDestroy); +} + +void INTERFACE_ATTRIBUTE AnnotatePCQCreate( + char *f, int l, uptr pcq) { + SCOPED_ANNOTATION(AnnotatePCQCreate); +} + +void INTERFACE_ATTRIBUTE AnnotateExpectRace( + char *f, int l, uptr mem, char *desc) { + SCOPED_ANNOTATION(AnnotateExpectRace); + Lock lock(&dyn_ann_ctx->mtx); + AddExpectRace(&dyn_ann_ctx->expect, + f, l, mem, 1, desc); + DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l); +} + +static void BenignRaceImpl( + char *f, int l, uptr mem, uptr size, char *desc) { + Lock lock(&dyn_ann_ctx->mtx); + AddExpectRace(&dyn_ann_ctx->benign, + f, l, mem, size, desc); + DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l); +} + +// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm. 
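+// (Illustrative client-side usage, assuming the wrapper macro name from
+// dynamic_annotations.h; an intentionally racy variable is whitelisted via
+//   static int g_stats;  // lossy updates are acceptable by design
+//   ANNOTATE_BENIGN_RACE_SIZED(&g_stats, sizeof(g_stats), "stats counter");
+// which routes to the function below and registers the address range in
+// dyn_ann_ctx->benign through BenignRaceImpl.)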
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized( + char *f, int l, uptr mem, uptr size, char *desc) { + SCOPED_ANNOTATION(AnnotateBenignRaceSized); + BenignRaceImpl(f, l, mem, size, desc); +} + +void INTERFACE_ATTRIBUTE AnnotateBenignRace( + char *f, int l, uptr mem, char *desc) { + SCOPED_ANNOTATION(AnnotateBenignRace); + BenignRaceImpl(f, l, mem, 1, desc); +} + +void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) { + SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin); + ThreadIgnoreBegin(thr, pc); +} + +void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) { + SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd); + ThreadIgnoreEnd(thr, pc); +} + +void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) { + SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin); + ThreadIgnoreBegin(thr, pc); +} + +void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) { + SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd); + ThreadIgnoreEnd(thr, pc); +} + +void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) { + SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin); + ThreadIgnoreSyncBegin(thr, pc); +} + +void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) { + SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd); + ThreadIgnoreSyncEnd(thr, pc); +} + +void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange( + char *f, int l, uptr addr, uptr size) { + SCOPED_ANNOTATION(AnnotatePublishMemoryRange); +} + +void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange( + char *f, int l, uptr addr, uptr size) { + SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange); +} + +void INTERFACE_ATTRIBUTE AnnotateThreadName( + char *f, int l, char *name) { + SCOPED_ANNOTATION(AnnotateThreadName); + ThreadSetName(thr, name); +} + +// We deliberately omit the implementation of WTFAnnotateHappensBefore() and +// WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate +// atomic operations, which should be handled by ThreadSanitizer correctly. +void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) { + SCOPED_ANNOTATION(AnnotateHappensBefore); +} + +void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) { + SCOPED_ANNOTATION(AnnotateHappensAfter); +} + +void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized( + char *f, int l, uptr mem, uptr sz, char *desc) { + SCOPED_ANNOTATION(AnnotateBenignRaceSized); + BenignRaceImpl(f, l, mem, sz, desc); +} + +int INTERFACE_ATTRIBUTE RunningOnValgrind() { + return flags()->running_on_valgrind; +} + +double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) { + return 10.0; +} + +const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) { + if (internal_strcmp(query, "pure_happens_before") == 0) + return "1"; + else + return "0"; +} + +void INTERFACE_ATTRIBUTE +AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {} +void INTERFACE_ATTRIBUTE +AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {} +} // extern "C" diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.h new file mode 100644 index 0000000..963bcc5 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_ann.h @@ -0,0 +1,33 @@ +//===-- tsan_interface_ann.h ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Interface for dynamic annotations. +//===----------------------------------------------------------------------===// +#ifndef TSAN_INTERFACE_ANN_H +#define TSAN_INTERFACE_ANN_H + +#include <sanitizer_common/sanitizer_internal_defs.h> + +// This header should NOT include any other headers. +// All functions in this header are extern "C" and start with __tsan_. + +#ifdef __cplusplus +extern "C" { +#endif + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_acquire(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_release(void *addr); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // TSAN_INTERFACE_ANN_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc new file mode 100644 index 0000000..9b69951 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc @@ -0,0 +1,953 @@ +//===-- tsan_interface_atomic.cc ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +// ThreadSanitizer atomic operations are based on C++11/C1x standards. +// For background see C++11 standard. A slightly older, publicly +// available draft of the standard (not entirely up-to-date, but close enough +// for casual browsing) is available here: +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf +// The following page contains more background information: +// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/ + +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "tsan_flags.h" +#include "tsan_rtl.h" + +using namespace __tsan; // NOLINT + +// These should match declarations from public tsan_interface_atomic.h header. +typedef unsigned char a8; +typedef unsigned short a16; // NOLINT +typedef unsigned int a32; +typedef unsigned long long a64; // NOLINT +#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \ + || (__clang_major__ * 100 + __clang_minor__ >= 302)) +__extension__ typedef __int128 a128; +# define __TSAN_HAS_INT128 1 +#else +# define __TSAN_HAS_INT128 0 +#endif + +#ifndef SANITIZER_GO +// Protects emulation of 128-bit atomic operations. +static StaticSpinMutex mutex128; +#endif + +// Part of ABI, do not change. 
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup +typedef enum { + mo_relaxed, + mo_consume, + mo_acquire, + mo_release, + mo_acq_rel, + mo_seq_cst +} morder; + +static bool IsLoadOrder(morder mo) { + return mo == mo_relaxed || mo == mo_consume + || mo == mo_acquire || mo == mo_seq_cst; +} + +static bool IsStoreOrder(morder mo) { + return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst; +} + +static bool IsReleaseOrder(morder mo) { + return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst; +} + +static bool IsAcquireOrder(morder mo) { + return mo == mo_consume || mo == mo_acquire + || mo == mo_acq_rel || mo == mo_seq_cst; +} + +static bool IsAcqRelOrder(morder mo) { + return mo == mo_acq_rel || mo == mo_seq_cst; +} + +template<typename T> T func_xchg(volatile T *v, T op) { + T res = __sync_lock_test_and_set(v, op); + // __sync_lock_test_and_set does not contain full barrier. + __sync_synchronize(); + return res; +} + +template<typename T> T func_add(volatile T *v, T op) { + return __sync_fetch_and_add(v, op); +} + +template<typename T> T func_sub(volatile T *v, T op) { + return __sync_fetch_and_sub(v, op); +} + +template<typename T> T func_and(volatile T *v, T op) { + return __sync_fetch_and_and(v, op); +} + +template<typename T> T func_or(volatile T *v, T op) { + return __sync_fetch_and_or(v, op); +} + +template<typename T> T func_xor(volatile T *v, T op) { + return __sync_fetch_and_xor(v, op); +} + +template<typename T> T func_nand(volatile T *v, T op) { + // clang does not support __sync_fetch_and_nand. + T cmp = *v; + for (;;) { + T newv = ~(cmp & op); + T cur = __sync_val_compare_and_swap(v, cmp, newv); + if (cmp == cur) + return cmp; + cmp = cur; + } +} + +template<typename T> T func_cas(volatile T *v, T cmp, T xch) { + return __sync_val_compare_and_swap(v, cmp, xch); +} + +// clang does not support 128-bit atomic ops. +// Atomic ops are executed under tsan internal mutex, +// here we assume that the atomic variables are not accessed +// from non-instrumented code. +#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) +a128 func_xchg(volatile a128 *v, a128 op) { + SpinMutexLock lock(&mutex128); + a128 cmp = *v; + *v = op; + return cmp; +} + +a128 func_add(volatile a128 *v, a128 op) { + SpinMutexLock lock(&mutex128); + a128 cmp = *v; + *v = cmp + op; + return cmp; +} + +a128 func_sub(volatile a128 *v, a128 op) { + SpinMutexLock lock(&mutex128); + a128 cmp = *v; + *v = cmp - op; + return cmp; +} + +a128 func_and(volatile a128 *v, a128 op) { + SpinMutexLock lock(&mutex128); + a128 cmp = *v; + *v = cmp & op; + return cmp; +} + +a128 func_or(volatile a128 *v, a128 op) { + SpinMutexLock lock(&mutex128); + a128 cmp = *v; + *v = cmp | op; + return cmp; +} + +a128 func_xor(volatile a128 *v, a128 op) { + SpinMutexLock lock(&mutex128); + a128 cmp = *v; + *v = cmp ^ op; + return cmp; +} + +a128 func_nand(volatile a128 *v, a128 op) { + SpinMutexLock lock(&mutex128); + a128 cmp = *v; + *v = ~(cmp & op); + return cmp; +} + +a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) { + SpinMutexLock lock(&mutex128); + a128 cur = *v; + if (cur == cmp) + *v = xch; + return cur; +} +#endif + +template<typename T> +static int SizeLog() { + if (sizeof(T) <= 1) + return kSizeLog1; + else if (sizeof(T) <= 2) + return kSizeLog2; + else if (sizeof(T) <= 4) + return kSizeLog4; + else + return kSizeLog8; + // For 16-byte atomics we also use 8-byte memory access, + // this leads to false negatives only in very obscure cases. 
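+  // (Worked example of that caveat: for T = a128, sizeof(T) == 16 still maps
+  // to kSizeLog8, so the recorded shadow access covers only bytes [0,8) of
+  // the atomic; a plain racing write confined to bytes [8,16) would not
+  // overlap it and could go unreported.)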
+}
+
+#ifndef SANITIZER_GO
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
+}
+#endif
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
+}
+
+static memory_order to_mo(morder mo) {
+  switch (mo) {
+  case mo_relaxed: return memory_order_relaxed;
+  case mo_consume: return memory_order_consume;
+  case mo_acquire: return memory_order_acquire;
+  case mo_release: return memory_order_release;
+  case mo_acq_rel: return memory_order_acq_rel;
+  case mo_seq_cst: return memory_order_seq_cst;
+  }
+  CHECK(0);
+  return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+  return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+  SpinMutexLock lock(&mutex128);
+  return *a;
+}
+#endif
+
+template<typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
+    morder mo) {
+  CHECK(IsLoadOrder(mo));
+  // This fast-path is critical for performance.
+  // Assume the access is atomic.
+  if (!IsAcquireOrder(mo)) {
+    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    return NoTsanAtomicLoad(a, mo);
+  }
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+  AcquireImpl(thr, pc, &s->clock);
+  T v = NoTsanAtomicLoad(a, mo);
+  s->mtx.ReadUnlock();
+  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  return v;
+}
+
+template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+  atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+  SpinMutexLock lock(&mutex128);
+  *a = v;
+}
+#endif
+
+template<typename T>
+static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  CHECK(IsStoreOrder(mo));
+  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  // This fast-path is critical for performance.
+  // Assume the access is atomic.
+  // Strictly speaking, even a relaxed store cuts off the release sequence,
+  // so we must reset the clock.
+  if (!IsReleaseOrder(mo)) {
+    NoTsanAtomicStore(a, v, mo);
+    return;
+  }
+  __sync_synchronize();
+  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+  thr->fast_state.IncrementEpoch();
+  // Can't increment epoch w/o writing to the trace as well.
+  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+  ReleaseImpl(thr, pc, &s->clock);
+  NoTsanAtomicStore(a, v, mo);
+  s->mtx.Unlock();
+}
+
+template<typename T, T (*F)(volatile T *v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  SyncVar *s = 0;
+  if (mo != mo_relaxed) {
+    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+    thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
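+    // (As far as one can tell from this file: the epoch is the thread's
+    // position in its event trace, and report reconstruction replays the
+    // trace up to an epoch, so the dummy EventTypeMop below keeps the two
+    // in lock-step.)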
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + if (IsAcqRelOrder(mo)) + AcquireReleaseImpl(thr, pc, &s->clock); + else if (IsReleaseOrder(mo)) + ReleaseImpl(thr, pc, &s->clock); + else if (IsAcquireOrder(mo)) + AcquireImpl(thr, pc, &s->clock); + } + v = F(a, v); + if (s) + s->mtx.Unlock(); + return v; +} + +template<typename T> +static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) { + return func_xchg(a, v); +} + +template<typename T> +static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) { + return func_add(a, v); +} + +template<typename T> +static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) { + return func_sub(a, v); +} + +template<typename T> +static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) { + return func_and(a, v); +} + +template<typename T> +static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) { + return func_or(a, v); +} + +template<typename T> +static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) { + return func_xor(a, v); +} + +template<typename T> +static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) { + return func_nand(a, v); +} + +template<typename T> +static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_add>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_sub>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_and>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_or>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_xor>(thr, pc, a, v, mo); +} + +template<typename T> +static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) { + return AtomicRMW<T, func_nand>(thr, pc, a, v, mo); +} + +template<typename T> +static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) { + return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo)); +} + +#if __TSAN_HAS_INT128 +static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, + morder mo, morder fmo) { + a128 old = *c; + a128 cur = func_cas(a, old, v); + if (cur == old) + return true; + *c = cur; + return false; +} +#endif + +template<typename T> +static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) { + NoTsanAtomicCAS(a, &c, v, mo, fmo); + return c; +} + +template<typename T> +static bool AtomicCAS(ThreadState *thr, uptr pc, + volatile T *a, T *c, T v, morder mo, morder fmo) { + (void)fmo; // Unused because llvm does not pass it yet. + MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>()); + SyncVar *s = 0; + bool write_lock = mo != mo_acquire && mo != mo_consume; + if (mo != mo_relaxed) { + s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock); + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. 
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + if (IsAcqRelOrder(mo)) + AcquireReleaseImpl(thr, pc, &s->clock); + else if (IsReleaseOrder(mo)) + ReleaseImpl(thr, pc, &s->clock); + else if (IsAcquireOrder(mo)) + AcquireImpl(thr, pc, &s->clock); + } + T cc = *c; + T pr = func_cas(a, cc, v); + if (s) { + if (write_lock) + s->mtx.Unlock(); + else + s->mtx.ReadUnlock(); + } + if (pr == cc) + return true; + *c = pr; + return false; +} + +template<typename T> +static T AtomicCAS(ThreadState *thr, uptr pc, + volatile T *a, T c, T v, morder mo, morder fmo) { + AtomicCAS(thr, pc, a, &c, v, mo, fmo); + return c; +} + +#ifndef SANITIZER_GO +static void NoTsanAtomicFence(morder mo) { + __sync_synchronize(); +} + +static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { + // FIXME(dvyukov): not implemented. + __sync_synchronize(); +} +#endif + +// Interface functions follow. +#ifndef SANITIZER_GO + +// C/C++ + +#define SCOPED_ATOMIC(func, ...) \ + const uptr callpc = (uptr)__builtin_return_address(0); \ + uptr pc = StackTrace::GetCurrentPc(); \ + mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \ + ThreadState *const thr = cur_thread(); \ + if (thr->ignore_interceptors) \ + return NoTsanAtomic##func(__VA_ARGS__); \ + AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \ + ScopedAtomic sa(thr, callpc, a, mo, __func__); \ + return Atomic##func(thr, pc, __VA_ARGS__); \ +/**/ + +class ScopedAtomic { + public: + ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a, + morder mo, const char *func) + : thr_(thr) { + FuncEntry(thr_, pc); + DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo); + } + ~ScopedAtomic() { + ProcessPendingSignals(thr_); + FuncExit(thr_); + } + private: + ThreadState *thr_; +}; + +static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) { + StatInc(thr, StatAtomic); + StatInc(thr, t); + StatInc(thr, size == 1 ? StatAtomic1 + : size == 2 ? StatAtomic2 + : size == 4 ? StatAtomic4 + : size == 8 ? StatAtomic8 + : StatAtomic16); + StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed + : mo == mo_consume ? StatAtomicConsume + : mo == mo_acquire ? StatAtomicAcquire + : mo == mo_release ? StatAtomicRelease + : mo == mo_acq_rel ? 
StatAtomicAcq_Rel + : StatAtomicSeq_Cst); +} + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) { + SCOPED_ATOMIC(Load, a, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) { + SCOPED_ATOMIC(Load, a, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) { + SCOPED_ATOMIC(Load, a, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) { + SCOPED_ATOMIC(Load, a, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) { + SCOPED_ATOMIC(Load, a, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(Store, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(Store, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(Store, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(Store, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(Store, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(Exchange, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(Exchange, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(Exchange, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(Exchange, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(Exchange, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchAdd, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchAdd, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchAdd, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchAdd, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchAdd, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 
__tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchSub, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchAnd, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchAnd, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchAnd, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchAnd, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchAnd, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchOr, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchOr, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchOr, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchOr, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchOr, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchXor, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchXor, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchXor, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchXor, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchXor, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) { + SCOPED_ATOMIC(FetchNand, a, v, mo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, + morder mo, morder fmo) { + 
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} + +#if __TSAN_HAS_INT128 +SANITIZER_INTERFACE_ATTRIBUTE +a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, + morder mo, morder fmo) { + SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); +} +#endif + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_atomic_thread_fence(morder mo) { + char* a = 0; + SCOPED_ATOMIC(Fence, mo); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_atomic_signal_fence(morder mo) { +} +} // extern "C" + +#else // #ifndef SANITIZER_GO + +// Go + +#define ATOMIC(func, ...) \ + if (thr->ignore_sync) { \ + NoTsanAtomic##func(__VA_ARGS__); \ + } else { \ + FuncEntry(thr, cpc); \ + Atomic##func(thr, pc, __VA_ARGS__); \ + FuncExit(thr); \ + } \ +/**/ + +#define ATOMIC_RET(func, ret, ...) 
\ + if (thr->ignore_sync) { \ + (ret) = NoTsanAtomic##func(__VA_ARGS__); \ + } else { \ + FuncEntry(thr, cpc); \ + (ret) = Atomic##func(thr, pc, __VA_ARGS__); \ + FuncExit(thr); \ + } \ +/**/ + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic32_compare_exchange( + ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + a32 cur = 0; + a32 cmp = *(a32*)(a+8); + ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire); + *(bool*)(a+16) = (cur == cmp); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_go_atomic64_compare_exchange( + ThreadState *thr, uptr cpc, uptr pc, u8 *a) { + a64 cur = 0; + a64 cmp = *(a64*)(a+8); + ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire); + *(bool*)(a+24) = (cur == cmp); +} +} // extern "C" +#endif // #ifndef SANITIZER_GO diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h new file mode 100644 index 0000000..0187e49 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h @@ -0,0 +1,85 @@ +//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
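+//
+// Defines the hot-path public entry points (__tsan_readN/__tsan_writeN,
+// __tsan_func_entry/exit) as thin wrappers over the core runtime; CALLERPC
+// records the instrumented call site for use in reports.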
+// +//===----------------------------------------------------------------------===// + +#include "tsan_interface.h" +#include "tsan_rtl.h" + +#define CALLERPC ((uptr)__builtin_return_address(0)) + +using namespace __tsan; // NOLINT + +void __tsan_read1(void *addr) { + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1); +} + +void __tsan_read2(void *addr) { + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2); +} + +void __tsan_read4(void *addr) { + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4); +} + +void __tsan_read8(void *addr) { + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); +} + +void __tsan_write1(void *addr) { + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1); +} + +void __tsan_write2(void *addr) { + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2); +} + +void __tsan_write4(void *addr) { + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4); +} + +void __tsan_write8(void *addr) { + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); +} + +void __tsan_vptr_update(void **vptr_p, void *new_val) { + CHECK_EQ(sizeof(vptr_p), 8); + if (*vptr_p != new_val) { + ThreadState *thr = cur_thread(); + thr->is_vptr_access = true; + MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8); + thr->is_vptr_access = false; + } +} + +void __tsan_vptr_read(void **vptr_p) { + CHECK_EQ(sizeof(vptr_p), 8); + ThreadState *thr = cur_thread(); + thr->is_vptr_access = true; + MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8); + thr->is_vptr_access = false; +} + +void __tsan_func_entry(void *pc) { + FuncEntry(cur_thread(), (uptr)pc); +} + +void __tsan_func_exit() { + FuncExit(cur_thread()); +} + +void __tsan_read_range(void *addr, uptr size) { + MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false); +} + +void __tsan_write_range(void *addr, uptr size) { + MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true); +} diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc new file mode 100644 index 0000000..8615349 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc @@ -0,0 +1,221 @@ +//===-- tsan_interface_java.cc --------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include "tsan_interface_java.h" +#include "tsan_rtl.h" +#include "tsan_mutex.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_procmaps.h" + +using namespace __tsan; // NOLINT + +const jptr kHeapAlignment = 8; + +namespace __tsan { + +struct JavaContext { + const uptr heap_begin; + const uptr heap_size; + + JavaContext(jptr heap_begin, jptr heap_size) + : heap_begin(heap_begin) + , heap_size(heap_size) { + } +}; + +class ScopedJavaFunc { + public: + ScopedJavaFunc(ThreadState *thr, uptr pc) + : thr_(thr) { + Initialize(thr_); + FuncEntry(thr, pc); + } + + ~ScopedJavaFunc() { + FuncExit(thr_); + // FIXME(dvyukov): process pending signals. 
+ } + + private: + ThreadState *thr_; +}; + +static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1]; +static JavaContext *jctx; + +} // namespace __tsan + +#define SCOPED_JAVA_FUNC(func) \ + ThreadState *thr = cur_thread(); \ + const uptr caller_pc = GET_CALLER_PC(); \ + const uptr pc = StackTrace::GetCurrentPc(); \ + (void)pc; \ + ScopedJavaFunc scoped(thr, caller_pc); \ +/**/ + +void __tsan_java_init(jptr heap_begin, jptr heap_size) { + SCOPED_JAVA_FUNC(__tsan_java_init); + DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size); + CHECK_EQ(jctx, 0); + CHECK_GT(heap_begin, 0); + CHECK_GT(heap_size, 0); + CHECK_EQ(heap_begin % kHeapAlignment, 0); + CHECK_EQ(heap_size % kHeapAlignment, 0); + CHECK_LT(heap_begin, heap_begin + heap_size); + jctx = new(jctx_buf) JavaContext(heap_begin, heap_size); +} + +int __tsan_java_fini() { + SCOPED_JAVA_FUNC(__tsan_java_fini); + DPrintf("#%d: java_fini()\n", thr->tid); + CHECK_NE(jctx, 0); + // FIXME(dvyukov): this does not call atexit() callbacks. + int status = Finalize(thr); + DPrintf("#%d: java_fini() = %d\n", thr->tid, status); + return status; +} + +void __tsan_java_alloc(jptr ptr, jptr size) { + SCOPED_JAVA_FUNC(__tsan_java_alloc); + DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size); + CHECK_NE(jctx, 0); + CHECK_NE(size, 0); + CHECK_EQ(ptr % kHeapAlignment, 0); + CHECK_EQ(size % kHeapAlignment, 0); + CHECK_GE(ptr, jctx->heap_begin); + CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size); + + OnUserAlloc(thr, pc, ptr, size, false); +} + +void __tsan_java_free(jptr ptr, jptr size) { + SCOPED_JAVA_FUNC(__tsan_java_free); + DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size); + CHECK_NE(jctx, 0); + CHECK_NE(size, 0); + CHECK_EQ(ptr % kHeapAlignment, 0); + CHECK_EQ(size % kHeapAlignment, 0); + CHECK_GE(ptr, jctx->heap_begin); + CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size); + + ctx->metamap.FreeRange(thr, pc, ptr, size); +} + +void __tsan_java_move(jptr src, jptr dst, jptr size) { + SCOPED_JAVA_FUNC(__tsan_java_move); + DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size); + CHECK_NE(jctx, 0); + CHECK_NE(size, 0); + CHECK_EQ(src % kHeapAlignment, 0); + CHECK_EQ(dst % kHeapAlignment, 0); + CHECK_EQ(size % kHeapAlignment, 0); + CHECK_GE(src, jctx->heap_begin); + CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size); + CHECK_GE(dst, jctx->heap_begin); + CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size); + CHECK_NE(dst, src); + CHECK_NE(size, 0); + + // Assuming it's not running concurrently with threads that do + // memory accesses and mutex operations (stop-the-world phase). + ctx->metamap.MoveMemory(src, dst, size); + + // Move shadow. 
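+  // The ranges may overlap, so copy memmove-style: front-to-back when moving
+  // down, back-to-front when moving up ('inc' wraps around as unsigned to
+  // step backwards). The source shadow is zeroed as it is copied out.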
+  u64 *s = (u64*)MemToShadow(src);
+  u64 *d = (u64*)MemToShadow(dst);
+  u64 *send = (u64*)MemToShadow(src + size);
+  uptr inc = 1;
+  if (dst > src) {
+    s = (u64*)MemToShadow(src + size) - 1;
+    d = (u64*)MemToShadow(dst + size) - 1;
+    send = (u64*)MemToShadow(src) - 1;
+    inc = -1;
+  }
+  for (; s != send; s += inc, d += inc) {
+    *d = *s;
+    *s = 0;
+  }
+}
+
+void __tsan_java_finalize() {
+  SCOPED_JAVA_FUNC(__tsan_java_finalize);
+  DPrintf("#%d: java_finalize()\n", thr->tid);
+  AcquireGlobal(thr, 0);
+}
+
+void __tsan_java_mutex_lock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
+  DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexCreate(thr, pc, addr, true, true, true);
+  MutexLock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_unlock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
+  DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_lock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
+  DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexCreate(thr, pc, addr, true, true, true);
+  MutexReadLock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_unlock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
+  DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexReadUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
+  DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+  CHECK_GT(rec, 0);
+
+  MutexCreate(thr, pc, addr, true, true, true);
+  MutexLock(thr, pc, addr, rec);
+}
+
+int __tsan_java_mutex_unlock_rec(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
+  DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  return MutexUnlock(thr, pc, addr, true);
+}
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.h
new file mode 100644
index 0000000..1f793df
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_interface_java.h
@@ -0,0 +1,88 @@
+//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for verification of Java or mixed Java/C++ programs.
+// The interface is intended to be used from within a JVM to notify TSan
+// about events such as Java locks and GC memory compaction.
+//
+// For plain memory accesses and function entry/exit a JVM is intended to use
+// C++ interfaces: __tsan_readN/writeN and __tsan_func_enter/exit.
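+//
+// For example (a sketch only; the JVM-side driver is hypothetical), an
+// allocation followed by a synchronized block could be reported as:
+//   __tsan_java_alloc(obj, size);
+//   __tsan_java_mutex_lock(obj);    // enter synchronized(obj)
+//   __tsan_write8(field_addr);      // a field store inside the block
+//   __tsan_java_mutex_unlock(obj);  // leave synchronized(obj)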
+//
+// For volatile memory accesses and atomic operations a JVM is intended to use
+// standard atomics API: __tsan_atomicN_load/store/etc.
+//
+// For usage examples see lit_tests/java_*.cc
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_JAVA_H
+#define TSAN_INTERFACE_JAVA_H
+
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long jptr;  // NOLINT
+
+// Must be called before any other callback from Java.
+void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE;
+// Must be called when the application exits.
+// Not necessarily the last callback (concurrently running threads are OK).
+// Returns exit status or 0 if tsan does not want to override it.
+int __tsan_java_fini() INTERFACE_ATTRIBUTE;
+
+// Callback for memory allocations.
+// May be omitted for allocations that are not subject to data races
+// nor contain synchronization objects (e.g. String).
+void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory free.
+// Can be aggregated for several objects (preferably).
+void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory move by GC.
+// Can be aggregated for several objects (preferably).
+// The ranges can overlap.
+void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures necessary synchronization between
+// Java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
+
+// Mutex lock.
+// Addr is any unique address associated with the mutex.
+// Can be called on recursive reentry.
+void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex unlock.
+void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read lock.
+void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read unlock.
+void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Recursive mutex lock, intended for handling of Object.wait().
+// The 'rec' value must be obtained from the previous
+// __tsan_java_mutex_unlock_rec().
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE;
+// Recursive mutex unlock, intended for handling of Object.wait().
+// The return value says how many times this thread called lock()
+// w/o a pairing unlock() (i.e. how many recursive levels it unlocked).
+// It must be passed back to __tsan_java_mutex_lock_rec() to restore
+// the same recursion level.
+int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#undef INTERFACE_ATTRIBUTE
+
+#endif  // #ifndef TSAN_INTERFACE_JAVA_H
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_md5.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_md5.cc
new file mode 100644
index 0000000..51279c1
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_md5.cc
@@ -0,0 +1,243 @@
+//===-- tsan_md5.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
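+//
+// A self-contained MD5 implementation. The runtime uses md5_hash() to
+// fingerprint data such as stack traces (e.g. for deduplicating reports).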
+// +//===----------------------------------------------------------------------===// +#include "tsan_defs.h" + +namespace __tsan { + +#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) +#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y)))) +#define H(x, y, z) ((x) ^ (y) ^ (z)) +#define I(x, y, z) ((y) ^ ((x) | ~(z))) + +#define STEP(f, a, b, c, d, x, t, s) \ + (a) += f((b), (c), (d)) + (x) + (t); \ + (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \ + (a) += (b); + +#define SET(n) \ + (*(const MD5_u32plus *)&ptr[(n) * 4]) +#define GET(n) \ + SET(n) + +typedef unsigned int MD5_u32plus; +typedef unsigned long ulong_t; // NOLINT + +typedef struct { + MD5_u32plus lo, hi; + MD5_u32plus a, b, c, d; + unsigned char buffer[64]; + MD5_u32plus block[16]; +} MD5_CTX; + +static const void *body(MD5_CTX *ctx, const void *data, ulong_t size) { + const unsigned char *ptr = (const unsigned char *)data; + MD5_u32plus a, b, c, d; + MD5_u32plus saved_a, saved_b, saved_c, saved_d; + + a = ctx->a; + b = ctx->b; + c = ctx->c; + d = ctx->d; + + do { + saved_a = a; + saved_b = b; + saved_c = c; + saved_d = d; + + STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7) + STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12) + STEP(F, c, d, a, b, SET(2), 0x242070db, 17) + STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22) + STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7) + STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12) + STEP(F, c, d, a, b, SET(6), 0xa8304613, 17) + STEP(F, b, c, d, a, SET(7), 0xfd469501, 22) + STEP(F, a, b, c, d, SET(8), 0x698098d8, 7) + STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12) + STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17) + STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22) + STEP(F, a, b, c, d, SET(12), 0x6b901122, 7) + STEP(F, d, a, b, c, SET(13), 0xfd987193, 12) + STEP(F, c, d, a, b, SET(14), 0xa679438e, 17) + STEP(F, b, c, d, a, SET(15), 0x49b40821, 22) + + STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5) + STEP(G, d, a, b, c, GET(6), 0xc040b340, 9) + STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14) + STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20) + STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5) + STEP(G, d, a, b, c, GET(10), 0x02441453, 9) + STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14) + STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20) + STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5) + STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9) + STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14) + STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20) + STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5) + STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9) + STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14) + STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20) + + STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4) + STEP(H, d, a, b, c, GET(8), 0x8771f681, 11) + STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16) + STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23) + STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4) + STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11) + STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16) + STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23) + STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4) + STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11) + STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16) + STEP(H, b, c, d, a, GET(6), 0x04881d05, 23) + STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4) + STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11) + STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16) + STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23) + + STEP(I, a, b, c, d, GET(0), 0xf4292244, 6) + STEP(I, d, a, b, c, GET(7), 0x432aff97, 10) + STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15) + STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21) + STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6) + 
STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10) + STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15) + STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21) + STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6) + STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10) + STEP(I, c, d, a, b, GET(6), 0xa3014314, 15) + STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21) + STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6) + STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10) + STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15) + STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21) + + a += saved_a; + b += saved_b; + c += saved_c; + d += saved_d; + + ptr += 64; + } while (size -= 64); + + ctx->a = a; + ctx->b = b; + ctx->c = c; + ctx->d = d; + + return ptr; +} + +void MD5_Init(MD5_CTX *ctx) { + ctx->a = 0x67452301; + ctx->b = 0xefcdab89; + ctx->c = 0x98badcfe; + ctx->d = 0x10325476; + + ctx->lo = 0; + ctx->hi = 0; +} + +void MD5_Update(MD5_CTX *ctx, const void *data, ulong_t size) { + MD5_u32plus saved_lo; + ulong_t used, free; + + saved_lo = ctx->lo; + if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo) + ctx->hi++; + ctx->hi += size >> 29; + + used = saved_lo & 0x3f; + + if (used) { + free = 64 - used; + + if (size < free) { + internal_memcpy(&ctx->buffer[used], data, size); + return; + } + + internal_memcpy(&ctx->buffer[used], data, free); + data = (const unsigned char *)data + free; + size -= free; + body(ctx, ctx->buffer, 64); + } + + if (size >= 64) { + data = body(ctx, data, size & ~(ulong_t)0x3f); + size &= 0x3f; + } + + internal_memcpy(ctx->buffer, data, size); +} + +void MD5_Final(unsigned char *result, MD5_CTX *ctx) { + ulong_t used, free; + + used = ctx->lo & 0x3f; + + ctx->buffer[used++] = 0x80; + + free = 64 - used; + + if (free < 8) { + internal_memset(&ctx->buffer[used], 0, free); + body(ctx, ctx->buffer, 64); + used = 0; + free = 64; + } + + internal_memset(&ctx->buffer[used], 0, free - 8); + + ctx->lo <<= 3; + ctx->buffer[56] = ctx->lo; + ctx->buffer[57] = ctx->lo >> 8; + ctx->buffer[58] = ctx->lo >> 16; + ctx->buffer[59] = ctx->lo >> 24; + ctx->buffer[60] = ctx->hi; + ctx->buffer[61] = ctx->hi >> 8; + ctx->buffer[62] = ctx->hi >> 16; + ctx->buffer[63] = ctx->hi >> 24; + + body(ctx, ctx->buffer, 64); + + result[0] = ctx->a; + result[1] = ctx->a >> 8; + result[2] = ctx->a >> 16; + result[3] = ctx->a >> 24; + result[4] = ctx->b; + result[5] = ctx->b >> 8; + result[6] = ctx->b >> 16; + result[7] = ctx->b >> 24; + result[8] = ctx->c; + result[9] = ctx->c >> 8; + result[10] = ctx->c >> 16; + result[11] = ctx->c >> 24; + result[12] = ctx->d; + result[13] = ctx->d >> 8; + result[14] = ctx->d >> 16; + result[15] = ctx->d >> 24; + + internal_memset(ctx, 0, sizeof(*ctx)); +} + +MD5Hash md5_hash(const void *data, uptr size) { + MD5Hash res; + MD5_CTX ctx; + MD5_Init(&ctx); + MD5_Update(&ctx, data, size); + MD5_Final((unsigned char*)&res.hash[0], &ctx); + return res; +} +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc new file mode 100644 index 0000000..285bdb3 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.cc @@ -0,0 +1,217 @@ +//===-- tsan_mman.cc ------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_report.h"
+#include "tsan_flags.h"
+
+// May be overridden by the front-end.
+extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
+  (void)ptr;
+  (void)size;
+}
+
+extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
+  (void)ptr;
+}
+
+namespace __tsan {
+
+struct MapUnmapCallback {
+  void OnMap(uptr p, uptr size) const { }
+  void OnUnmap(uptr p, uptr size) const {
+    // We are about to unmap a chunk of user memory.
+    // Mark the corresponding shadow memory as not needed.
+    DontNeedShadowFor(p, size);
+  }
+};
+
+static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
+Allocator *allocator() {
+  return reinterpret_cast<Allocator*>(&allocator_placeholder);
+}
+
+void InitializeAllocator() {
+  allocator()->Init();
+}
+
+void AllocatorThreadStart(ThreadState *thr) {
+  allocator()->InitCache(&thr->alloc_cache);
+  internal_allocator()->InitCache(&thr->internal_alloc_cache);
+}
+
+void AllocatorThreadFinish(ThreadState *thr) {
+  allocator()->DestroyCache(&thr->alloc_cache);
+  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
+}
+
+void AllocatorPrintStats() {
+  allocator()->PrintStats();
+}
+
+static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
+  if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
+      !flags()->report_signal_unsafe)
+    return;
+  VarSizeStackTrace stack;
+  ObtainCurrentStack(thr, pc, &stack);
+  ThreadRegistryLock l(ctx->thread_registry);
+  ScopedReport rep(ReportTypeSignalUnsafe);
+  if (!IsFiredSuppression(ctx, rep, stack)) {
+    rep.AddStack(stack, true);
+    OutputReport(thr, rep);
+  }
+}
+
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
+  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
+    return AllocatorReturnNull();
+  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
+  if (p == 0)
+    return 0;
+  if (ctx && ctx->initialized)
+    OnUserAlloc(thr, pc, (uptr)p, sz, true);
+  if (signal)
+    SignalUnsafeCall(thr, pc);
+  return p;
+}
+
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+  if (ctx && ctx->initialized)
+    OnUserFree(thr, pc, (uptr)p, true);
+  allocator()->Deallocate(&thr->alloc_cache, p);
+  if (signal)
+    SignalUnsafeCall(thr, pc);
+}
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+  ctx->metamap.AllocBlock(thr, pc, p, sz);
+  if (write && thr->ignore_reads_and_writes == 0)
+    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+  else
+    MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+  CHECK_NE(p, (void*)0);
+  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+  if (write && thr->ignore_reads_and_writes == 0)
+    MemoryRangeFreed(thr, pc, (uptr)p, sz);
+}
+
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
+  void *p2 = 0;
+  // FIXME: Handle "shrinking" more efficiently;
+  // it seems that some software actually does this.
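+  // realloc semantics: allocate a new block, copy the common prefix,
+  // then free the old block; realloc(p, 0) degenerates to free(p).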
+ if (sz) { + p2 = user_alloc(thr, pc, sz); + if (p2 == 0) + return 0; + if (p) { + uptr oldsz = user_alloc_usable_size(p); + internal_memcpy(p2, p, min(oldsz, sz)); + } + } + if (p) + user_free(thr, pc, p); + return p2; +} + +uptr user_alloc_usable_size(const void *p) { + if (p == 0) + return 0; + MBlock *b = ctx->metamap.GetBlock((uptr)p); + return b ? b->siz : 0; +} + +void invoke_malloc_hook(void *ptr, uptr size) { + ThreadState *thr = cur_thread(); + if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) + return; + __sanitizer_malloc_hook(ptr, size); +} + +void invoke_free_hook(void *ptr) { + ThreadState *thr = cur_thread(); + if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors) + return; + __sanitizer_free_hook(ptr); +} + +void *internal_alloc(MBlockType typ, uptr sz) { + ThreadState *thr = cur_thread(); + if (thr->nomalloc) { + thr->nomalloc = 0; // CHECK calls internal_malloc(). + CHECK(0); + } + return InternalAlloc(sz, &thr->internal_alloc_cache); +} + +void internal_free(void *p) { + ThreadState *thr = cur_thread(); + if (thr->nomalloc) { + thr->nomalloc = 0; // CHECK calls internal_malloc(). + CHECK(0); + } + InternalFree(p, &thr->internal_alloc_cache); +} + +} // namespace __tsan + +using namespace __tsan; + +extern "C" { +uptr __sanitizer_get_current_allocated_bytes() { + uptr stats[AllocatorStatCount]; + allocator()->GetStats(stats); + return stats[AllocatorStatAllocated]; +} + +uptr __sanitizer_get_heap_size() { + uptr stats[AllocatorStatCount]; + allocator()->GetStats(stats); + return stats[AllocatorStatMapped]; +} + +uptr __sanitizer_get_free_bytes() { + return 1; +} + +uptr __sanitizer_get_unmapped_bytes() { + return 1; +} + +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + return allocator()->GetBlockBegin(p) != 0; +} + +uptr __sanitizer_get_allocated_size(const void *p) { + return user_alloc_usable_size(p); +} + +void __tsan_on_thread_idle() { + ThreadState *thr = cur_thread(); + allocator()->SwallowCache(&thr->alloc_cache); + internal_allocator()->SwallowCache(&thr->internal_alloc_cache); + ctx->metamap.OnThreadIdle(thr); +} +} // extern "C" diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.h new file mode 100644 index 0000000..7d41fa8 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_mman.h @@ -0,0 +1,80 @@ +//===-- tsan_mman.h ---------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_MMAN_H +#define TSAN_MMAN_H + +#include "tsan_defs.h" + +namespace __tsan { + +const uptr kDefaultAlignment = 16; + +void InitializeAllocator(); +void AllocatorThreadStart(ThreadState *thr); +void AllocatorThreadFinish(ThreadState *thr); +void AllocatorPrintStats(); + +// For user allocations. +void *user_alloc(ThreadState *thr, uptr pc, uptr sz, + uptr align = kDefaultAlignment, bool signal = true); +// Does not accept NULL. 
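+// (Callers such as the free() interceptor filter out NULL before calling it.)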
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
+void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+uptr user_alloc_usable_size(const void *p);
+
+// Invoking malloc/free hooks that may be installed by the user.
+void invoke_malloc_hook(void *ptr, uptr size);
+void invoke_free_hook(void *ptr);
+
+enum MBlockType {
+  MBlockScopedBuf,
+  MBlockString,
+  MBlockStackTrace,
+  MBlockShadowStack,
+  MBlockSync,
+  MBlockClock,
+  MBlockThreadContex,
+  MBlockDeadInfo,
+  MBlockRacyStacks,
+  MBlockRacyAddresses,
+  MBlockAtExit,
+  MBlockFlag,
+  MBlockReport,
+  MBlockReportMop,
+  MBlockReportThread,
+  MBlockReportMutex,
+  MBlockReportLoc,
+  MBlockReportStack,
+  MBlockSuppression,
+  MBlockExpectRace,
+  MBlockSignal,
+  MBlockJmpBuf,
+
+  // This must be the last.
+  MBlockTypeCount
+};
+
+// For internal data structures.
+void *internal_alloc(MBlockType typ, uptr sz);
+void internal_free(void *p);
+
+template<typename T>
+void DestroyAndFree(T *&p) {
+  p->~T();
+  internal_free(p);
+  p = 0;
+}
+
+}  // namespace __tsan
+#endif  // TSAN_MMAN_H
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_mutex.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutex.cc
new file mode 100644
index 0000000..9ea9bae
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutex.cc
@@ -0,0 +1,287 @@
+//===-- tsan_mutex.cc -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_mutex.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
+// Readers have preference and can potentially starve writers.
+
+// The table specifies which mutexes can be locked under which other mutexes.
+// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
+// then the Report mutex can be locked while under the Threads mutex.
+// The leaf mutexes can be locked under any other mutexes.
+// Recursive locking is not supported.
+#if TSAN_DEBUG && !SANITIZER_GO
+const MutexType MutexTypeLeaf = (MutexType)-1;
+static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
+  /*0  MutexTypeInvalid*/     {},
+  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
+  /*2  MutexTypeThreads*/     {MutexTypeReport},
+  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
+                               MutexTypeMBlock, MutexTypeJavaMBlock},
+  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
+  /*5  MutexTypeSyncTab*/     {},  // unused
+  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
+  /*7  MutexTypeAnnotations*/ {},
+  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
+  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
+  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
+  /*11 MutexTypeDDetector*/   {},
+};
+
+static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+#endif
+
+void InitializeMutex() {
+#if TSAN_DEBUG && !SANITIZER_GO
+  // Build the "can lock" adjacency matrix.
+  // If [i][j]==true, then one can lock mutex j while under mutex i.
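+  // Three steps: record the direct edges from CanLockTab, then allow leaf
+  // mutexes under everything else, then take the transitive closure
+  // (Floyd-Warshall) and verify the resulting graph is acyclic.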
+ const int N = MutexTypeCount; + int cnt[N] = {}; + bool leaf[N] = {}; + for (int i = 1; i < N; i++) { + for (int j = 0; j < N; j++) { + MutexType z = CanLockTab[i][j]; + if (z == MutexTypeInvalid) + continue; + if (z == MutexTypeLeaf) { + CHECK(!leaf[i]); + leaf[i] = true; + continue; + } + CHECK(!CanLockAdj[i][(int)z]); + CanLockAdj[i][(int)z] = true; + cnt[i]++; + } + } + for (int i = 0; i < N; i++) { + CHECK(!leaf[i] || cnt[i] == 0); + } + // Add leaf mutexes. + for (int i = 0; i < N; i++) { + if (!leaf[i]) + continue; + for (int j = 0; j < N; j++) { + if (i == j || leaf[j] || j == MutexTypeInvalid) + continue; + CHECK(!CanLockAdj[j][i]); + CanLockAdj[j][i] = true; + } + } + // Build the transitive closure. + bool CanLockAdj2[MutexTypeCount][MutexTypeCount]; + for (int i = 0; i < N; i++) { + for (int j = 0; j < N; j++) { + CanLockAdj2[i][j] = CanLockAdj[i][j]; + } + } + for (int k = 0; k < N; k++) { + for (int i = 0; i < N; i++) { + for (int j = 0; j < N; j++) { + if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) { + CanLockAdj2[i][j] = true; + } + } + } + } +#if 0 + Printf("Can lock graph:\n"); + for (int i = 0; i < N; i++) { + for (int j = 0; j < N; j++) { + Printf("%d ", CanLockAdj[i][j]); + } + Printf("\n"); + } + Printf("Can lock graph closure:\n"); + for (int i = 0; i < N; i++) { + for (int j = 0; j < N; j++) { + Printf("%d ", CanLockAdj2[i][j]); + } + Printf("\n"); + } +#endif + // Verify that the graph is acyclic. + for (int i = 0; i < N; i++) { + if (CanLockAdj2[i][i]) { + Printf("Mutex %d participates in a cycle\n", i); + Die(); + } + } +#endif +} + +InternalDeadlockDetector::InternalDeadlockDetector() { + // Rely on zero initialization because some mutexes can be locked before ctor. +} + +#if TSAN_DEBUG && !SANITIZER_GO +void InternalDeadlockDetector::Lock(MutexType t) { + // Printf("LOCK %d @%zu\n", t, seq_ + 1); + CHECK_GT(t, MutexTypeInvalid); + CHECK_LT(t, MutexTypeCount); + u64 max_seq = 0; + u64 max_idx = MutexTypeInvalid; + for (int i = 0; i != MutexTypeCount; i++) { + if (locked_[i] == 0) + continue; + CHECK_NE(locked_[i], max_seq); + if (max_seq < locked_[i]) { + max_seq = locked_[i]; + max_idx = i; + } + } + locked_[t] = ++seq_; + if (max_idx == MutexTypeInvalid) + return; + // Printf(" last %d @%zu\n", max_idx, max_seq); + if (!CanLockAdj[max_idx][t]) { + Printf("ThreadSanitizer: internal deadlock detected\n"); + Printf("ThreadSanitizer: can't lock %d while under %zu\n", + t, (uptr)max_idx); + CHECK(0); + } +} + +void InternalDeadlockDetector::Unlock(MutexType t) { + // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]); + CHECK(locked_[t]); + locked_[t] = 0; +} + +void InternalDeadlockDetector::CheckNoLocks() { + for (int i = 0; i != MutexTypeCount; i++) { + CHECK_EQ(locked_[i], 0); + } +} +#endif + +void CheckNoLocks(ThreadState *thr) { +#if TSAN_DEBUG && !SANITIZER_GO + thr->internal_deadlock_detector.CheckNoLocks(); +#endif +} + +const uptr kUnlocked = 0; +const uptr kWriteLock = 1; +const uptr kReadLock = 2; + +class Backoff { + public: + Backoff() + : iter_() { + } + + bool Do() { + if (iter_++ < kActiveSpinIters) + proc_yield(kActiveSpinCnt); + else + internal_sched_yield(); + return true; + } + + u64 Contention() const { + u64 active = iter_ % kActiveSpinIters; + u64 passive = iter_ - active; + return active + 10 * passive; + } + + private: + int iter_; + static const int kActiveSpinIters = 10; + static const int kActiveSpinCnt = 20; +}; + +Mutex::Mutex(MutexType type, StatType stat_type) { + CHECK_GT(type, MutexTypeInvalid); + CHECK_LT(type, MutexTypeCount); 
+#if TSAN_DEBUG + type_ = type; +#endif +#if TSAN_COLLECT_STATS + stat_type_ = stat_type; +#endif + atomic_store(&state_, kUnlocked, memory_order_relaxed); +} + +Mutex::~Mutex() { + CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked); +} + +void Mutex::Lock() { +#if TSAN_DEBUG && !SANITIZER_GO + cur_thread()->internal_deadlock_detector.Lock(type_); +#endif + uptr cmp = kUnlocked; + if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, + memory_order_acquire)) + return; + for (Backoff backoff; backoff.Do();) { + if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) { + cmp = kUnlocked; + if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, + memory_order_acquire)) { +#if TSAN_COLLECT_STATS && !SANITIZER_GO + StatInc(cur_thread(), stat_type_, backoff.Contention()); +#endif + return; + } + } + } +} + +void Mutex::Unlock() { + uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); + (void)prev; + DCHECK_NE(prev & kWriteLock, 0); +#if TSAN_DEBUG && !SANITIZER_GO + cur_thread()->internal_deadlock_detector.Unlock(type_); +#endif +} + +void Mutex::ReadLock() { +#if TSAN_DEBUG && !SANITIZER_GO + cur_thread()->internal_deadlock_detector.Lock(type_); +#endif + uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); + if ((prev & kWriteLock) == 0) + return; + for (Backoff backoff; backoff.Do();) { + prev = atomic_load(&state_, memory_order_acquire); + if ((prev & kWriteLock) == 0) { +#if TSAN_COLLECT_STATS && !SANITIZER_GO + StatInc(cur_thread(), stat_type_, backoff.Contention()); +#endif + return; + } + } +} + +void Mutex::ReadUnlock() { + uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); + (void)prev; + DCHECK_EQ(prev & kWriteLock, 0); + DCHECK_GT(prev & ~kWriteLock, 0); +#if TSAN_DEBUG && !SANITIZER_GO + cur_thread()->internal_deadlock_detector.Unlock(type_); +#endif +} + +void Mutex::CheckLocked() { + CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0); +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_mutex.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutex.h new file mode 100644 index 0000000..7bb1c48 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutex.h @@ -0,0 +1,88 @@ +//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_MUTEX_H +#define TSAN_MUTEX_H + +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "tsan_defs.h" + +namespace __tsan { + +enum MutexType { + MutexTypeInvalid, + MutexTypeTrace, + MutexTypeThreads, + MutexTypeReport, + MutexTypeSyncVar, + MutexTypeSyncTab, + MutexTypeSlab, + MutexTypeAnnotations, + MutexTypeAtExit, + MutexTypeMBlock, + MutexTypeJavaMBlock, + MutexTypeDDetector, + + // This must be the last. 
+ MutexTypeCount +}; + +class Mutex { + public: + explicit Mutex(MutexType type, StatType stat_type); + ~Mutex(); + + void Lock(); + void Unlock(); + + void ReadLock(); + void ReadUnlock(); + + void CheckLocked(); + + private: + atomic_uintptr_t state_; +#if TSAN_DEBUG + MutexType type_; +#endif +#if TSAN_COLLECT_STATS + StatType stat_type_; +#endif + + Mutex(const Mutex&); + void operator = (const Mutex&); +}; + +typedef GenericScopedLock<Mutex> Lock; +typedef GenericScopedReadLock<Mutex> ReadLock; + +class InternalDeadlockDetector { + public: + InternalDeadlockDetector(); + void Lock(MutexType t); + void Unlock(MutexType t); + void CheckNoLocks(); + private: + u64 seq_; + u64 locked_[MutexTypeCount]; +}; + +void InitializeMutex(); + +// Checks that the current thread does not hold any runtime locks +// (e.g. when returning from an interceptor). +void CheckNoLocks(ThreadState *thr); + +} // namespace __tsan + +#endif // TSAN_MUTEX_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_mutexset.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutexset.cc new file mode 100644 index 0000000..2158777 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutexset.cc @@ -0,0 +1,89 @@ +//===-- tsan_mutexset.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#include "tsan_mutexset.h" +#include "tsan_rtl.h" + +namespace __tsan { + +const uptr MutexSet::kMaxSize; + +MutexSet::MutexSet() { + size_ = 0; + internal_memset(&descs_, 0, sizeof(descs_)); +} + +void MutexSet::Add(u64 id, bool write, u64 epoch) { + // Look up existing mutex with the same id. + for (uptr i = 0; i < size_; i++) { + if (descs_[i].id == id) { + descs_[i].count++; + descs_[i].epoch = epoch; + return; + } + } + // On overflow, find the oldest mutex and drop it. + if (size_ == kMaxSize) { + u64 minepoch = (u64)-1; + u64 mini = (u64)-1; + for (uptr i = 0; i < size_; i++) { + if (descs_[i].epoch < minepoch) { + minepoch = descs_[i].epoch; + mini = i; + } + } + RemovePos(mini); + CHECK_EQ(size_, kMaxSize - 1); + } + // Add new mutex descriptor. 
+  descs_[size_].id = id;
+  descs_[size_].write = write;
+  descs_[size_].epoch = epoch;
+  descs_[size_].count = 1;
+  size_++;
+}
+
+void MutexSet::Del(u64 id, bool write) {
+  for (uptr i = 0; i < size_; i++) {
+    if (descs_[i].id == id) {
+      if (--descs_[i].count == 0)
+        RemovePos(i);
+      return;
+    }
+  }
+}
+
+void MutexSet::Remove(u64 id) {
+  for (uptr i = 0; i < size_; i++) {
+    if (descs_[i].id == id) {
+      RemovePos(i);
+      return;
+    }
+  }
+}
+
+void MutexSet::RemovePos(uptr i) {
+  CHECK_LT(i, size_);
+  descs_[i] = descs_[size_ - 1];
+  size_--;
+}
+
+uptr MutexSet::Size() const {
+  return size_;
+}
+
+MutexSet::Desc MutexSet::Get(uptr i) const {
+  CHECK_LT(i, size_);
+  return descs_[i];
+}
+
+}  // namespace __tsan
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_mutexset.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
new file mode 100644
index 0000000..68f0ec2
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
@@ -0,0 +1,70 @@
+//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// MutexSet holds the set of mutexes currently held by a thread.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEXSET_H
+#define TSAN_MUTEXSET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class MutexSet {
+ public:
+  // Holds a limited number of mutexes.
+  // The oldest mutexes are discarded on overflow.
+  static const uptr kMaxSize = 16;
+  struct Desc {
+    u64 id;
+    u64 epoch;
+    int count;
+    bool write;
+  };
+
+  MutexSet();
+  // The 'id' is obtained from SyncVar::GetId().
+  void Add(u64 id, bool write, u64 epoch);
+  void Del(u64 id, bool write);
+  void Remove(u64 id);  // Removes the mutex completely (if it's destroyed).
+  uptr Size() const;
+  Desc Get(uptr i) const;
+
+  void operator=(const MutexSet &other) {
+    internal_memcpy(this, &other, sizeof(*this));
+  }
+
+ private:
+#ifndef SANITIZER_GO
+  uptr size_;
+  Desc descs_[kMaxSize];
+#endif
+
+  void RemovePos(uptr i);
+  MutexSet(const MutexSet&);
+};
+
+// Go does not have mutexes, so do not spend memory and time on them.
+// (A Go sync.Mutex is actually a semaphore: it can be unlocked
+// from a different goroutine.)
+#ifdef SANITIZER_GO
+MutexSet::MutexSet() {}
+void MutexSet::Add(u64 id, bool write, u64 epoch) {}
+void MutexSet::Del(u64 id, bool write) {}
+void MutexSet::Remove(u64 id) {}
+void MutexSet::RemovePos(uptr i) {}
+uptr MutexSet::Size() const { return 0; }
+MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+#endif
+
+}  // namespace __tsan
+
+#endif  // TSAN_MUTEXSET_H
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_platform.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform.h
new file mode 100644
index 0000000..270a751
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -0,0 +1,270 @@
+//===-- tsan_platform.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Platform-specific code.
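+// Each platform fixes an address-space layout at compile time and defines
+// direct mappings between application memory and its shadow/metainfo
+// (MemToShadow/MemToMeta below).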
+//===----------------------------------------------------------------------===// + +#ifndef TSAN_PLATFORM_H +#define TSAN_PLATFORM_H + +#if !defined(__LP64__) && !defined(_WIN64) +# error "Only 64-bit is supported" +#endif + +#include "tsan_defs.h" +#include "tsan_trace.h" + +namespace __tsan { + +#if !defined(SANITIZER_GO) + +/* +C/C++ on linux and freebsd +0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings +0100 0000 0000 - 0200 0000 0000: - +0200 0000 0000 - 1000 0000 0000: shadow +1000 0000 0000 - 3000 0000 0000: - +3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) +4000 0000 0000 - 6000 0000 0000: - +6000 0000 0000 - 6200 0000 0000: traces +6200 0000 0000 - 7d00 0000 0000: - +7d00 0000 0000 - 7e00 0000 0000: heap +7e00 0000 0000 - 7e80 0000 0000: - +7e80 0000 0000 - 8000 0000 0000: modules and main thread stack +*/ + +const uptr kMetaShadowBeg = 0x300000000000ull; +const uptr kMetaShadowEnd = 0x400000000000ull; +const uptr kTraceMemBeg = 0x600000000000ull; +const uptr kTraceMemEnd = 0x620000000000ull; +const uptr kShadowBeg = 0x020000000000ull; +const uptr kShadowEnd = 0x100000000000ull; +const uptr kHeapMemBeg = 0x7d0000000000ull; +const uptr kHeapMemEnd = 0x7e0000000000ull; +const uptr kLoAppMemBeg = 0x000000001000ull; +const uptr kLoAppMemEnd = 0x010000000000ull; +const uptr kHiAppMemBeg = 0x7e8000000000ull; +const uptr kHiAppMemEnd = 0x800000000000ull; +const uptr kAppMemMsk = 0x7c0000000000ull; +const uptr kAppMemXor = 0x020000000000ull; + +ALWAYS_INLINE +bool IsAppMem(uptr mem) { + return (mem >= kHeapMemBeg && mem < kHeapMemEnd) || + (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) || + (mem >= kHiAppMemBeg && mem < kHiAppMemEnd); +} + +ALWAYS_INLINE +bool IsShadowMem(uptr mem) { + return mem >= kShadowBeg && mem <= kShadowEnd; +} + +ALWAYS_INLINE +bool IsMetaMem(uptr mem) { + return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd; +} + +ALWAYS_INLINE +uptr MemToShadow(uptr x) { + DCHECK(IsAppMem(x)); + return (((x) & ~(kAppMemMsk | (kShadowCell - 1))) + ^ kAppMemXor) * kShadowCnt; +} + +ALWAYS_INLINE +u32 *MemToMeta(uptr x) { + DCHECK(IsAppMem(x)); + return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1))) + ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg); +} + +ALWAYS_INLINE +uptr ShadowToMem(uptr s) { + CHECK(IsShadowMem(s)); + if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1)) + return (s / kShadowCnt) ^ kAppMemXor; + else + return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk; +} + +static USED uptr UserRegions[] = { + kLoAppMemBeg, kLoAppMemEnd, + kHiAppMemBeg, kHiAppMemEnd, + kHeapMemBeg, kHeapMemEnd, +}; + +#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS + +/* Go on linux, darwin and freebsd +0000 0000 1000 - 0000 1000 0000: executable +0000 1000 0000 - 00c0 0000 0000: - +00c0 0000 0000 - 00e0 0000 0000: heap +00e0 0000 0000 - 2000 0000 0000: - +2000 0000 0000 - 2380 0000 0000: shadow +2380 0000 0000 - 3000 0000 0000: - +3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) +4000 0000 0000 - 6000 0000 0000: - +6000 0000 0000 - 6200 0000 0000: traces +6200 0000 0000 - 8000 0000 0000: - +*/ + +const uptr kMetaShadowBeg = 0x300000000000ull; +const uptr kMetaShadowEnd = 0x400000000000ull; +const uptr kTraceMemBeg = 0x600000000000ull; +const uptr kTraceMemEnd = 0x620000000000ull; +const uptr kShadowBeg = 0x200000000000ull; +const uptr kShadowEnd = 0x238000000000ull; +const uptr kAppMemBeg = 0x000000001000ull; +const uptr kAppMemEnd = 0x00e000000000ull; + 
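+// Worked example for the mapping below (a sketch, assuming the default
+// kShadowCell = 8 and kShadowCnt = 4 from tsan_defs.h): the heap address
+// 0x00c000000008 is already 8-byte aligned, so MemToShadow(x) yields
+// (0x00c000000008 * 4) | 0x200000000000 = 0x230000000020.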
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+  return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+  return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+  return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+  DCHECK(IsAppMem(x));
+  return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+  DCHECK(IsAppMem(x));
+  return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+      kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+  CHECK(IsShadowMem(s));
+  return (s & ~kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+  kAppMemBeg, kAppMemEnd,
+};
+
+#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0380 0000 0000: shadow
+0380 0000 0000 - 0560 0000 0000: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
+
+const uptr kMetaShadowBeg = 0x076000000000ull;
+const uptr kMetaShadowEnd = 0x07d000000000ull;
+const uptr kTraceMemBeg   = 0x056000000000ull;
+const uptr kTraceMemEnd   = 0x076000000000ull;
+const uptr kShadowBeg     = 0x010000000000ull;
+const uptr kShadowEnd     = 0x038000000000ull;
+const uptr kAppMemBeg     = 0x000000001000ull;
+const uptr kAppMemEnd     = 0x00e000000000ull;
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+  return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+  return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+  return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+  DCHECK(IsAppMem(x));
+  return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+  DCHECK(IsAppMem(x));
+  return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+      kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+  CHECK(IsShadowMem(s));
+  // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
+  return (s & ~kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+  kAppMemBeg, kAppMemEnd,
+};
+
+#else
+# error "Unknown platform"
+#endif
+
+// The additional page is to catch shadow stack overflow as a page fault.
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+    + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
+
+uptr ALWAYS_INLINE GetThreadTrace(int tid) {
+  uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+  DCHECK_LT(p, kTraceMemEnd);
+  return p;
+}
+
+uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
+  uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
+      + kTraceSize * sizeof(Event);
+  DCHECK_LT(p, kTraceMemEnd);
+  return p;
+}
+
+void InitializePlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
+
+// Says whether the addr relates to a global var.
+// Guesses with high probability; may yield both false positives and negatives.
+bool IsGlobalVar(uptr addr); +int ExtractResolvFDs(void *state, int *fds, int nfd); +int ExtractRecvmsgFDs(void *msg, int *fds, int nfd); + +int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, + void *abstime), void *c, void *m, void *abstime, + void(*cleanup)(void *arg), void *arg); + +} // namespace __tsan + +#endif // TSAN_PLATFORM_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc new file mode 100644 index 0000000..4dcfa55 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc @@ -0,0 +1,404 @@ +//===-- tsan_platform_linux.cc --------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Linux- and FreeBSD-specific code. +//===----------------------------------------------------------------------===// + + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX || SANITIZER_FREEBSD + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_procmaps.h" +#include "sanitizer_common/sanitizer_stoptheworld.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "tsan_platform.h" +#include "tsan_rtl.h" +#include "tsan_flags.h" + +#include <fcntl.h> +#include <pthread.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdarg.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <sys/socket.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/resource.h> +#include <sys/stat.h> +#include <unistd.h> +#include <errno.h> +#include <sched.h> +#include <dlfcn.h> +#if SANITIZER_LINUX +#define __need_res_state +#include <resolv.h> +#endif + +#ifdef sa_handler +# undef sa_handler +#endif + +#ifdef sa_sigaction +# undef sa_sigaction +#endif + +#if SANITIZER_FREEBSD +extern "C" void *__libc_stack_end; +void *__libc_stack_end = 0; +#endif + +namespace __tsan { + +static uptr g_data_start; +static uptr g_data_end; + +const uptr kPageSize = 4096; + +enum { + MemTotal = 0, + MemShadow = 1, + MemMeta = 2, + MemFile = 3, + MemMmap = 4, + MemTrace = 5, + MemHeap = 6, + MemOther = 7, + MemCount = 8, +}; + +void FillProfileCallback(uptr p, uptr rss, bool file, + uptr *mem, uptr stats_size) { + mem[MemTotal] += rss; + if (p >= kShadowBeg && p < kShadowEnd) + mem[MemShadow] += rss; + else if (p >= kMetaShadowBeg && p < kMetaShadowEnd) + mem[MemMeta] += rss; +#ifndef SANITIZER_GO + else if (p >= kHeapMemBeg && p < kHeapMemEnd) + mem[MemHeap] += rss; + else if (p >= kLoAppMemBeg && p < kLoAppMemEnd) + mem[file ? MemFile : MemMmap] += rss; + else if (p >= kHiAppMemBeg && p < kHiAppMemEnd) + mem[file ? MemFile : MemMmap] += rss; +#else + else if (p >= kAppMemBeg && p < kAppMemEnd) + mem[file ? 
MemFile : MemMmap] += rss; +#endif + else if (p >= kTraceMemBeg && p < kTraceMemEnd) + mem[MemTrace] += rss; + else + mem[MemOther] += rss; +} + +void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { + uptr mem[MemCount] = {}; + __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7); + StackDepotStats *stacks = StackDepotGetStats(); + internal_snprintf(buf, buf_size, + "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd" + " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n", + mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20, + mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20, + mem[MemHeap] >> 20, mem[MemOther] >> 20, + stacks->allocated >> 20, stacks->n_uniq_ids, + nlive, nthread); +} + +#if SANITIZER_LINUX +void FlushShadowMemoryCallback( + const SuspendedThreadsList &suspended_threads_list, + void *argument) { + FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg); +} +#endif + +void FlushShadowMemory() { +#if SANITIZER_LINUX + StopTheWorld(FlushShadowMemoryCallback, 0); +#endif +} + +#ifndef SANITIZER_GO +static void ProtectRange(uptr beg, uptr end) { + CHECK_LE(beg, end); + if (beg == end) + return; + if (beg != (uptr)Mprotect(beg, end - beg)) { + Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end); + Printf("FATAL: Make sure you are not using unlimited stack\n"); + Die(); + } +} + +// Mark shadow for .rodata sections with the special kShadowRodata marker. +// Accesses to .rodata can't race, so this saves time, memory and trace space. +static void MapRodata() { + // First create temp file. + const char *tmpdir = GetEnv("TMPDIR"); + if (tmpdir == 0) + tmpdir = GetEnv("TEST_TMPDIR"); +#ifdef P_tmpdir + if (tmpdir == 0) + tmpdir = P_tmpdir; +#endif + if (tmpdir == 0) + return; + char name[256]; + internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d", + tmpdir, (int)internal_getpid()); + uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600); + if (internal_iserror(openrv)) + return; + internal_unlink(name); // Unlink it now, so that we can reuse the buffer. + fd_t fd = openrv; + // Fill the file with kShadowRodata. + const uptr kMarkerSize = 512 * 1024 / sizeof(u64); + InternalScopedBuffer<u64> marker(kMarkerSize); + // volatile to prevent insertion of memset + for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++) + *p = kShadowRodata; + internal_write(fd, marker.data(), marker.size()); + // Map the file into memory. + uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, fd, 0); + if (internal_iserror(page)) { + internal_close(fd); + return; + } + // Map the file into shadow of .rodata sections. + MemoryMappingLayout proc_maps(/*cache_enabled*/true); + uptr start, end, offset, prot; + // Reusing the buffer 'name'. + while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) { + if (name[0] != 0 && name[0] != '[' + && (prot & MemoryMappingLayout::kProtectionRead) + && (prot & MemoryMappingLayout::kProtectionExecute) + && !(prot & MemoryMappingLayout::kProtectionWrite) + && IsAppMem(start)) { + // Assume it's .rodata + char *shadow_start = (char*)MemToShadow(start); + char *shadow_end = (char*)MemToShadow(end); + for (char *p = shadow_start; p < shadow_end; p += marker.size()) { + internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p), + PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0); + } + } + } + internal_close(fd); +} + +void InitializeShadowMemory() { + // Map memory shadow. 
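+  // The fixed mapping must land exactly at kShadowBeg; if something is
+  // already mapped there (typically because the binary is not PIE), the
+  // checks below fail and we die with a -fPIE/-pie hint.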
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg, + kShadowEnd - kShadowBeg); + if (shadow != kShadowBeg) { + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to link with -pie (%p, %p).\n", shadow, kShadowBeg); + Die(); + } + // This memory range is used for thread stacks and large user mmaps. + // Frequently a thread uses only a small part of stack and similarly + // a program uses a small part of large mmap. On some programs + // we see 20% memory usage reduction without huge pages for this range. +#ifdef MADV_NOHUGEPAGE + madvise((void*)MemToShadow(0x7f0000000000ULL), + 0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE); +#endif + DPrintf("memory shadow: %zx-%zx (%zuGB)\n", + kShadowBeg, kShadowEnd, + (kShadowEnd - kShadowBeg) >> 30); + + // Map meta shadow. + uptr meta_size = kMetaShadowEnd - kMetaShadowBeg; + uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size); + if (meta != kMetaShadowBeg) { + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg); + Die(); + } + DPrintf("meta shadow: %zx-%zx (%zuGB)\n", + meta, meta + meta_size, meta_size >> 30); + + MapRodata(); +} + +static void InitDataSeg() { + MemoryMappingLayout proc_maps(true); + uptr start, end, offset; + char name[128]; +#if SANITIZER_FREEBSD + // On FreeBSD BSS is usually the last block allocated within the + // low range and heap is the last block allocated within the range + // 0x800000000-0x8ffffffff. + while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), + /*protection*/ 0)) { + DPrintf("%p-%p %p %s\n", start, end, offset, name); + if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 && + name[0] == '\0') { + g_data_start = start; + g_data_end = end; + } + } +#else + bool prev_is_data = false; + while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), + /*protection*/ 0)) { + DPrintf("%p-%p %p %s\n", start, end, offset, name); + bool is_data = offset != 0 && name[0] != 0; + // BSS may get merged with [heap] in /proc/self/maps. This is not very + // reliable. + bool is_bss = offset == 0 && + (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data; + if (g_data_start == 0 && is_data) + g_data_start = start; + if (is_bss) + g_data_end = end; + prev_is_data = is_data; + } +#endif + DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end); + CHECK_LT(g_data_start, g_data_end); + CHECK_GE((uptr)&g_data_start, g_data_start); + CHECK_LT((uptr)&g_data_start, g_data_end); +} + +static void CheckAndProtect() { + // Ensure that the binary is indeed compiled with -pie. + MemoryMappingLayout proc_maps(true); + uptr p, end; + while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) { + if (IsAppMem(p)) + continue; + if (p >= kHeapMemEnd && + p < kHeapMemEnd + PrimaryAllocator::AdditionalSize()) + continue; + if (p >= 0xf000000000000000ull) // vdso + break; + Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end); + Die(); + } + + ProtectRange(kLoAppMemEnd, kShadowBeg); + ProtectRange(kShadowEnd, kMetaShadowBeg); + ProtectRange(kMetaShadowEnd, kTraceMemBeg); + // Memory for traces is mapped lazily in MapThreadTrace. + // Protect the whole range for now, so that user does not map something here. 
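+  // MapThreadTrace later remaps per-thread trace parts on top of this
+  // protected region as thread contexts are created.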
+ ProtectRange(kTraceMemBeg, kTraceMemEnd); + ProtectRange(kTraceMemEnd, kHeapMemBeg); + ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg); +} +#endif // #ifndef SANITIZER_GO + +void InitializePlatform() { + DisableCoreDumperIfNecessary(); + + // Go maps shadow memory lazily and works fine with limited address space. + // Unlimited stack is not a problem as well, because the executable + // is not compiled with -pie. + if (kCppMode) { + bool reexec = false; + // TSan doesn't play well with unlimited stack size (as stack + // overlaps with shadow memory). If we detect unlimited stack size, + // we re-exec the program with limited stack size as a best effort. + if (StackSizeIsUnlimited()) { + const uptr kMaxStackSize = 32 * 1024 * 1024; + VReport(1, "Program is run with unlimited stack size, which wouldn't " + "work with ThreadSanitizer.\n" + "Re-execing with stack size limited to %zd bytes.\n", + kMaxStackSize); + SetStackSizeLimitInBytes(kMaxStackSize); + reexec = true; + } + + if (!AddressSpaceIsUnlimited()) { + Report("WARNING: Program is run with limited virtual address space," + " which wouldn't work with ThreadSanitizer.\n"); + Report("Re-execing with unlimited virtual address space.\n"); + SetAddressSpaceUnlimited(); + reexec = true; + } + if (reexec) + ReExec(); + } + +#ifndef SANITIZER_GO + CheckAndProtect(); + InitTlsSize(); + InitDataSeg(); +#endif +} + +bool IsGlobalVar(uptr addr) { + return g_data_start && addr >= g_data_start && addr < g_data_end; +} + +#ifndef SANITIZER_GO +// Extract file descriptors passed to glibc internal __res_iclose function. +// This is required to properly "close" the fds, because we do not see internal +// closes within glibc. The code is a pure hack. +int ExtractResolvFDs(void *state, int *fds, int nfd) { +#if SANITIZER_LINUX + int cnt = 0; + __res_state *statp = (__res_state*)state; + for (int i = 0; i < MAXNS && cnt < nfd; i++) { + if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1) + fds[cnt++] = statp->_u._ext.nssocks[i]; + } + return cnt; +#else + return 0; +#endif +} + +// Extract file descriptors passed via UNIX domain sockets. +// This is requried to properly handle "open" of these fds. +// see 'man recvmsg' and 'man 3 cmsg'. +int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) { + int res = 0; + msghdr *msg = (msghdr*)msgp; + struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); + for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) + continue; + int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]); + for (int i = 0; i < n; i++) { + fds[res++] = ((int*)CMSG_DATA(cmsg))[i]; + if (res == nfd) + return res; + } + } + return res; +} + +int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, + void *abstime), void *c, void *m, void *abstime, + void(*cleanup)(void *arg), void *arg) { + // pthread_cleanup_push/pop are hardcore macros mess. + // We can't intercept nor call them w/o including pthread.h. 
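+  // That is why this helper lives in the platform-specific file, which
+  // can include pthread.h, and interceptors call it through this wrapper.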
+ int res; + pthread_cleanup_push(cleanup, arg); + res = fn(c, m, abstime); + pthread_cleanup_pop(0); + return res; +} +#endif + +} // namespace __tsan + +#endif // SANITIZER_LINUX || SANITIZER_FREEBSD diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cc new file mode 100644 index 0000000..15b9f9d --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cc @@ -0,0 +1,92 @@ +//===-- tsan_platform_mac.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Mac-specific code. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_procmaps.h" +#include "tsan_platform.h" +#include "tsan_rtl.h" +#include "tsan_flags.h" + +#include <pthread.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdarg.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/resource.h> +#include <sys/stat.h> +#include <unistd.h> +#include <errno.h> +#include <sched.h> + +namespace __tsan { + +uptr GetShadowMemoryConsumption() { + return 0; +} + +void FlushShadowMemory() { +} + +void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { +} + +#ifndef SANITIZER_GO +void InitializeShadowMemory() { + uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg, + kShadowEnd - kShadowBeg); + if (shadow != kShadowBeg) { + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to link with -pie.\n"); + Die(); + } + DPrintf("kShadow %zx-%zx (%zuGB)\n", + kShadowBeg, kShadowEnd, + (kShadowEnd - kShadowBeg) >> 30); + DPrintf("kAppMem %zx-%zx (%zuGB)\n", + kAppMemBeg, kAppMemEnd, + (kAppMemEnd - kAppMemBeg) >> 30); +} +#endif + +void InitializePlatform() { + DisableCoreDumperIfNecessary(); +} + +#ifndef SANITIZER_GO +int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, + void *abstime), void *c, void *m, void *abstime, + void(*cleanup)(void *arg), void *arg) { + // pthread_cleanup_push/pop are hardcore macros mess. + // We can't intercept nor call them w/o including pthread.h. + int res; + pthread_cleanup_push(cleanup, arg); + res = fn(c, m, abstime); + pthread_cleanup_pop(0); + return res; +} +#endif + +} // namespace __tsan + +#endif // SANITIZER_MAC diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cc new file mode 100644 index 0000000..cfbe77d --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cc @@ -0,0 +1,39 @@ +//===-- tsan_platform_windows.cc ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+// +// Windows-specific code. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS + +#include "tsan_platform.h" + +#include <stdlib.h> + +namespace __tsan { + +uptr GetShadowMemoryConsumption() { + return 0; +} + +void FlushShadowMemory() { +} + +void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { +} + +void InitializePlatform() { +} + +} // namespace __tsan + +#endif // SANITIZER_WINDOWS diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_report.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.cc new file mode 100644 index 0000000..c22f12a --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.cc @@ -0,0 +1,412 @@ +//===-- tsan_report.cc ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#include "tsan_report.h" +#include "tsan_platform.h" +#include "tsan_rtl.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_report_decorator.h" +#include "sanitizer_common/sanitizer_stacktrace_printer.h" + +namespace __tsan { + +ReportStack::ReportStack() : frames(nullptr), suppressable(false) {} + +ReportStack *ReportStack::New() { + void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack)); + return new(mem) ReportStack(); +} + +ReportLocation::ReportLocation(ReportLocationType type) + : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0), + fd(0), suppressable(false), stack(nullptr) {} + +ReportLocation *ReportLocation::New(ReportLocationType type) { + void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation)); + return new(mem) ReportLocation(type); +} + +class Decorator: public __sanitizer::SanitizerCommonDecorator { + public: + Decorator() : SanitizerCommonDecorator() { } + const char *Warning() { return Red(); } + const char *EndWarning() { return Default(); } + const char *Access() { return Blue(); } + const char *EndAccess() { return Default(); } + const char *ThreadDescription() { return Cyan(); } + const char *EndThreadDescription() { return Default(); } + const char *Location() { return Green(); } + const char *EndLocation() { return Default(); } + const char *Sleep() { return Yellow(); } + const char *EndSleep() { return Default(); } + const char *Mutex() { return Magenta(); } + const char *EndMutex() { return Default(); } +}; + +ReportDesc::ReportDesc() + : stacks(MBlockReportStack) + , mops(MBlockReportMop) + , locs(MBlockReportLoc) + , mutexes(MBlockReportMutex) + , threads(MBlockReportThread) + , unique_tids(MBlockReportThread) + , sleep() + , count() { +} + +ReportMop::ReportMop() + : mset(MBlockReportMutex) { +} + +ReportDesc::~ReportDesc() { + // FIXME(dvyukov): it must be leaking a lot of memory. 
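+  // The vectors release their own storage, but the ReportStack/ReportMop/
+  // ReportLocation objects they point to come from internal_alloc and are
+  // never freed here.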
+} + +#ifndef SANITIZER_GO + +const int kThreadBufSize = 32; +const char *thread_name(char *buf, int tid) { + if (tid == 0) + return "main thread"; + internal_snprintf(buf, kThreadBufSize, "thread T%d", tid); + return buf; +} + +static const char *ReportTypeString(ReportType typ) { + if (typ == ReportTypeRace) + return "data race"; + if (typ == ReportTypeVptrRace) + return "data race on vptr (ctor/dtor vs virtual call)"; + if (typ == ReportTypeUseAfterFree) + return "heap-use-after-free"; + if (typ == ReportTypeVptrUseAfterFree) + return "heap-use-after-free (virtual call vs free)"; + if (typ == ReportTypeThreadLeak) + return "thread leak"; + if (typ == ReportTypeMutexDestroyLocked) + return "destroy of a locked mutex"; + if (typ == ReportTypeMutexDoubleLock) + return "double lock of a mutex"; + if (typ == ReportTypeMutexBadUnlock) + return "unlock of an unlocked mutex (or by a wrong thread)"; + if (typ == ReportTypeMutexBadReadLock) + return "read lock of a write locked mutex"; + if (typ == ReportTypeMutexBadReadUnlock) + return "read unlock of a write locked mutex"; + if (typ == ReportTypeSignalUnsafe) + return "signal-unsafe call inside of a signal"; + if (typ == ReportTypeErrnoInSignal) + return "signal handler spoils errno"; + if (typ == ReportTypeDeadlock) + return "lock-order-inversion (potential deadlock)"; + return ""; +} + +void PrintStack(const ReportStack *ent) { + if (ent == 0 || ent->frames == 0) { + Printf(" [failed to restore the stack]\n\n"); + return; + } + SymbolizedStack *frame = ent->frames; + for (int i = 0; frame && frame->info.address; frame = frame->next, i++) { + InternalScopedString res(2 * GetPageSizeCached()); + RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info, + common_flags()->strip_path_prefix, "__interceptor_"); + Printf("%s\n", res.data()); + } + Printf("\n"); +} + +static void PrintMutexSet(Vector<ReportMopMutex> const& mset) { + for (uptr i = 0; i < mset.Size(); i++) { + if (i == 0) + Printf(" (mutexes:"); + const ReportMopMutex m = mset[i]; + Printf(" %s M%llu", m.write ? "write" : "read", m.id); + Printf(i == mset.Size() - 1 ? ")" : ","); + } +} + +static const char *MopDesc(bool first, bool write, bool atomic) { + return atomic ? (first ? (write ? "Atomic write" : "Atomic read") + : (write ? "Previous atomic write" : "Previous atomic read")) + : (first ? (write ? "Write" : "Read") + : (write ? 
"Previous write" : "Previous read")); +} + +static void PrintMop(const ReportMop *mop, bool first) { + Decorator d; + char thrbuf[kThreadBufSize]; + Printf("%s", d.Access()); + Printf(" %s of size %d at %p by %s", + MopDesc(first, mop->write, mop->atomic), + mop->size, (void*)mop->addr, + thread_name(thrbuf, mop->tid)); + PrintMutexSet(mop->mset); + Printf(":\n"); + Printf("%s", d.EndAccess()); + PrintStack(mop->stack); +} + +static void PrintLocation(const ReportLocation *loc) { + Decorator d; + char thrbuf[kThreadBufSize]; + bool print_stack = false; + Printf("%s", d.Location()); + if (loc->type == ReportLocationGlobal) { + const DataInfo &global = loc->global; + Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n", + global.name, global.size, global.start, + StripModuleName(global.module), global.module_offset); + } else if (loc->type == ReportLocationHeap) { + char thrbuf[kThreadBufSize]; + Printf(" Location is heap block of size %zu at %p allocated by %s:\n", + loc->heap_chunk_size, loc->heap_chunk_start, + thread_name(thrbuf, loc->tid)); + print_stack = true; + } else if (loc->type == ReportLocationStack) { + Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid)); + } else if (loc->type == ReportLocationTLS) { + Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid)); + } else if (loc->type == ReportLocationFD) { + Printf(" Location is file descriptor %d created by %s at:\n", + loc->fd, thread_name(thrbuf, loc->tid)); + print_stack = true; + } + Printf("%s", d.EndLocation()); + if (print_stack) + PrintStack(loc->stack); +} + +static void PrintMutexShort(const ReportMutex *rm, const char *after) { + Decorator d; + Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after); +} + +static void PrintMutexShortWithAddress(const ReportMutex *rm, + const char *after) { + Decorator d; + Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after); +} + +static void PrintMutex(const ReportMutex *rm) { + Decorator d; + if (rm->destroyed) { + Printf("%s", d.Mutex()); + Printf(" Mutex M%llu is already destroyed.\n\n", rm->id); + Printf("%s", d.EndMutex()); + } else { + Printf("%s", d.Mutex()); + Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr); + Printf("%s", d.EndMutex()); + PrintStack(rm->stack); + } +} + +static void PrintThread(const ReportThread *rt) { + Decorator d; + if (rt->id == 0) // Little sense in describing the main thread. + return; + Printf("%s", d.ThreadDescription()); + Printf(" Thread T%d", rt->id); + if (rt->name && rt->name[0] != '\0') + Printf(" '%s'", rt->name); + char thrbuf[kThreadBufSize]; + Printf(" (tid=%zu, %s) created by %s", + rt->pid, rt->running ? 
"running" : "finished", + thread_name(thrbuf, rt->parent_tid)); + if (rt->stack) + Printf(" at:"); + Printf("\n"); + Printf("%s", d.EndThreadDescription()); + PrintStack(rt->stack); +} + +static void PrintSleep(const ReportStack *s) { + Decorator d; + Printf("%s", d.Sleep()); + Printf(" As if synchronized via sleep:\n"); + Printf("%s", d.EndSleep()); + PrintStack(s); +} + +static ReportStack *ChooseSummaryStack(const ReportDesc *rep) { + if (rep->mops.Size()) + return rep->mops[0]->stack; + if (rep->stacks.Size()) + return rep->stacks[0]; + if (rep->mutexes.Size()) + return rep->mutexes[0]->stack; + if (rep->threads.Size()) + return rep->threads[0]->stack; + return 0; +} + +static bool FrameIsInternal(const SymbolizedStack *frame) { + if (frame == 0) + return false; + const char *file = frame->info.file; + return file != 0 && + (internal_strstr(file, "tsan_interceptors.cc") || + internal_strstr(file, "sanitizer_common_interceptors.inc") || + internal_strstr(file, "tsan_interface_")); +} + +static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) { + while (FrameIsInternal(frames) && frames->next) + frames = frames->next; + return frames; +} + +void PrintReport(const ReportDesc *rep) { + Decorator d; + Printf("==================\n"); + const char *rep_typ_str = ReportTypeString(rep->typ); + Printf("%s", d.Warning()); + Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str, + (int)internal_getpid()); + Printf("%s", d.EndWarning()); + + if (rep->typ == ReportTypeDeadlock) { + char thrbuf[kThreadBufSize]; + Printf(" Cycle in lock order graph: "); + for (uptr i = 0; i < rep->mutexes.Size(); i++) + PrintMutexShortWithAddress(rep->mutexes[i], " => "); + PrintMutexShort(rep->mutexes[0], "\n\n"); + CHECK_GT(rep->mutexes.Size(), 0U); + CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 
2 : 1), + rep->stacks.Size()); + for (uptr i = 0; i < rep->mutexes.Size(); i++) { + Printf(" Mutex "); + PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()], + " acquired here while holding mutex "); + PrintMutexShort(rep->mutexes[i], " in "); + Printf("%s", d.ThreadDescription()); + Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i])); + Printf("%s", d.EndThreadDescription()); + if (flags()->second_deadlock_stack) { + PrintStack(rep->stacks[2*i]); + Printf(" Mutex "); + PrintMutexShort(rep->mutexes[i], + " previously acquired by the same thread here:\n"); + PrintStack(rep->stacks[2*i+1]); + } else { + PrintStack(rep->stacks[i]); + if (i == 0) + Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 " + "to get more informative warning message\n\n"); + } + } + } else { + for (uptr i = 0; i < rep->stacks.Size(); i++) { + if (i) + Printf(" and:\n"); + PrintStack(rep->stacks[i]); + } + } + + for (uptr i = 0; i < rep->mops.Size(); i++) + PrintMop(rep->mops[i], i == 0); + + if (rep->sleep) + PrintSleep(rep->sleep); + + for (uptr i = 0; i < rep->locs.Size(); i++) + PrintLocation(rep->locs[i]); + + if (rep->typ != ReportTypeDeadlock) { + for (uptr i = 0; i < rep->mutexes.Size(); i++) + PrintMutex(rep->mutexes[i]); + } + + for (uptr i = 0; i < rep->threads.Size(); i++) + PrintThread(rep->threads[i]); + + if (rep->typ == ReportTypeThreadLeak && rep->count > 1) + Printf(" And %d more similar thread leaks.\n\n", rep->count - 1); + + if (ReportStack *stack = ChooseSummaryStack(rep)) { + if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames)) { + const AddressInfo &info = frame->info; + ReportErrorSummary(rep_typ_str, info.file, info.line, info.function); + } + } + + Printf("==================\n"); +} + +#else // #ifndef SANITIZER_GO + +const int kMainThreadId = 1; + +void PrintStack(const ReportStack *ent) { + if (ent == 0 || ent->frames == 0) { + Printf(" [failed to restore the stack]\n"); + return; + } + SymbolizedStack *frame = ent->frames; + for (int i = 0; frame; frame = frame->next, i++) { + const AddressInfo &info = frame->info; + Printf(" %s()\n %s:%d +0x%zx\n", info.function, info.file, info.line, + (void *)info.module_offset); + } +} + +static void PrintMop(const ReportMop *mop, bool first) { + Printf("\n"); + Printf("%s by ", + (first ? (mop->write ? "Write" : "Read") + : (mop->write ? "Previous write" : "Previous read"))); + if (mop->tid == kMainThreadId) + Printf("main goroutine:\n"); + else + Printf("goroutine %d:\n", mop->tid); + PrintStack(mop->stack); +} + +static void PrintThread(const ReportThread *rt) { + if (rt->id == kMainThreadId) + return; + Printf("\n"); + Printf("Goroutine %d (%s) created at:\n", + rt->id, rt->running ? 
"running" : "finished"); + PrintStack(rt->stack); +} + +void PrintReport(const ReportDesc *rep) { + Printf("==================\n"); + if (rep->typ == ReportTypeRace) { + Printf("WARNING: DATA RACE"); + for (uptr i = 0; i < rep->mops.Size(); i++) + PrintMop(rep->mops[i], i == 0); + for (uptr i = 0; i < rep->threads.Size(); i++) + PrintThread(rep->threads[i]); + } else if (rep->typ == ReportTypeDeadlock) { + Printf("WARNING: DEADLOCK\n"); + for (uptr i = 0; i < rep->mutexes.Size(); i++) { + Printf("Goroutine %d lock mutex %d while holding mutex %d:\n", + 999, rep->mutexes[i]->id, + rep->mutexes[(i+1) % rep->mutexes.Size()]->id); + PrintStack(rep->stacks[2*i]); + Printf("\n"); + Printf("Mutex %d was previously locked here:\n", + rep->mutexes[(i+1) % rep->mutexes.Size()]->id); + PrintStack(rep->stacks[2*i + 1]); + Printf("\n"); + } + } + Printf("==================\n"); +} + +#endif + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_report.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.h new file mode 100644 index 0000000..3e344a0 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_report.h @@ -0,0 +1,129 @@ +//===-- tsan_report.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_REPORT_H +#define TSAN_REPORT_H + +#include "sanitizer_common/sanitizer_symbolizer.h" +#include "tsan_defs.h" +#include "tsan_vector.h" + +namespace __tsan { + +enum ReportType { + ReportTypeRace, + ReportTypeVptrRace, + ReportTypeUseAfterFree, + ReportTypeVptrUseAfterFree, + ReportTypeThreadLeak, + ReportTypeMutexDestroyLocked, + ReportTypeMutexDoubleLock, + ReportTypeMutexBadUnlock, + ReportTypeMutexBadReadLock, + ReportTypeMutexBadReadUnlock, + ReportTypeSignalUnsafe, + ReportTypeErrnoInSignal, + ReportTypeDeadlock +}; + +struct ReportStack { + SymbolizedStack *frames; + bool suppressable; + static ReportStack *New(); + + private: + ReportStack(); +}; + +struct ReportMopMutex { + u64 id; + bool write; +}; + +struct ReportMop { + int tid; + uptr addr; + int size; + bool write; + bool atomic; + Vector<ReportMopMutex> mset; + ReportStack *stack; + + ReportMop(); +}; + +enum ReportLocationType { + ReportLocationGlobal, + ReportLocationHeap, + ReportLocationStack, + ReportLocationTLS, + ReportLocationFD +}; + +struct ReportLocation { + ReportLocationType type; + DataInfo global; + uptr heap_chunk_start; + uptr heap_chunk_size; + int tid; + int fd; + bool suppressable; + ReportStack *stack; + + static ReportLocation *New(ReportLocationType type); + private: + explicit ReportLocation(ReportLocationType type); +}; + +struct ReportThread { + int id; + uptr pid; + bool running; + char *name; + int parent_tid; + ReportStack *stack; +}; + +struct ReportMutex { + u64 id; + uptr addr; + bool destroyed; + ReportStack *stack; +}; + +class ReportDesc { + public: + ReportType typ; + Vector<ReportStack*> stacks; + Vector<ReportMop*> mops; + Vector<ReportLocation*> locs; + Vector<ReportMutex*> mutexes; + Vector<ReportThread*> threads; + Vector<int> unique_tids; + ReportStack *sleep; + int count; + + ReportDesc(); + ~ReportDesc(); + + private: + ReportDesc(const ReportDesc&); + void 
operator = (const ReportDesc&); +}; + +// Format and output the report to the console/log. No additional logic. +void PrintReport(const ReportDesc *rep); +void PrintStack(const ReportStack *stack); + +} // namespace __tsan + +#endif // TSAN_REPORT_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.cc new file mode 100644 index 0000000..7cb7008 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.cc @@ -0,0 +1,1020 @@ +//===-- tsan_rtl.cc -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Main file (entry points) for the TSan run-time. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_symbolizer.h" +#include "tsan_defs.h" +#include "tsan_platform.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" +#include "tsan_suppressions.h" +#include "tsan_symbolize.h" + +#ifdef __SSE3__ +// <emmintrin.h> transitively includes <stdlib.h>, +// and it's prohibited to include std headers into tsan runtime. +// So we do this dirty trick. +#define _MM_MALLOC_H_INCLUDED +#define __MM_MALLOC_H +#include <emmintrin.h> +typedef __m128i m128; +#endif + +volatile int __tsan_resumed = 0; + +extern "C" void __tsan_resume() { + __tsan_resumed = 1; +} + +namespace __tsan { + +#ifndef SANITIZER_GO +THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64); +#endif +static char ctx_placeholder[sizeof(Context)] ALIGNED(64); +Context *ctx; + +// Can be overriden by a front-end. +#ifdef TSAN_EXTERNAL_HOOKS +bool OnFinalize(bool failed); +void OnInitialize(); +#else +SANITIZER_INTERFACE_ATTRIBUTE +bool WEAK OnFinalize(bool failed) { + return failed; +} +SANITIZER_INTERFACE_ATTRIBUTE +void WEAK OnInitialize() {} +#endif + +static char thread_registry_placeholder[sizeof(ThreadRegistry)]; + +static ThreadContextBase *CreateThreadContext(u32 tid) { + // Map thread trace when context is created. + MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event)); + MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace)); + new(ThreadTrace(tid)) Trace(); + void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); + return new(mem) ThreadContext(tid); +} + +#ifndef SANITIZER_GO +static const u32 kThreadQuarantineSize = 16; +#else +static const u32 kThreadQuarantineSize = 64; +#endif + +Context::Context() + : initialized() + , report_mtx(MutexTypeReport, StatMtxReport) + , nreported() + , nmissed_expected() + , thread_registry(new(thread_registry_placeholder) ThreadRegistry( + CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)) + , racy_stacks(MBlockRacyStacks) + , racy_addresses(MBlockRacyAddresses) + , fired_suppressions(8) { +} + +// The objects are allocated in TLS, so one may rely on zero-initialization. 
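+// For the C++ runtime that TLS slot is cur_thread_placeholder (above),
+// so members skipped in the initializer list below start out as zero.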
+ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, + unsigned reuse_count, + uptr stk_addr, uptr stk_size, + uptr tls_addr, uptr tls_size) + : fast_state(tid, epoch) + // Do not touch these, rely on zero initialization, + // they may be accessed before the ctor. + // , ignore_reads_and_writes() + // , ignore_interceptors() + , clock(tid, reuse_count) +#ifndef SANITIZER_GO + , jmp_bufs(MBlockJmpBuf) +#endif + , tid(tid) + , unique_id(unique_id) + , stk_addr(stk_addr) + , stk_size(stk_size) + , tls_addr(tls_addr) + , tls_size(tls_size) +#ifndef SANITIZER_GO + , last_sleep_clock(tid) +#endif +{ +} + +static void MemoryProfiler(Context *ctx, fd_t fd, int i) { + uptr n_threads; + uptr n_running_threads; + ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); + InternalScopedBuffer<char> buf(4096); + WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads); + internal_write(fd, buf.data(), internal_strlen(buf.data())); +} + +static void BackgroundThread(void *arg) { +#ifndef SANITIZER_GO + // This is a non-initialized non-user thread, nothing to see here. + // We don't use ScopedIgnoreInterceptors, because we want ignores to be + // enabled even when the thread function exits (e.g. during pthread thread + // shutdown code). + cur_thread()->ignore_interceptors++; +#endif + const u64 kMs2Ns = 1000 * 1000; + + fd_t mprof_fd = kInvalidFd; + if (flags()->profile_memory && flags()->profile_memory[0]) { + if (internal_strcmp(flags()->profile_memory, "stdout") == 0) { + mprof_fd = 1; + } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) { + mprof_fd = 2; + } else { + InternalScopedString filename(kMaxPathLength); + filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid()); + uptr openrv = OpenFile(filename.data(), true); + if (internal_iserror(openrv)) { + Printf("ThreadSanitizer: failed to open memory profile file '%s'\n", + &filename[0]); + } else { + mprof_fd = openrv; + } + } + } + + u64 last_flush = NanoTime(); + uptr last_rss = 0; + for (int i = 0; + atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0; + i++) { + SleepForMillis(100); + u64 now = NanoTime(); + + // Flush memory if requested. + if (flags()->flush_memory_ms > 0) { + if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) { + VPrintf(1, "ThreadSanitizer: periodic memory flush\n"); + FlushShadowMemory(); + last_flush = NanoTime(); + } + } + // GetRSS can be expensive on huge programs, so don't do it every 100ms. + if (flags()->memory_limit_mb > 0) { + uptr rss = GetRSS(); + uptr limit = uptr(flags()->memory_limit_mb) << 20; + VPrintf(1, "ThreadSanitizer: memory flush check" + " RSS=%llu LAST=%llu LIMIT=%llu\n", + (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20); + if (2 * rss > limit + last_rss) { + VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n"); + FlushShadowMemory(); + rss = GetRSS(); + VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20); + } + last_rss = rss; + } + + // Write memory profile if requested. + if (mprof_fd != kInvalidFd) + MemoryProfiler(ctx, mprof_fd, i); + +#ifndef SANITIZER_GO + // Flush symbolizer cache if requested. 
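+    // "Flush" drops the in-process symbolizer caches via SymbolizeFlush(),
+    // but only if no symbolization request has arrived during the last
+    // flush_symbolizer_ms (see last_symbolize_time_ns below).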
+ if (flags()->flush_symbolizer_ms > 0) { + u64 last = atomic_load(&ctx->last_symbolize_time_ns, + memory_order_relaxed); + if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) { + Lock l(&ctx->report_mtx); + SpinMutexLock l2(&CommonSanitizerReportMutex); + SymbolizeFlush(); + atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed); + } + } +#endif + } +} + +static void StartBackgroundThread() { + ctx->background_thread = internal_start_thread(&BackgroundThread, 0); +} + +#ifndef SANITIZER_GO +static void StopBackgroundThread() { + atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); + internal_join_thread(ctx->background_thread); + ctx->background_thread = 0; +} +#endif + +void DontNeedShadowFor(uptr addr, uptr size) { + uptr shadow_beg = MemToShadow(addr); + uptr shadow_end = MemToShadow(addr + size); + FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); +} + +void MapShadow(uptr addr, uptr size) { + // Global data is not 64K aligned, but there are no adjacent mappings, + // so we can get away with unaligned mapping. + // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment + MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier); + + // Meta shadow is 2:1, so tread carefully. + static bool data_mapped = false; + static uptr mapped_meta_end = 0; + uptr meta_begin = (uptr)MemToMeta(addr); + uptr meta_end = (uptr)MemToMeta(addr + size); + meta_begin = RoundDownTo(meta_begin, 64 << 10); + meta_end = RoundUpTo(meta_end, 64 << 10); + if (!data_mapped) { + // First call maps data+bss. + data_mapped = true; + MmapFixedNoReserve(meta_begin, meta_end - meta_begin); + } else { + // Mapping continous heap. + // Windows wants 64K alignment. + meta_begin = RoundDownTo(meta_begin, 64 << 10); + meta_end = RoundUpTo(meta_end, 64 << 10); + if (meta_end <= mapped_meta_end) + return; + if (meta_begin < mapped_meta_end) + meta_begin = mapped_meta_end; + MmapFixedNoReserve(meta_begin, meta_end - meta_begin); + mapped_meta_end = meta_end; + } + VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n", + addr, addr+size, meta_begin, meta_end); +} + +void MapThreadTrace(uptr addr, uptr size) { + DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); + CHECK_GE(addr, kTraceMemBeg); + CHECK_LE(addr + size, kTraceMemEnd); + CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment + uptr addr1 = (uptr)MmapFixedNoReserve(addr, size); + if (addr1 != addr) { + Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n", + addr, size, addr1); + Die(); + } +} + +static void CheckShadowMapping() { + for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) { + const uptr beg = UserRegions[i]; + const uptr end = UserRegions[i + 1]; + VPrintf(3, "checking shadow region %p-%p\n", beg, end); + for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) { + for (int x = -1; x <= 1; x++) { + const uptr p = p0 + x; + if (p < beg || p >= end) + continue; + const uptr s = MemToShadow(p); + VPrintf(3, " checking pointer %p -> %p\n", p, s); + CHECK(IsAppMem(p)); + CHECK(IsShadowMem(s)); + CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s)); + const uptr m = (uptr)MemToMeta(p); + CHECK(IsMetaMem(m)); + } + } + } +} + +void Initialize(ThreadState *thr) { + // Thread safe because done before all threads exist. + static bool is_initialized = false; + if (is_initialized) + return; + is_initialized = true; + // We are not ready to handle interceptors yet. 
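+  // ScopedIgnoreInterceptors bumps the current thread's
+  // ignore_interceptors counter, so intercepted calls made during
+  // initialization go straight through to the real functions.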
+ ScopedIgnoreInterceptors ignore; + SanitizerToolName = "ThreadSanitizer"; + // Install tool-specific callbacks in sanitizer_common. + SetCheckFailedCallback(TsanCheckFailed); + + ctx = new(ctx_placeholder) Context; + const char *options = GetEnv(kTsanOptionsEnv); + InitializeFlags(&ctx->flags, options); +#ifndef SANITIZER_GO + InitializeAllocator(); +#endif + InitializeInterceptors(); + CheckShadowMapping(); + InitializePlatform(); + InitializeMutex(); + InitializeDynamicAnnotations(); +#ifndef SANITIZER_GO + InitializeShadowMemory(); +#endif + // Setup correct file descriptor for error reports. + __sanitizer_set_report_path(common_flags()->log_path); + InitializeSuppressions(); +#ifndef SANITIZER_GO + InitializeLibIgnore(); + Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); +#endif + StartBackgroundThread(); +#ifndef SANITIZER_GO + SetSandboxingCallback(StopBackgroundThread); +#endif + if (common_flags()->detect_deadlocks) + ctx->dd = DDetector::Create(flags()); + + VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n", + (int)internal_getpid()); + + // Initialize thread 0. + int tid = ThreadCreate(thr, 0, 0, true); + CHECK_EQ(tid, 0); + ThreadStart(thr, tid, internal_getpid()); + ctx->initialized = true; + + if (flags()->stop_on_start) { + Printf("ThreadSanitizer is suspended at startup (pid %d)." + " Call __tsan_resume().\n", + (int)internal_getpid()); + while (__tsan_resumed == 0) {} + } + + OnInitialize(); +} + +int Finalize(ThreadState *thr) { + bool failed = false; + + if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1) + SleepForMillis(flags()->atexit_sleep_ms); + + // Wait for pending reports. + ctx->report_mtx.Lock(); + CommonSanitizerReportMutex.Lock(); + CommonSanitizerReportMutex.Unlock(); + ctx->report_mtx.Unlock(); + +#ifndef SANITIZER_GO + if (common_flags()->verbosity) + AllocatorPrintStats(); +#endif + + ThreadFinalize(thr); + + if (ctx->nreported) { + failed = true; +#ifndef SANITIZER_GO + Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); +#else + Printf("Found %d data race(s)\n", ctx->nreported); +#endif + } + + if (ctx->nmissed_expected) { + failed = true; + Printf("ThreadSanitizer: missed %d expected races\n", + ctx->nmissed_expected); + } + + if (common_flags()->print_suppressions) + PrintMatchedSuppressions(); +#ifndef SANITIZER_GO + if (flags()->print_benign) + PrintMatchedBenignRaces(); +#endif + + failed = OnFinalize(failed); + + StatAggregate(ctx->stat, thr->stat); + StatOutput(ctx->stat); + return failed ? flags()->exitcode : 0; +} + +#ifndef SANITIZER_GO +void ForkBefore(ThreadState *thr, uptr pc) { + ctx->thread_registry->Lock(); + ctx->report_mtx.Lock(); +} + +void ForkParentAfter(ThreadState *thr, uptr pc) { + ctx->report_mtx.Unlock(); + ctx->thread_registry->Unlock(); +} + +void ForkChildAfter(ThreadState *thr, uptr pc) { + ctx->report_mtx.Unlock(); + ctx->thread_registry->Unlock(); + + uptr nthread = 0; + ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */); + VPrintf(1, "ThreadSanitizer: forked new process with pid %d," + " parent had %d threads\n", (int)internal_getpid(), (int)nthread); + if (nthread == 1) { + internal_start_thread(&BackgroundThread, 0); + } else { + // We've just forked a multi-threaded process. We cannot reasonably function + // after that (some mutexes may be locked before fork). So just enable + // ignores for everything in the hope that we will exec soon. 
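+    // These ignores are deliberately never popped: they stay in effect
+    // until exec() replaces the address space (or the process exits).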
+ ctx->after_multithreaded_fork = true; + thr->ignore_interceptors++; + ThreadIgnoreBegin(thr, pc); + ThreadIgnoreSyncBegin(thr, pc); + } +} +#endif + +#ifdef SANITIZER_GO +NOINLINE +void GrowShadowStack(ThreadState *thr) { + const int sz = thr->shadow_stack_end - thr->shadow_stack; + const int newsz = 2 * sz; + uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack, + newsz * sizeof(uptr)); + internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr)); + internal_free(thr->shadow_stack); + thr->shadow_stack = newstack; + thr->shadow_stack_pos = newstack + sz; + thr->shadow_stack_end = newstack + newsz; +} +#endif + +u32 CurrentStackId(ThreadState *thr, uptr pc) { + if (thr->shadow_stack_pos == 0) // May happen during bootstrap. + return 0; + if (pc != 0) { +#ifndef SANITIZER_GO + DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); +#else + if (thr->shadow_stack_pos == thr->shadow_stack_end) + GrowShadowStack(thr); +#endif + thr->shadow_stack_pos[0] = pc; + thr->shadow_stack_pos++; + } + u32 id = StackDepotPut( + StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack)); + if (pc != 0) + thr->shadow_stack_pos--; + return id; +} + +void TraceSwitch(ThreadState *thr) { + thr->nomalloc++; + Trace *thr_trace = ThreadTrace(thr->tid); + Lock l(&thr_trace->mtx); + unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); + TraceHeader *hdr = &thr_trace->headers[trace]; + hdr->epoch0 = thr->fast_state.epoch(); + ObtainCurrentStack(thr, 0, &hdr->stack0); + hdr->mset0 = thr->mset; + thr->nomalloc--; +} + +Trace *ThreadTrace(int tid) { + return (Trace*)GetThreadTraceHeader(tid); +} + +uptr TraceTopPC(ThreadState *thr) { + Event *events = (Event*)GetThreadTrace(thr->tid); + uptr pc = events[thr->fast_state.GetTracePos()]; + return pc; +} + +uptr TraceSize() { + return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1)); +} + +uptr TraceParts() { + return TraceSize() / kTracePartSize; +} + +#ifndef SANITIZER_GO +extern "C" void __tsan_trace_switch() { + TraceSwitch(cur_thread()); +} + +extern "C" void __tsan_report_race() { + ReportRace(cur_thread()); +} +#endif + +ALWAYS_INLINE +Shadow LoadShadow(u64 *p) { + u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed); + return Shadow(raw); +} + +ALWAYS_INLINE +void StoreShadow(u64 *sp, u64 s) { + atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed); +} + +ALWAYS_INLINE +void StoreIfNotYetStored(u64 *sp, u64 *s) { + StoreShadow(sp, *s); + *s = 0; +} + +ALWAYS_INLINE +void HandleRace(ThreadState *thr, u64 *shadow_mem, + Shadow cur, Shadow old) { + thr->racy_state[0] = cur.raw(); + thr->racy_state[1] = old.raw(); + thr->racy_shadow_addr = shadow_mem; +#ifndef SANITIZER_GO + HACKY_CALL(__tsan_report_race); +#else + ReportRace(thr); +#endif +} + +static inline bool HappensBefore(Shadow old, ThreadState *thr) { + return thr->clock.get(old.TidWithIgnore()) >= old.epoch(); +} + +ALWAYS_INLINE +void MemoryAccessImpl1(ThreadState *thr, uptr addr, + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, + u64 *shadow_mem, Shadow cur) { + StatInc(thr, StatMop); + StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); + StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); + + // This potentially can live in an MMX/SSE scratch register. 
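+  // (The portable code below keeps it in a plain u64 instead.)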
+ // The required intrinsics are: + // __m128i _mm_move_epi64(__m128i*); + // _mm_storel_epi64(u64*, __m128i); + u64 store_word = cur.raw(); + + // scan all the shadow values and dispatch to 4 categories: + // same, replace, candidate and race (see comments below). + // we consider only 3 cases regarding access sizes: + // equal, intersect and not intersect. initially I considered + // larger and smaller as well, it allowed to replace some + // 'candidates' with 'same' or 'replace', but I think + // it's just not worth it (performance- and complexity-wise). + + Shadow old(0); + if (kShadowCnt == 1) { + int idx = 0; +#include "tsan_update_shadow_word_inl.h" + } else if (kShadowCnt == 2) { + int idx = 0; +#include "tsan_update_shadow_word_inl.h" + idx = 1; +#include "tsan_update_shadow_word_inl.h" + } else if (kShadowCnt == 4) { + int idx = 0; +#include "tsan_update_shadow_word_inl.h" + idx = 1; +#include "tsan_update_shadow_word_inl.h" + idx = 2; +#include "tsan_update_shadow_word_inl.h" + idx = 3; +#include "tsan_update_shadow_word_inl.h" + } else if (kShadowCnt == 8) { + int idx = 0; +#include "tsan_update_shadow_word_inl.h" + idx = 1; +#include "tsan_update_shadow_word_inl.h" + idx = 2; +#include "tsan_update_shadow_word_inl.h" + idx = 3; +#include "tsan_update_shadow_word_inl.h" + idx = 4; +#include "tsan_update_shadow_word_inl.h" + idx = 5; +#include "tsan_update_shadow_word_inl.h" + idx = 6; +#include "tsan_update_shadow_word_inl.h" + idx = 7; +#include "tsan_update_shadow_word_inl.h" + } else { + CHECK(false); + } + + // we did not find any races and had already stored + // the current access info, so we are done + if (LIKELY(store_word == 0)) + return; + // choose a random candidate slot and replace it + StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word); + StatInc(thr, StatShadowReplace); + return; + RACE: + HandleRace(thr, shadow_mem, cur, old); + return; +} + +void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, + int size, bool kAccessIsWrite, bool kIsAtomic) { + while (size) { + int size1 = 1; + int kAccessSizeLog = kSizeLog1; + if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) { + size1 = 8; + kAccessSizeLog = kSizeLog8; + } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) { + size1 = 4; + kAccessSizeLog = kSizeLog4; + } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) { + size1 = 2; + kAccessSizeLog = kSizeLog2; + } + MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic); + addr += size1; + size -= size1; + } +} + +ALWAYS_INLINE +bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) { + Shadow cur(a); + for (uptr i = 0; i < kShadowCnt; i++) { + Shadow old(LoadShadow(&s[i])); + if (Shadow::Addr0AndSizeAreEqual(cur, old) && + old.TidWithIgnore() == cur.TidWithIgnore() && + old.epoch() > sync_epoch && + old.IsAtomic() == cur.IsAtomic() && + old.IsRead() <= cur.IsRead()) + return true; + } + return false; +} + +#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4 +#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \ + _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \ + (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64)) +ALWAYS_INLINE +bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) { + // This is an optimized version of ContainsSameAccessSlow. 
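+  // It gathers the high halves (addr0/size/access-type/tid bits) and the
+  // low halves (epochs) of all 4 shadow slots into two vectors and
+  // compares them against the current access and sync_epoch in parallel;
+  // the DCHECK in ContainsSameAccess cross-checks it against the slow path.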
+ // load current access into access[0:63] + const m128 access = _mm_cvtsi64_si128(a); + // duplicate high part of access in addr0: + // addr0[0:31] = access[32:63] + // addr0[32:63] = access[32:63] + // addr0[64:95] = access[32:63] + // addr0[96:127] = access[32:63] + const m128 addr0 = SHUF(access, access, 1, 1, 1, 1); + // load 4 shadow slots + const m128 shadow0 = _mm_load_si128((__m128i*)s); + const m128 shadow1 = _mm_load_si128((__m128i*)s + 1); + // load high parts of 4 shadow slots into addr_vect: + // addr_vect[0:31] = shadow0[32:63] + // addr_vect[32:63] = shadow0[96:127] + // addr_vect[64:95] = shadow1[32:63] + // addr_vect[96:127] = shadow1[96:127] + m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3); + if (!is_write) { + // set IsRead bit in addr_vect + const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15); + const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0); + addr_vect = _mm_or_si128(addr_vect, rw_mask); + } + // addr0 == addr_vect? + const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect); + // epoch1[0:63] = sync_epoch + const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch); + // epoch[0:31] = sync_epoch[0:31] + // epoch[32:63] = sync_epoch[0:31] + // epoch[64:95] = sync_epoch[0:31] + // epoch[96:127] = sync_epoch[0:31] + const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0); + // load low parts of shadow cell epochs into epoch_vect: + // epoch_vect[0:31] = shadow0[0:31] + // epoch_vect[32:63] = shadow0[64:95] + // epoch_vect[64:95] = shadow1[0:31] + // epoch_vect[96:127] = shadow1[64:95] + const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2); + // epoch_vect >= sync_epoch? + const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch); + // addr_res & epoch_res + const m128 res = _mm_and_si128(addr_res, epoch_res); + // mask[0] = res[7] + // mask[1] = res[15] + // ... + // mask[15] = res[127] + const int mask = _mm_movemask_epi8(res); + return mask != 0; +} +#endif + +ALWAYS_INLINE +bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) { +#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4 + bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write); + // NOTE: this check can fail if the shadow is concurrently mutated + // by other threads. + DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write)); + return res; +#else + return ContainsSameAccessSlow(s, a, sync_epoch, is_write); +#endif +} + +ALWAYS_INLINE USED +void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) { + u64 *shadow_mem = (u64*)MemToShadow(addr); + DPrintf2("#%d: MemoryAccess: @%p %p size=%d" + " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n", + (int)thr->fast_state.tid(), (void*)pc, (void*)addr, + (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem, + (uptr)shadow_mem[0], (uptr)shadow_mem[1], + (uptr)shadow_mem[2], (uptr)shadow_mem[3]); +#if TSAN_DEBUG + if (!IsAppMem(addr)) { + Printf("Access to non app mem %zx\n", addr); + DCHECK(IsAppMem(addr)); + } + if (!IsShadowMem((uptr)shadow_mem)) { + Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); + DCHECK(IsShadowMem((uptr)shadow_mem)); + } +#endif + + if (kCppMode && *shadow_mem == kShadowRodata) { + // Access to .rodata section, no races here. + // Measurements show that it can be 10-20% of all memory accesses. + StatInc(thr, StatMop); + StatInc(thr, kAccessIsWrite ? 
StatMopWrite : StatMopRead); + StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); + StatInc(thr, StatMopRodata); + return; + } + + FastState fast_state = thr->fast_state; + if (fast_state.GetIgnoreBit()) { + StatInc(thr, StatMop); + StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); + StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); + StatInc(thr, StatMopIgnored); + return; + } + + Shadow cur(fast_state); + cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog); + cur.SetWrite(kAccessIsWrite); + cur.SetAtomic(kIsAtomic); + + if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), + thr->fast_synch_epoch, kAccessIsWrite))) { + StatInc(thr, StatMop); + StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); + StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); + StatInc(thr, StatMopSame); + return; + } + + if (kCollectHistory) { + fast_state.IncrementEpoch(); + thr->fast_state = fast_state; + TraceAddEvent(thr, fast_state, EventTypeMop, pc); + cur.IncrementEpoch(); + } + + MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, + shadow_mem, cur); +} + +// Called by MemoryAccessRange in tsan_rtl_thread.cc +ALWAYS_INLINE USED +void MemoryAccessImpl(ThreadState *thr, uptr addr, + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, + u64 *shadow_mem, Shadow cur) { + if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), + thr->fast_synch_epoch, kAccessIsWrite))) { + StatInc(thr, StatMop); + StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); + StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); + StatInc(thr, StatMopSame); + return; + } + + MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, + shadow_mem, cur); +} + +static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, + u64 val) { + (void)thr; + (void)pc; + if (size == 0) + return; + // FIXME: fix me. + uptr offset = addr % kShadowCell; + if (offset) { + offset = kShadowCell - offset; + if (size <= offset) + return; + addr += offset; + size -= offset; + } + DCHECK_EQ(addr % 8, 0); + // If a user passes some insane arguments (memset(0)), + // let it just crash as usual. + if (!IsAppMem(addr) || !IsAppMem(addr + size - 1)) + return; + // Don't want to touch lots of shadow memory. + // If a program maps 10MB stack, there is no need reset the whole range. + size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1); + // UnmapOrDie/MmapFixedNoReserve does not work on Windows, + // so we do it only for C/C++. + if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) { + u64 *p = (u64*)MemToShadow(addr); + CHECK(IsShadowMem((uptr)p)); + CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1))); + // FIXME: may overwrite a part outside the region + for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) { + p[i++] = val; + for (uptr j = 1; j < kShadowCnt; j++) + p[i++] = 0; + } + } else { + // The region is big, reset only beginning and end. + const uptr kPageSize = 4096; + u64 *begin = (u64*)MemToShadow(addr); + u64 *end = begin + size / kShadowCell * kShadowCnt; + u64 *p = begin; + // Set at least first kPageSize/2 to page boundary. + while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) { + *p++ = val; + for (uptr j = 1; j < kShadowCnt; j++) + *p++ = 0; + } + // Reset middle part. + u64 *p1 = p; + p = RoundDown(end, kPageSize); + UnmapOrDie((void*)p1, (uptr)p - (uptr)p1); + MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1); + // Set the ending. 
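+    // I.e. the tail between the page-aligned middle part (remapped to
+    // fresh zero pages above) and the true end of the range.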
+ while (p < end) { + *p++ = val; + for (uptr j = 1; j < kShadowCnt; j++) + *p++ = 0; + } + } +} + +void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) { + MemoryRangeSet(thr, pc, addr, size, 0); +} + +void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) { + // Processing more than 1k (4k of shadow) is expensive, + // can cause excessive memory consumption (user does not necessary touch + // the whole range) and most likely unnecessary. + if (size > 1024) + size = 1024; + CHECK_EQ(thr->is_freeing, false); + thr->is_freeing = true; + MemoryAccessRange(thr, pc, addr, size, true); + thr->is_freeing = false; + if (kCollectHistory) { + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); + } + Shadow s(thr->fast_state); + s.ClearIgnoreBit(); + s.MarkAsFreed(); + s.SetWrite(true); + s.SetAddr0AndSizeLog(0, 3); + MemoryRangeSet(thr, pc, addr, size, s.raw()); +} + +void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) { + if (kCollectHistory) { + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); + } + Shadow s(thr->fast_state); + s.ClearIgnoreBit(); + s.SetWrite(true); + s.SetAddr0AndSizeLog(0, 3); + MemoryRangeSet(thr, pc, addr, size, s.raw()); +} + +ALWAYS_INLINE USED +void FuncEntry(ThreadState *thr, uptr pc) { + StatInc(thr, StatFuncEnter); + DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc); + if (kCollectHistory) { + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc); + } + + // Shadow stack maintenance can be replaced with + // stack unwinding during trace switch (which presumably must be faster). + DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack); +#ifndef SANITIZER_GO + DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); +#else + if (thr->shadow_stack_pos == thr->shadow_stack_end) + GrowShadowStack(thr); +#endif + thr->shadow_stack_pos[0] = pc; + thr->shadow_stack_pos++; +} + +ALWAYS_INLINE USED +void FuncExit(ThreadState *thr) { + StatInc(thr, StatFuncExit); + DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid()); + if (kCollectHistory) { + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0); + } + + DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack); +#ifndef SANITIZER_GO + DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); +#endif + thr->shadow_stack_pos--; +} + +void ThreadIgnoreBegin(ThreadState *thr, uptr pc) { + DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid); + thr->ignore_reads_and_writes++; + CHECK_GT(thr->ignore_reads_and_writes, 0); + thr->fast_state.SetIgnoreBit(); +#ifndef SANITIZER_GO + if (!ctx->after_multithreaded_fork) + thr->mop_ignore_set.Add(CurrentStackId(thr, pc)); +#endif +} + +void ThreadIgnoreEnd(ThreadState *thr, uptr pc) { + DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid); + thr->ignore_reads_and_writes--; + CHECK_GE(thr->ignore_reads_and_writes, 0); + if (thr->ignore_reads_and_writes == 0) { + thr->fast_state.ClearIgnoreBit(); +#ifndef SANITIZER_GO + thr->mop_ignore_set.Reset(); +#endif + } +} + +void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) { + DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid); + thr->ignore_sync++; + CHECK_GT(thr->ignore_sync, 0); +#ifndef SANITIZER_GO + if (!ctx->after_multithreaded_fork) + thr->sync_ignore_set.Add(CurrentStackId(thr, pc)); +#endif +} + +void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) { + DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid); + thr->ignore_sync--; 
+ CHECK_GE(thr->ignore_sync, 0); +#ifndef SANITIZER_GO + if (thr->ignore_sync == 0) + thr->sync_ignore_set.Reset(); +#endif +} + +bool MD5Hash::operator==(const MD5Hash &other) const { + return hash[0] == other.hash[0] && hash[1] == other.hash[1]; +} + +#if TSAN_DEBUG +void build_consistency_debug() {} +#else +void build_consistency_release() {} +#endif + +#if TSAN_COLLECT_STATS +void build_consistency_stats() {} +#else +void build_consistency_nostats() {} +#endif + +#if TSAN_SHADOW_COUNT == 1 +void build_consistency_shadow1() {} +#elif TSAN_SHADOW_COUNT == 2 +void build_consistency_shadow2() {} +#elif TSAN_SHADOW_COUNT == 4 +void build_consistency_shadow4() {} +#else +void build_consistency_shadow8() {} +#endif + +} // namespace __tsan + +#ifndef SANITIZER_GO +// Must be included in this file to make sure everything is inlined. +#include "tsan_interface_inl.h" +#endif diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.h new file mode 100644 index 0000000..8d88687 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl.h @@ -0,0 +1,734 @@ +//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Main internal TSan header file. +// +// Ground rules: +// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static +// function-scope locals) +// - All functions/classes/etc reside in namespace __tsan, except for those +// declared in tsan_interface.h. +// - Platform-specific files should be used instead of ifdefs (*). +// - No system headers included in header files (*). +// - Platform specific headres included only into platform-specific files (*). +// +// (*) Except when inlining is critical for performance. 
+//===----------------------------------------------------------------------===// + +#ifndef TSAN_RTL_H +#define TSAN_RTL_H + +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_internal.h" +#include "sanitizer_common/sanitizer_asm.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_deadlock_detector_interface.h" +#include "sanitizer_common/sanitizer_libignore.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_thread_registry.h" +#include "tsan_clock.h" +#include "tsan_defs.h" +#include "tsan_flags.h" +#include "tsan_sync.h" +#include "tsan_trace.h" +#include "tsan_vector.h" +#include "tsan_report.h" +#include "tsan_platform.h" +#include "tsan_mutexset.h" +#include "tsan_ignoreset.h" +#include "tsan_stack_trace.h" + +#if SANITIZER_WORDSIZE != 64 +# error "ThreadSanitizer is supported only on 64-bit platforms" +#endif + +namespace __tsan { + +#ifndef SANITIZER_GO +struct MapUnmapCallback; +typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0, + DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator; +typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; +typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator; +typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, + SecondaryAllocator> Allocator; +Allocator *allocator(); +#endif + +void TsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2); + +const u64 kShadowRodata = (u64)-1; // .rodata shadow marker + +// FastState (from most significant bit): +// ignore : 1 +// tid : kTidBits +// unused : - +// history_size : 3 +// epoch : kClkBits +class FastState { + public: + FastState(u64 tid, u64 epoch) { + x_ = tid << kTidShift; + x_ |= epoch; + DCHECK_EQ(tid, this->tid()); + DCHECK_EQ(epoch, this->epoch()); + DCHECK_EQ(GetIgnoreBit(), false); + } + + explicit FastState(u64 x) + : x_(x) { + } + + u64 raw() const { + return x_; + } + + u64 tid() const { + u64 res = (x_ & ~kIgnoreBit) >> kTidShift; + return res; + } + + u64 TidWithIgnore() const { + u64 res = x_ >> kTidShift; + return res; + } + + u64 epoch() const { + u64 res = x_ & ((1ull << kClkBits) - 1); + return res; + } + + void IncrementEpoch() { + u64 old_epoch = epoch(); + x_ += 1; + DCHECK_EQ(old_epoch + 1, epoch()); + (void)old_epoch; + } + + void SetIgnoreBit() { x_ |= kIgnoreBit; } + void ClearIgnoreBit() { x_ &= ~kIgnoreBit; } + bool GetIgnoreBit() const { return (s64)x_ < 0; } + + void SetHistorySize(int hs) { + CHECK_GE(hs, 0); + CHECK_LE(hs, 7); + x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift); + } + + ALWAYS_INLINE + int GetHistorySize() const { + return (int)((x_ >> kHistoryShift) & kHistoryMask); + } + + void ClearHistorySize() { + SetHistorySize(0); + } + + ALWAYS_INLINE + u64 GetTracePos() const { + const int hs = GetHistorySize(); + // When hs == 0, the trace consists of 2 parts. 
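+ // Worked example of the mask below: the trace for history size hs
+ // holds 2^(hs + 1) parts of kTracePartSize events each, so
+ //   total = kTracePartSize << (hs + 1);
+ //   pos   = epoch & (total - 1);   // i.e. epoch % total
+ // and hs == 0 gives exactly the 2 parts mentioned above.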
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+ return epoch() & mask;
+ }
+
+ private:
+ friend class Shadow;
+ static const int kTidShift = 64 - kTidBits - 1;
+ static const u64 kIgnoreBit = 1ull << 63;
+ static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
+ u64 x_;
+};
+
+// Shadow (from most significant bit):
+// freed : 1
+// tid : kTidBits
+// is_atomic : 1
+// is_read : 1
+// size_log : 2
+// addr0 : 3
+// epoch : kClkBits
+class Shadow : public FastState {
+ public:
+ explicit Shadow(u64 x)
+ : FastState(x) {
+ }
+
+ explicit Shadow(const FastState &s)
+ : FastState(s.x_) {
+ ClearHistorySize();
+ }
+
+ void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
+ DCHECK_LE(addr0, 7);
+ DCHECK_LE(kAccessSizeLog, 3);
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
+ DCHECK_EQ(kAccessSizeLog, size_log());
+ DCHECK_EQ(addr0, this->addr0());
+ }
+
+ void SetWrite(unsigned kAccessIsWrite) {
+ DCHECK_EQ(x_ & kReadBit, 0);
+ if (!kAccessIsWrite)
+ x_ |= kReadBit;
+ DCHECK_EQ(kAccessIsWrite, IsWrite());
+ }
+
+ void SetAtomic(bool kIsAtomic) {
+ DCHECK(!IsAtomic());
+ if (kIsAtomic)
+ x_ |= kAtomicBit;
+ DCHECK_EQ(IsAtomic(), kIsAtomic);
+ }
+
+ bool IsAtomic() const {
+ return x_ & kAtomicBit;
+ }
+
+ bool IsZero() const {
+ return x_ == 0;
+ }
+
+ static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
+ u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
+ DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
+ return shifted_xor == 0;
+ }
+
+ static ALWAYS_INLINE
+ bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
+ return masked_xor == 0;
+ }
+
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ unsigned kS2AccessSize) {
+ bool res = false;
+ u64 diff = s1.addr0() - s2.addr0();
+ if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
+ // if (s1.addr0() + s1.size() > s2.addr0()) return true;
+ if (s1.size() > -diff)
+ res = true;
+ } else {
+ // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
+ if (kS2AccessSize > diff)
+ res = true;
+ }
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
+ return res;
+ }
+
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
+
+ // The idea behind the freed bit is as follows.
+ // When the memory is freed (or otherwise made inaccessible) we write to
+ // the shadow values with the tid/epoch of the free and the freed bit set.
+ // During memory access processing the freed bit is considered
+ // as the msb of tid. So any access races with a shadow value that has the
+ // freed bit set (it is as if the write came from a thread with which we
+ // never synchronized). This allows us to detect accesses to freed memory
+ // w/o additional overheads in memory access processing and at the same
+ // time restore the tid/epoch of the free.
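+ // Sketch of the lifecycle (simplified): at free() the range is stamped
+ // with the current thread's state plus the freed bit:
+ //
+ //   Shadow s(thr->fast_state);
+ //   s.MarkAsFreed();               // freed bit doubles as an extra tid msb
+ //   MemoryRangeSet(..., s.raw());  // see MemoryRangeFreed in tsan_rtl.cc
+ //
+ // A later access never passes TidsAreEqual() against such a cell, so it
+ // is reported as a race, and the report can show who freed the memory.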
+ void MarkAsFreed() {
+ x_ |= kFreedBit;
+ }
+
+ bool IsFreed() const {
+ return x_ & kFreedBit;
+ }
+
+ bool GetFreedAndReset() {
+ bool res = x_ & kFreedBit;
+ x_ &= ~kFreedBit;
+ return res;
+ }
+
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
+ | (u64(kIsAtomic) << kAtomicShift));
+ DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+ return v;
+ }
+
+ private:
+ static const u64 kReadShift = 5 + kClkBits;
+ static const u64 kReadBit = 1ull << kReadShift;
+ static const u64 kAtomicShift = 6 + kClkBits;
+ static const u64 kAtomicBit = 1ull << kAtomicShift;
+
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
+
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
+ if (s1.addr0() == s2.addr0()) return true;
+ if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
+ return true;
+ if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
+ return true;
+ return false;
+ }
+};
+
+struct SignalContext;
+
+struct JmpBuf {
+ uptr sp;
+ uptr mangled_sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
+ uptr *shadow_stack_pos;
+};
+
+// This struct is stored in TLS.
+struct ThreadState {
+ FastState fast_state;
+ // Synch epoch represents the thread's epoch before the last synchronization
+ // action. It allows us to reduce the number of shadow state updates.
+ // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
+ // if we are processing write to X from the same thread at epoch=200,
+ // we do nothing, because both writes happen in the same 'synch epoch'.
+ // That is, if another memory access does not race with the former write,
+ // it does not race with the latter as well.
+ // QUESTION: can we squeeze this into ThreadState::Fast?
+ // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
+ // taken by epoch between synchs.
+ // This way we can save one load from tls.
+ u64 fast_synch_epoch;
+ // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
+ // We do not distinguish between ignoring reads and writes
+ // for better performance.
+ int ignore_reads_and_writes;
+ int ignore_sync;
+ // Go does not support ignores.
+#ifndef SANITIZER_GO
+ IgnoreSet mop_ignore_set;
+ IgnoreSet sync_ignore_set;
+#endif
+ // C/C++ uses a fixed-size shadow stack embedded into Trace.
+ // Go uses a malloc-allocated shadow stack with dynamic size.
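+ // Layout sketch for the fixed-size case -- the three pointers below
+ // delimit one flat array of return PCs, and FuncEntry/FuncExit reduce
+ // to a store-and-bump and a decrement (see tsan_rtl.cc):
+ //
+ //   shadow_stack     shadow_stack_pos        shadow_stack_end
+ //        |                  |                        |
+ //        v                  v                        v
+ //        [pc0][pc1][pc2][pc3][ ....... free ....... ]
+ //
+ //   *thr->shadow_stack_pos++ = pc;  // FuncEntry
+ //   thr->shadow_stack_pos--;        // FuncExit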
+ uptr *shadow_stack; + uptr *shadow_stack_end; + uptr *shadow_stack_pos; + u64 *racy_shadow_addr; + u64 racy_state[2]; + MutexSet mset; + ThreadClock clock; +#ifndef SANITIZER_GO + AllocatorCache alloc_cache; + InternalAllocatorCache internal_alloc_cache; + Vector<JmpBuf> jmp_bufs; + int ignore_interceptors; +#endif + u64 stat[StatCnt]; + const int tid; + const int unique_id; + bool in_symbolizer; + bool in_ignored_lib; + bool is_dead; + bool is_freeing; + bool is_vptr_access; + const uptr stk_addr; + const uptr stk_size; + const uptr tls_addr; + const uptr tls_size; + ThreadContext *tctx; + + InternalDeadlockDetector internal_deadlock_detector; + DDPhysicalThread *dd_pt; + DDLogicalThread *dd_lt; + + atomic_uintptr_t in_signal_handler; + SignalContext *signal_ctx; + + DenseSlabAllocCache block_cache; + DenseSlabAllocCache sync_cache; + DenseSlabAllocCache clock_cache; + +#ifndef SANITIZER_GO + u32 last_sleep_stack_id; + ThreadClock last_sleep_clock; +#endif + + // Set in regions of runtime that must be signal-safe and fork-safe. + // If set, malloc must not be called. + int nomalloc; + + explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, + unsigned reuse_count, + uptr stk_addr, uptr stk_size, + uptr tls_addr, uptr tls_size); +}; + +#ifndef SANITIZER_GO +__attribute__((tls_model("initial-exec"))) +extern THREADLOCAL char cur_thread_placeholder[]; +INLINE ThreadState *cur_thread() { + return reinterpret_cast<ThreadState *>(&cur_thread_placeholder); +} +#endif + +class ThreadContext : public ThreadContextBase { + public: + explicit ThreadContext(int tid); + ~ThreadContext(); + ThreadState *thr; + u32 creation_stack_id; + SyncClock sync; + // Epoch at which the thread had started. + // If we see an event from the thread stamped by an older epoch, + // the event is from a dead thread that shared tid with this thread. + u64 epoch0; + u64 epoch1; + + // Override superclass callbacks. + void OnDead(); + void OnJoined(void *arg); + void OnFinished(); + void OnStarted(void *arg); + void OnCreated(void *arg); + void OnReset(); + void OnDetached(void *arg); +}; + +struct RacyStacks { + MD5Hash hash[2]; + bool operator==(const RacyStacks &other) const { + if (hash[0] == other.hash[0] && hash[1] == other.hash[1]) + return true; + if (hash[0] == other.hash[1] && hash[1] == other.hash[0]) + return true; + return false; + } +}; + +struct RacyAddress { + uptr addr_min; + uptr addr_max; +}; + +struct FiredSuppression { + ReportType type; + uptr pc; + Suppression *supp; +}; + +struct Context { + Context(); + + bool initialized; + bool after_multithreaded_fork; + + MetaMap metamap; + + Mutex report_mtx; + int nreported; + int nmissed_expected; + atomic_uint64_t last_symbolize_time_ns; + + void *background_thread; + atomic_uint32_t stop_background_thread; + + ThreadRegistry *thread_registry; + + Vector<RacyStacks> racy_stacks; + Vector<RacyAddress> racy_addresses; + // Number of fired suppressions may be large enough. + InternalMmapVector<FiredSuppression> fired_suppressions; + DDetector *dd; + + ClockAlloc clock_alloc; + + Flags flags; + + u64 stat[StatCnt]; + u64 int_alloc_cnt[MBlockTypeCount]; + u64 int_alloc_siz[MBlockTypeCount]; +}; + +extern Context *ctx; // The one and the only global runtime context. 
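+// Typical access pattern inside the runtime (illustrative only): the
+// hot path touches only the TLS-resident ThreadState, while the global
+// ctx is consulted on slow paths under an explicit lock, e.g.
+//
+//   ThreadState *thr = cur_thread();             // fast, lock-free
+//   StatInc(thr, StatEvents);                    // per-thread counter
+//   ThreadRegistryLock l(ctx->thread_registry);  // slow path only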
+ +struct ScopedIgnoreInterceptors { + ScopedIgnoreInterceptors() { +#ifndef SANITIZER_GO + cur_thread()->ignore_interceptors++; +#endif + } + + ~ScopedIgnoreInterceptors() { +#ifndef SANITIZER_GO + cur_thread()->ignore_interceptors--; +#endif + } +}; + +class ScopedReport { + public: + explicit ScopedReport(ReportType typ); + ~ScopedReport(); + + void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack, + const MutexSet *mset); + void AddStack(StackTrace stack, bool suppressable = false); + void AddThread(const ThreadContext *tctx, bool suppressable = false); + void AddThread(int unique_tid, bool suppressable = false); + void AddUniqueTid(int unique_tid); + void AddMutex(const SyncVar *s); + u64 AddMutex(u64 id); + void AddLocation(uptr addr, uptr size); + void AddSleep(u32 stack_id); + void SetCount(int count); + + const ReportDesc *GetReport() const; + + private: + ReportDesc *rep_; + // Symbolizer makes lots of intercepted calls. If we try to process them, + // at best it will cause deadlocks on internal mutexes. + ScopedIgnoreInterceptors ignore_interceptors_; + + void AddDeadMutex(u64 id); + + ScopedReport(const ScopedReport&); + void operator = (const ScopedReport&); +}; + +void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, + MutexSet *mset); + +template<typename StackTraceTy> +void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) { + uptr size = thr->shadow_stack_pos - thr->shadow_stack; + uptr start = 0; + if (size + !!toppc > kStackTraceMax) { + start = size + !!toppc - kStackTraceMax; + size = kStackTraceMax - !!toppc; + } + stack->Init(&thr->shadow_stack[start], size, toppc); +} + + +void StatAggregate(u64 *dst, u64 *src); +void StatOutput(u64 *stat); +void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) { + if (kCollectStats) + thr->stat[typ] += n; +} +void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) { + if (kCollectStats) + thr->stat[typ] = n; +} + +void MapShadow(uptr addr, uptr size); +void MapThreadTrace(uptr addr, uptr size); +void DontNeedShadowFor(uptr addr, uptr size); +void InitializeShadowMemory(); +void InitializeInterceptors(); +void InitializeLibIgnore(); +void InitializeDynamicAnnotations(); + +void ForkBefore(ThreadState *thr, uptr pc); +void ForkParentAfter(ThreadState *thr, uptr pc); +void ForkChildAfter(ThreadState *thr, uptr pc); + +void ReportRace(ThreadState *thr); +bool OutputReport(ThreadState *thr, const ScopedReport &srep); +bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, + StackTrace trace); +bool IsExpectedReport(uptr addr, uptr size); +void PrintMatchedBenignRaces(); + +#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1 +# define DPrintf Printf +#else +# define DPrintf(...) +#endif + +#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2 +# define DPrintf2 Printf +#else +# define DPrintf2(...) 
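+// Usage sketch: DPrintf is meant for rare events (thread/mutex lifetime),
+// DPrintf2 for per-access spam; both compile to nothing unless
+// TSAN_DEBUG_OUTPUT is defined, e.g.
+//
+//   DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
+//   DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);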
+#endif + +u32 CurrentStackId(ThreadState *thr, uptr pc); +ReportStack *SymbolizeStackId(u32 stack_id); +void PrintCurrentStack(ThreadState *thr, uptr pc); +void PrintCurrentStackSlow(uptr pc); // uses libunwind + +void Initialize(ThreadState *thr); +int Finalize(ThreadState *thr); + +void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write); +void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write); + +void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic); +void MemoryAccessImpl(ThreadState *thr, uptr addr, + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, + u64 *shadow_mem, Shadow cur); +void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, + uptr size, bool is_write); +void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr, + uptr size, uptr step, bool is_write); +void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, + int size, bool kAccessIsWrite, bool kIsAtomic); + +const int kSizeLog1 = 0; +const int kSizeLog2 = 1; +const int kSizeLog4 = 2; +const int kSizeLog8 = 3; + +void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false); +} + +void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false); +} + +void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true); +} + +void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true); +} + +void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size); +void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size); +void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size); + +void ThreadIgnoreBegin(ThreadState *thr, uptr pc); +void ThreadIgnoreEnd(ThreadState *thr, uptr pc); +void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc); +void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc); + +void FuncEntry(ThreadState *thr, uptr pc); +void FuncExit(ThreadState *thr); + +int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached); +void ThreadStart(ThreadState *thr, int tid, uptr os_id); +void ThreadFinish(ThreadState *thr); +int ThreadTid(ThreadState *thr, uptr pc, uptr uid); +void ThreadJoin(ThreadState *thr, uptr pc, int tid); +void ThreadDetach(ThreadState *thr, uptr pc, int tid); +void ThreadFinalize(ThreadState *thr); +void ThreadSetName(ThreadState *thr, const char *name); +int ThreadCount(ThreadState *thr); +void ProcessPendingSignals(ThreadState *thr); + +void MutexCreate(ThreadState *thr, uptr pc, uptr addr, + bool rw, bool recursive, bool linker_init); +void MutexDestroy(ThreadState *thr, uptr pc, uptr addr); +void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1, + bool try_lock = false); +int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false); +void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false); +void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr); +void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr); +void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD + +void Acquire(ThreadState *thr, uptr pc, uptr addr); +// AcquireGlobal synchronizes the current thread with all other threads. 
+// In terms of happens-before relation, it draws a HB edge from all threads
+// (where they happen to execute right now) to the current thread. We use it to
+// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
+// right before executing finalizers. This provides a coarse, but simple
+// approximation of the actual required synchronization.
+void AcquireGlobal(ThreadState *thr, uptr pc);
+void Release(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
+void AfterSleep(ThreadState *thr, uptr pc);
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+
+// The hacky call uses a custom calling convention and an assembly thunk.
+// It is considerably faster than a normal call for the caller
+// if it is not executed (it is intended for slow paths from hot functions).
+// The trick is that the call preserves all registers and the compiler
+// does not treat it as a call.
+// If it does not work for you, use a normal call.
+#if TSAN_DEBUG == 0
+// The caller may not create the stack frame for itself at all,
+// so we create a reserve stack frame for it (1024b must be enough).
+#define HACKY_CALL(f) \
+ __asm__ __volatile__("sub $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(1024) \
+ ".hidden " #f "_thunk;" \
+ "call " #f "_thunk;" \
+ "add $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(-1024) \
+ ::: "memory", "cc");
+#else
+#define HACKY_CALL(f) f()
+#endif
+
+void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
+uptr TraceParts();
+Trace *ThreadTrace(int tid);
+
+extern "C" void __tsan_trace_switch();
+void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
+ EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
+ DCHECK_GE((int)typ, 0);
+ DCHECK_LE((int)typ, 7);
+ DCHECK_EQ(GetLsb(addr, 61), addr);
+ StatInc(thr, StatEvents);
+ u64 pos = fs.GetTracePos();
+ if (UNLIKELY((pos % kTracePartSize) == 0)) {
+#ifndef SANITIZER_GO
+ HACKY_CALL(__tsan_trace_switch);
+#else
+ TraceSwitch(thr);
+#endif
+ }
+ Event *trace = (Event*)GetThreadTrace(fs.tid());
+ Event *evp = &trace[pos];
+ Event ev = (u64)addr | ((u64)typ << 61);
+ *evp = ev;
+}
+
+} // namespace __tsan
+
+#endif // TSAN_RTL_H
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
new file mode 100644
index 0000000..8db62f9
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -0,0 +1,324 @@
+#include "sanitizer_common/sanitizer_asm.h"
+.section .text
+
+.hidden __tsan_trace_switch
+.globl __tsan_trace_switch_thunk
+__tsan_trace_switch_thunk:
+ CFI_STARTPROC
+ # Save scratch registers.
+ push %rax
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
+ push %rcx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
+ push %rdx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ push %r8
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
+ push %r9
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
+ push %r10
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
+ push %r11
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
+ # Align stack frame.
+ push %rbx # non-scratch + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rbx, 0) + mov %rsp, %rbx # save current rsp + CFI_DEF_CFA_REGISTER(%rbx) + shr $4, %rsp # clear 4 lsb, align to 16 + shl $4, %rsp + + call __tsan_trace_switch + + # Unalign stack frame back. + mov %rbx, %rsp # restore the original rsp + CFI_DEF_CFA_REGISTER(%rsp) + pop %rbx + CFI_ADJUST_CFA_OFFSET(-8) + # Restore scratch registers. + pop %r11 + CFI_ADJUST_CFA_OFFSET(-8) + pop %r10 + CFI_ADJUST_CFA_OFFSET(-8) + pop %r9 + CFI_ADJUST_CFA_OFFSET(-8) + pop %r8 + CFI_ADJUST_CFA_OFFSET(-8) + pop %rdi + CFI_ADJUST_CFA_OFFSET(-8) + pop %rsi + CFI_ADJUST_CFA_OFFSET(-8) + pop %rdx + CFI_ADJUST_CFA_OFFSET(-8) + pop %rcx + CFI_ADJUST_CFA_OFFSET(-8) + pop %rax + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rax) + CFI_RESTORE(%rbx) + CFI_RESTORE(%rcx) + CFI_RESTORE(%rdx) + CFI_RESTORE(%rsi) + CFI_RESTORE(%rdi) + CFI_RESTORE(%r8) + CFI_RESTORE(%r9) + CFI_RESTORE(%r10) + CFI_RESTORE(%r11) + ret + CFI_ENDPROC + +.hidden __tsan_report_race +.globl __tsan_report_race_thunk +__tsan_report_race_thunk: + CFI_STARTPROC + # Save scratch registers. + push %rax + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rax, 0) + push %rcx + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rcx, 0) + push %rdx + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rdx, 0) + push %rsi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rsi, 0) + push %rdi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rdi, 0) + push %r8 + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%r8, 0) + push %r9 + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%r9, 0) + push %r10 + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%r10, 0) + push %r11 + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%r11, 0) + # Align stack frame. + push %rbx # non-scratch + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rbx, 0) + mov %rsp, %rbx # save current rsp + CFI_DEF_CFA_REGISTER(%rbx) + shr $4, %rsp # clear 4 lsb, align to 16 + shl $4, %rsp + + call __tsan_report_race + + # Unalign stack frame back. + mov %rbx, %rsp # restore the original rsp + CFI_DEF_CFA_REGISTER(%rsp) + pop %rbx + CFI_ADJUST_CFA_OFFSET(-8) + # Restore scratch registers. 
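+ # Worked example of the alignment trick used above: "shr $4; shl $4"
+ # rounds %rsp down to a multiple of 16, e.g. 0x7fffffffe438 becomes
+ # 0x7fffffffe430 (hypothetical values), satisfying the ABI alignment
+ # for the call without knowing the caller's alignment; the original
+ # %rsp survives in %rbx (callee-saved) and is restored afterwards.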
+ pop %r11 + CFI_ADJUST_CFA_OFFSET(-8) + pop %r10 + CFI_ADJUST_CFA_OFFSET(-8) + pop %r9 + CFI_ADJUST_CFA_OFFSET(-8) + pop %r8 + CFI_ADJUST_CFA_OFFSET(-8) + pop %rdi + CFI_ADJUST_CFA_OFFSET(-8) + pop %rsi + CFI_ADJUST_CFA_OFFSET(-8) + pop %rdx + CFI_ADJUST_CFA_OFFSET(-8) + pop %rcx + CFI_ADJUST_CFA_OFFSET(-8) + pop %rax + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rax) + CFI_RESTORE(%rbx) + CFI_RESTORE(%rcx) + CFI_RESTORE(%rdx) + CFI_RESTORE(%rsi) + CFI_RESTORE(%rdi) + CFI_RESTORE(%r8) + CFI_RESTORE(%r9) + CFI_RESTORE(%r10) + CFI_RESTORE(%r11) + ret + CFI_ENDPROC + +.hidden __tsan_setjmp +.comm _ZN14__interception11real_setjmpE,8,8 +.globl setjmp +.type setjmp, @function +setjmp: + CFI_STARTPROC + // save env parameter + push %rdi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rdi, 0) + // obtain %rsp +#if defined(__FreeBSD__) + lea 8(%rsp), %rdi + mov %rdi, %rsi +#else + lea 16(%rsp), %rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi +#endif + // call tsan interceptor + call __tsan_setjmp + // restore env parameter + pop %rdi + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rdi) + // tail jump to libc setjmp + movl $0, %eax + movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + CFI_ENDPROC +.size setjmp, .-setjmp + +.comm _ZN14__interception12real__setjmpE,8,8 +.globl _setjmp +.type _setjmp, @function +_setjmp: + CFI_STARTPROC + // save env parameter + push %rdi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rdi, 0) + // obtain %rsp +#if defined(__FreeBSD__) + lea 8(%rsp), %rdi + mov %rdi, %rsi +#else + lea 16(%rsp), %rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi +#endif + // call tsan interceptor + call __tsan_setjmp + // restore env parameter + pop %rdi + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rdi) + // tail jump to libc setjmp + movl $0, %eax + movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + CFI_ENDPROC +.size _setjmp, .-_setjmp + +.comm _ZN14__interception14real_sigsetjmpE,8,8 +.globl sigsetjmp +.type sigsetjmp, @function +sigsetjmp: + CFI_STARTPROC + // save env parameter + push %rdi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rdi, 0) + // save savesigs parameter + push %rsi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rsi, 0) + // align stack frame + sub $8, %rsp + CFI_ADJUST_CFA_OFFSET(8) + // obtain %rsp +#if defined(__FreeBSD__) + lea 24(%rsp), %rdi + mov %rdi, %rsi +#else + lea 32(%rsp), %rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi +#endif + // call tsan interceptor + call __tsan_setjmp + // unalign stack frame + add $8, %rsp + CFI_ADJUST_CFA_OFFSET(-8) + // restore savesigs parameter + pop %rsi + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rsi) + // restore env parameter + pop %rdi + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rdi) + // tail jump to libc sigsetjmp + movl $0, %eax + movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + CFI_ENDPROC +.size sigsetjmp, .-sigsetjmp + +.comm _ZN14__interception16real___sigsetjmpE,8,8 +.globl __sigsetjmp +.type __sigsetjmp, @function +__sigsetjmp: + CFI_STARTPROC + // save env parameter + push %rdi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rdi, 0) + // save savesigs parameter + push %rsi + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(%rsi, 0) + // align stack frame + sub $8, %rsp + CFI_ADJUST_CFA_OFFSET(8) + // obtain %rsp +#if defined(__FreeBSD__) + lea 24(%rsp), %rdi + mov %rdi, %rsi +#else + lea 32(%rsp), 
%rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi +#endif + // call tsan interceptor + call __tsan_setjmp + // unalign stack frame + add $8, %rsp + CFI_ADJUST_CFA_OFFSET(-8) + // restore savesigs parameter + pop %rsi + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rsi) + // restore env parameter + pop %rdi + CFI_ADJUST_CFA_OFFSET(-8) + CFI_RESTORE(%rdi) + // tail jump to libc sigsetjmp + movl $0, %eax + movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + CFI_ENDPROC +.size __sigsetjmp, .-__sigsetjmp + +#if defined(__FreeBSD__) || defined(__linux__) +/* We do not need executable stack. */ +.section .note.GNU-stack,"",@progbits +#endif diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc new file mode 100644 index 0000000..ddf2b69 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc @@ -0,0 +1,491 @@ +//===-- tsan_rtl_mutex.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include <sanitizer_common/sanitizer_deadlock_detector_interface.h> +#include <sanitizer_common/sanitizer_stackdepot.h> + +#include "tsan_rtl.h" +#include "tsan_flags.h" +#include "tsan_sync.h" +#include "tsan_report.h" +#include "tsan_symbolize.h" +#include "tsan_platform.h" + +namespace __tsan { + +void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r); + +struct Callback : DDCallback { + ThreadState *thr; + uptr pc; + + Callback(ThreadState *thr, uptr pc) + : thr(thr) + , pc(pc) { + DDCallback::pt = thr->dd_pt; + DDCallback::lt = thr->dd_lt; + } + + virtual u32 Unwind() { + return CurrentStackId(thr, pc); + } + virtual int UniqueTid() { + return thr->unique_id; + } +}; + +void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) { + Callback cb(thr, pc); + ctx->dd->MutexInit(&cb, &s->dd); + s->dd.ctx = s->GetId(); +} + +static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ, + uptr addr, u64 mid) { + // In Go, these misuses are either impossible, or detected by std lib, + // or false positives (e.g. unlock in a different thread). 
+ if (kGoMode) + return; + ThreadRegistryLock l(ctx->thread_registry); + ScopedReport rep(typ); + rep.AddMutex(mid); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep.AddStack(trace, true); + rep.AddLocation(addr, 1); + OutputReport(thr, rep); +} + +void MutexCreate(ThreadState *thr, uptr pc, uptr addr, + bool rw, bool recursive, bool linker_init) { + DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr); + StatInc(thr, StatMutexCreate); + if (!linker_init && IsAppMem(addr)) { + CHECK(!thr->is_freeing); + thr->is_freeing = true; + MemoryWrite(thr, pc, addr, kSizeLog1); + thr->is_freeing = false; + } + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + s->is_rw = rw; + s->is_recursive = recursive; + s->is_linker_init = linker_init; + if (kCppMode && s->creation_stack_id == 0) + s->creation_stack_id = CurrentStackId(thr, pc); + s->mtx.Unlock(); +} + +void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr); + StatInc(thr, StatMutexDestroy); +#ifndef SANITIZER_GO + // Global mutexes not marked as LINKER_INITIALIZED + // cause tons of not interesting reports, so just ignore it. + if (IsGlobalVar(addr)) + return; +#endif + if (IsAppMem(addr)) { + CHECK(!thr->is_freeing); + thr->is_freeing = true; + MemoryWrite(thr, pc, addr, kSizeLog1); + thr->is_freeing = false; + } + SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr); + if (s == 0) + return; + if (common_flags()->detect_deadlocks) { + Callback cb(thr, pc); + ctx->dd->MutexDestroy(&cb, &s->dd); + ctx->dd->MutexInit(&cb, &s->dd); + } + bool unlock_locked = false; + if (flags()->report_destroy_locked + && s->owner_tid != SyncVar::kInvalidTid + && !s->is_broken) { + s->is_broken = true; + unlock_locked = true; + } + u64 mid = s->GetId(); + u32 last_lock = s->last_lock; + if (!unlock_locked) + s->Reset(thr); // must not reset it before the report is printed + s->mtx.Unlock(); + if (unlock_locked) { + ThreadRegistryLock l(ctx->thread_registry); + ScopedReport rep(ReportTypeMutexDestroyLocked); + rep.AddMutex(mid); + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + rep.AddStack(trace); + FastState last(last_lock); + RestoreStack(last.tid(), last.epoch(), &trace, 0); + rep.AddStack(trace, true); + rep.AddLocation(addr, 1); + OutputReport(thr, rep); + } + if (unlock_locked) { + SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr); + if (s != 0) { + s->Reset(thr); + s->mtx.Unlock(); + } + } + thr->mset.Remove(mid); + // s will be destroyed and freed in MetaMap::FreeBlock. 
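+ // Summary of the two-phase teardown above: the SyncVar must stay
+ // intact while the report is built, so Reset() is deferred -- unlock,
+ // print the report, then re-lookup the SyncVar and reset it.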
+} + +void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) { + DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec); + CHECK_GT(rec, 0); + if (IsAppMem(addr)) + MemoryReadAtomic(thr, pc, addr, kSizeLog1); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); + bool report_double_lock = false; + if (s->owner_tid == SyncVar::kInvalidTid) { + CHECK_EQ(s->recursion, 0); + s->owner_tid = thr->tid; + s->last_lock = thr->fast_state.raw(); + } else if (s->owner_tid == thr->tid) { + CHECK_GT(s->recursion, 0); + } else if (flags()->report_mutex_bugs && !s->is_broken) { + s->is_broken = true; + report_double_lock = true; + } + if (s->recursion == 0) { + StatInc(thr, StatMutexLock); + AcquireImpl(thr, pc, &s->clock); + AcquireImpl(thr, pc, &s->read_clock); + } else if (!s->is_recursive) { + StatInc(thr, StatMutexRecLock); + } + s->recursion += rec; + thr->mset.Add(s->GetId(), true, thr->fast_state.epoch()); + if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) { + Callback cb(thr, pc); + if (!try_lock) + ctx->dd->MutexBeforeLock(&cb, &s->dd, true); + ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock); + } + u64 mid = s->GetId(); + s->mtx.Unlock(); + // Can't touch s after this point. + if (report_double_lock) + ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid); + if (common_flags()->detect_deadlocks) { + Callback cb(thr, pc); + ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); + } +} + +int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) { + DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all); + if (IsAppMem(addr)) + MemoryReadAtomic(thr, pc, addr, kSizeLog1); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); + int rec = 0; + bool report_bad_unlock = false; + if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) { + if (flags()->report_mutex_bugs && !s->is_broken) { + s->is_broken = true; + report_bad_unlock = true; + } + } else { + rec = all ? s->recursion : 1; + s->recursion -= rec; + if (s->recursion == 0) { + StatInc(thr, StatMutexUnlock); + s->owner_tid = SyncVar::kInvalidTid; + ReleaseStoreImpl(thr, pc, &s->clock); + } else { + StatInc(thr, StatMutexRecUnlock); + } + } + thr->mset.Del(s->GetId(), true); + if (common_flags()->detect_deadlocks && s->recursion == 0 && + !report_bad_unlock) { + Callback cb(thr, pc); + ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true); + } + u64 mid = s->GetId(); + s->mtx.Unlock(); + // Can't touch s after this point. 
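+ // Recursion accounting sketch for the code above (hypothetical trace):
+ //   MutexLock(m);     // recursion 0 -> 1, acquires clocks
+ //   MutexLock(m);     // recursion 1 -> 2, recursive, no acquire
+ //   MutexUnlock(m);   // recursion 2 -> 1, no release yet
+ //   MutexUnlock(m);   // recursion 1 -> 0, ReleaseStore to s->clock
+ //   MutexUnlock(m);   // owner/recursion mismatch -> bad-unlock report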
+ if (report_bad_unlock) + ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid); + if (common_flags()->detect_deadlocks && !report_bad_unlock) { + Callback cb(thr, pc); + ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); + } + return rec; +} + +void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) { + DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr); + StatInc(thr, StatMutexReadLock); + if (IsAppMem(addr)) + MemoryReadAtomic(thr, pc, addr, kSizeLog1); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId()); + bool report_bad_lock = false; + if (s->owner_tid != SyncVar::kInvalidTid) { + if (flags()->report_mutex_bugs && !s->is_broken) { + s->is_broken = true; + report_bad_lock = true; + } + } + AcquireImpl(thr, pc, &s->clock); + s->last_lock = thr->fast_state.raw(); + thr->mset.Add(s->GetId(), false, thr->fast_state.epoch()); + if (common_flags()->detect_deadlocks && s->recursion == 0) { + Callback cb(thr, pc); + if (!trylock) + ctx->dd->MutexBeforeLock(&cb, &s->dd, false); + ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock); + } + u64 mid = s->GetId(); + s->mtx.ReadUnlock(); + // Can't touch s after this point. + if (report_bad_lock) + ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid); + if (common_flags()->detect_deadlocks) { + Callback cb(thr, pc); + ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); + } +} + +void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr); + StatInc(thr, StatMutexReadUnlock); + if (IsAppMem(addr)) + MemoryReadAtomic(thr, pc, addr, kSizeLog1); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); + bool report_bad_unlock = false; + if (s->owner_tid != SyncVar::kInvalidTid) { + if (flags()->report_mutex_bugs && !s->is_broken) { + s->is_broken = true; + report_bad_unlock = true; + } + } + ReleaseImpl(thr, pc, &s->read_clock); + if (common_flags()->detect_deadlocks && s->recursion == 0) { + Callback cb(thr, pc); + ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false); + } + u64 mid = s->GetId(); + s->mtx.Unlock(); + // Can't touch s after this point. + thr->mset.Del(mid, false); + if (report_bad_unlock) + ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid); + if (common_flags()->detect_deadlocks) { + Callback cb(thr, pc); + ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); + } +} + +void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); + if (IsAppMem(addr)) + MemoryReadAtomic(thr, pc, addr, kSizeLog1); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + bool write = true; + bool report_bad_unlock = false; + if (s->owner_tid == SyncVar::kInvalidTid) { + // Seems to be read unlock. + write = false; + StatInc(thr, StatMutexReadUnlock); + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); + ReleaseImpl(thr, pc, &s->read_clock); + } else if (s->owner_tid == thr->tid) { + // Seems to be write unlock. 
+ thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); + CHECK_GT(s->recursion, 0); + s->recursion--; + if (s->recursion == 0) { + StatInc(thr, StatMutexUnlock); + s->owner_tid = SyncVar::kInvalidTid; + ReleaseImpl(thr, pc, &s->clock); + } else { + StatInc(thr, StatMutexRecUnlock); + } + } else if (!s->is_broken) { + s->is_broken = true; + report_bad_unlock = true; + } + thr->mset.Del(s->GetId(), write); + if (common_flags()->detect_deadlocks && s->recursion == 0) { + Callback cb(thr, pc); + ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write); + } + u64 mid = s->GetId(); + s->mtx.Unlock(); + // Can't touch s after this point. + if (report_bad_unlock) + ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid); + if (common_flags()->detect_deadlocks) { + Callback cb(thr, pc); + ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb)); + } +} + +void MutexRepair(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr); + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + s->owner_tid = SyncVar::kInvalidTid; + s->recursion = 0; + s->mtx.Unlock(); +} + +void Acquire(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: Acquire %zx\n", thr->tid, addr); + if (thr->ignore_sync) + return; + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); + AcquireImpl(thr, pc, &s->clock); + s->mtx.ReadUnlock(); +} + +static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) { + ThreadState *thr = reinterpret_cast<ThreadState*>(arg); + ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); + if (tctx->status == ThreadStatusRunning) + thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch()); + else + thr->clock.set(tctx->tid, tctx->epoch1); +} + +void AcquireGlobal(ThreadState *thr, uptr pc) { + DPrintf("#%d: AcquireGlobal\n", thr->tid); + if (thr->ignore_sync) + return; + ThreadRegistryLock l(ctx->thread_registry); + ctx->thread_registry->RunCallbackForEachThreadLocked( + UpdateClockCallback, thr); +} + +void Release(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: Release %zx\n", thr->tid, addr); + if (thr->ignore_sync) + return; + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + ReleaseImpl(thr, pc, &s->clock); + s->mtx.Unlock(); +} + +void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { + DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); + if (thr->ignore_sync) + return; + SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. 
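+ // (The event write below is that trace write.) Happens-before sketch
+ // for ReleaseStore/Acquire, an illustrative two-thread example:
+ //   T1: data = 42; ReleaseStore(thr1, pc, &x);  // publish T1's clock
+ //   T2: Acquire(thr2, pc, &x); use(data);       // join T1's clock
+ // After the Acquire, T2's vector clock covers T1's release point, so
+ // T2's read of 'data' is not reported as a race.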
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+#ifndef SANITIZER_GO
+static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status == ThreadStatusRunning)
+ thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
+ else
+ thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
+}
+
+void AfterSleep(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AfterSleep\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ thr->last_sleep_stack_id = CurrentStackId(thr, pc);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateSleepClockCallback, thr);
+}
+#endif
+
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->clock.acquire(&thr->clock_cache, c);
+ StatInc(thr, StatSyncAcquire);
+}
+
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&thr->clock_cache, c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.ReleaseStore(&thr->clock_cache, c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.acq_rel(&thr->clock_cache, c);
+ StatInc(thr, StatSyncAcquire);
+ StatInc(thr, StatSyncRelease);
+}
+
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk) {
+ rep.AddStack(StackDepotGet(stk), true);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ }
+ }
+ }
+ OutputReport(thr, rep);
+}
+
+} // namespace __tsan
diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
new file mode 100644
index 0000000..0481b23
--- /dev/null
+++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cc
@@ -0,0 +1,692 @@
+//===-- tsan_rtl_report.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_fd.h"
+
+namespace __tsan {
+
+using namespace __sanitizer; // NOLINT
+
+static ReportStack *SymbolizeStack(StackTrace trace);
+
+void TsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ // There is a high probability that interceptors will check-fail as well,
+ // on the other hand there is no sense in processing interceptors
+ // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
+ Printf("FATAL: ThreadSanitizer CHECK failed: "
+ "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
+ file, line, cond, (uptr)v1, (uptr)v2);
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+ Die();
+}
+
+// Can be overridden by an application/test to intercept reports.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnReport(const ReportDesc *rep, bool suppressed);
+#else
+SANITIZER_INTERFACE_ATTRIBUTE
+bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
+ (void)rep;
+ return suppressed;
+}
+#endif
+
+static void StackStripMain(SymbolizedStack *frames) {
+ SymbolizedStack *last_frame = nullptr;
+ SymbolizedStack *last_frame2 = nullptr;
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ last_frame2 = last_frame;
+ last_frame = cur;
+ }
+
+ if (last_frame2 == 0)
+ return;
+#ifndef SANITIZER_GO
+ const char *last = last_frame->info.function;
+ const char *last2 = last_frame2->info.function;
+ // Strip frame above 'main'
+ if (last2 && 0 == internal_strcmp(last2, "main")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip our internal thread start routine.
+ } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip global ctors init.
+ } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // If both are 0, then we probably just failed to symbolize.
+ } else if (last || last2) {
+ // Ensure that we recovered the stack completely. A trimmed stack
+ // can actually happen if we do not instrument some code,
+ // so it's only a debug print. However we must try hard to not miss it
+ // due to our fault.
+ DPrintf("Bottom stack frame of stack %zx is missing\n",
+ frames->info.address);
+ }
+#else
+ // The last frame always points into the runtime (gosched0, goexit0,
+ // runtime.main).
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+#endif
+}
+
+ReportStack *SymbolizeStackId(u32 stack_id) {
+ if (stack_id == 0)
+ return 0;
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
+}
+
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
+ return 0;
+ SymbolizedStack *top = nullptr;
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
+ uptr pc1 = pc;
+#ifndef SANITIZER_GO
+ // We obtain the return address, but we're interested in the previous
+ // instruction.
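+ // Worked example (hypothetical addresses): for a call site
+ //   0x4005d0: callq foo
+ //   0x4005d5: mov %eax, %ebx   <- return address recorded by unwinder
+ // the report should blame the call at 0x4005d0, so the code below steps
+ // pc1 back into the previous instruction before symbolizing and then
+ // restores the original pc on the symbolized frames.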
+ if ((pc & kExternalPCBit) == 0) + pc1 = StackTrace::GetPreviousInstructionPc(pc); +#else + // FIXME(dvyukov): Go sometimes uses address of a function as top pc. + if (si != trace.size - 1) + pc1 -= 1; +#endif + SymbolizedStack *ent = SymbolizeCode(pc1); + CHECK_NE(ent, 0); + SymbolizedStack *last = ent; + while (last->next) { + last->info.address = pc; // restore original pc for report + last = last->next; + } + last->info.address = pc; // restore original pc for report + last->next = top; + top = ent; + } + StackStripMain(top); + + ReportStack *stack = ReportStack::New(); + stack->frames = top; + return stack; +} + +ScopedReport::ScopedReport(ReportType typ) { + ctx->thread_registry->CheckLocked(); + void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc)); + rep_ = new(mem) ReportDesc; + rep_->typ = typ; + ctx->report_mtx.Lock(); + CommonSanitizerReportMutex.Lock(); +} + +ScopedReport::~ScopedReport() { + CommonSanitizerReportMutex.Unlock(); + ctx->report_mtx.Unlock(); + DestroyAndFree(rep_); +} + +void ScopedReport::AddStack(StackTrace stack, bool suppressable) { + ReportStack **rs = rep_->stacks.PushBack(); + *rs = SymbolizeStack(stack); + (*rs)->suppressable = suppressable; +} + +void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack, + const MutexSet *mset) { + void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop)); + ReportMop *mop = new(mem) ReportMop; + rep_->mops.PushBack(mop); + mop->tid = s.tid(); + mop->addr = addr + s.addr0(); + mop->size = s.size(); + mop->write = s.IsWrite(); + mop->atomic = s.IsAtomic(); + mop->stack = SymbolizeStack(stack); + if (mop->stack) + mop->stack->suppressable = true; + for (uptr i = 0; i < mset->Size(); i++) { + MutexSet::Desc d = mset->Get(i); + u64 mid = this->AddMutex(d.id); + ReportMopMutex mtx = {mid, d.write}; + mop->mset.PushBack(mtx); + } +} + +void ScopedReport::AddUniqueTid(int unique_tid) { + rep_->unique_tids.PushBack(unique_tid); +} + +void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { + for (uptr i = 0; i < rep_->threads.Size(); i++) { + if ((u32)rep_->threads[i]->id == tctx->tid) + return; + } + void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); + ReportThread *rt = new(mem) ReportThread(); + rep_->threads.PushBack(rt); + rt->id = tctx->tid; + rt->pid = tctx->os_id; + rt->running = (tctx->status == ThreadStatusRunning); + rt->name = internal_strdup(tctx->name); + rt->parent_tid = tctx->parent_tid; + rt->stack = 0; + rt->stack = SymbolizeStackId(tctx->creation_stack_id); + if (rt->stack) + rt->stack->suppressable = suppressable; +} + +#ifndef SANITIZER_GO +static ThreadContext *FindThreadByUidLocked(int unique_id) { + ctx->thread_registry->CheckLocked(); + for (unsigned i = 0; i < kMaxTid; i++) { + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(i)); + if (tctx && tctx->unique_id == (u32)unique_id) { + return tctx; + } + } + return 0; +} + +static ThreadContext *FindThreadByTidLocked(int tid) { + ctx->thread_registry->CheckLocked(); + return static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(tid)); +} + +static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) { + uptr addr = (uptr)arg; + ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); + if (tctx->status != ThreadStatusRunning) + return false; + ThreadState *thr = tctx->thr; + CHECK(thr); + return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) || + (addr >= thr->tls_addr && addr < thr->tls_addr + 
thr->tls_size)); +} + +ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { + ctx->thread_registry->CheckLocked(); + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls, + (void*)addr)); + if (!tctx) + return 0; + ThreadState *thr = tctx->thr; + CHECK(thr); + *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size); + return tctx; +} +#endif + +void ScopedReport::AddThread(int unique_tid, bool suppressable) { +#ifndef SANITIZER_GO + AddThread(FindThreadByUidLocked(unique_tid), suppressable); +#endif +} + +void ScopedReport::AddMutex(const SyncVar *s) { + for (uptr i = 0; i < rep_->mutexes.Size(); i++) { + if (rep_->mutexes[i]->id == s->uid) + return; + } + void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); + ReportMutex *rm = new(mem) ReportMutex(); + rep_->mutexes.PushBack(rm); + rm->id = s->uid; + rm->addr = s->addr; + rm->destroyed = false; + rm->stack = SymbolizeStackId(s->creation_stack_id); +} + +u64 ScopedReport::AddMutex(u64 id) { + u64 uid = 0; + u64 mid = id; + uptr addr = SyncVar::SplitId(id, &uid); + SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr); + // Check that the mutex is still alive. + // Another mutex can be created at the same address, + // so check uid as well. + if (s && s->CheckId(uid)) { + mid = s->uid; + AddMutex(s); + } else { + AddDeadMutex(id); + } + if (s) + s->mtx.Unlock(); + return mid; +} + +void ScopedReport::AddDeadMutex(u64 id) { + for (uptr i = 0; i < rep_->mutexes.Size(); i++) { + if (rep_->mutexes[i]->id == id) + return; + } + void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); + ReportMutex *rm = new(mem) ReportMutex(); + rep_->mutexes.PushBack(rm); + rm->id = id; + rm->addr = 0; + rm->destroyed = true; + rm->stack = 0; +} + +void ScopedReport::AddLocation(uptr addr, uptr size) { + if (addr == 0) + return; +#ifndef SANITIZER_GO + int fd = -1; + int creat_tid = -1; + u32 creat_stack = 0; + if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) { + ReportLocation *loc = ReportLocation::New(ReportLocationFD); + loc->fd = fd; + loc->tid = creat_tid; + loc->stack = SymbolizeStackId(creat_stack); + rep_->locs.PushBack(loc); + ThreadContext *tctx = FindThreadByUidLocked(creat_tid); + if (tctx) + AddThread(tctx); + return; + } + MBlock *b = 0; + Allocator *a = allocator(); + if (a->PointerIsMine((void*)addr)) { + void *block_begin = a->GetBlockBegin((void*)addr); + if (block_begin) + b = ctx->metamap.GetBlock((uptr)block_begin); + } + if (b != 0) { + ThreadContext *tctx = FindThreadByTidLocked(b->tid); + ReportLocation *loc = ReportLocation::New(ReportLocationHeap); + loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr); + loc->heap_chunk_size = b->siz; + loc->tid = tctx ? tctx->tid : b->tid; + loc->stack = SymbolizeStackId(b->stk); + rep_->locs.PushBack(loc); + if (tctx) + AddThread(tctx); + return; + } + bool is_stack = false; + if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) { + ReportLocation *loc = + ReportLocation::New(is_stack ? 
ReportLocationStack : ReportLocationTLS); + loc->tid = tctx->tid; + rep_->locs.PushBack(loc); + AddThread(tctx); + } + if (ReportLocation *loc = SymbolizeData(addr)) { + loc->suppressable = true; + rep_->locs.PushBack(loc); + return; + } +#endif +} + +#ifndef SANITIZER_GO +void ScopedReport::AddSleep(u32 stack_id) { + rep_->sleep = SymbolizeStackId(stack_id); +} +#endif + +void ScopedReport::SetCount(int count) { + rep_->count = count; +} + +const ReportDesc *ScopedReport::GetReport() const { + return rep_; +} + +void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, + MutexSet *mset) { + // This function restores stack trace and mutex set for the thread/epoch. + // It does so by getting stack trace and mutex set at the beginning of + // trace part, and then replaying the trace till the given epoch. + ctx->thread_registry->CheckLocked(); + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(tid)); + if (tctx == 0) + return; + if (tctx->status != ThreadStatusRunning + && tctx->status != ThreadStatusFinished + && tctx->status != ThreadStatusDead) + return; + Trace* trace = ThreadTrace(tctx->tid); + Lock l(&trace->mtx); + const int partidx = (epoch / kTracePartSize) % TraceParts(); + TraceHeader* hdr = &trace->headers[partidx]; + if (epoch < hdr->epoch0) + return; + const u64 epoch0 = RoundDown(epoch, TraceSize()); + const u64 eend = epoch % TraceSize(); + const u64 ebegin = RoundDown(eend, kTracePartSize); + DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", + tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); + InternalScopedBuffer<uptr> stack(kShadowStackSize); + for (uptr i = 0; i < hdr->stack0.size; i++) { + stack[i] = hdr->stack0.trace[i]; + DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]); + } + if (mset) + *mset = hdr->mset0; + uptr pos = hdr->stack0.size; + Event *events = (Event*)GetThreadTrace(tid); + for (uptr i = ebegin; i <= eend; i++) { + Event ev = events[i]; + EventType typ = (EventType)(ev >> 61); + uptr pc = (uptr)(ev & ((1ull << 61) - 1)); + DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc); + if (typ == EventTypeMop) { + stack[pos] = pc; + } else if (typ == EventTypeFuncEnter) { + stack[pos++] = pc; + } else if (typ == EventTypeFuncExit) { + if (pos > 0) + pos--; + } + if (mset) { + if (typ == EventTypeLock) { + mset->Add(pc, true, epoch0 + i); + } else if (typ == EventTypeUnlock) { + mset->Del(pc, true); + } else if (typ == EventTypeRLock) { + mset->Add(pc, false, epoch0 + i); + } else if (typ == EventTypeRUnlock) { + mset->Del(pc, false); + } + } + for (uptr j = 0; j <= pos; j++) + DPrintf2(" #%zu: %zx\n", j, stack[j]); + } + if (pos == 0 && stack[0] == 0) + return; + pos++; + stk->Init(stack.data(), pos); +} + +static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], + uptr addr_min, uptr addr_max) { + bool equal_stack = false; + RacyStacks hash; + if (flags()->suppress_equal_stacks) { + hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); + hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); + for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { + if (hash == ctx->racy_stacks[i]) { + DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n"); + equal_stack = true; + break; + } + } + } + bool equal_address = false; + RacyAddress ra0 = {addr_min, addr_max}; + if (flags()->suppress_equal_addresses) { + for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { + RacyAddress ra2 = ctx->racy_addresses[i]; + uptr maxbeg = max(ra0.addr_min, 
ra2.addr_min); + uptr minend = min(ra0.addr_max, ra2.addr_max); + if (maxbeg < minend) { + DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n"); + equal_address = true; + break; + } + } + } + if (equal_stack || equal_address) { + if (!equal_stack) + ctx->racy_stacks.PushBack(hash); + if (!equal_address) + ctx->racy_addresses.PushBack(ra0); + return true; + } + return false; +} + +static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], + uptr addr_min, uptr addr_max) { + if (flags()->suppress_equal_stacks) { + RacyStacks hash; + hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); + hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); + ctx->racy_stacks.PushBack(hash); + } + if (flags()->suppress_equal_addresses) { + RacyAddress ra0 = {addr_min, addr_max}; + ctx->racy_addresses.PushBack(ra0); + } +} + +bool OutputReport(ThreadState *thr, const ScopedReport &srep) { + atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed); + const ReportDesc *rep = srep.GetReport(); + Suppression *supp = 0; + uptr suppress_pc = 0; + for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++) + suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp); + for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++) + suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp); + for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++) + suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp); + for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++) + suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp); + if (suppress_pc != 0) { + FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp}; + ctx->fired_suppressions.push_back(s); + } + { + bool old_is_freeing = thr->is_freeing; + thr->is_freeing = false; + bool suppressed = OnReport(rep, suppress_pc != 0); + thr->is_freeing = old_is_freeing; + if (suppressed) + return false; + } + PrintReport(rep); + ctx->nreported++; + if (flags()->halt_on_error) + internal__exit(flags()->exitcode); + return true; +} + +bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, + StackTrace trace) { + for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { + if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) + continue; + for (uptr j = 0; j < trace.size; j++) { + FiredSuppression *s = &ctx->fired_suppressions[k]; + if (trace.trace[j] == s->pc) { + if (s->supp) + s->supp->hit_count++; + return true; + } + } + } + return false; +} + +static bool IsFiredSuppression(Context *ctx, + const ScopedReport &srep, + uptr addr) { + for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { + if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) + continue; + FiredSuppression *s = &ctx->fired_suppressions[k]; + if (addr == s->pc) { + if (s->supp) + s->supp->hit_count++; + return true; + } + } + return false; +} + +static bool RaceBetweenAtomicAndFree(ThreadState *thr) { + Shadow s0(thr->racy_state[0]); + Shadow s1(thr->racy_state[1]); + CHECK(!(s0.IsAtomic() && s1.IsAtomic())); + if (!s0.IsAtomic() && !s1.IsAtomic()) + return true; + if (s0.IsAtomic() && s1.IsFreed()) + return true; + if (s1.IsAtomic() && thr->is_freeing) + return true; + return false; +} + +void ReportRace(ThreadState *thr) { + CheckNoLocks(thr); + + // Symbolizer makes lots of intercepted calls. If we try to process them, + // at best it will cause deadlocks on internal mutexes. 
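+  // ScopedIgnoreInterceptors (tsan_rtl.h) is an RAII guard: its constructor
+  // increments thr->ignore_interceptors and its destructor restores it, so
+  // any libc calls made while this report is built bypass the interceptors.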
+ ScopedIgnoreInterceptors ignore; + + if (!flags()->report_bugs) + return; + if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr)) + return; + + bool freed = false; + { + Shadow s(thr->racy_state[1]); + freed = s.GetFreedAndReset(); + thr->racy_state[1] = s.raw(); + } + + uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr); + uptr addr_min = 0; + uptr addr_max = 0; + { + uptr a0 = addr + Shadow(thr->racy_state[0]).addr0(); + uptr a1 = addr + Shadow(thr->racy_state[1]).addr0(); + uptr e0 = a0 + Shadow(thr->racy_state[0]).size(); + uptr e1 = a1 + Shadow(thr->racy_state[1]).size(); + addr_min = min(a0, a1); + addr_max = max(e0, e1); + if (IsExpectedReport(addr_min, addr_max - addr_min)) + return; + } + + ThreadRegistryLock l0(ctx->thread_registry); + + ReportType typ = ReportTypeRace; + if (thr->is_vptr_access && freed) + typ = ReportTypeVptrUseAfterFree; + else if (thr->is_vptr_access) + typ = ReportTypeVptrRace; + else if (freed) + typ = ReportTypeUseAfterFree; + ScopedReport rep(typ); + if (IsFiredSuppression(ctx, rep, addr)) + return; + const uptr kMop = 2; + VarSizeStackTrace traces[kMop]; + const uptr toppc = TraceTopPC(thr); + ObtainCurrentStack(thr, toppc, &traces[0]); + if (IsFiredSuppression(ctx, rep, traces[0])) + return; + InternalScopedBuffer<MutexSet> mset2(1); + new(mset2.data()) MutexSet(); + Shadow s2(thr->racy_state[1]); + RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data()); + if (IsFiredSuppression(ctx, rep, traces[1])) + return; + + if (HandleRacyStacks(thr, traces, addr_min, addr_max)) + return; + + for (uptr i = 0; i < kMop; i++) { + Shadow s(thr->racy_state[i]); + rep.AddMemoryAccess(addr, s, traces[i], + i == 0 ? &thr->mset : mset2.data()); + } + + for (uptr i = 0; i < kMop; i++) { + FastState s(thr->racy_state[i]); + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(s.tid())); + if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1) + continue; + rep.AddThread(tctx); + } + + rep.AddLocation(addr_min, addr_max - addr_min); + +#ifndef SANITIZER_GO + { // NOLINT + Shadow s(thr->racy_state[1]); + if (s.epoch() <= thr->last_sleep_clock.get(s.tid())) + rep.AddSleep(thr->last_sleep_stack_id); + } +#endif + + if (!OutputReport(thr, rep)) + return; + + AddRacyStacks(thr, traces, addr_min, addr_max); +} + +void PrintCurrentStack(ThreadState *thr, uptr pc) { + VarSizeStackTrace trace; + ObtainCurrentStack(thr, pc, &trace); + PrintStack(SymbolizeStack(trace)); +} + +void PrintCurrentStackSlow(uptr pc) { +#ifndef SANITIZER_GO + BufferedStackTrace *ptrace = + new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace))) + BufferedStackTrace(); + ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false); + for (uptr i = 0; i < ptrace->size / 2; i++) { + uptr tmp = ptrace->trace_buffer[i]; + ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1]; + ptrace->trace_buffer[ptrace->size - i - 1] = tmp; + } + PrintStack(SymbolizeStack(*ptrace)); +#endif +} + +} // namespace __tsan + +using namespace __tsan; + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_stack_trace() { + PrintCurrentStackSlow(StackTrace::GetCurrentPc()); +} +} // extern "C" diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc new file mode 100644 index 0000000..7b7b27c --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cc @@ -0,0 +1,403 @@ +//===-- tsan_rtl_thread.cc ------------------------------------------------===// +// +// The 
LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_placement_new.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" +#include "tsan_platform.h" +#include "tsan_report.h" +#include "tsan_sync.h" + +namespace __tsan { + +// ThreadContext implementation. + +ThreadContext::ThreadContext(int tid) + : ThreadContextBase(tid) + , thr() + , sync() + , epoch0() + , epoch1() { +} + +#ifndef SANITIZER_GO +ThreadContext::~ThreadContext() { +} +#endif + +void ThreadContext::OnDead() { + CHECK_EQ(sync.size(), 0); +} + +void ThreadContext::OnJoined(void *arg) { + ThreadState *caller_thr = static_cast<ThreadState *>(arg); + AcquireImpl(caller_thr, 0, &sync); + sync.Reset(&caller_thr->clock_cache); +} + +struct OnCreatedArgs { + ThreadState *thr; + uptr pc; +}; + +void ThreadContext::OnCreated(void *arg) { + thr = 0; + if (tid == 0) + return; + OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg); + args->thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. + TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0); + ReleaseImpl(args->thr, 0, &sync); + creation_stack_id = CurrentStackId(args->thr, args->pc); + if (reuse_count == 0) + StatInc(args->thr, StatThreadMaxTid); +} + +void ThreadContext::OnReset() { + CHECK_EQ(sync.size(), 0); + FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event)); + //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace)); +} + +void ThreadContext::OnDetached(void *arg) { + ThreadState *thr1 = static_cast<ThreadState*>(arg); + sync.Reset(&thr1->clock_cache); +} + +struct OnStartedArgs { + ThreadState *thr; + uptr stk_addr; + uptr stk_size; + uptr tls_addr; + uptr tls_size; +}; + +void ThreadContext::OnStarted(void *arg) { + OnStartedArgs *args = static_cast<OnStartedArgs*>(arg); + thr = args->thr; + // RoundUp so that one trace part does not contain events + // from different threads. + epoch0 = RoundUp(epoch1 + 1, kTracePartSize); + epoch1 = (u64)-1; + new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count, + args->stk_addr, args->stk_size, args->tls_addr, args->tls_size); +#ifndef SANITIZER_GO + thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0]; + thr->shadow_stack_pos = thr->shadow_stack; + thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize; +#else + // Setup dynamic shadow stack. 
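+  // Goroutines are created in large numbers, so the shadow stack starts
+  // small; it is believed to be grown on demand by FuncEntry once
+  // shadow_stack_pos reaches shadow_stack_end.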
+ const int kInitStackSize = 8; + thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack, + kInitStackSize * sizeof(uptr)); + thr->shadow_stack_pos = thr->shadow_stack; + thr->shadow_stack_end = thr->shadow_stack + kInitStackSize; +#endif +#ifndef SANITIZER_GO + AllocatorThreadStart(thr); +#endif + if (common_flags()->detect_deadlocks) { + thr->dd_pt = ctx->dd->CreatePhysicalThread(); + thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id); + } + thr->fast_synch_epoch = epoch0; + AcquireImpl(thr, 0, &sync); + thr->fast_state.SetHistorySize(flags()->history_size); + const uptr trace = (epoch0 / kTracePartSize) % TraceParts(); + Trace *thr_trace = ThreadTrace(thr->tid); + thr_trace->headers[trace].epoch0 = epoch0; + StatInc(thr, StatSyncAcquire); + sync.Reset(&thr->clock_cache); + DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " + "tls_addr=%zx tls_size=%zx\n", + tid, (uptr)epoch0, args->stk_addr, args->stk_size, + args->tls_addr, args->tls_size); +} + +void ThreadContext::OnFinished() { + if (!detached) { + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + ReleaseImpl(thr, 0, &sync); + } + epoch1 = thr->fast_state.epoch(); + + if (common_flags()->detect_deadlocks) { + ctx->dd->DestroyPhysicalThread(thr->dd_pt); + ctx->dd->DestroyLogicalThread(thr->dd_lt); + } + ctx->clock_alloc.FlushCache(&thr->clock_cache); + ctx->metamap.OnThreadIdle(thr); +#ifndef SANITIZER_GO + AllocatorThreadFinish(thr); +#endif + thr->~ThreadState(); + StatAggregate(ctx->stat, thr->stat); + thr = 0; +} + +#ifndef SANITIZER_GO +struct ThreadLeak { + ThreadContext *tctx; + int count; +}; + +static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) { + Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg; + ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); + if (tctx->detached || tctx->status != ThreadStatusFinished) + return; + for (uptr i = 0; i < leaks.Size(); i++) { + if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) { + leaks[i].count++; + return; + } + } + ThreadLeak leak = {tctx, 1}; + leaks.PushBack(leak); +} +#endif + +#ifndef SANITIZER_GO +static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) { + if (tctx->tid == 0) { + Printf("ThreadSanitizer: main thread finished with ignores enabled\n"); + } else { + Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled," + " created at:\n", tctx->tid, tctx->name); + PrintStack(SymbolizeStackId(tctx->creation_stack_id)); + } + Printf(" One of the following ignores was not ended" + " (in order of probability)\n"); + for (uptr i = 0; i < set->Size(); i++) { + Printf(" Ignore was enabled at:\n"); + PrintStack(SymbolizeStackId(set->At(i))); + } + Die(); +} + +static void ThreadCheckIgnore(ThreadState *thr) { + if (ctx->after_multithreaded_fork) + return; + if (thr->ignore_reads_and_writes) + ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set); + if (thr->ignore_sync) + ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set); +} +#else +static void ThreadCheckIgnore(ThreadState *thr) {} +#endif + +void ThreadFinalize(ThreadState *thr) { + ThreadCheckIgnore(thr); +#ifndef SANITIZER_GO + if (!flags()->report_thread_leaks) + return; + ThreadRegistryLock l(ctx->thread_registry); + Vector<ThreadLeak> leaks(MBlockScopedBuf); + ctx->thread_registry->RunCallbackForEachThreadLocked( + MaybeReportThreadLeak, &leaks); + for (uptr i = 0; i < leaks.Size(); i++) { + ScopedReport 
rep(ReportTypeThreadLeak); + rep.AddThread(leaks[i].tctx, true); + rep.SetCount(leaks[i].count); + OutputReport(thr, rep); + } +#endif +} + +int ThreadCount(ThreadState *thr) { + uptr result; + ctx->thread_registry->GetNumberOfThreads(0, 0, &result); + return (int)result; +} + +int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { + StatInc(thr, StatThreadCreate); + OnCreatedArgs args = { thr, pc }; + int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args); + DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid); + StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads()); + return tid; +} + +void ThreadStart(ThreadState *thr, int tid, uptr os_id) { + uptr stk_addr = 0; + uptr stk_size = 0; + uptr tls_addr = 0; + uptr tls_size = 0; + GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size); + + if (tid) { + if (stk_addr && stk_size) + MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size); + + if (tls_addr && tls_size) { + // Check that the thr object is in tls; + const uptr thr_beg = (uptr)thr; + const uptr thr_end = (uptr)thr + sizeof(*thr); + CHECK_GE(thr_beg, tls_addr); + CHECK_LE(thr_beg, tls_addr + tls_size); + CHECK_GE(thr_end, tls_addr); + CHECK_LE(thr_end, tls_addr + tls_size); + // Since the thr object is huge, skip it. + MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr); + MemoryRangeImitateWrite(thr, /*pc=*/ 2, + thr_end, tls_addr + tls_size - thr_end); + } + } + + ThreadRegistry *tr = ctx->thread_registry; + OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size }; + tr->StartThread(tid, os_id, &args); + + tr->Lock(); + thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid); + tr->Unlock(); + +#ifndef SANITIZER_GO + if (ctx->after_multithreaded_fork) { + thr->ignore_interceptors++; + ThreadIgnoreBegin(thr, 0); + ThreadIgnoreSyncBegin(thr, 0); + } +#endif +} + +void ThreadFinish(ThreadState *thr) { + ThreadCheckIgnore(thr); + StatInc(thr, StatThreadFinish); + if (thr->stk_addr && thr->stk_size) + DontNeedShadowFor(thr->stk_addr, thr->stk_size); + if (thr->tls_addr && thr->tls_size) + DontNeedShadowFor(thr->tls_addr, thr->tls_size); + thr->is_dead = true; + ctx->thread_registry->FinishThread(thr->tid); +} + +static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { + uptr uid = (uptr)arg; + if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { + tctx->user_id = 0; + return true; + } + return false; +} + +int ThreadTid(ThreadState *thr, uptr pc, uptr uid) { + int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid); + DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res); + return res; +} + +void ThreadJoin(ThreadState *thr, uptr pc, int tid) { + CHECK_GT(tid, 0); + CHECK_LT(tid, kMaxTid); + DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid); + ctx->thread_registry->JoinThread(tid, thr); +} + +void ThreadDetach(ThreadState *thr, uptr pc, int tid) { + CHECK_GT(tid, 0); + CHECK_LT(tid, kMaxTid); + ctx->thread_registry->DetachThread(tid, thr); +} + +void ThreadSetName(ThreadState *thr, const char *name) { + ctx->thread_registry->SetThreadName(thr->tid, name); +} + +void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, + uptr size, bool is_write) { + if (size == 0) + return; + + u64 *shadow_mem = (u64*)MemToShadow(addr); + DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", + thr->tid, (void*)pc, (void*)addr, + (int)size, is_write); + +#if TSAN_DEBUG + if (!IsAppMem(addr)) { + Printf("Access to non 
app mem %zx\n", addr); + DCHECK(IsAppMem(addr)); + } + if (!IsAppMem(addr + size - 1)) { + Printf("Access to non app mem %zx\n", addr + size - 1); + DCHECK(IsAppMem(addr + size - 1)); + } + if (!IsShadowMem((uptr)shadow_mem)) { + Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); + DCHECK(IsShadowMem((uptr)shadow_mem)); + } + if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) { + Printf("Bad shadow addr %p (%zx)\n", + shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1); + DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))); + } +#endif + + StatInc(thr, StatMopRange); + + if (*shadow_mem == kShadowRodata) { + // Access to .rodata section, no races here. + // Measurements show that it can be 10-20% of all memory accesses. + StatInc(thr, StatMopRangeRodata); + return; + } + + FastState fast_state = thr->fast_state; + if (fast_state.GetIgnoreBit()) + return; + + fast_state.IncrementEpoch(); + thr->fast_state = fast_state; + TraceAddEvent(thr, fast_state, EventTypeMop, pc); + + bool unaligned = (addr % kShadowCell) != 0; + + // Handle unaligned beginning, if any. + for (; addr % kShadowCell && size; addr++, size--) { + int const kAccessSizeLog = 0; + Shadow cur(fast_state); + cur.SetWrite(is_write); + cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, + shadow_mem, cur); + } + if (unaligned) + shadow_mem += kShadowCnt; + // Handle middle part, if any. + for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) { + int const kAccessSizeLog = 3; + Shadow cur(fast_state); + cur.SetWrite(is_write); + cur.SetAddr0AndSizeLog(0, kAccessSizeLog); + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, + shadow_mem, cur); + shadow_mem += kShadowCnt; + } + // Handle ending, if any. + for (; size; addr++, size--) { + int const kAccessSizeLog = 0; + Shadow cur(fast_state); + cur.SetWrite(is_write); + cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, + shadow_mem, cur); + } +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cc new file mode 100644 index 0000000..ceca3f8 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cc @@ -0,0 +1,46 @@ +//===-- tsan_stack_trace.cc -----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#include "tsan_stack_trace.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" + +namespace __tsan { + +VarSizeStackTrace::VarSizeStackTrace() + : StackTrace(nullptr, 0), trace_buffer(nullptr) {} + +VarSizeStackTrace::~VarSizeStackTrace() { + ResizeBuffer(0); +} + +void VarSizeStackTrace::ResizeBuffer(uptr new_size) { + if (trace_buffer) { + internal_free(trace_buffer); + } + trace_buffer = + (new_size > 0) + ? 
(uptr *)internal_alloc(MBlockStackTrace, + new_size * sizeof(trace_buffer[0])) + : nullptr; + trace = trace_buffer; + size = new_size; +} + +void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) { + ResizeBuffer(cnt + !!extra_top_pc); + internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0])); + if (extra_top_pc) + trace_buffer[cnt] = extra_top_pc; +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h new file mode 100644 index 0000000..5bf89bb --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_stack_trace.h @@ -0,0 +1,39 @@ +//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_STACK_TRACE_H +#define TSAN_STACK_TRACE_H + +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "tsan_defs.h" + +namespace __tsan { + +// StackTrace which calls malloc/free to allocate the buffer for +// addresses in stack traces. +struct VarSizeStackTrace : public StackTrace { + uptr *trace_buffer; // Owned. + + VarSizeStackTrace(); + ~VarSizeStackTrace(); + void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0); + + private: + void ResizeBuffer(uptr new_size); + + VarSizeStackTrace(const VarSizeStackTrace &); + void operator=(const VarSizeStackTrace &); +}; + +} // namespace __tsan + +#endif // TSAN_STACK_TRACE_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.cc new file mode 100644 index 0000000..350a2ba --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.cc @@ -0,0 +1,179 @@ +//===-- tsan_stat.cc ------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
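+// Note: StatAggregate and StatOutput below return early when kCollectStats
+// is false, so in default builds the statistics machinery folds away to
+// no-ops.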
+// +//===----------------------------------------------------------------------===// +#include "tsan_stat.h" +#include "tsan_rtl.h" + +namespace __tsan { + +void StatAggregate(u64 *dst, u64 *src) { + if (!kCollectStats) + return; + for (int i = 0; i < StatCnt; i++) + dst[i] += src[i]; +} + +void StatOutput(u64 *stat) { + if (!kCollectStats) + return; + + stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero]; + + static const char *name[StatCnt] = {}; + name[StatMop] = "Memory accesses "; + name[StatMopRead] = " Including reads "; + name[StatMopWrite] = " writes "; + name[StatMop1] = " Including size 1 "; + name[StatMop2] = " size 2 "; + name[StatMop4] = " size 4 "; + name[StatMop8] = " size 8 "; + name[StatMopSame] = " Including same "; + name[StatMopIgnored] = " Including ignored "; + name[StatMopRange] = " Including range "; + name[StatMopRodata] = " Including .rodata "; + name[StatMopRangeRodata] = " Including .rodata range "; + name[StatShadowProcessed] = "Shadow processed "; + name[StatShadowZero] = " Including empty "; + name[StatShadowNonZero] = " Including non empty "; + name[StatShadowSameSize] = " Including same size "; + name[StatShadowIntersect] = " intersect "; + name[StatShadowNotIntersect] = " not intersect "; + name[StatShadowSameThread] = " Including same thread "; + name[StatShadowAnotherThread] = " another thread "; + name[StatShadowReplace] = " Including evicted "; + + name[StatFuncEnter] = "Function entries "; + name[StatFuncExit] = "Function exits "; + name[StatEvents] = "Events collected "; + + name[StatThreadCreate] = "Total threads created "; + name[StatThreadFinish] = " threads finished "; + name[StatThreadReuse] = " threads reused "; + name[StatThreadMaxTid] = " max tid "; + name[StatThreadMaxAlive] = " max alive threads "; + + name[StatMutexCreate] = "Mutexes created "; + name[StatMutexDestroy] = " destroyed "; + name[StatMutexLock] = " lock "; + name[StatMutexUnlock] = " unlock "; + name[StatMutexRecLock] = " recursive lock "; + name[StatMutexRecUnlock] = " recursive unlock "; + name[StatMutexReadLock] = " read lock "; + name[StatMutexReadUnlock] = " read unlock "; + + name[StatSyncCreated] = "Sync objects created "; + name[StatSyncDestroyed] = " destroyed "; + name[StatSyncAcquire] = " acquired "; + name[StatSyncRelease] = " released "; + + name[StatClockAcquire] = "Clock acquire "; + name[StatClockAcquireEmpty] = " empty clock "; + name[StatClockAcquireFastRelease] = " fast from release-store "; + name[StatClockAcquireLarge] = " contains my tid "; + name[StatClockAcquireRepeat] = " repeated (fast) "; + name[StatClockAcquireFull] = " full (slow) "; + name[StatClockAcquiredSomething] = " acquired something "; + name[StatClockRelease] = "Clock release "; + name[StatClockReleaseResize] = " resize "; + name[StatClockReleaseFast1] = " fast1 "; + name[StatClockReleaseFast2] = " fast2 "; + name[StatClockReleaseSlow] = " dirty overflow (slow) "; + name[StatClockReleaseFull] = " full (slow) "; + name[StatClockReleaseAcquired] = " was acquired "; + name[StatClockReleaseClearTail] = " clear tail "; + name[StatClockStore] = "Clock release store "; + name[StatClockStoreResize] = " resize "; + name[StatClockStoreFast] = " fast "; + name[StatClockStoreFull] = " slow "; + name[StatClockStoreTail] = " clear tail "; + name[StatClockAcquireRelease] = "Clock acquire-release "; + + name[StatAtomic] = "Atomic operations "; + name[StatAtomicLoad] = " Including load "; + name[StatAtomicStore] = " store "; + name[StatAtomicExchange] = " exchange "; + 
name[StatAtomicFetchAdd] = " fetch_add "; + name[StatAtomicFetchSub] = " fetch_sub "; + name[StatAtomicFetchAnd] = " fetch_and "; + name[StatAtomicFetchOr] = " fetch_or "; + name[StatAtomicFetchXor] = " fetch_xor "; + name[StatAtomicFetchNand] = " fetch_nand "; + name[StatAtomicCAS] = " compare_exchange "; + name[StatAtomicFence] = " fence "; + name[StatAtomicRelaxed] = " Including relaxed "; + name[StatAtomicConsume] = " consume "; + name[StatAtomicAcquire] = " acquire "; + name[StatAtomicRelease] = " release "; + name[StatAtomicAcq_Rel] = " acq_rel "; + name[StatAtomicSeq_Cst] = " seq_cst "; + name[StatAtomic1] = " Including size 1 "; + name[StatAtomic2] = " size 2 "; + name[StatAtomic4] = " size 4 "; + name[StatAtomic8] = " size 8 "; + name[StatAtomic16] = " size 16 "; + + name[StatAnnotation] = "Dynamic annotations "; + name[StatAnnotateHappensBefore] = " HappensBefore "; + name[StatAnnotateHappensAfter] = " HappensAfter "; + name[StatAnnotateCondVarSignal] = " CondVarSignal "; + name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll "; + name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB "; + name[StatAnnotateCondVarWait] = " CondVarWait "; + name[StatAnnotateRWLockCreate] = " RWLockCreate "; + name[StatAnnotateRWLockCreateStatic] = " RWLockCreateStatic "; + name[StatAnnotateRWLockDestroy] = " RWLockDestroy "; + name[StatAnnotateRWLockAcquired] = " RWLockAcquired "; + name[StatAnnotateRWLockReleased] = " RWLockReleased "; + name[StatAnnotateTraceMemory] = " TraceMemory "; + name[StatAnnotateFlushState] = " FlushState "; + name[StatAnnotateNewMemory] = " NewMemory "; + name[StatAnnotateNoOp] = " NoOp "; + name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces "; + name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection "; + name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar "; + name[StatAnnotatePCQGet] = " PCQGet "; + name[StatAnnotatePCQPut] = " PCQPut "; + name[StatAnnotatePCQDestroy] = " PCQDestroy "; + name[StatAnnotatePCQCreate] = " PCQCreate "; + name[StatAnnotateExpectRace] = " ExpectRace "; + name[StatAnnotateBenignRaceSized] = " BenignRaceSized "; + name[StatAnnotateBenignRace] = " BenignRace "; + name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin "; + name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd "; + name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin "; + name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd "; + name[StatAnnotateIgnoreSyncBegin] = " IgnoreSyncBegin "; + name[StatAnnotateIgnoreSyncEnd] = " IgnoreSyncEnd "; + name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange "; + name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange "; + name[StatAnnotateThreadName] = " ThreadName "; + + name[StatMtxTotal] = "Contentionz "; + name[StatMtxTrace] = " Trace "; + name[StatMtxThreads] = " Threads "; + name[StatMtxReport] = " Report "; + name[StatMtxSyncVar] = " SyncVar "; + name[StatMtxSyncTab] = " SyncTab "; + name[StatMtxSlab] = " Slab "; + name[StatMtxAtExit] = " Atexit "; + name[StatMtxAnnotations] = " Annotations "; + name[StatMtxMBlock] = " MBlock "; + name[StatMtxJavaMBlock] = " JavaMBlock "; + name[StatMtxDeadlockDetector] = " DeadlockDetector "; + name[StatMtxFD] = " FD "; + + Printf("Statistics:\n"); + for (int i = 0; i < StatCnt; i++) + Printf("%s: %16zu\n", name[i], (uptr)stat[i]); +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.h new file mode 100644 index 0000000..0bd949e --- /dev/null +++ 
b/contrib/compiler-rt/lib/tsan/rtl/tsan_stat.h @@ -0,0 +1,182 @@ +//===-- tsan_stat.h ---------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#ifndef TSAN_STAT_H +#define TSAN_STAT_H + +namespace __tsan { + +enum StatType { + // Memory access processing related stuff. + StatMop, + StatMopRead, + StatMopWrite, + StatMop1, // These must be consecutive. + StatMop2, + StatMop4, + StatMop8, + StatMopSame, + StatMopIgnored, + StatMopRange, + StatMopRodata, + StatMopRangeRodata, + StatShadowProcessed, + StatShadowZero, + StatShadowNonZero, // Derived. + StatShadowSameSize, + StatShadowIntersect, + StatShadowNotIntersect, + StatShadowSameThread, + StatShadowAnotherThread, + StatShadowReplace, + + // Func processing. + StatFuncEnter, + StatFuncExit, + + // Trace processing. + StatEvents, + + // Threads. + StatThreadCreate, + StatThreadFinish, + StatThreadReuse, + StatThreadMaxTid, + StatThreadMaxAlive, + + // Mutexes. + StatMutexCreate, + StatMutexDestroy, + StatMutexLock, + StatMutexUnlock, + StatMutexRecLock, + StatMutexRecUnlock, + StatMutexReadLock, + StatMutexReadUnlock, + + // Synchronization. + StatSyncCreated, + StatSyncDestroyed, + StatSyncAcquire, + StatSyncRelease, + + // Clocks - acquire. + StatClockAcquire, + StatClockAcquireEmpty, + StatClockAcquireFastRelease, + StatClockAcquireLarge, + StatClockAcquireRepeat, + StatClockAcquireFull, + StatClockAcquiredSomething, + // Clocks - release. + StatClockRelease, + StatClockReleaseResize, + StatClockReleaseFast1, + StatClockReleaseFast2, + StatClockReleaseSlow, + StatClockReleaseFull, + StatClockReleaseAcquired, + StatClockReleaseClearTail, + // Clocks - release store. + StatClockStore, + StatClockStoreResize, + StatClockStoreFast, + StatClockStoreFull, + StatClockStoreTail, + // Clocks - acquire-release. + StatClockAcquireRelease, + + // Atomics. + StatAtomic, + StatAtomicLoad, + StatAtomicStore, + StatAtomicExchange, + StatAtomicFetchAdd, + StatAtomicFetchSub, + StatAtomicFetchAnd, + StatAtomicFetchOr, + StatAtomicFetchXor, + StatAtomicFetchNand, + StatAtomicCAS, + StatAtomicFence, + StatAtomicRelaxed, + StatAtomicConsume, + StatAtomicAcquire, + StatAtomicRelease, + StatAtomicAcq_Rel, + StatAtomicSeq_Cst, + StatAtomic1, + StatAtomic2, + StatAtomic4, + StatAtomic8, + StatAtomic16, + + // Dynamic annotations. 
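+  // (One counter per Annotate* entry point; the human-readable labels for
+  // these live in the name[] table in tsan_stat.cc above.)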
+ StatAnnotation, + StatAnnotateHappensBefore, + StatAnnotateHappensAfter, + StatAnnotateCondVarSignal, + StatAnnotateCondVarSignalAll, + StatAnnotateMutexIsNotPHB, + StatAnnotateCondVarWait, + StatAnnotateRWLockCreate, + StatAnnotateRWLockCreateStatic, + StatAnnotateRWLockDestroy, + StatAnnotateRWLockAcquired, + StatAnnotateRWLockReleased, + StatAnnotateTraceMemory, + StatAnnotateFlushState, + StatAnnotateNewMemory, + StatAnnotateNoOp, + StatAnnotateFlushExpectedRaces, + StatAnnotateEnableRaceDetection, + StatAnnotateMutexIsUsedAsCondVar, + StatAnnotatePCQGet, + StatAnnotatePCQPut, + StatAnnotatePCQDestroy, + StatAnnotatePCQCreate, + StatAnnotateExpectRace, + StatAnnotateBenignRaceSized, + StatAnnotateBenignRace, + StatAnnotateIgnoreReadsBegin, + StatAnnotateIgnoreReadsEnd, + StatAnnotateIgnoreWritesBegin, + StatAnnotateIgnoreWritesEnd, + StatAnnotateIgnoreSyncBegin, + StatAnnotateIgnoreSyncEnd, + StatAnnotatePublishMemoryRange, + StatAnnotateUnpublishMemoryRange, + StatAnnotateThreadName, + + // Internal mutex contentionz. + StatMtxTotal, + StatMtxTrace, + StatMtxThreads, + StatMtxReport, + StatMtxSyncVar, + StatMtxSyncTab, + StatMtxSlab, + StatMtxAnnotations, + StatMtxAtExit, + StatMtxMBlock, + StatMtxJavaMBlock, + StatMtxDeadlockDetector, + StatMtxFD, + + // This must be the last. + StatCnt +}; + +} // namespace __tsan + +#endif // TSAN_STAT_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.cc new file mode 100644 index 0000000..299fc80 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.cc @@ -0,0 +1,144 @@ +//===-- tsan_suppressions.cc ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "tsan_suppressions.h" +#include "tsan_rtl.h" +#include "tsan_flags.h" +#include "tsan_mman.h" +#include "tsan_platform.h" + +// Suppressions for true/false positives in standard libraries. +static const char *const std_suppressions = +// Libstdc++ 4.4 has data races in std::string. +// See http://crbug.com/181502 for an example. +"race:^_M_rep$\n" +"race:^_M_is_leaked$\n" +// False positive when using std <thread>. +// Happens because we miss atomic synchronization in libstdc++. +// See http://llvm.org/bugs/show_bug.cgi?id=17066 for details. +"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n"; + +// Can be overridden in frontend. 
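+// A frontend that wants extra default suppressions can provide a strong
+// definition of this weak symbol, e.g. (illustrative pattern only):
+//   extern "C" const char *__tsan_default_suppressions() {
+//     return "race:MyLegacyQueue\n";
+//   }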
+#ifndef SANITIZER_GO +extern "C" const char *WEAK __tsan_default_suppressions() { + return 0; +} +#endif + +namespace __tsan { + +static bool suppressions_inited = false; + +void InitializeSuppressions() { + CHECK(!suppressions_inited); + SuppressionContext::InitIfNecessary(); +#ifndef SANITIZER_GO + SuppressionContext::Get()->Parse(__tsan_default_suppressions()); + SuppressionContext::Get()->Parse(std_suppressions); +#endif + suppressions_inited = true; +} + +SuppressionType conv(ReportType typ) { + if (typ == ReportTypeRace) + return SuppressionRace; + else if (typ == ReportTypeVptrRace) + return SuppressionRace; + else if (typ == ReportTypeUseAfterFree) + return SuppressionRace; + else if (typ == ReportTypeVptrUseAfterFree) + return SuppressionRace; + else if (typ == ReportTypeThreadLeak) + return SuppressionThread; + else if (typ == ReportTypeMutexDestroyLocked) + return SuppressionMutex; + else if (typ == ReportTypeMutexDoubleLock) + return SuppressionMutex; + else if (typ == ReportTypeMutexBadUnlock) + return SuppressionMutex; + else if (typ == ReportTypeMutexBadReadLock) + return SuppressionMutex; + else if (typ == ReportTypeMutexBadReadUnlock) + return SuppressionMutex; + else if (typ == ReportTypeSignalUnsafe) + return SuppressionSignal; + else if (typ == ReportTypeErrnoInSignal) + return SuppressionNone; + else if (typ == ReportTypeDeadlock) + return SuppressionDeadlock; + Printf("ThreadSanitizer: unknown report type %d\n", typ), + Die(); +} + +uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) { + if (!SuppressionContext::Get()->SuppressionCount() || stack == 0 || + !stack->suppressable) + return 0; + SuppressionType stype = conv(typ); + if (stype == SuppressionNone) + return 0; + Suppression *s; + for (const SymbolizedStack *frame = stack->frames; frame; + frame = frame->next) { + const AddressInfo &info = frame->info; + if (SuppressionContext::Get()->Match(info.function, stype, &s) || + SuppressionContext::Get()->Match(info.file, stype, &s) || + SuppressionContext::Get()->Match(info.module, stype, &s)) { + DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ); + s->hit_count++; + *sp = s; + return info.address; + } + } + return 0; +} + +uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) { + if (!SuppressionContext::Get()->SuppressionCount() || loc == 0 || + loc->type != ReportLocationGlobal || !loc->suppressable) + return 0; + SuppressionType stype = conv(typ); + if (stype == SuppressionNone) + return 0; + Suppression *s; + const DataInfo &global = loc->global; + if (SuppressionContext::Get()->Match(global.name, stype, &s) || + SuppressionContext::Get()->Match(global.module, stype, &s)) { + DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ); + s->hit_count++; + *sp = s; + return global.start; + } + return 0; +} + +void PrintMatchedSuppressions() { + InternalMmapVector<Suppression *> matched(1); + SuppressionContext::Get()->GetMatched(&matched); + if (!matched.size()) + return; + int hit_count = 0; + for (uptr i = 0; i < matched.size(); i++) + hit_count += matched[i]->hit_count; + Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count, + (int)internal_getpid()); + for (uptr i = 0; i < matched.size(); i++) { + Printf("%d %s:%s\n", matched[i]->hit_count, + SuppressionTypeString(matched[i]->type), matched[i]->templ); + } +} +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.h new file mode 
100644 index 0000000..c618b3d --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_suppressions.h @@ -0,0 +1,28 @@ +//===-- tsan_suppressions.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_SUPPRESSIONS_H +#define TSAN_SUPPRESSIONS_H + +#include "sanitizer_common/sanitizer_suppressions.h" +#include "tsan_report.h" + +namespace __tsan { + +void InitializeSuppressions(); +void PrintMatchedSuppressions(); +uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp); +uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp); + +} // namespace __tsan + +#endif // TSAN_SUPPRESSIONS_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_symbolize.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_symbolize.cc new file mode 100644 index 0000000..3beb44f --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_symbolize.cc @@ -0,0 +1,88 @@ +//===-- tsan_symbolize.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +#include "tsan_symbolize.h" + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_symbolizer.h" +#include "tsan_flags.h" +#include "tsan_report.h" +#include "tsan_rtl.h" + +namespace __tsan { + +void EnterSymbolizer() { + ThreadState *thr = cur_thread(); + CHECK(!thr->in_symbolizer); + thr->in_symbolizer = true; + thr->ignore_interceptors++; +} + +void ExitSymbolizer() { + ThreadState *thr = cur_thread(); + CHECK(thr->in_symbolizer); + thr->in_symbolizer = false; + thr->ignore_interceptors--; +} + +// May be overridden by JIT/JAVA/etc, +// whatever produces PCs marked with kExternalPCBit. +extern "C" bool __tsan_symbolize_external(uptr pc, + char *func_buf, uptr func_siz, + char *file_buf, uptr file_siz, + int *line, int *col) + SANITIZER_WEAK_ATTRIBUTE; + +bool __tsan_symbolize_external(uptr pc, + char *func_buf, uptr func_siz, + char *file_buf, uptr file_siz, + int *line, int *col) { + return false; +} + +SymbolizedStack *SymbolizeCode(uptr addr) { + // Check if PC comes from non-native land. + if (addr & kExternalPCBit) { + // Declare static to not consume too much stack space. + // We symbolize reports in a single thread, so this is fine. 
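+    // A JIT embedder would supply a strong definition of the weak hook
+    // declared above, e.g. (hypothetical):
+    //   extern "C" bool __tsan_symbolize_external(uptr pc,
+    //       char *func_buf, uptr func_siz, char *file_buf, uptr file_siz,
+    //       int *line, int *col) {
+    //     // Look up pc (with kExternalPCBit masked off) in the JIT's tables.
+    //     return found;
+    //   }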
+ static char func_buf[1024]; + static char file_buf[1024]; + int line, col; + SymbolizedStack *frame = SymbolizedStack::New(addr); + if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf, + sizeof(file_buf), &line, &col)) { + frame->info.function = internal_strdup(func_buf); + frame->info.file = internal_strdup(file_buf); + frame->info.line = line; + frame->info.column = col; + } + return frame; + } + return Symbolizer::GetOrInit()->SymbolizePC(addr); +} + +ReportLocation *SymbolizeData(uptr addr) { + DataInfo info; + if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) + return 0; + ReportLocation *ent = ReportLocation::New(ReportLocationGlobal); + ent->global = info; + return ent; +} + +void SymbolizeFlush() { + Symbolizer::GetOrInit()->Flush(); +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_symbolize.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_symbolize.h new file mode 100644 index 0000000..b59b6cf --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_symbolize.h @@ -0,0 +1,35 @@ +//===-- tsan_symbolize.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_SYMBOLIZE_H +#define TSAN_SYMBOLIZE_H + +#include "tsan_defs.h" +#include "tsan_report.h" + +namespace __tsan { + +// Denotes fake PC values that come from JIT/JAVA/etc. +// For such PC values __tsan_symbolize_external() will be called. +const uptr kExternalPCBit = 1ULL << 60; + +void EnterSymbolizer(); +void ExitSymbolizer(); +SymbolizedStack *SymbolizeCode(uptr addr); +ReportLocation *SymbolizeData(uptr addr); +void SymbolizeFlush(); + +ReportStack *NewReportStackEntry(uptr addr); + +} // namespace __tsan + +#endif // TSAN_SYMBOLIZE_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.cc b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.cc new file mode 100644 index 0000000..1041073 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.cc @@ -0,0 +1,225 @@ +//===-- tsan_sync.cc ------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
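+// It implements SyncVar, the per-synchronization-object state (vector
+// clocks, owner, flags), and MetaMap, which maps application addresses to
+// heap-block and sync-object metadata.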
+// +//===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_placement_new.h" +#include "tsan_sync.h" +#include "tsan_rtl.h" +#include "tsan_mman.h" + +namespace __tsan { + +void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s); + +SyncVar::SyncVar() + : mtx(MutexTypeSyncVar, StatMtxSyncVar) { + Reset(0); +} + +void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) { + this->addr = addr; + this->uid = uid; + this->next = 0; + + creation_stack_id = 0; + if (kCppMode) // Go does not use them + creation_stack_id = CurrentStackId(thr, pc); + if (common_flags()->detect_deadlocks) + DDMutexInit(thr, pc, this); +} + +void SyncVar::Reset(ThreadState *thr) { + uid = 0; + creation_stack_id = 0; + owner_tid = kInvalidTid; + last_lock = 0; + recursion = 0; + is_rw = 0; + is_recursive = 0; + is_broken = 0; + is_linker_init = 0; + + if (thr == 0) { + CHECK_EQ(clock.size(), 0); + CHECK_EQ(read_clock.size(), 0); + } else { + clock.Reset(&thr->clock_cache); + read_clock.Reset(&thr->clock_cache); + } +} + +MetaMap::MetaMap() { + atomic_store(&uid_gen_, 0, memory_order_relaxed); +} + +void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) { + u32 idx = block_alloc_.Alloc(&thr->block_cache); + MBlock *b = block_alloc_.Map(idx); + b->siz = sz; + b->tid = thr->tid; + b->stk = CurrentStackId(thr, pc); + u32 *meta = MemToMeta(p); + DCHECK_EQ(*meta, 0); + *meta = idx | kFlagBlock; +} + +uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) { + MBlock* b = GetBlock(p); + if (b == 0) + return 0; + uptr sz = RoundUpTo(b->siz, kMetaShadowCell); + FreeRange(thr, pc, p, sz); + return sz; +} + +void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) { + u32 *meta = MemToMeta(p); + u32 *end = MemToMeta(p + sz); + if (end == meta) + end++; + for (; meta < end; meta++) { + u32 idx = *meta; + *meta = 0; + for (;;) { + if (idx == 0) + break; + if (idx & kFlagBlock) { + block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask); + break; + } else if (idx & kFlagSync) { + DCHECK(idx & kFlagSync); + SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask); + u32 next = s->next; + s->Reset(thr); + sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask); + idx = next; + } else { + CHECK(0); + } + } + } +} + +MBlock* MetaMap::GetBlock(uptr p) { + u32 *meta = MemToMeta(p); + u32 idx = *meta; + for (;;) { + if (idx == 0) + return 0; + if (idx & kFlagBlock) + return block_alloc_.Map(idx & ~kFlagMask); + DCHECK(idx & kFlagSync); + SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); + idx = s->next; + } +} + +SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock) { + return GetAndLock(thr, pc, addr, write_lock, true); +} + +SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) { + return GetAndLock(0, 0, addr, true, false); +} + +SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock, bool create) { + u32 *meta = MemToMeta(addr); + u32 idx0 = *meta; + u32 myidx = 0; + SyncVar *mys = 0; + for (;;) { + u32 idx = idx0; + for (;;) { + if (idx == 0) + break; + if (idx & kFlagBlock) + break; + DCHECK(idx & kFlagSync); + SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); + if (s->addr == addr) { + if (myidx != 0) { + mys->Reset(thr); + sync_alloc_.Free(&thr->sync_cache, myidx); + } + if (write_lock) + s->mtx.Lock(); + else + s->mtx.ReadLock(); + return s; + } + idx = s->next; + } + if (!create) + return 0; + if (*meta != idx0) { + idx0 = *meta; + continue; + } + + if (myidx == 0) { + const u64 uid 
= atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); + myidx = sync_alloc_.Alloc(&thr->sync_cache); + mys = sync_alloc_.Map(myidx); + mys->Init(thr, pc, addr, uid); + } + mys->next = idx0; + if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0, + myidx | kFlagSync, memory_order_release)) { + if (write_lock) + mys->mtx.Lock(); + else + mys->mtx.ReadLock(); + return mys; + } + } +} + +void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) { + // src and dst can overlap, and + // there are no concurrent accesses to the regions (e.g. stop-the-world). + CHECK_NE(src, dst); + CHECK_NE(sz, 0); + uptr diff = dst - src; + u32 *src_meta = MemToMeta(src); + u32 *dst_meta = MemToMeta(dst); + u32 *src_meta_end = MemToMeta(src + sz); + uptr inc = 1; + if (dst > src) { + src_meta = MemToMeta(src + sz) - 1; + dst_meta = MemToMeta(dst + sz) - 1; + src_meta_end = MemToMeta(src) - 1; + inc = -1; + } + for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) { + CHECK_EQ(*dst_meta, 0); + u32 idx = *src_meta; + *src_meta = 0; + *dst_meta = idx; + // Patch the addresses in sync objects. + while (idx != 0) { + if (idx & kFlagBlock) + break; + CHECK(idx & kFlagSync); + SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask); + s->addr += diff; + idx = s->next; + } + } +} + +void MetaMap::OnThreadIdle(ThreadState *thr) { + block_alloc_.FlushCache(&thr->block_cache); + sync_alloc_.FlushCache(&thr->sync_cache); +} + +} // namespace __tsan diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.h new file mode 100644 index 0000000..574810d --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_sync.h @@ -0,0 +1,103 @@ +//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_SYNC_H +#define TSAN_SYNC_H + +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_deadlock_detector_interface.h" +#include "tsan_defs.h" +#include "tsan_clock.h" +#include "tsan_mutex.h" +#include "tsan_dense_alloc.h" + +namespace __tsan { + +struct SyncVar { + SyncVar(); + + static const int kInvalidTid = -1; + + uptr addr; // overwritten by DenseSlabAlloc freelist + Mutex mtx; + u64 uid; // Globally unique id. + u32 creation_stack_id; + int owner_tid; // Set only by exclusive owners. + u64 last_lock; + int recursion; + bool is_rw; + bool is_recursive; + bool is_broken; + bool is_linker_init; + u32 next; // in MetaMap + DDMutex dd; + SyncClock read_clock; // Used for rw mutexes only. + // The clock is placed last, so that it is situated on a different cache line + // from the mtx. This reduces contention for hot sync objects. + SyncClock clock; + + void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid); + void Reset(ThreadState *thr); + + u64 GetId() const { + // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits. 
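+    // Example: for addr = 0x7f2a00001000 and uid = 5 the id is
+    // 0x7f2a00001000 | (5 << 47), truncated to 61 bits; SplitId() recovers
+    // uid as id >> 47 and addr as the low 47 bits, while CheckId() compares
+    // only the low 14 bits of the uid.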
+ return GetLsb((u64)addr | (uid << 47), 61); + } + bool CheckId(u64 uid) const { + CHECK_EQ(uid, GetLsb(uid, 14)); + return GetLsb(this->uid, 14) == uid; + } + static uptr SplitId(u64 id, u64 *uid) { + *uid = id >> 47; + return (uptr)GetLsb(id, 47); + } +}; + +/* MetaMap maps arbitrary user pointers onto various descriptors. + Currently it maps pointers to heap block descriptors and sync var descs. + It uses 1/2 direct shadow, see tsan_platform.h. +*/ +class MetaMap { + public: + MetaMap(); + + void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz); + uptr FreeBlock(ThreadState *thr, uptr pc, uptr p); + void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz); + MBlock* GetBlock(uptr p); + + SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc, + uptr addr, bool write_lock); + SyncVar* GetIfExistsAndLock(uptr addr); + + void MoveMemory(uptr src, uptr dst, uptr sz); + + void OnThreadIdle(ThreadState *thr); + + private: + static const u32 kFlagMask = 3 << 30; + static const u32 kFlagBlock = 1 << 30; + static const u32 kFlagSync = 2 << 30; + typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc; + typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc; + BlockAlloc block_alloc_; + SyncAlloc sync_alloc_; + atomic_uint64_t uid_gen_; + + SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock, + bool create); +}; + +} // namespace __tsan + +#endif // TSAN_SYNC_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_trace.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_trace.h new file mode 100644 index 0000000..1da8752 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_trace.h @@ -0,0 +1,72 @@ +//===-- tsan_trace.h --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_TRACE_H +#define TSAN_TRACE_H + +#include "tsan_defs.h" +#include "tsan_mutex.h" +#include "tsan_stack_trace.h" +#include "tsan_mutexset.h" + +namespace __tsan { + +const int kTracePartSizeBits = 14; +const int kTracePartSize = 1 << kTracePartSizeBits; +const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize; +const int kTraceSize = kTracePartSize * kTraceParts; + +// Must fit into 3 bits. +enum EventType { + EventTypeMop, + EventTypeFuncEnter, + EventTypeFuncExit, + EventTypeLock, + EventTypeUnlock, + EventTypeRLock, + EventTypeRUnlock +}; + +// Represents a thread event (from most significant bit): +// u64 typ : 3; // EventType. +// u64 addr : 61; // Associated pc. +typedef u64 Event; + +struct TraceHeader { +#ifndef SANITIZER_GO + BufferedStackTrace stack0; // Start stack for the trace. +#else + VarSizeStackTrace stack0; +#endif + u64 epoch0; // Start epoch for the trace. + MutexSet mset0; + + TraceHeader() : stack0(), epoch0() {} +}; + +struct Trace { + TraceHeader headers[kTraceParts]; + Mutex mtx; +#ifndef SANITIZER_GO + // Must be last to catch overflow as a paging fault. + // Go shadow stack is dynamically allocated. 
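+  // A push past the end of this array lands on an unmapped page, so an
+  // overflowing shadow stack faults immediately instead of silently
+  // corrupting adjacent data.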
uptr shadow_stack[kShadowStackSize]; +#endif + + Trace() + : mtx(MutexTypeTrace, StatMtxTrace) { + } +}; + +} // namespace __tsan + +#endif // TSAN_TRACE_H diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h new file mode 100644 index 0000000..c80e0a8 --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h @@ -0,0 +1,65 @@ +//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Body of the hottest inner loop. +// If we wrap this body into a function, compilers (both gcc and clang) +// produce slightly less efficient code. +//===----------------------------------------------------------------------===// +do { + StatInc(thr, StatShadowProcessed); + const unsigned kAccessSize = 1 << kAccessSizeLog; + u64 *sp = &shadow_mem[idx]; + old = LoadShadow(sp); + if (old.IsZero()) { + StatInc(thr, StatShadowZero); + if (store_word) + StoreIfNotYetStored(sp, &store_word); + // The above StoreIfNotYetStored could be done unconditionally + // and it even shows 4% gain on synthetic benchmarks (r4307). + break; + } + // Is the memory access equal to the previous one? + if (Shadow::Addr0AndSizeAreEqual(cur, old)) { + StatInc(thr, StatShadowSameSize); + // same thread? + if (Shadow::TidsAreEqual(old, cur)) { + StatInc(thr, StatShadowSameThread); + if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) + StoreIfNotYetStored(sp, &store_word); + break; + } + StatInc(thr, StatShadowAnotherThread); + if (HappensBefore(old, thr)) { + StoreIfNotYetStored(sp, &store_word); + break; + } + if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) + break; + goto RACE; + } + // Do the memory accesses intersect? + if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) { + StatInc(thr, StatShadowIntersect); + if (Shadow::TidsAreEqual(old, cur)) { + StatInc(thr, StatShadowSameThread); + break; + } + StatInc(thr, StatShadowAnotherThread); + if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) + break; + if (HappensBefore(old, thr)) + break; + goto RACE; + } + // The accesses do not intersect. + StatInc(thr, StatShadowNotIntersect); + break; +} while (0); diff --git a/contrib/compiler-rt/lib/tsan/rtl/tsan_vector.h b/contrib/compiler-rt/lib/tsan/rtl/tsan_vector.h new file mode 100644 index 0000000..a7fb3fa --- /dev/null +++ b/contrib/compiler-rt/lib/tsan/rtl/tsan_vector.h @@ -0,0 +1,127 @@ +//===-- tsan_vector.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +//===----------------------------------------------------------------------===// + +// Low-fat STL-like vector container. 
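+// Unlike std::vector it uses no exceptions and runs no element constructors:
+// storage comes from internal_alloc tagged with an explicit MBlockType,
+// PushBack() zero-fills the new slot, and capacity grows by 25% per
+// reallocation (see EnsureSize below).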
+ +#ifndef TSAN_VECTOR_H +#define TSAN_VECTOR_H + +#include "tsan_defs.h" +#include "tsan_mman.h" + +namespace __tsan { + +template<typename T> +class Vector { + public: + explicit Vector(MBlockType typ) + : typ_(typ) + , begin_() + , end_() + , last_() { + } + + ~Vector() { + if (begin_) + internal_free(begin_); + } + + void Reset() { + if (begin_) + internal_free(begin_); + begin_ = 0; + end_ = 0; + last_ = 0; + } + + uptr Size() const { + return end_ - begin_; + } + + T &operator[](uptr i) { + DCHECK_LT(i, end_ - begin_); + return begin_[i]; + } + + const T &operator[](uptr i) const { + DCHECK_LT(i, end_ - begin_); + return begin_[i]; + } + + T *PushBack() { + EnsureSize(Size() + 1); + T *p = &end_[-1]; + internal_memset(p, 0, sizeof(*p)); + return p; + } + + T *PushBack(const T& v) { + EnsureSize(Size() + 1); + T *p = &end_[-1]; + internal_memcpy(p, &v, sizeof(*p)); + return p; + } + + void PopBack() { + DCHECK_GT(end_, begin_); + end_--; + } + + void Resize(uptr size) { + if (size == 0) { + end_ = begin_; + return; + } + uptr old_size = Size(); + EnsureSize(size); + if (old_size < size) { + for (uptr i = old_size; i < size; i++) + internal_memset(&begin_[i], 0, sizeof(begin_[i])); + } + } + + private: + const MBlockType typ_; + T *begin_; + T *end_; + T *last_; + + void EnsureSize(uptr size) { + if (size <= Size()) + return; + if (size <= (uptr)(last_ - begin_)) { + end_ = begin_ + size; + return; + } + uptr cap0 = last_ - begin_; + uptr cap = cap0 * 5 / 4; // 25% growth + if (cap == 0) + cap = 16; + if (cap < size) + cap = size; + T *p = (T*)internal_alloc(typ_, cap * sizeof(T)); + if (cap0) { + internal_memcpy(p, begin_, cap0 * sizeof(T)); + internal_free(begin_); + } + begin_ = p; + end_ = begin_ + size; + last_ = begin_ + cap; + } + + Vector(const Vector&); + void operator=(const Vector&); +}; +} // namespace __tsan + +#endif // #ifndef TSAN_VECTOR_H