author | dim <dim@FreeBSD.org> | 2015-01-08 19:47:10 +0000
committer | dim <dim@FreeBSD.org> | 2015-01-08 19:47:10 +0000
commit | ab328f15cea04a45750ef56019d2b3d971e033f3 (patch)
tree | f47eabbd2a48be6d6fec3ddeeefae5b4aeb87dbc /contrib/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cc
parent | 8189659be8e499f37c87fdd05ef5ec9f88619d56 (diff)
parent | 2f1c5cc1039d86db0037cb086bd58f4b90dc6f66 (diff)
download | FreeBSD-src-ab328f15cea04a45750ef56019d2b3d971e033f3.zip, FreeBSD-src-ab328f15cea04a45750ef56019d2b3d971e033f3.tar.gz
Update compiler-rt to trunk r224034. This brings a number of new
builtins, and also the various sanitizers. Support for these will be
added in a later commit.
Diffstat (limited to 'contrib/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cc')
-rw-r--r-- | contrib/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cc | 160
1 file changed, 160 insertions(+), 0 deletions(-)
diff --git a/contrib/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cc b/contrib/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cc
new file mode 100644
index 0000000..f10f1f9
--- /dev/null
+++ b/contrib/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -0,0 +1,160 @@
+//===-- sanitizer_stackdepot.cc -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stackdepot.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_stackdepotbase.h"
+
+namespace __sanitizer {
+
+struct StackDepotNode {
+  StackDepotNode *link;
+  u32 id;
+  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
+  uptr size;
+  uptr stack[1];  // [size]
+
+  static const u32 kTabSizeLog = 20;
+  // Lower kTabSizeLog bits are equal for all items in one bucket.
+  // We use these bits to store the per-stack use counter.
+  static const u32 kUseCountBits = kTabSizeLog;
+  static const u32 kMaxUseCount = 1 << kUseCountBits;
+  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+  static const u32 kHashMask = ~kUseCountMask;
+
+  typedef StackTrace args_type;
+  bool eq(u32 hash, const args_type &args) const {
+    u32 hash_bits =
+        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
+    if ((hash & kHashMask) != hash_bits || args.size != size) return false;
+    uptr i = 0;
+    for (; i < size; i++) {
+      if (stack[i] != args.trace[i]) return false;
+    }
+    return true;
+  }
+  static uptr storage_size(const args_type &args) {
+    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+  }
+  static u32 hash(const args_type &args) {
+    // murmur2
+    const u32 m = 0x5bd1e995;
+    const u32 seed = 0x9747b28c;
+    const u32 r = 24;
+    u32 h = seed ^ (args.size * sizeof(uptr));
+    for (uptr i = 0; i < args.size; i++) {
+      u32 k = args.trace[i];
+      k *= m;
+      k ^= k >> r;
+      k *= m;
+      h *= m;
+      h ^= k;
+    }
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+    return h;
+  }
+  static bool is_valid(const args_type &args) {
+    return args.size > 0 && args.trace;
+  }
+  void store(const args_type &args, u32 hash) {
+    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+    size = args.size;
+    internal_memcpy(stack, args.trace, size * sizeof(uptr));
+  }
+  args_type load() const {
+    return args_type(&stack[0], size);
+  }
+  StackDepotHandle get_handle() { return StackDepotHandle(this); }
+
+  typedef StackDepotHandle handle_type;
+};
+
+COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
+
+u32 StackDepotHandle::id() { return node_->id; }
+int StackDepotHandle::use_count() {
+  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+         StackDepotNode::kUseCountMask;
+}
+void StackDepotHandle::inc_use_count_unsafe() {
+  u32 prev =
+      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+      StackDepotNode::kUseCountMask;
+  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
+}
+
+// FIXME(dvyukov): this single reserved bit is used in TSan.
+typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
+    StackDepot;
+static StackDepot theDepot;
+
+StackDepotStats *StackDepotGetStats() {
+  return theDepot.GetStats();
+}
+
+u32 StackDepotPut(StackTrace stack) {
+  StackDepotHandle h = theDepot.Put(stack);
+  return h.valid() ? h.id() : 0;
+}
+
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
+  return theDepot.Put(stack);
+}
+
+StackTrace StackDepotGet(u32 id) {
+  return theDepot.Get(id);
+}
+
+void StackDepotLockAll() {
+  theDepot.LockAll();
+}
+
+void StackDepotUnlockAll() {
+  theDepot.UnlockAll();
+}
+
+bool StackDepotReverseMap::IdDescPair::IdComparator(
+    const StackDepotReverseMap::IdDescPair &a,
+    const StackDepotReverseMap::IdDescPair &b) {
+  return a.id < b.id;
+}
+
+StackDepotReverseMap::StackDepotReverseMap()
+    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
+  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
+    atomic_uintptr_t *p = &theDepot.tab[idx];
+    uptr v = atomic_load(p, memory_order_consume);
+    StackDepotNode *s = (StackDepotNode*)(v & ~1);
+    for (; s; s = s->link) {
+      IdDescPair pair = {s->id, s};
+      map_.push_back(pair);
+    }
+  }
+  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
+}
+
+StackTrace StackDepotReverseMap::Get(u32 id) {
+  if (!map_.size())
+    return StackTrace();
+  IdDescPair pair = {id, 0};
+  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
+                                  IdDescPair::IdComparator);
+  if (idx > map_.size())
+    return StackTrace();
+  return map_[idx].desc->load();
+}
+
+}  // namespace __sanitizer
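The `hash_and_use_count` field in `StackDepotNode` above packs two values into a single 32-bit atomic word: the upper 12 bits keep part of the stack hash (the lower 20 bits of the hash already determine the bucket), and the lower 20 bits hold a per-stack use counter. The following standalone sketch is not part of the commit; it only mirrors the `kUseCountMask`/`kHashMask` constants from the diff to illustrate the packing, and the helper names (`pack_hash`, `hash_bits`, `use_count`, `bump_use_count`) are invented for this example.

```cpp
// Standalone illustration of the hash/use-count bit packing used by
// StackDepotNode. Constants mirror the diff; helper names are made up.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace demo {

constexpr uint32_t kTabSizeLog   = 20;
constexpr uint32_t kUseCountBits = kTabSizeLog;                // low 20 bits
constexpr uint32_t kMaxUseCount  = 1u << kUseCountBits;
constexpr uint32_t kUseCountMask = (1u << kUseCountBits) - 1;  // 0x000fffff
constexpr uint32_t kHashMask     = ~kUseCountMask;             // 0xfff00000

// Keep only the high hash bits; the low bits select the bucket anyway.
uint32_t pack_hash(uint32_t full_hash) { return full_hash & kHashMask; }

uint32_t hash_bits(const std::atomic<uint32_t> &w) {
  return w.load(std::memory_order_relaxed) & kHashMask;
}

uint32_t use_count(const std::atomic<uint32_t> &w) {
  return w.load(std::memory_order_relaxed) & kUseCountMask;
}

// Analogous to inc_use_count_unsafe(): the relaxed increment only touches
// the low bits as long as the counter stays below kMaxUseCount.
void bump_use_count(std::atomic<uint32_t> &w) {
  uint32_t prev = w.fetch_add(1, std::memory_order_relaxed) & kUseCountMask;
  assert(prev + 1 < kMaxUseCount && "use counter would overflow into hash");
}

}  // namespace demo

int main() {
  std::atomic<uint32_t> word{demo::pack_hash(0xdeadbeef)};
  demo::bump_use_count(word);
  demo::bump_use_count(word);
  std::printf("hash bits: %#x, use count: %u\n",
              static_cast<unsigned>(demo::hash_bits(word)),
              static_cast<unsigned>(demo::use_count(word)));
  assert(demo::hash_bits(word) == (0xdeadbeef & demo::kHashMask));
  assert(demo::use_count(word) == 2);
  return 0;
}
```

Because the counter occupies the low bits, a plain `fetch_add(1)` is enough to bump it without disturbing the stored hash fragment, which is why the runtime can get away with a single relaxed atomic for both pieces of state.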
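`StackDepotReverseMap` in the code above walks every hash bucket once, collects (id, node) pairs into a flat array, sorts them by id, and then answers id-to-stack lookups with a binary search instead of a second hash table. Below is a minimal standalone analogue of that pattern using `std::vector`, `std::sort`, and `std::lower_bound` in place of the runtime's `InternalSort`/`InternalBinarySearch`; the `ReverseMap` type, the `desc` string payload, and the sample data are invented for illustration.

```cpp
// Sketch of the sorted-vector reverse lookup used by StackDepotReverseMap:
// build once, sort by id, then binary-search on every query.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

struct IdDescPair {
  uint32_t id;
  const char *desc;  // stands in for the StackDepotNode* payload
};

class ReverseMap {
 public:
  explicit ReverseMap(std::vector<IdDescPair> pairs) : map_(std::move(pairs)) {
    // One sort at construction time; lookups never mutate the vector.
    std::sort(map_.begin(), map_.end(),
              [](const IdDescPair &a, const IdDescPair &b) { return a.id < b.id; });
  }

  // Returns nullptr when the id is unknown, mirroring the empty
  // StackTrace() returned by StackDepotReverseMap::Get().
  const char *Get(uint32_t id) const {
    auto it = std::lower_bound(
        map_.begin(), map_.end(), id,
        [](const IdDescPair &p, uint32_t key) { return p.id < key; });
    if (it == map_.end() || it->id != id) return nullptr;
    return it->desc;
  }

 private:
  std::vector<IdDescPair> map_;
};

int main() {
  ReverseMap map({{7, "stack #7"}, {3, "stack #3"}, {42, "stack #42"}});
  std::printf("%s\n", map.Get(42));                      // stack #42
  std::printf("%s\n", map.Get(5) ? map.Get(5) : "miss"); // miss
  return 0;
}
```

The trade-off is the same as in the runtime code: the map is built eagerly (and can go stale if new stacks are added afterwards), but each lookup is O(log n) over a compact, cache-friendly array.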