Diffstat (limited to 'lib/asan/asan_allocator.cc')
-rw-r--r--  lib/asan/asan_allocator.cc  566
1 file changed, 323 insertions(+), 243 deletions(-)
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index f86dc0b..352cce00 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -1,4 +1,4 @@
-//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
+//===-- asan_allocator.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -34,50 +34,68 @@
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
+#include "sanitizer_common/sanitizer_atomic.h"
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
+#if defined(_WIN32) && !defined(__clang__)
+#include <intrin.h>
+#endif
namespace __asan {
-#define REDZONE FLAG_redzone
-static const size_t kMinAllocSize = REDZONE * 2;
-static const size_t kMinMmapSize = 4UL << 20; // 4M
-static const uint64_t kMaxAvailableRam = 128ULL << 30; // 128G
-static const size_t kMaxThreadLocalQuarantine = 1 << 20; // 1M
-static const size_t kMaxSizeForThreadLocalFreeList = 1 << 17;
+#define REDZONE ((uptr)(flags()->redzone))
+static const uptr kMinAllocSize = REDZONE * 2;
+static const u64 kMaxAvailableRam = 128ULL << 30; // 128G
+static const uptr kMaxThreadLocalQuarantine = 1 << 20; // 1M
+
+static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
+static const uptr kMaxSizeForThreadLocalFreeList =
+ (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;
// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
-static const size_t kMallocSizeClassStepLog = 26;
-static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
+static const uptr kMallocSizeClassStepLog = 26;
+static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
-#if __WORDSIZE == 32
-static const size_t kMaxAllowedMallocSize = 3UL << 30; // 3G
-#else
-static const size_t kMaxAllowedMallocSize = 8UL << 30; // 8G
-#endif
+static const uptr kMaxAllowedMallocSize =
+ (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
-static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
+static inline bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
-static inline size_t Log2(size_t x) {
+static inline uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x));
+#if !defined(_WIN32) || defined(__clang__)
return __builtin_ctzl(x);
+#elif defined(_WIN64)
+ unsigned long ret; // NOLINT
+ _BitScanForward64(&ret, x);
+ return ret;
+#else
+ unsigned long ret; // NOLINT
+ _BitScanForward(&ret, x);
+ return ret;
+#endif
}
-static inline size_t RoundUpToPowerOfTwo(size_t size) {
+static inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
if (IsPowerOfTwo(size)) return size;
- size_t up = __WORDSIZE - __builtin_clzl(size);
- CHECK(size < (1ULL << up));
- CHECK(size > (1ULL << (up - 1)));
- return 1UL << up;
+
+ unsigned long up; // NOLINT
+#if !defined(_WIN32) || defined(__clang__)
+ up = __WORDSIZE - 1 - __builtin_clzl(size);
+#elif defined(_WIN64)
+ _BitScanReverse64(&up, size);
+#else
+ _BitScanReverse(&up, size);
+#endif
+ CHECK(size < (1ULL << (up + 1)));
+ CHECK(size > (1ULL << up));
+ return 1UL << (up + 1);
}
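The Windows branches above substitute _BitScanForward/_BitScanReverse for Clang's builtins. Independent of which intrinsic is used, the round-up logic itself can be checked against a portable bit-smearing sketch (an illustration only, not the runtime's code):

    #include <cassert>
    #include <cstdint>

    static inline uint64_t RoundUpToPowerOfTwoSketch(uint64_t size) {
      assert(size != 0);
      if ((size & (size - 1)) == 0) return size;   // already a power of two
      // Smear the highest set bit into every lower position, then add one.
      size |= size >> 1;  size |= size >> 2;  size |= size >> 4;
      size |= size >> 8;  size |= size >> 16; size |= size >> 32;
      return size + 1;
    }

    int main() {
      assert(RoundUpToPowerOfTwoSketch(24) == 32);  // rounds up
      assert(RoundUpToPowerOfTwoSketch(64) == 64);  // powers of two unchanged
    }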
-static inline size_t SizeClassToSize(uint8_t size_class) {
+static inline uptr SizeClassToSize(u8 size_class) {
CHECK(size_class < kNumberOfSizeClasses);
if (size_class <= kMallocSizeClassStepLog) {
return 1UL << size_class;
@@ -86,10 +104,10 @@ static inline size_t SizeClassToSize(uint8_t size_class) {
}
}
-static inline uint8_t SizeToSizeClass(size_t size) {
- uint8_t res = 0;
+static inline u8 SizeToSizeClass(uptr size) {
+ u8 res = 0;
if (size <= kMallocSizeClassStep) {
- size_t rounded = RoundUpToPowerOfTwo(size);
+ uptr rounded = RoundUpToPowerOfTwo(size);
res = Log2(rounded);
} else {
res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
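The two conversion functions implement the scheme described in the comment above: sizes up to kMallocSizeClassStep get power-of-two classes, larger sizes get one class per multiple of the step. A hedged sketch of the mapping follows; the hunk truncates the last line of SizeToSizeClass, so the sketch assumes it adds kMallocSizeClassStepLog, which is the only choice consistent with SizeClassToSize being its inverse:

    #include <cassert>
    #include <cstdint>

    static const uint64_t kStepLog = 26;             // kMallocSizeClassStepLog
    static const uint64_t kStep = 1ULL << kStepLog;  // kMallocSizeClassStep

    static uint64_t SizeToSizeClassSketch(uint64_t size) {
      if (size <= kStep) {
        uint64_t c = 0;                  // power-of-two regime: class = log2
        while ((1ULL << c) < size) c++;
        return c;
      }
      // Linear regime: one class per multiple of kStep, offset past kStepLog.
      return (size + kStep - 1) / kStep + kStepLog;
    }

    int main() {
      assert(SizeToSizeClassSketch(24) == 5);        // rounded to 32 == 1 << 5
      assert(SizeToSizeClassSketch(1 << 20) == 20);  // exact power of two
      assert(SizeToSizeClassSketch(kStep + 1) == kStepLog + 2);  // two steps
    }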
@@ -102,7 +120,7 @@ static inline uint8_t SizeToSizeClass(size_t size) {
// Given REDZONE bytes, we need to mark the first 'size' bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
-static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
+static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
CHECK(size <= REDZONE);
CHECK(IsAligned(mem, REDZONE));
CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
@@ -112,11 +130,11 @@ static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
kAsanHeapRightRedzoneMagic);
}
-static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
+static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
CHECK(IsAligned(size, kPageSize));
- uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
- PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
- if (FLAG_debug) {
+ u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
+ PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
+ if (flags()->debug) {
Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
}
return res;
@@ -128,103 +146,114 @@ static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
-// the beginning of a AsanChunk (in which case 'next' contains the address
-// of the AsanChunk).
+// the beginning of an AsanChunk (in which case the actual chunk resides at
+// this - this->used_size).
//
// The magic numbers for the enum values are taken randomly.
enum {
- CHUNK_AVAILABLE = 0x573B,
- CHUNK_ALLOCATED = 0x3204,
- CHUNK_QUARANTINE = 0x1978,
- CHUNK_MEMALIGN = 0xDC68,
+ CHUNK_AVAILABLE = 0x57,
+ CHUNK_ALLOCATED = 0x32,
+ CHUNK_QUARANTINE = 0x19,
+ CHUNK_MEMALIGN = 0xDC
};
struct ChunkBase {
- uint16_t chunk_state;
- uint8_t size_class;
- uint32_t offset; // User-visible memory starts at this+offset (beg()).
- int32_t alloc_tid;
- int32_t free_tid;
- size_t used_size; // Size requested by the user.
+ // First 8 bytes.
+ uptr chunk_state : 8;
+ uptr alloc_tid : 24;
+ uptr size_class : 8;
+ uptr free_tid : 24;
+
+ // Second 8 bytes.
+ uptr alignment_log : 8;
+ uptr used_size : FIRST_32_SECOND_64(32, 56); // Size requested by the user.
+
+ // This field may overlap with the user area and thus should not
+ // be used while the chunk is in CHUNK_ALLOCATED state.
AsanChunk *next;
- uintptr_t beg() { return (uintptr_t)this + offset; }
- size_t Size() { return SizeClassToSize(size_class); }
- uint8_t SizeClass() { return size_class; }
+ // Typically the beginning of the user-accessible memory is 'this'+REDZONE
+ // and is also aligned by REDZONE. However, if the memory is allocated
+ // by memalign, the alignment might be higher and the user-accessible memory
+ // starts at the first properly aligned address after 'this'.
+ uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
+ uptr Size() { return SizeClassToSize(size_class); }
+ u8 SizeClass() { return size_class; }
};
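This rewrite packs the header into two machine words: the first word holds the state byte, the two 24-bit thread ids, and the size class (which is why the enum values above shrank from 16 bits to 8), and the second holds the alignment log plus the user-requested size. A minimal sketch of the 64-bit layout, with a static_assert pinning its size; field widths are copied from the diff, but the type itself is illustrative:

    #include <cstdint>

    struct ChunkHeaderSketch {
      // First 8 bytes: state, allocating tid, size class, freeing tid.
      uint64_t chunk_state : 8;
      uint64_t alloc_tid : 24;
      uint64_t size_class : 8;
      uint64_t free_tid : 24;
      // Second 8 bytes: log2 of the alignment and the user-requested size.
      uint64_t alignment_log : 8;
      uint64_t used_size : 56;
    };

    // Two machine words, so the redzone keeps room for the compressed
    // allocation stack immediately after the header.
    static_assert(sizeof(ChunkHeaderSketch) == 16, "header should be 16 bytes");

    int main() {}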
struct AsanChunk: public ChunkBase {
- uint32_t *compressed_alloc_stack() {
- CHECK(REDZONE >= sizeof(ChunkBase));
- return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
+ u32 *compressed_alloc_stack() {
+ return (u32*)((uptr)this + sizeof(ChunkBase));
}
- uint32_t *compressed_free_stack() {
- CHECK(REDZONE >= sizeof(ChunkBase));
- return (uint32_t*)((uintptr_t)this + REDZONE);
+ u32 *compressed_free_stack() {
+ return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
}
// The left redzone after the ChunkBase is given to the alloc stack trace.
- size_t compressed_alloc_stack_size() {
- return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
+ uptr compressed_alloc_stack_size() {
+ if (REDZONE < sizeof(ChunkBase)) return 0;
+ return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
}
- size_t compressed_free_stack_size() {
- return (REDZONE) / sizeof(uint32_t);
+ uptr compressed_free_stack_size() {
+ if (REDZONE < sizeof(ChunkBase)) return 0;
+ return (REDZONE) / sizeof(u32);
}
- bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
- if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
- *offset = addr - beg();
+ bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
+ if (addr >= Beg() && (addr + access_size) <= (Beg() + used_size)) {
+ *offset = addr - Beg();
return true;
}
return false;
}
- bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
- if (addr < beg()) {
- *offset = beg() - addr;
+ bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
+ if (addr < Beg()) {
+ *offset = Beg() - addr;
return true;
}
return false;
}
- bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
- if (addr + access_size >= beg() + used_size) {
- if (addr <= beg() + used_size)
+ bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
+ if (addr + access_size >= Beg() + used_size) {
+ if (addr <= Beg() + used_size)
*offset = 0;
else
- *offset = addr - (beg() + used_size);
+ *offset = addr - (Beg() + used_size);
return true;
}
return false;
}
- void DescribeAddress(uintptr_t addr, size_t access_size) {
- size_t offset;
- Printf("%p is located ", addr);
+ void DescribeAddress(uptr addr, uptr access_size) {
+ uptr offset;
+ AsanPrintf("%p is located ", (void*)addr);
if (AddrIsInside(addr, access_size, &offset)) {
- Printf("%ld bytes inside of", offset);
+ AsanPrintf("%zu bytes inside of", offset);
} else if (AddrIsAtLeft(addr, access_size, &offset)) {
- Printf("%ld bytes to the left of", offset);
+ AsanPrintf("%zu bytes to the left of", offset);
} else if (AddrIsAtRight(addr, access_size, &offset)) {
- Printf("%ld bytes to the right of", offset);
+ AsanPrintf("%zu bytes to the right of", offset);
} else {
- Printf(" somewhere around (this is AddressSanitizer bug!)");
+ AsanPrintf(" somewhere around (this is AddressSanitizer bug!)");
}
- Printf(" %lu-byte region [%p,%p)\n",
- used_size, beg(), beg() + used_size);
+ AsanPrintf(" %zu-byte region [%p,%p)\n",
+ used_size, (void*)Beg(), (void*)(Beg() + used_size));
}
};
-static AsanChunk *PtrToChunk(uintptr_t ptr) {
+static AsanChunk *PtrToChunk(uptr ptr) {
AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
if (m->chunk_state == CHUNK_MEMALIGN) {
- m = m->next;
+ m = (AsanChunk*)((uptr)m - m->used_size);
}
return m;
}
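The CHUNK_MEMALIGN trick above can be pictured as follows: Allocate() plants a one-header pseudo-chunk right before the aligned user address and stores the distance back to the real header in its used_size field, so PtrToChunk can always step left by REDZONE and then follow the back-offset. A hedged sketch of the round trip (HeaderSketch stands in for AsanChunk; offsets are illustrative):

    #include <cassert>
    #include <cstdint>

    struct HeaderSketch {
      uint8_t chunk_state;
      uintptr_t used_size;  // for a MEMALIGN pseudo-chunk: offset to real header
    };

    enum { STATE_ALLOCATED = 0x32, STATE_MEMALIGN = 0xDC };

    int main() {
      const uintptr_t kRedzone = 128;
      alignas(64) static char heap[4096];

      HeaderSketch *real = (HeaderSketch*)heap;       // the real chunk header
      real->chunk_state = STATE_ALLOCATED;

      uintptr_t user = (uintptr_t)heap + 1024;        // memalign'ed user address
      HeaderSketch *pseudo = (HeaderSketch*)(user - kRedzone);
      pseudo->chunk_state = STATE_MEMALIGN;
      pseudo->used_size = (uintptr_t)pseudo - (uintptr_t)real;  // back-offset

      // PtrToChunk: step left by REDZONE, then follow the back-offset if the
      // header found there is only a memalign marker.
      HeaderSketch *m = (HeaderSketch*)(user - kRedzone);
      if (m->chunk_state == STATE_MEMALIGN)
        m = (HeaderSketch*)((uintptr_t)m - m->used_size);
      assert(m == real);
    }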
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
+ CHECK(q->size() > 0);
if (last_) {
CHECK(first_);
CHECK(!last_->next);
@@ -234,13 +263,16 @@ void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
CHECK(!first_);
last_ = q->last_;
first_ = q->first_;
+ CHECK(first_);
}
+ CHECK(last_);
+ CHECK(!last_->next);
size_ += q->size();
q->clear();
}
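PushList above splices an entire thread-local list onto the global quarantine in O(1) by linking the tail's next pointer to the incoming head. A minimal sketch of that intrusive FIFO, assuming only the embedded 'next' link matters (size bookkeeping omitted):

    #include <cassert>

    struct Node { Node *next; };

    struct FifoSketch {
      Node *first_ = nullptr;
      Node *last_ = nullptr;
      void Push(Node *n) {
        n->next = nullptr;
        if (last_) last_->next = n; else first_ = n;
        last_ = n;
      }
      // O(1) splice: hang all of 'q' off our tail, then empty 'q'.
      void PushList(FifoSketch *q) {
        if (!q->first_) return;
        if (last_) last_->next = q->first_; else first_ = q->first_;
        last_ = q->last_;
        q->first_ = q->last_ = nullptr;
      }
    };

    int main() {
      Node a{}, b{}, c{};
      FifoSketch local, global;
      local.Push(&a); local.Push(&b);   // thread-local quarantine: a -> b
      global.Push(&c);                  // global quarantine: c
      global.PushList(&local);          // global becomes c -> a -> b
      assert(global.first_ == &c && global.last_ == &b && c.next == &a);
    }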
void AsanChunkFifoList::Push(AsanChunk *n) {
- CHECK(n->next == NULL);
+ CHECK(n->next == 0);
if (last_) {
CHECK(first_);
CHECK(!last_->next);
@@ -260,8 +292,8 @@ AsanChunk *AsanChunkFifoList::Pop() {
CHECK(first_);
AsanChunk *res = first_;
first_ = first_->next;
- if (first_ == NULL)
- last_ = NULL;
+ if (first_ == 0)
+ last_ = 0;
CHECK(size_ >= res->Size());
size_ -= res->Size();
if (last_) {
@@ -272,11 +304,11 @@ AsanChunk *AsanChunkFifoList::Pop() {
// All pages we ever allocated.
struct PageGroup {
- uintptr_t beg;
- uintptr_t end;
- size_t size_of_chunk;
- uintptr_t last_chunk;
- bool InRange(uintptr_t addr) {
+ uptr beg;
+ uptr end;
+ uptr size_of_chunk;
+ uptr last_chunk;
+ bool InRange(uptr addr) {
return addr >= beg && addr < end;
}
};
@@ -286,12 +318,12 @@ class MallocInfo {
explicit MallocInfo(LinkerInitialized x) : mu_(x) { }
- AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
- AsanChunk *m = NULL;
+ AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
+ AsanChunk *m = 0;
AsanChunk **fl = &free_lists_[size_class];
{
ScopedLock lock(&mu_);
- for (size_t i = 0; i < n_chunks; i++) {
+ for (uptr i = 0; i < n_chunks; i++) {
if (!(*fl)) {
*fl = GetNewChunks(size_class);
}
@@ -307,17 +339,17 @@ class MallocInfo {
void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
bool eat_free_lists) {
- CHECK(FLAG_quarantine_size > 0);
+ CHECK(flags()->quarantine_size > 0);
ScopedLock lock(&mu_);
AsanChunkFifoList *q = &x->quarantine_;
if (q->size() > 0) {
quarantine_.PushList(q);
- while (quarantine_.size() > FLAG_quarantine_size) {
+ while (quarantine_.size() > (uptr)flags()->quarantine_size) {
QuarantinePop();
}
}
if (eat_free_lists) {
- for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
+ for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
size_class++) {
AsanChunk *m = x->free_lists_[size_class];
while (m) {
@@ -336,15 +368,13 @@ class MallocInfo {
quarantine_.Push(chunk);
}
- AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
+ AsanChunk *FindMallocedOrFreed(uptr addr, uptr access_size) {
ScopedLock lock(&mu_);
return FindChunkByAddr(addr);
}
- // TODO(glider): AllocationSize() may become very slow if the size of
- // page_groups_ grows. This can be fixed by increasing kMinMmapSize,
- // but a better solution is to speed up the search somehow.
- size_t AllocationSize(uintptr_t ptr) {
+ uptr AllocationSize(uptr ptr) {
+ if (!ptr) return 0;
ScopedLock lock(&mu_);
// first, check if this is our memory
@@ -368,40 +398,60 @@ class MallocInfo {
void PrintStatus() {
ScopedLock lock(&mu_);
- size_t malloced = 0;
+ uptr malloced = 0;
- Printf(" MallocInfo: in quarantine: %ld malloced: %ld; ",
+ Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
quarantine_.size() >> 20, malloced >> 20);
- for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
+ for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
AsanChunk *i = free_lists_[j];
if (!i) continue;
- size_t t = 0;
+ uptr t = 0;
for (; i; i = i->next) {
t += i->Size();
}
- Printf("%ld:%ld ", j, t >> 20);
+ Printf("%zu:%zu ", j, t >> 20);
}
Printf("\n");
}
- PageGroup *FindPageGroup(uintptr_t addr) {
+ PageGroup *FindPageGroup(uptr addr) {
ScopedLock lock(&mu_);
return FindPageGroupUnlocked(addr);
}
private:
- PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
- for (int i = 0; i < n_page_groups_; i++) {
- PageGroup *g = page_groups_[i];
- if (g->InRange(addr)) {
- return g;
+ PageGroup *FindPageGroupUnlocked(uptr addr) {
+ int n = atomic_load(&n_page_groups_, memory_order_relaxed);
+ // If the page groups are not sorted yet, sort them.
+ if (n_sorted_page_groups_ < n) {
+ SortArray((uptr*)page_groups_, n);
+ n_sorted_page_groups_ = n;
+ }
+ // Binary search over the page groups.
+ int beg = 0, end = n;
+ while (beg < end) {
+ int med = (beg + end) / 2;
+ uptr g = (uptr)page_groups_[med];
+ if (addr > g) {
+ // 'g' points to the end of the group, so 'addr'
+ // may not belong to page_groups_[med] or any previous group.
+ beg = med + 1;
+ } else {
+ // 'addr' may belong to page_groups_[med] or a previous group.
+ end = med;
}
}
- return NULL;
+ if (beg >= n)
+ return 0;
+ PageGroup *g = page_groups_[beg];
+ CHECK(g);
+ if (g->InRange(addr))
+ return g;
+ return 0;
}
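The binary search above compares addr against the PageGroup pointer itself, which works because GetNewChunks (below) places each descriptor at the tail of its own mapping, so a descriptor's address bounds its group from the right and the pointer array can be sorted and searched directly. A self-contained sketch of that lower-bound search, with two fake mappings standing in for real mmap regions:

    #include <cassert>
    #include <cstdint>
    #include <new>

    struct GroupSketch { uintptr_t beg, end; };

    static GroupSketch *FindGroup(GroupSketch **groups, int n, uintptr_t addr) {
      int beg = 0, end = n;
      while (beg < end) {          // find first descriptor address >= addr
        int med = (beg + end) / 2;
        if (addr > (uintptr_t)groups[med]) beg = med + 1;
        else end = med;
      }
      if (beg >= n) return nullptr;
      GroupSketch *g = groups[beg];
      return (addr >= g->beg && addr < g->end) ? g : nullptr;
    }

    int main() {
      static char map1[256], map2[256];  // two fake mappings
      // Descriptors occupy the tail of each mapping, as in GetNewChunks().
      GroupSketch *g1 = new (map1 + 256 - sizeof(GroupSketch)) GroupSketch();
      GroupSketch *g2 = new (map2 + 256 - sizeof(GroupSketch)) GroupSketch();
      *g1 = { (uintptr_t)map1, (uintptr_t)(map1 + 256) };
      *g2 = { (uintptr_t)map2, (uintptr_t)(map2 + 256) };
      GroupSketch *groups[2] = { g1, g2 };
      // Keep the array sorted by descriptor address, as the real code does.
      if ((uintptr_t)g2 < (uintptr_t)g1) { groups[0] = g2; groups[1] = g1; }
      assert(FindGroup(groups, 2, (uintptr_t)map1 + 10) == g1);
      assert(FindGroup(groups, 2, 1) == nullptr);
    }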
// We have an address between two chunks, and we want to report just one.
- AsanChunk *ChooseChunk(uintptr_t addr,
+ AsanChunk *ChooseChunk(uptr addr,
AsanChunk *left_chunk, AsanChunk *right_chunk) {
// Prefer an allocated chunk or a chunk from quarantine.
if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
@@ -411,7 +461,7 @@ class MallocInfo {
left_chunk->chunk_state != CHUNK_AVAILABLE)
return left_chunk;
// Choose based on offset.
- size_t l_offset = 0, r_offset = 0;
+ uptr l_offset = 0, r_offset = 0;
CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
if (l_offset < r_offset)
@@ -419,33 +469,33 @@ class MallocInfo {
return right_chunk;
}
- AsanChunk *FindChunkByAddr(uintptr_t addr) {
+ AsanChunk *FindChunkByAddr(uptr addr) {
PageGroup *g = FindPageGroupUnlocked(addr);
if (!g) return 0;
CHECK(g->size_of_chunk);
- uintptr_t offset_from_beg = addr - g->beg;
- uintptr_t this_chunk_addr = g->beg +
+ uptr offset_from_beg = addr - g->beg;
+ uptr this_chunk_addr = g->beg +
(offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
CHECK(g->InRange(this_chunk_addr));
AsanChunk *m = (AsanChunk*)this_chunk_addr;
CHECK(m->chunk_state == CHUNK_ALLOCATED ||
m->chunk_state == CHUNK_AVAILABLE ||
m->chunk_state == CHUNK_QUARANTINE);
- size_t offset = 0;
+ uptr offset = 0;
if (m->AddrIsInside(addr, 1, &offset))
return m;
if (m->AddrIsAtRight(addr, 1, &offset)) {
if (this_chunk_addr == g->last_chunk) // rightmost chunk
return m;
- uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
+ uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
CHECK(g->InRange(right_chunk_addr));
return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
} else {
CHECK(m->AddrIsAtLeft(addr, 1, &offset));
if (this_chunk_addr == g->beg) // leftmost chunk
return m;
- uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
+ uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
CHECK(g->InRange(left_chunk_addr));
return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
}
@@ -459,10 +509,11 @@ class MallocInfo {
CHECK(m->chunk_state == CHUNK_QUARANTINE);
m->chunk_state = CHUNK_AVAILABLE;
+ PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
CHECK(m->alloc_tid >= 0);
CHECK(m->free_tid >= 0);
- size_t size_class = m->SizeClass();
+ uptr size_class = m->SizeClass();
m->next = free_lists_[size_class];
free_lists_[size_class] = m;
@@ -475,12 +526,12 @@ class MallocInfo {
}
// Get a list of newly allocated chunks.
- AsanChunk *GetNewChunks(uint8_t size_class) {
- size_t size = SizeClassToSize(size_class);
+ AsanChunk *GetNewChunks(u8 size_class) {
+ uptr size = SizeClassToSize(size_class);
CHECK(IsPowerOfTwo(kMinMmapSize));
CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
- size_t mmap_size = Max(size, kMinMmapSize);
- size_t n_chunks = mmap_size / size;
+ uptr mmap_size = Max(size, kMinMmapSize);
+ uptr n_chunks = mmap_size / size;
CHECK(n_chunks * size == mmap_size);
if (size < kPageSize) {
// Size is small, just poison the last chunk.
@@ -490,7 +541,7 @@ class MallocInfo {
mmap_size += kPageSize;
}
CHECK(n_chunks > 0);
- uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);
+ u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
@@ -498,8 +549,8 @@ class MallocInfo {
thread_stats.mmaped += mmap_size;
thread_stats.mmaped_by_size[size_class] += n_chunks;
- AsanChunk *res = NULL;
- for (size_t i = 0; i < n_chunks; i++) {
+ AsanChunk *res = 0;
+ for (uptr i = 0; i < n_chunks; i++) {
AsanChunk *m = (AsanChunk*)(mem + i * size);
m->chunk_state = CHUNK_AVAILABLE;
m->size_class = size_class;
@@ -508,13 +559,13 @@ class MallocInfo {
}
PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
// This memory is already poisoned, no need to poison it again.
- pg->beg = (uintptr_t)mem;
+ pg->beg = (uptr)mem;
pg->end = pg->beg + mmap_size;
pg->size_of_chunk = size;
- pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
- int page_group_idx = AtomicInc(&n_page_groups_) - 1;
- CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
- page_groups_[page_group_idx] = pg;
+ pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
+ int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
+ CHECK(idx < (int)ASAN_ARRAY_SIZE(page_groups_));
+ page_groups_[idx] = pg;
return res;
}
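GetNewChunks above carves one large mapping into equal chunks, threads them into the size-class free list, and stashes the PageGroup descriptor just past the last chunk. A hedged sketch of that carving (the next-pointer threading is elided by the hunk; the sketch assumes the usual LIFO push):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    struct NodeSketch { NodeSketch *next; };

    int main() {
      const uintptr_t kChunk = 64, kChunks = 8, kDescr = 64;
      char *mem = (char*)std::malloc(kChunk * kChunks + kDescr);
      // Thread every chunk onto the size-class free list (LIFO).
      NodeSketch *free_list = nullptr;
      for (uintptr_t i = 0; i < kChunks; i++) {
        NodeSketch *m = (NodeSketch*)(mem + i * kChunk);
        m->next = free_list;
        free_list = m;
      }
      // The group descriptor lives just past the last chunk.
      char *descriptor = mem + kChunks * kChunk;
      assert(free_list == (NodeSketch*)(mem + (kChunks - 1) * kChunk));
      assert(descriptor == (char*)free_list + kChunk);
      std::free(mem);
    }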
@@ -523,7 +574,8 @@ class MallocInfo {
AsanLock mu_;
PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
- int n_page_groups_; // atomic
+ atomic_uint32_t n_page_groups_;
+ int n_sorted_page_groups_;
};
static MallocInfo malloc_info(LINKER_INITIALIZED);
@@ -532,7 +584,7 @@ void AsanThreadLocalMallocStorage::CommitBack() {
malloc_info.SwallowThreadLocalMallocStorage(this, true);
}
-static void Describe(uintptr_t addr, size_t access_size) {
+static void Describe(uptr addr, uptr access_size) {
AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
if (!m) return;
m->DescribeAddress(addr, access_size);
@@ -544,55 +596,56 @@ static void Describe(uintptr_t addr, size_t access_size) {
m->compressed_alloc_stack_size());
AsanThread *t = asanThreadRegistry().GetCurrent();
CHECK(t);
- if (m->free_tid >= 0) {
+ if (m->free_tid != kInvalidTid) {
AsanThreadSummary *free_thread =
asanThreadRegistry().FindByTid(m->free_tid);
- Printf("freed by thread T%d here:\n", free_thread->tid());
+ AsanPrintf("freed by thread T%d here:\n", free_thread->tid());
AsanStackTrace free_stack;
AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
m->compressed_free_stack_size());
free_stack.PrintStack();
- Printf("previously allocated by thread T%d here:\n",
- alloc_thread->tid());
+ AsanPrintf("previously allocated by thread T%d here:\n",
+ alloc_thread->tid());
alloc_stack.PrintStack();
t->summary()->Announce();
free_thread->Announce();
alloc_thread->Announce();
} else {
- Printf("allocated by thread T%d here:\n", alloc_thread->tid());
+ AsanPrintf("allocated by thread T%d here:\n", alloc_thread->tid());
alloc_stack.PrintStack();
t->summary()->Announce();
alloc_thread->Announce();
}
}
-static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
+static u8 *Allocate(uptr alignment, uptr size, AsanStackTrace *stack) {
__asan_init();
CHECK(stack);
if (size == 0) {
size = 1; // TODO(kcc): do something smarter
}
CHECK(IsPowerOfTwo(alignment));
- size_t rounded_size = RoundUpTo(size, REDZONE);
- size_t needed_size = rounded_size + REDZONE;
+ uptr rounded_size = RoundUpTo(size, REDZONE);
+ uptr needed_size = rounded_size + REDZONE;
if (alignment > REDZONE) {
needed_size += alignment;
}
CHECK(IsAligned(needed_size, REDZONE));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
- Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
+ Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
+ (void*)size);
return 0;
}
- uint8_t size_class = SizeToSizeClass(needed_size);
- size_t size_to_allocate = SizeClassToSize(size_class);
+ u8 size_class = SizeToSizeClass(needed_size);
+ uptr size_to_allocate = SizeClassToSize(size_class);
CHECK(size_to_allocate >= kMinAllocSize);
CHECK(size_to_allocate >= needed_size);
CHECK(IsAligned(size_to_allocate, REDZONE));
- if (FLAG_v >= 2) {
- Printf("Allocate align: %ld size: %ld class: %d real: %ld\n",
+ if (flags()->verbosity >= 3) {
+ Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
alignment, size, size_class, size_to_allocate);
}
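The sizing arithmetic at the top of Allocate() rounds the user size up to the redzone granularity, prepends one left redzone, and reserves extra slack when the requested alignment exceeds REDZONE. A small worked sketch of that arithmetic (constants are illustrative):

    #include <cassert>

    typedef unsigned long uptr;

    static uptr RoundUpToSketch(uptr x, uptr boundary) {
      return (x + boundary - 1) & ~(boundary - 1);
    }

    int main() {
      const uptr kRedzone = 128;
      uptr size = 200, alignment = 512;
      uptr rounded_size = RoundUpToSketch(size, kRedzone);  // 256
      uptr needed_size = rounded_size + kRedzone;           // + left redzone
      if (alignment > kRedzone) needed_size += alignment;   // alignment slack
      assert(rounded_size == 256 && needed_size == 256 + 128 + 512);
    }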
@@ -604,7 +657,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
thread_stats.malloced_redzones += size_to_allocate - size;
thread_stats.malloced_by_size[size_class]++;
- AsanChunk *m = NULL;
+ AsanChunk *m = 0;
if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
// get directly from global storage.
m = malloc_info.AllocateChunks(size_class, 1);
@@ -613,7 +666,7 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
// get from the thread-local storage.
AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
if (!*fl) {
- size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
+ uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
*fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
thread_stats.malloc_small_slow++;
}
@@ -623,24 +676,27 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
CHECK(m);
CHECK(m->chunk_state == CHUNK_AVAILABLE);
m->chunk_state = CHUNK_ALLOCATED;
- m->next = NULL;
+ m->next = 0;
CHECK(m->Size() == size_to_allocate);
- uintptr_t addr = (uintptr_t)m + REDZONE;
- CHECK(addr == (uintptr_t)m->compressed_free_stack());
+ uptr addr = (uptr)m + REDZONE;
+ CHECK(addr <= (uptr)m->compressed_free_stack());
if (alignment > REDZONE && (addr & (alignment - 1))) {
addr = RoundUpTo(addr, alignment);
CHECK((addr & (alignment - 1)) == 0);
AsanChunk *p = (AsanChunk*)(addr - REDZONE);
p->chunk_state = CHUNK_MEMALIGN;
- p->next = m;
+ p->used_size = (uptr)p - (uptr)m;
+ m->alignment_log = Log2(alignment);
+ CHECK(m->Beg() == addr);
+ } else {
+ m->alignment_log = Log2(REDZONE);
}
CHECK(m == PtrToChunk(addr));
m->used_size = size;
- m->offset = addr - (uintptr_t)m;
- CHECK(m->beg() == addr);
+ CHECK(m->Beg() == addr);
m->alloc_tid = t ? t->tid() : 0;
- m->free_tid = AsanThread::kInvalidTid;
+ m->free_tid = kInvalidTid;
AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
m->compressed_alloc_stack_size());
PoisonShadow(addr, rounded_size, 0);
@@ -648,42 +704,49 @@ static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
size & (REDZONE - 1));
}
- if (size <= FLAG_max_malloc_fill_size) {
- real_memset((void*)addr, 0, rounded_size);
+ if (size <= (uptr)(flags()->max_malloc_fill_size)) {
+ REAL(memset)((void*)addr, 0, rounded_size);
}
- return (uint8_t*)addr;
+ return (u8*)addr;
}
-static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
+static void Deallocate(u8 *ptr, AsanStackTrace *stack) {
if (!ptr) return;
CHECK(stack);
- if (FLAG_debug) {
- CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
+ if (flags()->debug) {
+ CHECK(malloc_info.FindPageGroup((uptr)ptr));
}
// Printf("Deallocate %p\n", ptr);
- AsanChunk *m = PtrToChunk((uintptr_t)ptr);
- if (m->chunk_state == CHUNK_QUARANTINE) {
- Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
+ AsanChunk *m = PtrToChunk((uptr)ptr);
+
+ // Flip the chunk_state atomically to avoid race on double-free.
+ u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
+ memory_order_acq_rel);
+
+ if (old_chunk_state == CHUNK_QUARANTINE) {
+ AsanReport("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
stack->PrintStack();
- m->DescribeAddress((uintptr_t)ptr, 1);
+ Describe((uptr)ptr, 1);
ShowStatsAndAbort();
- } else if (m->chunk_state != CHUNK_ALLOCATED) {
- Report("ERROR: AddressSanitizer attempting free on address which was not"
- " malloc()-ed: %p\n", ptr);
+ } else if (old_chunk_state != CHUNK_ALLOCATED) {
+ AsanReport("ERROR: AddressSanitizer attempting free on address "
+ "which was not malloc()-ed: %p\n", ptr);
stack->PrintStack();
ShowStatsAndAbort();
}
- CHECK(m->chunk_state == CHUNK_ALLOCATED);
- CHECK(m->free_tid == AsanThread::kInvalidTid);
+ CHECK(old_chunk_state == CHUNK_ALLOCATED);
+ // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
+ CHECK(REDZONE <= 16 || !m->next);
+ CHECK(m->free_tid == kInvalidTid);
CHECK(m->alloc_tid >= 0);
AsanThread *t = asanThreadRegistry().GetCurrent();
m->free_tid = t ? t->tid() : 0;
AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
m->compressed_free_stack_size());
- size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
- PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);
+ uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
+ PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
@@ -691,22 +754,21 @@ static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
thread_stats.freed += m->used_size;
thread_stats.freed_by_size[m->SizeClass()]++;
- m->chunk_state = CHUNK_QUARANTINE;
+ CHECK(m->chunk_state == CHUNK_QUARANTINE);
+
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
- CHECK(!m->next);
ms->quarantine_.Push(m);
if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
malloc_info.SwallowThreadLocalMallocStorage(ms, false);
}
} else {
- CHECK(!m->next);
malloc_info.BypassThreadLocalQuarantine(m);
}
}
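The key change in Deallocate() above is that the chunk state is flipped with an atomic exchange on the first header byte, so of two racing free() calls exactly one observes CHUNK_ALLOCATED and the other reliably sees CHUNK_QUARANTINE and reports the double-free. A minimal sketch of that pattern using std::atomic in place of the sanitizer atomics:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    enum : uint8_t { ALLOCATED = 0x32, QUARANTINE = 0x19 };

    int main() {
      std::atomic<uint8_t> chunk_state{ALLOCATED};
      // First free: wins the exchange and proceeds to quarantine the chunk.
      uint8_t old1 = chunk_state.exchange(QUARANTINE, std::memory_order_acq_rel);
      assert(old1 == ALLOCATED);
      // Second (double) free: sees QUARANTINE and would report the bug.
      uint8_t old2 = chunk_state.exchange(QUARANTINE, std::memory_order_acq_rel);
      assert(old2 == QUARANTINE);
    }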
-static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
+static u8 *Reallocate(u8 *old_ptr, uptr new_size,
AsanStackTrace *stack) {
CHECK(old_ptr && new_size);
@@ -715,13 +777,14 @@ static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
thread_stats.reallocs++;
thread_stats.realloced += new_size;
- AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
+ AsanChunk *m = PtrToChunk((uptr)old_ptr);
CHECK(m->chunk_state == CHUNK_ALLOCATED);
- size_t old_size = m->used_size;
- size_t memcpy_size = Min(new_size, old_size);
- uint8_t *new_ptr = Allocate(0, new_size, stack);
+ uptr old_size = m->used_size;
+ uptr memcpy_size = Min(new_size, old_size);
+ u8 *new_ptr = Allocate(0, new_size, stack);
if (new_ptr) {
- real_memcpy(new_ptr, old_ptr, memcpy_size);
+ CHECK(REAL(memcpy) != 0);
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
Deallocate(old_ptr, stack);
}
return new_ptr;
@@ -738,9 +801,9 @@ static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
// program must provide implementation of this hook.
// If macro is undefined, the hook is no-op.
#ifdef ASAN_NEW_HOOK
-extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
+extern "C" void ASAN_NEW_HOOK(void *ptr, uptr size);
#else
-static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
+static inline void ASAN_NEW_HOOK(void *ptr, uptr size) { }
#endif
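As the comment above says, the hook is only wired in when the ASAN_NEW_HOOK macro names a user-provided function. A hedged sketch of the client side; MyNewHook and the -DASAN_NEW_HOOK=MyNewHook build flag are hypothetical names chosen for illustration:

    #include <cstdio>

    typedef unsigned long uptr;  // stand-in for the sanitizer 'uptr' type

    extern "C" void MyNewHook(void *ptr, uptr size) {
      // Invoked after each successful allocation with the user pointer/size.
      std::fprintf(stderr, "alloc %p (%lu bytes)\n", ptr, (unsigned long)size);
    }

    int main() {
      MyNewHook(nullptr, 16);  // smoke test; normally the allocator calls this
    }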
#ifdef ASAN_DELETE_HOOK
@@ -751,7 +814,7 @@ static inline void ASAN_DELETE_HOOK(void *ptr) { }
namespace __asan {
-void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
+void *asan_memalign(uptr alignment, uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(alignment, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
@@ -759,43 +822,43 @@ void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
void asan_free(void *ptr, AsanStackTrace *stack) {
ASAN_DELETE_HOOK(ptr);
- Deallocate((uint8_t*)ptr, stack);
+ Deallocate((u8*)ptr, stack);
}
-void *asan_malloc(size_t size, AsanStackTrace *stack) {
+void *asan_malloc(uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(0, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
}
-void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
+void *asan_calloc(uptr nmemb, uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(0, nmemb * size, stack);
if (ptr)
- real_memset(ptr, 0, nmemb * size);
+ REAL(memset)(ptr, 0, nmemb * size);
ASAN_NEW_HOOK(ptr, nmemb * size);
return ptr;
}
-void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
- if (p == NULL) {
+void *asan_realloc(void *p, uptr size, AsanStackTrace *stack) {
+ if (p == 0) {
void *ptr = (void*)Allocate(0, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
} else if (size == 0) {
ASAN_DELETE_HOOK(p);
- Deallocate((uint8_t*)p, stack);
- return NULL;
+ Deallocate((u8*)p, stack);
+ return 0;
}
- return Reallocate((uint8_t*)p, size, stack);
+ return Reallocate((u8*)p, size, stack);
}
-void *asan_valloc(size_t size, AsanStackTrace *stack) {
+void *asan_valloc(uptr size, AsanStackTrace *stack) {
void *ptr = (void*)Allocate(kPageSize, size, stack);
ASAN_NEW_HOOK(ptr, size);
return ptr;
}
-void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
+void *asan_pvalloc(uptr size, AsanStackTrace *stack) {
size = RoundUpTo(size, kPageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
@@ -806,61 +869,76 @@ void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
return ptr;
}
-int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
AsanStackTrace *stack) {
void *ptr = Allocate(alignment, size, stack);
- CHECK(IsAligned((uintptr_t)ptr, alignment));
+ CHECK(IsAligned((uptr)ptr, alignment));
ASAN_NEW_HOOK(ptr, size);
*memptr = ptr;
return 0;
}
-size_t __asan_mz_size(const void *ptr) {
- return malloc_info.AllocationSize((uintptr_t)ptr);
+uptr asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
+ CHECK(stack);
+ if (ptr == 0) return 0;
+ uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
+ if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+ AsanReport("ERROR: AddressSanitizer attempting to call "
+ "malloc_usable_size() for pointer which is "
+ "not owned: %p\n", ptr);
+ stack->PrintStack();
+ Describe((uptr)ptr, 1);
+ ShowStatsAndAbort();
+ }
+ return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+ return malloc_info.AllocationSize((uptr)ptr);
}
-void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
+void DescribeHeapAddress(uptr addr, uptr access_size) {
Describe(addr, access_size);
}
-void __asan_mz_force_lock() {
+void asan_mz_force_lock() {
malloc_info.ForceLock();
}
-void __asan_mz_force_unlock() {
+void asan_mz_force_unlock() {
malloc_info.ForceUnlock();
}
// ---------------------- Fake stack-------------------- {{{1
FakeStack::FakeStack() {
- CHECK(real_memset);
- real_memset(this, 0, sizeof(*this));
+ CHECK(REAL(memset) != 0);
+ REAL(memset)(this, 0, sizeof(*this));
}
-bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
- uintptr_t mem = allocated_size_classes_[size_class];
- uintptr_t size = ClassMmapSize(size_class);
+bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
+ uptr mem = allocated_size_classes_[size_class];
+ uptr size = ClassMmapSize(size_class);
bool res = mem && addr >= mem && addr < mem + size;
return res;
}
-uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
- for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
+uptr FakeStack::AddrIsInFakeStack(uptr addr) {
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
}
return 0;
}
// We may want to compute this during compilation.
-inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
- size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
- size_t log = Log2(rounded_size);
+inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
+ uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
+ uptr log = Log2(rounded_size);
CHECK(alloc_size <= (1UL << log));
if (!(alloc_size > (1UL << (log-1)))) {
- Printf("alloc_size %ld log %ld\n", alloc_size, log);
+ Printf("alloc_size %zu log %zu\n", alloc_size, log);
}
CHECK(alloc_size > (1UL << (log-1)));
- size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
+ uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
CHECK(res < kNumberOfSizeClasses);
CHECK(ClassSize(res) >= rounded_size);
return res;
@@ -892,36 +970,36 @@ FakeFrame *FakeFrameFifo::FifoPop() {
return res;
}
-void FakeStack::Init(size_t stack_size) {
+void FakeStack::Init(uptr stack_size) {
stack_size_ = stack_size;
alive_ = true;
}
void FakeStack::Cleanup() {
alive_ = false;
- for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
- uintptr_t mem = allocated_size_classes_[i];
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+ uptr mem = allocated_size_classes_[i];
if (mem) {
PoisonShadow(mem, ClassMmapSize(i), 0);
allocated_size_classes_[i] = 0;
- AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
+ UnmapOrDie((void*)mem, ClassMmapSize(i));
}
}
}
-size_t FakeStack::ClassMmapSize(size_t size_class) {
+uptr FakeStack::ClassMmapSize(uptr size_class) {
return RoundUpToPowerOfTwo(stack_size_);
}
-void FakeStack::AllocateOneSizeClass(size_t size_class) {
+void FakeStack::AllocateOneSizeClass(uptr size_class) {
CHECK(ClassMmapSize(size_class) >= kPageSize);
- uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
+ uptr new_mem = (uptr)MmapOrDie(
ClassMmapSize(size_class), __FUNCTION__);
- // Printf("T%d new_mem[%ld]: %p-%p mmap %ld\n",
+ // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
// asanThreadRegistry().GetCurrent()->tid(),
// size_class, new_mem, new_mem + ClassMmapSize(size_class),
// ClassMmapSize(size_class));
- size_t i;
+ uptr i;
for (i = 0; i < ClassMmapSize(size_class);
i += ClassSize(size_class)) {
size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
@@ -930,10 +1008,10 @@ void FakeStack::AllocateOneSizeClass(size_t size_class) {
allocated_size_classes_[size_class] = new_mem;
}
-uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
+uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
if (!alive_) return real_stack;
CHECK(size <= kMaxStackMallocSize && size > 1);
- size_t size_class = ComputeSizeClass(size);
+ uptr size_class = ComputeSizeClass(size);
if (!allocated_size_classes_[size_class]) {
AllocateOneSizeClass(size_class);
}
@@ -947,23 +1025,23 @@ uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
DeallocateFrame(top);
}
call_stack_.LifoPush(fake_frame);
- uintptr_t ptr = (uintptr_t)fake_frame;
+ uptr ptr = (uptr)fake_frame;
PoisonShadow(ptr, size, 0);
return ptr;
}
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
CHECK(alive_);
- size_t size = fake_frame->size_minus_one + 1;
- size_t size_class = ComputeSizeClass(size);
+ uptr size = fake_frame->size_minus_one + 1;
+ uptr size_class = ComputeSizeClass(size);
CHECK(allocated_size_classes_[size_class]);
- uintptr_t ptr = (uintptr_t)fake_frame;
+ uptr ptr = (uptr)fake_frame;
CHECK(AddrIsInSizeClass(ptr, size_class));
CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
size_classes_[size_class].FifoPush(fake_frame);
}
-void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
+void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
FakeFrame *fake_frame = (FakeFrame*)ptr;
CHECK(fake_frame->magic == kRetiredStackFrameMagic);
CHECK(fake_frame->descr != 0);
@@ -976,20 +1054,20 @@ void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-size_t __asan_stack_malloc(size_t size, size_t real_stack) {
- if (!FLAG_use_fake_stack) return real_stack;
+uptr __asan_stack_malloc(uptr size, uptr real_stack) {
+ if (!flags()->use_fake_stack) return real_stack;
AsanThread *t = asanThreadRegistry().GetCurrent();
if (!t) {
// TSD is gone, use the real stack.
return real_stack;
}
- size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
- // Printf("__asan_stack_malloc %p %ld %p\n", ptr, size, real_stack);
+ uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
+ // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
return ptr;
}
-void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
- if (!FLAG_use_fake_stack) return;
+void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
+ if (!flags()->use_fake_stack) return;
if (ptr != real_stack) {
FakeStack::OnFree(ptr, size, real_stack);
}
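Instrumented functions drive these two entry points in pairs: allocate a fake frame on entry, fall back to the real stack when the fake stack is unavailable, and release the frame on exit. A hedged sketch of that calling pattern; the bodies below are local stubs so the sketch is self-contained, whereas in a real build the symbols come from the ASan runtime:

    typedef unsigned long uptr;  // stand-in for the sanitizer 'uptr'

    static uptr fake_stack_malloc(uptr size, uptr real_stack) {
      // Behavior sketched from the code above: fall back to the real frame
      // when the fake stack is disabled or thread state is gone.
      (void)size;
      return real_stack;
    }

    static void fake_stack_free(uptr ptr, uptr size, uptr real_stack) {
      (void)size;
      if (ptr != real_stack) { /* would call FakeStack::OnFree(...) */ }
    }

    int main() {
      char real_frame[128];  // the compiler-built frame
      uptr frame = fake_stack_malloc(128, (uptr)real_frame);
      // ... the function body addresses its locals relative to 'frame' ...
      fake_stack_free(frame, 128, (uptr)real_frame);
    }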
@@ -997,23 +1075,25 @@ void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
-size_t __asan_get_estimated_allocated_size(size_t size) {
+uptr __asan_get_estimated_allocated_size(uptr size) {
if (size == 0) return 1;
return Min(size, kMaxAllowedMallocSize);
}
bool __asan_get_ownership(const void *p) {
- return (p == NULL) ||
- (malloc_info.AllocationSize((uintptr_t)p) > 0);
+ return malloc_info.AllocationSize((uptr)p) > 0;
}
-size_t __asan_get_allocated_size(const void *p) {
- if (p == NULL) return 0;
- size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
+uptr __asan_get_allocated_size(const void *p) {
+ if (p == 0) return 0;
+ uptr allocated_size = malloc_info.AllocationSize((uptr)p);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
- Printf("__asan_get_allocated_size failed, ptr=%p is not owned\n", p);
+ AsanReport("ERROR: AddressSanitizer attempting to call "
+ "__asan_get_allocated_size() for pointer which is "
+ "not owned: %p\n", p);
PRINT_CURRENT_STACK();
+ Describe((uptr)p, 1);
ShowStatsAndAbort();
}
return allocated_size;