Diffstat (limited to 'contrib/llvm/lib/Transforms/Instrumentation')
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp  | 1159
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp    |   32
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp |  156
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp     |  142
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp    |   51
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp   |  413
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp |  334
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp   |  201
8 files changed, 1434 insertions(+), 1054 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 25f1f02..25f78b0 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -24,6 +24,9 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
@@ -43,12 +46,14 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <string>
#include <system_error>
@@ -64,22 +69,21 @@ static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 36;
+static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
+static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
-static const size_t kMinStackMallocSize = 1 << 6; // 64B
+static const size_t kMinStackMallocSize = 1 << 6; // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
-static const uint64_t kAsanCtorAndDtorPriority = 1;
+static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
-static const char *const kAsanReportLoadN = "__asan_report_load_n";
-static const char *const kAsanReportStoreN = "__asan_report_store_n";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
"__asan_unregister_globals";
@@ -89,7 +93,7 @@ static const char *const kAsanInitName = "__asan_init_v5";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
-static const int kMaxAsanStackMallocSizeClass = 10;
+static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "__asan_gen_";
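The size constants above drive the fake-stack allocator: a frame is served by one of kMaxAsanStackMallocSizeClass + 1 __asan_stack_malloc_<N> entry points, with class N covering frames up to kMinStackMallocSize << N. A minimal sketch of the selection, assuming that rounding rule:

    // Sketch only: choose N for __asan_stack_malloc_<N> (64B, 128B, ..., 64K).
    static int stackMallocSizeClass(uint64_t LocalStackSize) {
      assert(LocalStackSize <= kMaxStackMallocSize);
      for (int i = 0;; i++)
        if (LocalStackSize <= (kMinStackMallocSize << i))
          return i;
    }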
@@ -102,101 +106,124 @@ static const char *const kAsanUnpoisonStackMemoryName =
static const char *const kAsanOptionDetectUAR =
"__asan_option_detect_stack_use_after_return";
-#ifndef NDEBUG
-static const int kAsanStackAfterReturnMagic = 0xf5;
-#endif
+static const char *const kAsanAllocaPoison =
+ "__asan_alloca_poison";
+static const char *const kAsanAllocasUnpoison =
+ "__asan_allocas_unpoison";
// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
static const unsigned kAllocaRzSize = 32;
-static const unsigned kAsanAllocaLeftMagic = 0xcacacacaU;
-static const unsigned kAsanAllocaRightMagic = 0xcbcbcbcbU;
-static const unsigned kAsanAllocaPartialVal1 = 0xcbcbcb00U;
-static const unsigned kAsanAllocaPartialVal2 = 0x000000cbU;
// Command-line flags.
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
- cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
- cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
- cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
- cl::Hidden, cl::init(true));
-static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
- cl::desc("use instrumentation with slow path for all accesses"),
- cl::Hidden, cl::init(false));
+ cl::desc("instrument read instructions"),
+ cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInstrumentWrites(
+ "asan-instrument-writes", cl::desc("instrument write instructions"),
+ cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInstrumentAtomics(
+ "asan-instrument-atomics",
+ cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
+ cl::init(true));
+static cl::opt<bool> ClAlwaysSlowPath(
+ "asan-always-slow-path",
+ cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
+ cl::init(false));
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
// set it to 10000.
-static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
- cl::init(10000),
- cl::desc("maximal number of instructions to instrument in any given BB"),
- cl::Hidden);
+static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
+ "asan-max-ins-per-bb", cl::init(10000),
+ cl::desc("maximal number of instructions to instrument in any given BB"),
+ cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
-static cl::opt<bool> ClStack("asan-stack",
- cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
+ cl::Hidden, cl::init(true));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
- cl::desc("Check return-after-free"), cl::Hidden, cl::init(true));
+ cl::desc("Check return-after-free"),
+ cl::Hidden, cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
- cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
+ cl::desc("Handle global objects"), cl::Hidden,
+ cl::init(true));
static cl::opt<bool> ClInitializers("asan-initialization-order",
- cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
- cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
- cl::Hidden, cl::init(false));
-static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
- cl::desc("Realign stack to the value of this flag (power of two)"),
- cl::Hidden, cl::init(32));
+ cl::desc("Handle C++ initializer order"),
+ cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInvalidPointerPairs(
+ "asan-detect-invalid-pointer-pair",
+ cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
+ cl::init(false));
+static cl::opt<unsigned> ClRealignStack(
+ "asan-realign-stack",
+ cl::desc("Realign stack to the value of this flag (power of two)"),
+ cl::Hidden, cl::init(32));
static cl::opt<int> ClInstrumentationWithCallsThreshold(
"asan-instrumentation-with-call-threshold",
- cl::desc("If the function being instrumented contains more than "
- "this number of memory accesses, use callbacks instead of "
- "inline checks (-1 means never use callbacks)."),
- cl::Hidden, cl::init(7000));
+ cl::desc(
+ "If the function being instrumented contains more than "
+ "this number of memory accesses, use callbacks instead of "
+ "inline checks (-1 means never use callbacks)."),
+ cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
- "asan-memory-access-callback-prefix",
- cl::desc("Prefix for memory access callbacks"), cl::Hidden,
- cl::init("__asan_"));
+ "asan-memory-access-callback-prefix",
+ cl::desc("Prefix for memory access callbacks"), cl::Hidden,
+ cl::init("__asan_"));
static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas",
- cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(false));
+ cl::desc("instrument dynamic allocas"),
+ cl::Hidden, cl::init(false));
+static cl::opt<bool> ClSkipPromotableAllocas(
+ "asan-skip-promotable-allocas",
+ cl::desc("Do not instrument promotable allocas"), cl::Hidden,
+ cl::init(true));
// These flags allow to change the shadow mapping.
// The shadow mapping looks like
// Shadow = (Mem >> scale) + (1 << offset_log)
static cl::opt<int> ClMappingScale("asan-mapping-scale",
- cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
+ cl::desc("scale of asan shadow mapping"),
+ cl::Hidden, cl::init(0));
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
-static cl::opt<bool> ClOpt("asan-opt",
- cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
- cl::desc("Instrument the same temp just once"), cl::Hidden,
- cl::init(true));
+static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
+ cl::Hidden, cl::init(true));
+static cl::opt<bool> ClOptSameTemp(
+ "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
+ cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
- cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
-
-static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
- cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
- cl::Hidden, cl::init(false));
+ cl::desc("Don't instrument scalar globals"),
+ cl::Hidden, cl::init(true));
+static cl::opt<bool> ClOptStack(
+ "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
+ cl::Hidden, cl::init(false));
+
+static cl::opt<bool> ClCheckLifetime(
+ "asan-check-lifetime",
+ cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), cl::Hidden,
+ cl::init(false));
static cl::opt<bool> ClDynamicAllocaStack(
"asan-stack-dynamic-alloca",
cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
- cl::init(false));
+ cl::init(true));
+
+static cl::opt<uint32_t> ClForceExperiment(
+ "asan-force-experiment",
+ cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
+ cl::init(0));
// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
cl::init(0));
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
cl::Hidden, cl::init(0));
-static cl::opt<std::string> ClDebugFunc("asan-debug-func",
- cl::Hidden, cl::desc("Debug func"));
+static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
+ cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"),
@@ -204,12 +231,10 @@ static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"),
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
-STATISTIC(NumInstrumentedDynamicAllocas,
- "Number of instrumented dynamic allocas");
-STATISTIC(NumOptimizedAccessesToGlobalArray,
- "Number of optimized accesses to global arrays");
STATISTIC(NumOptimizedAccessesToGlobalVar,
"Number of optimized accesses to global vars");
+STATISTIC(NumOptimizedAccessesToStackVar,
+ "Number of optimized accesses to stack vars");
namespace {
/// Frontend-provided metadata for source location.
@@ -224,8 +249,8 @@ struct LocationMetadata {
void parse(MDNode *MDN) {
assert(MDN->getNumOperands() == 3);
- MDString *MDFilename = cast<MDString>(MDN->getOperand(0));
- Filename = MDFilename->getString();
+ MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
+ Filename = DIFilename->getString();
LineNo =
mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
ColumnNo =
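The three operands decoded here come from a frontend-emitted location node; as textual IR it would look like !0 = !{!"a.cpp", i32 42, i32 7} (hypothetical values), i.e. filename, line, column.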
@@ -237,9 +262,7 @@ struct LocationMetadata {
class GlobalsMetadata {
public:
struct Entry {
- Entry()
- : SourceLoc(), Name(), IsDynInit(false),
- IsBlacklisted(false) {}
+ Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {}
LocationMetadata SourceLoc;
StringRef Name;
bool IsDynInit;
@@ -248,19 +271,17 @@ class GlobalsMetadata {
GlobalsMetadata() : inited_(false) {}
- void init(Module& M) {
+ void init(Module &M) {
assert(!inited_);
inited_ = true;
NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
- if (!Globals)
- return;
+ if (!Globals) return;
for (auto MDN : Globals->operands()) {
// Metadata node contains the global and the fields of "Entry".
assert(MDN->getNumOperands() == 5);
auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
// The optimizer may optimize away a global entirely.
- if (!GV)
- continue;
+ if (!GV) continue;
// We can already have an entry for GV if it was merged with another
// global.
Entry &E = Entries[GV];
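Each llvm.asan.globals operand consumed here is a five-field node; a hypothetical instance, shown as textual IR inside C++ comments:

    // !llvm.asan.globals = !{!1}
    // !1 = !{i32* @g, !0, !"g", i1 false, i1 false}
    // operands: global, source location (!0), name, IsDynInit, IsBlacklisted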
@@ -285,7 +306,7 @@ class GlobalsMetadata {
private:
bool inited_;
- DenseMap<GlobalVariable*, Entry> Entries;
+ DenseMap<GlobalVariable *, Entry> Entries;
};
/// This struct defines the shadow mapping using the rule:
@@ -308,6 +329,7 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
TargetTriple.getArch() == llvm::Triple::mipsel;
bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
TargetTriple.getArch() == llvm::Triple::mips64el;
+ bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64;
bool IsWindows = TargetTriple.isOSWindows();
ShadowMapping Mapping;
@@ -334,6 +356,8 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
Mapping.Offset = kSmallX86_64ShadowOffset;
else if (IsMIPS64)
Mapping.Offset = kMIPS64_ShadowOffset64;
+ else if (IsAArch64)
+ Mapping.Offset = kAArch64_ShadowOffset64;
else
Mapping.Offset = kDefaultShadowOffset64;
}
@@ -367,17 +391,42 @@ struct AddressSanitizer : public FunctionPass {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
- void instrumentMop(Instruction *I, bool UseCalls);
+ uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
+ Type *Ty = AI->getAllocatedType();
+ uint64_t SizeInBytes =
+ AI->getModule()->getDataLayout().getTypeAllocSize(Ty);
+ return SizeInBytes;
+ }
+ /// Check if we want (and can) handle this alloca.
+ bool isInterestingAlloca(AllocaInst &AI);
+
+ // Check if we have dynamic alloca.
+ bool isDynamicAlloca(AllocaInst &AI) const {
+ return AI.isArrayAllocation() || !AI.isStaticAlloca();
+ }
+
+ /// If it is an interesting memory access, return the PointerOperand
+ /// and set IsWrite/Alignment. Otherwise return nullptr.
+ Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+ uint64_t *TypeSize,
+ unsigned *Alignment);
+ void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
+ bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
- Value *SizeArgument, bool UseCalls);
+ Value *SizeArgument, bool UseCalls, uint32_t Exp);
+ void instrumentUnusualSizeOrAlignment(Instruction *I, Value *Addr,
+ uint32_t TypeSize, bool IsWrite,
+ Value *SizeArgument, bool UseCalls,
+ uint32_t Exp);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex,
- Value *SizeArgument);
+ Value *SizeArgument, uint32_t Exp);
void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool runOnFunction(Function &F) override;
@@ -392,9 +441,10 @@ struct AddressSanitizer : public FunctionPass {
bool LooksLikeCodeInBug11395(Instruction *I);
bool GlobalIsLinkerInitialized(GlobalVariable *G);
+ bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
+ uint64_t TypeSize) const;
LLVMContext *C;
- const DataLayout *DL;
Triple TargetTriple;
int LongSize;
Type *IntptrTy;
@@ -404,15 +454,16 @@ struct AddressSanitizer : public FunctionPass {
Function *AsanInitFunction;
Function *AsanHandleNoReturnFunc;
Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
- // This array is indexed by AccessIsWrite and log2(AccessSize).
- Function *AsanErrorCallback[2][kNumberOfAccessSizes];
- Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
- // This array is indexed by AccessIsWrite.
- Function *AsanErrorCallbackSized[2],
- *AsanMemoryAccessCallbackSized[2];
+ // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
+ Function *AsanErrorCallback[2][2][kNumberOfAccessSizes];
+ Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
+ // This array is indexed by AccessIsWrite and Experiment.
+ Function *AsanErrorCallbackSized[2][2];
+ Function *AsanMemoryAccessCallbackSized[2][2];
Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
GlobalsMetadata GlobalsMD;
+ DenseMap<AllocaInst *, bool> ProcessedAllocas;
friend struct FunctionStackPoisoner;
};
@@ -422,9 +473,7 @@ class AddressSanitizerModule : public ModulePass {
AddressSanitizerModule() : ModulePass(ID) {}
bool runOnModule(Module &M) override;
static char ID; // Pass identification, replacement for typeid
- const char *getPassName() const override {
- return "AddressSanitizerModule";
- }
+ const char *getPassName() const override { return "AddressSanitizerModule"; }
private:
void initializeCallbacks(Module &M);
@@ -440,7 +489,6 @@ class AddressSanitizerModule : public ModulePass {
GlobalsMetadata GlobalsMD;
Type *IntptrTy;
LLVMContext *C;
- const DataLayout *DL;
Triple TargetTriple;
ShadowMapping Mapping;
Function *AsanPoisonGlobals;
@@ -467,13 +515,14 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
Type *IntptrPtrTy;
ShadowMapping Mapping;
- SmallVector<AllocaInst*, 16> AllocaVec;
- SmallVector<Instruction*, 8> RetVec;
+ SmallVector<AllocaInst *, 16> AllocaVec;
+ SmallVector<Instruction *, 8> RetVec;
unsigned StackAlignment;
Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
- *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
+ *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
+ Function *AsanAllocaPoisonFunc, *AsanAllocasUnpoisonFunc;
// Stores a place and arguments of poisoning/unpoisoning call for alloca.
struct AllocaPoisonCall {
@@ -484,42 +533,33 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
};
SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;
- // Stores left and right redzone shadow addresses for dynamic alloca
- // and pointer to alloca instruction itself.
- // LeftRzAddr is a shadow address for alloca left redzone.
- // RightRzAddr is a shadow address for alloca right redzone.
- struct DynamicAllocaCall {
- AllocaInst *AI;
- Value *LeftRzAddr;
- Value *RightRzAddr;
- bool Poison;
- explicit DynamicAllocaCall(AllocaInst *AI,
- Value *LeftRzAddr = nullptr,
- Value *RightRzAddr = nullptr)
- : AI(AI), LeftRzAddr(LeftRzAddr), RightRzAddr(RightRzAddr), Poison(true)
- {}
- };
- SmallVector<DynamicAllocaCall, 1> DynamicAllocaVec;
+ SmallVector<AllocaInst *, 1> DynamicAllocaVec;
+ SmallVector<IntrinsicInst *, 1> StackRestoreVec;
+ AllocaInst *DynamicAllocaLayout = nullptr;
// Maps Value to an AllocaInst from which the Value is originated.
- typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy;
+ typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
AllocaForValueMapTy AllocaForValue;
bool HasNonEmptyInlineAsm;
std::unique_ptr<CallInst> EmptyInlineAsm;
FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
- : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
- C(ASan.C), IntptrTy(ASan.IntptrTy),
- IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
- StackAlignment(1 << Mapping.Scale), HasNonEmptyInlineAsm(false),
+ : F(F),
+ ASan(ASan),
+ DIB(*F.getParent(), /*AllowUnresolved*/ false),
+ C(ASan.C),
+ IntptrTy(ASan.IntptrTy),
+ IntptrPtrTy(PointerType::get(IntptrTy, 0)),
+ Mapping(ASan.Mapping),
+ StackAlignment(1 << Mapping.Scale),
+ HasNonEmptyInlineAsm(false),
EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
bool runOnFunction() {
if (!ClStack) return false;
// Collect alloca, ret, lifetime instructions etc.
- for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
- visit(*BB);
+ for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
@@ -538,44 +578,30 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
// Then unpoison everything back before the function returns.
void poisonStack();
+ void createDynamicAllocasInitStorage();
+
// ----------------------- Visitors.
/// \brief Collect all Ret instructions.
- void visitReturnInst(ReturnInst &RI) {
- RetVec.push_back(&RI);
+ void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }
+
+ void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
+ Value *SavedStack) {
+ IRBuilder<> IRB(InstBefore);
+ IRB.CreateCall(AsanAllocasUnpoisonFunc,
+ {IRB.CreateLoad(DynamicAllocaLayout),
+ IRB.CreatePtrToInt(SavedStack, IntptrTy)});
}
// Unpoison dynamic allocas redzones.
- void unpoisonDynamicAlloca(DynamicAllocaCall &AllocaCall) {
- if (!AllocaCall.Poison)
- return;
- for (auto Ret : RetVec) {
- IRBuilder<> IRBRet(Ret);
- PointerType *Int32PtrTy = PointerType::getUnqual(IRBRet.getInt32Ty());
- Value *Zero = Constant::getNullValue(IRBRet.getInt32Ty());
- Value *PartialRzAddr = IRBRet.CreateSub(AllocaCall.RightRzAddr,
- ConstantInt::get(IntptrTy, 4));
- IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.LeftRzAddr,
- Int32PtrTy));
- IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(PartialRzAddr,
- Int32PtrTy));
- IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.RightRzAddr,
- Int32PtrTy));
- }
- }
+ void unpoisonDynamicAllocas() {
+ for (auto &Ret : RetVec)
+ unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
- // Right shift for BigEndian and left shift for LittleEndian.
- Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) {
- return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift)
- : IRB.CreateLShr(Val, Shift);
+ for (auto &StackRestoreInst : StackRestoreVec)
+ unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
+ StackRestoreInst->getOperand(0));
}
- // Compute PartialRzMagic for dynamic alloca call. Since we don't know the
- // size of requested memory until runtime, we should compute it dynamically.
- // If PartialSize is 0, PartialRzMagic would contain kAsanAllocaRightMagic,
- // otherwise it would contain the value that we will use to poison the
- // partial redzone for alloca call.
- Value *computePartialRzMagic(Value *PartialSize, IRBuilder<> &IRB);
-
// Deploy and poison redzones around dynamic alloca call. To do this, we
// should replace this call with another one with changed parameters and
// replace all its uses with new address, so
@@ -586,20 +612,15 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
// addr = tmp + 32 (first 32 bytes are for the left redzone).
// Additional_size is added to make new memory allocation contain not only
// requested memory, but also left, partial and right redzones.
- // After that, we should poison redzones:
- // (1) Left redzone with kAsanAllocaLeftMagic.
- // (2) Partial redzone with the value, computed in runtime by
- // computePartialRzMagic function.
- // (3) Right redzone with kAsanAllocaRightMagic.
- void handleDynamicAllocaCall(DynamicAllocaCall &AllocaCall);
+ void handleDynamicAllocaCall(AllocaInst *AI);
/// \brief Collect Alloca instructions we want (and can) handle.
void visitAllocaInst(AllocaInst &AI) {
- if (!isInterestingAlloca(AI)) return;
+ if (!ASan.isInterestingAlloca(AI)) return;
StackAlignment = std::max(StackAlignment, AI.getAlignment());
- if (isDynamicAlloca(AI))
- DynamicAllocaVec.push_back(DynamicAllocaCall(&AI));
+ if (ASan.isDynamicAlloca(AI))
+ DynamicAllocaVec.push_back(&AI);
else
AllocaVec.push_back(&AI);
}
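A source-level sketch of the replacement described in the comment above, with the 32-byte left redzone implied by kAllocaRzSize (names hypothetical; the runtime entry points are the kAsanAllocaPoison/kAsanAllocasUnpoison callbacks declared earlier):

    // old:  addr = alloca(size);
    // new (sketch):
    char *tmp  = (char *)alloca(size + left_rz + partial_rz + right_rz);
    char *addr = tmp + 32;                        // skip the left redzone
    __asan_alloca_poison((uintptr_t)addr, size);  // poison surrounding redzones
    // ...and every ret / llvm.stackrestore is preceded by
    // __asan_allocas_unpoison(/* two stack bounds */) to undo the poisoning.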
@@ -607,10 +628,10 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
/// \brief Collect lifetime intrinsic calls to check for use-after-scope
/// errors.
void visitIntrinsicInst(IntrinsicInst &II) {
- if (!ClCheckLifetime) return;
Intrinsic::ID ID = II.getIntrinsicID();
- if (ID != Intrinsic::lifetime_start &&
- ID != Intrinsic::lifetime_end)
+ if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
+ if (!ClCheckLifetime) return;
+ if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
return;
// Found lifetime intrinsic, add ASan instrumentation if necessary.
ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
@@ -640,28 +661,11 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
bool doesDominateAllExits(const Instruction *I) const {
for (auto Ret : RetVec) {
- if (!ASan.getDominatorTree().dominates(I, Ret))
- return false;
+ if (!ASan.getDominatorTree().dominates(I, Ret)) return false;
}
return true;
}
- bool isDynamicAlloca(AllocaInst &AI) const {
- return AI.isArrayAllocation() || !AI.isStaticAlloca();
- }
-
- // Check if we want (and can) handle this alloca.
- bool isInterestingAlloca(AllocaInst &AI) const {
- return (AI.getAllocatedType()->isSized() &&
- // alloca() may be called with 0 size, ignore it.
- getAllocaSizeInBytes(&AI) > 0);
- }
-
- uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
- Type *Ty = AI->getAllocatedType();
- uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
- return SizeInBytes;
- }
/// Finds alloca where the value comes from.
AllocaInst *findAllocaForValue(Value *V);
void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
@@ -679,21 +683,25 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
} // namespace
char AddressSanitizer::ID = 0;
-INITIALIZE_PASS_BEGIN(AddressSanitizer, "asan",
- "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
- false, false)
+INITIALIZE_PASS_BEGIN(
+ AddressSanitizer, "asan",
+ "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
+ false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(AddressSanitizer, "asan",
- "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
- false, false)
+INITIALIZE_PASS_END(
+ AddressSanitizer, "asan",
+ "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
+ false)
FunctionPass *llvm::createAddressSanitizerFunctionPass() {
return new AddressSanitizer();
}
char AddressSanitizerModule::ID = 0;
-INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
+INITIALIZE_PASS(
+ AddressSanitizerModule, "asan-module",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs."
- "ModulePass", false, false)
+ "ModulePass",
+ false, false)
ModulePass *llvm::createAddressSanitizerModulePass() {
return new AddressSanitizerModule();
}
@@ -705,16 +713,15 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
}
// \brief Create a constant for Str so that we can pass it to the run-time lib.
-static GlobalVariable *createPrivateGlobalForString(
- Module &M, StringRef Str, bool AllowMerging) {
+static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
+ bool AllowMerging) {
Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
// We use private linkage for module-local strings. If they can be merged
// with another one, we set the unnamed_addr attribute.
GlobalVariable *GV =
new GlobalVariable(M, StrConst->getType(), true,
GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
- if (AllowMerging)
- GV->setUnnamedAddr(true);
+ if (AllowMerging) GV->setUnnamedAddr(true);
GV->setAlignment(1); // Strings may not be merged w/o setting align 1.
return GV;
}
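TypeSizeToSizeIndex (whose closing brace begins the hunk above) maps an access width in bits to an index into the kNumberOfAccessSizes-wide callback tables. A sketch of the intended computation, assuming the usual log2 encoding:

    // Sketch: 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4.
    static size_t typeSizeToSizeIndex(uint32_t TypeSize) {
      size_t Res = countTrailingZeros(TypeSize / 8); // llvm/Support/MathExtras.h
      assert(Res < kNumberOfAccessSizes);
      return Res;
    }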
@@ -743,8 +750,7 @@ static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
// Shadow >> scale
Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
- if (Mapping.Offset == 0)
- return Shadow;
+ if (Mapping.Offset == 0) return Shadow;
// (Shadow >> scale) | offset
if (Mapping.OrShadowOffset)
return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
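For the default 64-bit layout (Scale = 3, additive offset) the IR built here is equivalent to the following runtime arithmetic; OrShadowOffset targets substitute '|' for '+':

    // Sketch of the address computation memToShadow() emits.
    static uint8_t *memToShadowRuntime(uintptr_t Mem) {
      return (uint8_t *)((Mem >> 3) + kDefaultShadowOffset64); // 1ULL << 44
    }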
@@ -756,53 +762,86 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
IRBuilder<> IRB(MI);
if (isa<MemTransferInst>(MI)) {
- IRB.CreateCall3(
+ IRB.CreateCall(
isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
- IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
+ {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
} else if (isa<MemSetInst>(MI)) {
- IRB.CreateCall3(
+ IRB.CreateCall(
AsanMemset,
- IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
+ {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
}
MI->eraseFromParent();
}
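Net effect at the source level, sketched: the intrinsic is erased and replaced by a checking call into the runtime, e.g.

    // before: llvm.memcpy.p0i8.p0i8.i64(dst, src, n, ...)
    // after:  __asan_memcpy(dst, src, n)  // checks both ranges, then copies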
-// If I is an interesting memory access, return the PointerOperand
-// and set IsWrite/Alignment. Otherwise return nullptr.
-static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
- unsigned *Alignment) {
+/// Check if we want (and can) handle this alloca.
+bool AddressSanitizer::isInterestingAlloca(AllocaInst &AI) {
+ auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
+
+ if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
+ return PreviouslySeenAllocaInfo->getSecond();
+
+ bool IsInteresting =
+ (AI.getAllocatedType()->isSized() &&
+ // alloca() may be called with 0 size, ignore it.
+ getAllocaSizeInBytes(&AI) > 0 &&
+ // We are only interested in allocas not promotable to registers.
+ // Promotable allocas are common under -O0.
+ (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI) ||
+ isDynamicAlloca(AI)));
+
+ ProcessedAllocas[&AI] = IsInteresting;
+ return IsInteresting;
+}
+
+/// If I is an interesting memory access, return the PointerOperand
+/// and set IsWrite/Alignment. Otherwise return nullptr.
+Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
+ bool *IsWrite,
+ uint64_t *TypeSize,
+ unsigned *Alignment) {
// Skip memory accesses inserted by another instrumentation.
- if (I->getMetadata("nosanitize"))
- return nullptr;
+ if (I->getMetadata("nosanitize")) return nullptr;
+
+ Value *PtrOperand = nullptr;
+ const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
+ *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
*Alignment = LI->getAlignment();
- return LI->getPointerOperand();
- }
- if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ PtrOperand = LI->getPointerOperand();
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
*Alignment = SI->getAlignment();
- return SI->getPointerOperand();
- }
- if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
+ PtrOperand = SI->getPointerOperand();
+ } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
*Alignment = 0;
- return RMW->getPointerOperand();
- }
- if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
+ PtrOperand = RMW->getPointerOperand();
+ } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
*Alignment = 0;
- return XCHG->getPointerOperand();
+ PtrOperand = XCHG->getPointerOperand();
}
- return nullptr;
+
+ // Treat memory accesses to promotable allocas as non-interesting since they
+ // will not cause memory violations. This greatly speeds up the instrumented
+ // executable at -O0.
+ if (ClSkipPromotableAllocas)
+ if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
+ return isInterestingAlloca(*AI) ? AI : nullptr;
+
+ return PtrOperand;
}
static bool isPointerOperand(Value *V) {
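The promotable-alloca filter above matters mostly at -O0, where mem2reg has not run; a hypothetical example:

    // 'x' lives in a promotable alloca: its load/store cannot leave the slot,
    // so under -asan-skip-promotable-allocas neither access is instrumented.
    int f() {
      int x = 1;
      return x;
    }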
@@ -814,17 +853,15 @@ static bool isPointerOperand(Value *V) {
// the frontend.
static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
- if (!Cmp->isRelational())
- return false;
+ if (!Cmp->isRelational()) return false;
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
- if (BO->getOpcode() != Instruction::Sub)
- return false;
+ if (BO->getOpcode() != Instruction::Sub) return false;
} else {
return false;
}
if (!isPointerOperand(I->getOperand(0)) ||
!isPointerOperand(I->getOperand(1)))
- return false;
+ return false;
return true;
}
@@ -835,8 +872,8 @@ bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}
-void
-AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
+void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
+ Instruction *I) {
IRBuilder<> IRB(I);
Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
@@ -844,41 +881,50 @@ AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
if (Param[i]->getType()->isPointerTy())
Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
}
- IRB.CreateCall2(F, Param[0], Param[1]);
+ IRB.CreateCall(F, Param);
}
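What the call built above amounts to for a flagged comparison (the feature is gated by -asan-detect-invalid-pointer-pair), sketched:

    __sanitizer_ptr_cmp((uintptr_t)p, (uintptr_t)q); // runtime validates pair
    if (p < q) { /* original comparison, unchanged */ }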
-void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
+void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
+ Instruction *I, bool UseCalls,
+ const DataLayout &DL) {
bool IsWrite = false;
unsigned Alignment = 0;
- Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
+ uint64_t TypeSize = 0;
+ Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment);
assert(Addr);
+
+ // Optimization experiments.
+ // The experiments can be used to evaluate potential optimizations that remove
+ // instrumentation (assess false negatives). Instead of completely removing
+ // some instrumentation, you set Exp to a non-zero value (mask of optimization
+ // experiments that want to remove instrumentation of this instruction).
+ // If Exp is non-zero, this pass will emit special calls into runtime
+ // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
+ // make runtime terminate the program in a special way (with a different
+ // exit status). Then you run the new compiler on a buggy corpus, collect
+ // the special terminations (ideally, you don't see them at all -- no false
+ // negatives) and make the decision on the optimization.
+ uint32_t Exp = ClForceExperiment;
+
if (ClOpt && ClOptGlobals) {
- if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
- // If initialization order checking is disabled, a simple access to a
- // dynamically initialized global is always valid.
- if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
- NumOptimizedAccessesToGlobalVar++;
- return;
- }
- }
- ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
- if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
- if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
- if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
- NumOptimizedAccessesToGlobalArray++;
- return;
- }
- }
+ // If initialization order checking is disabled, a simple access to a
+ // dynamically initialized global is always valid.
+ GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
+ if (G != NULL && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
+ isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
+ NumOptimizedAccessesToGlobalVar++;
+ return;
}
}
- Type *OrigPtrTy = Addr->getType();
- Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
-
- assert(OrigTy->isSized());
- uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
-
- assert((TypeSize % 8) == 0);
+ if (ClOpt && ClOptStack) {
+ // A direct inbounds access to a stack variable is always valid.
+ if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+ isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
+ NumOptimizedAccessesToStackVar++;
+ return;
+ }
+ }
if (IsWrite)
NumInstrumentedWrites++;
@@ -891,65 +937,57 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
TypeSize == 128) &&
(Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
- return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
- // Instrument unusual size or unusual alignment.
- // We can not do it with a single check, so we do 1-byte check for the first
- // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
- // to report the actual access size.
- IRBuilder<> IRB(I);
- Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
- Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
- if (UseCalls) {
- IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
- } else {
- Value *LastByte = IRB.CreateIntToPtr(
- IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
- OrigPtrTy);
- instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
- instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
- }
+ return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls,
+ Exp);
+ instrumentUnusualSizeOrAlignment(I, Addr, TypeSize, IsWrite, nullptr,
+ UseCalls, Exp);
}
-// Validate the result of Module::getOrInsertFunction called for an interface
-// function of AddressSanitizer. If the instrumented module defines a function
-// with the same name, their prototypes must match, otherwise
-// getOrInsertFunction returns a bitcast.
-static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
- if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
- FuncOrBitcast->dump();
- report_fatal_error("trying to redefine an AddressSanitizer "
- "interface function");
-}
-
-Instruction *AddressSanitizer::generateCrashCode(
- Instruction *InsertBefore, Value *Addr,
- bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) {
+Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
+ Value *Addr, bool IsWrite,
+ size_t AccessSizeIndex,
+ Value *SizeArgument,
+ uint32_t Exp) {
IRBuilder<> IRB(InsertBefore);
- CallInst *Call = SizeArgument
- ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
- : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);
+ Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
+ CallInst *Call = nullptr;
+ if (SizeArgument) {
+ if (Exp == 0)
+ Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
+ {Addr, SizeArgument});
+ else
+ Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
+ {Addr, SizeArgument, ExpVal});
+ } else {
+ if (Exp == 0)
+ Call =
+ IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
+ else
+ Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
+ {Addr, ExpVal});
+ }
// We don't do Call->setDoesNotReturn() because the BB already has
// UnreachableInst at the end.
// This EmptyAsm is required to avoid callback merge.
- IRB.CreateCall(EmptyAsm);
+ IRB.CreateCall(EmptyAsm, {});
return Call;
}
Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
- Value *ShadowValue,
- uint32_t TypeSize) {
+ Value *ShadowValue,
+ uint32_t TypeSize) {
size_t Granularity = 1 << Mapping.Scale;
// Addr & (Granularity - 1)
- Value *LastAccessedByte = IRB.CreateAnd(
- AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
+ Value *LastAccessedByte =
+ IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
// (Addr & (Granularity - 1)) + size - 1
if (TypeSize / 8 > 1)
LastAccessedByte = IRB.CreateAdd(
LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
// (uint8_t) ((Addr & (Granularity-1)) + size - 1)
- LastAccessedByte = IRB.CreateIntCast(
- LastAccessedByte, ShadowValue->getType(), false);
+ LastAccessedByte =
+ IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
// ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
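createSlowPathCmp encodes the standard partial-granule test; as runtime C, assuming the default 8-byte shadow granularity:

    // A non-zero shadow value k means only the first k bytes of the granule
    // are addressable; the access is bad iff its last byte lands at index >= k.
    static bool slowPathCheck(int8_t ShadowValue, uintptr_t Addr,
                              uint32_t TypeSize) {
      int8_t LastAccessedByte = (Addr & 7) + TypeSize / 8 - 1;
      return LastAccessedByte >= ShadowValue;
    }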
@@ -957,24 +995,29 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
uint32_t TypeSize, bool IsWrite,
- Value *SizeArgument, bool UseCalls) {
+ Value *SizeArgument, bool UseCalls,
+ uint32_t Exp) {
IRBuilder<> IRB(InsertBefore);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
if (UseCalls) {
- IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
- AddrLong);
+ if (Exp == 0)
+ IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
+ AddrLong);
+ else
+ IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
+ {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
return;
}
- Type *ShadowTy = IntegerType::get(
- *C, std::max(8U, TypeSize >> Mapping.Scale));
+ Type *ShadowTy =
+ IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
Value *ShadowPtr = memToShadow(AddrLong, IRB);
Value *CmpVal = Constant::getNullValue(ShadowTy);
- Value *ShadowValue = IRB.CreateLoad(
- IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
+ Value *ShadowValue =
+ IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
size_t Granularity = 1 << Mapping.Scale;
@@ -983,10 +1026,9 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
// We use branch weights for the slow path check, to indicate that the slow
// path is rarely taken. This seems to be the case for SPEC benchmarks.
- TerminatorInst *CheckTerm =
- SplitBlockAndInsertIfThen(Cmp, InsertBefore, false,
- MDBuilder(*C).createBranchWeights(1, 100000));
- assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
+ TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
+ Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
+ assert(cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
@@ -999,11 +1041,37 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
}
- Instruction *Crash = generateCrashCode(
- CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument);
+ Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
+ AccessSizeIndex, SizeArgument, Exp);
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
+// Instrument unusual size or unusual alignment.
+// We can not do it with a single check, so we do 1-byte check for the first
+// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
+// to report the actual access size.
+void AddressSanitizer::instrumentUnusualSizeOrAlignment(
+ Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite,
+ Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+ IRBuilder<> IRB(I);
+ Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ if (UseCalls) {
+ if (Exp == 0)
+ IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
+ {AddrLong, Size});
+ else
+ IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
+ {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+ } else {
+ Value *LastByte = IRB.CreateIntToPtr(
+ IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
+ Addr->getType());
+ instrumentAddress(I, I, Addr, 8, IsWrite, Size, false, Exp);
+ instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false, Exp);
+ }
+}
+
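For example, an inlined check of a 10-byte access at addr becomes two 1-byte checks, at addr and at addr + 9, each carrying Size so that a failure is reported as __asan_report_*_n(addr, 10) with the true access size.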
void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
@@ -1025,12 +1093,11 @@ void AddressSanitizerModule::createInitializerPoisonCalls(
ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
for (Use &OP : CA->operands()) {
- if (isa<ConstantAggregateZero>(OP))
- continue;
+ if (isa<ConstantAggregateZero>(OP)) continue;
ConstantStruct *CS = cast<ConstantStruct>(OP);
// Must have a function or null ptr.
- if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
+ if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
if (F->getName() == kAsanModuleCtorName) continue;
ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
// Don't instrument CTORs that will run before asan.module_ctor.
@@ -1055,30 +1122,38 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
G->getLinkage() != GlobalVariable::PrivateLinkage &&
G->getLinkage() != GlobalVariable::InternalLinkage)
return false;
- if (G->hasComdat())
- return false;
+ if (G->hasComdat()) return false;
// Two problems with thread-locals:
// - The address of the main thread's copy can't be computed at link-time.
// - Need to poison all copies, not just the main thread's one.
- if (G->isThreadLocal())
- return false;
+ if (G->isThreadLocal()) return false;
// For now, just ignore this Global if the alignment is large.
if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
if (G->hasSection()) {
StringRef Section(G->getSection());
+ // Globals from llvm.metadata aren't emitted, do not instrument them.
+ if (Section == "llvm.metadata") return false;
+
+ // Callbacks put into the CRT initializer/terminator sections
+ // should not be instrumented.
+ // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
+ // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
+ if (Section.startswith(".CRT")) {
+ DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
+ return false;
+ }
+
if (TargetTriple.isOSBinFormatMachO()) {
StringRef ParsedSegment, ParsedSection;
unsigned TAA = 0, StubSize = 0;
bool TAAParsed;
- std::string ErrorCode =
- MCSectionMachO::ParseSectionSpecifier(Section, ParsedSegment,
- ParsedSection, TAA, TAAParsed,
- StubSize);
+ std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
+ Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
if (!ErrorCode.empty()) {
- report_fatal_error("Invalid section specifier '" + ParsedSection +
- "': " + ErrorCode + ".");
+ assert(false && "Invalid section specifier.");
+ return false;
}
// Ignore the globals from the __OBJC section. The ObjC runtime assumes
@@ -1108,18 +1183,6 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
return false;
}
}
-
- // Callbacks put into the CRT initializer/terminator sections
- // should not be instrumented.
- // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
- // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
- if (Section.startswith(".CRT")) {
- DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
- return false;
- }
-
- // Globals from llvm.metadata aren't emitted, do not instrument them.
- if (Section == "llvm.metadata") return false;
}
return true;
@@ -1128,20 +1191,19 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
void AddressSanitizerModule::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Declare our poisoning and unpoisoning functions.
- AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ AsanPoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr));
AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
- AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+ AsanUnpoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr));
AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
// Declare functions that register/unregister globals.
- AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanRegisterGlobalsName, IRB.getVoidTy(),
- IntptrTy, IntptrTy, nullptr));
+ AsanRegisterGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
- AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanUnregisterGlobalsName,
- IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
+ AsanUnregisterGlobals = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, nullptr));
AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
}
@@ -1154,8 +1216,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
SmallVector<GlobalVariable *, 16> GlobalsToChange;
for (auto &G : M.globals()) {
- if (ShouldInstrumentGlobal(&G))
- GlobalsToChange.push_back(&G);
+ if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G);
}
size_t n = GlobalsToChange.size();
@@ -1180,8 +1241,9 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
// We shouldn't merge same module names, as this string serves as unique
// module ID in runtime.
GlobalVariable *ModuleName = createPrivateGlobalForString(
- M, M.getModuleIdentifier(), /*AllowMerging*/false);
+ M, M.getModuleIdentifier(), /*AllowMerging*/ false);
+ auto &DL = M.getDataLayout();
for (size_t i = 0; i < n; i++) {
static const uint64_t kMaxGlobalRedzone = 1 << 18;
GlobalVariable *G = GlobalsToChange[i];
@@ -1195,32 +1257,30 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
PointerType *PtrTy = cast<PointerType>(G->getType());
Type *Ty = PtrTy->getElementType();
- uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
+ uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
uint64_t MinRZ = MinRedzoneSizeForGlobal();
// MinRZ <= RZ <= kMaxGlobalRedzone
// and trying to make RZ to be ~ 1/4 of SizeInBytes.
- uint64_t RZ = std::max(MinRZ,
- std::min(kMaxGlobalRedzone,
- (SizeInBytes / MinRZ / 4) * MinRZ));
+ uint64_t RZ = std::max(
+ MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
uint64_t RightRedzoneSize = RZ;
// Round up to MinRZ
- if (SizeInBytes % MinRZ)
- RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
+ if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr);
- Constant *NewInitializer = ConstantStruct::get(
- NewTy, G->getInitializer(),
- Constant::getNullValue(RightRedZoneTy), nullptr);
+ Constant *NewInitializer =
+ ConstantStruct::get(NewTy, G->getInitializer(),
+ Constant::getNullValue(RightRedZoneTy), nullptr);
// Create a new global variable with enough space for a redzone.
GlobalValue::LinkageTypes Linkage = G->getLinkage();
if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
Linkage = GlobalValue::InternalLinkage;
- GlobalVariable *NewGlobal = new GlobalVariable(
- M, NewTy, G->isConstant(), Linkage,
- NewInitializer, "", G, G->getThreadLocalMode());
+ GlobalVariable *NewGlobal =
+ new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
+ "", G, G->getThreadLocalMode());
NewGlobal->copyAttributesFrom(G);
NewGlobal->setAlignment(MinRZ);
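A worked example of the sizing above, with MinRZ = 32 and a hypothetical SizeInBytes = 1000: RZ = max(32, min(1 << 18, (1000 / 32 / 4) * 32)) = 224; rounding 1000 up to the next multiple of 32 adds another 24 bytes, so RightRedzoneSize = 248 and (1000 + 248) % 32 == 0, as the assert requires.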
@@ -1229,7 +1289,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
Indices2[1] = IRB.getInt32(0);
G->replaceAllUsesWith(
- ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
+ ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
NewGlobal->takeName(G);
G->eraseFromParent();
@@ -1249,8 +1309,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
ConstantExpr::getPointerCast(ModuleName, IntptrTy),
ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr);
- if (ClInitializers && MD.IsDynInit)
- HasDynamicallyInitializedGlobals = true;
+ if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
}
@@ -1263,20 +1322,20 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
// Create calls for poisoning before initializers run and unpoisoning after.
if (HasDynamicallyInitializedGlobals)
createInitializerPoisonCalls(M, ModuleName);
- IRB.CreateCall2(AsanRegisterGlobals,
- IRB.CreatePointerCast(AllGlobals, IntptrTy),
- ConstantInt::get(IntptrTy, n));
+ IRB.CreateCall(AsanRegisterGlobals,
+ {IRB.CreatePointerCast(AllGlobals, IntptrTy),
+ ConstantInt::get(IntptrTy, n)});
// We also need to unregister globals at the end, e.g. when a shared library
// gets closed.
- Function *AsanDtorFunction = Function::Create(
- FunctionType::get(Type::getVoidTy(*C), false),
- GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
+ Function *AsanDtorFunction =
+ Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
+ GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
- IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
- IRB.CreatePointerCast(AllGlobals, IntptrTy),
- ConstantInt::get(IntptrTy, n));
+ IRB_Dtor.CreateCall(AsanUnregisterGlobals,
+ {IRB.CreatePointerCast(AllGlobals, IntptrTy),
+ ConstantInt::get(IntptrTy, n)});
appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
DEBUG(dbgs() << M);
@@ -1284,12 +1343,8 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
}
bool AddressSanitizerModule::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- return false;
- DL = &DLP->getDataLayout();
C = &(M.getContext());
- int LongSize = DL->getPointerSizeInBits();
+ int LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
Mapping = getShadowMapping(TargetTriple, LongSize);
@@ -1301,8 +1356,7 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
assert(CtorFunc);
IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
- if (ClGlobals)
- Changed |= InstrumentGlobals(IRB, M);
+ if (ClGlobals) Changed |= InstrumentGlobals(IRB, M);
return Changed;
}
@@ -1310,50 +1364,51 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
void AddressSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
// Create __asan_report* callbacks.
- for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
- for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
- AccessSizeIndex++) {
- // IsWrite and TypeSize are encoded in the function name.
- std::string Suffix =
- (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
- AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
- checkInterfaceFunction(
- M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
- IRB.getVoidTy(), IntptrTy, nullptr));
- AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
- checkInterfaceFunction(
- M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
- IRB.getVoidTy(), IntptrTy, nullptr));
+ // IsWrite, TypeSize and Exp are encoded in the function name.
+ for (int Exp = 0; Exp < 2; Exp++) {
+ for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
+ const std::string TypeStr = AccessIsWrite ? "store" : "load";
+ const std::string ExpStr = Exp ? "exp_" : "";
+ const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
+ AsanErrorCallbackSized[AccessIsWrite][Exp] =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ kAsanReportErrorTemplate + ExpStr + TypeStr + "_n",
+ IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
+ AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N",
+ IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
+ for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
+ AccessSizeIndex++) {
+ const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex);
+ AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ kAsanReportErrorTemplate + ExpStr + Suffix, IRB.getVoidTy(),
+ IntptrTy, ExpType, nullptr));
+ AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ ClMemoryAccessCallbackPrefix + ExpStr + Suffix, IRB.getVoidTy(),
+ IntptrTy, ExpType, nullptr));
+ }
}
}
- AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
- AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
-
- AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
- M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
- IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
- AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
- M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
- IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
-
- AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
+
+ AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
- AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
+ AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
- AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
+ AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));
- AsanHandleNoReturnFunc = checkInterfaceFunction(
+ AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr));
- AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
+ AsanPtrCmpFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
- AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
+ AsanPtrSubFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
// We insert an empty inline asm after __asan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
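[Reviewer note] The callbacks above are now generated for every combination of Exp,
IsWrite and access size, all encoded in the symbol name. A sketch of the encoding,
mirroring the string concatenation in the loop (the particular values are chosen for
illustration):

    // Exp = 1, AccessIsWrite = true, AccessSizeIndex = 2 (a 4-byte access):
    const std::string ExpStr = "exp_";
    const std::string Suffix = "store" + itostr(1 << 2);        // "store4"
    const std::string Name = kAsanReportErrorTemplate + ExpStr + Suffix;
    // Name == "__asan_report_exp_store4"; the sized variant for the same
    // combination is "__asan_report_exp_store_n".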
@@ -1364,28 +1419,18 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
GlobalsMD.init(M);
C = &(M.getContext());
- LongSize = DL->getPointerSizeInBits();
+ LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
- AsanCtorFunction = Function::Create(
- FunctionType::get(Type::getVoidTy(*C), false),
- GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
- BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
- // call __asan_init in the module ctor.
- IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
- AsanInitFunction = checkInterfaceFunction(
- M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), nullptr));
- AsanInitFunction->setLinkage(Function::ExternalLinkage);
- IRB.CreateCall(AsanInitFunction);
+ std::tie(AsanCtorFunction, AsanInitFunction) =
+ createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName,
+ /*InitArgTypes=*/{},
+ /*InitArgs=*/{});
Mapping = getShadowMapping(TargetTriple, LongSize);
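[Reviewer note] createSanitizerCtorAndInitFunctions subsumes the hand-rolled
constructor emission deleted above. With empty InitArgTypes/InitArgs it reduces to
roughly the removed code (a sketch of the equivalent, not the helper's actual body):

    Function *Ctor = Function::Create(
        FunctionType::get(Type::getVoidTy(*C), false),
        GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
    BasicBlock *CtorBB = BasicBlock::Create(*C, "", Ctor);
    IRBuilder<> IRB(ReturnInst::Create(*C, CtorBB));
    Function *Init = checkSanitizerInterfaceFunction(
        M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), nullptr));
    Init->setLinkage(Function::ExternalLinkage);
    IRB.CreateCall(Init, {});  // call __asan_init from the module ctor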
@@ -1403,7 +1448,7 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
// instrumented functions.
if (F.getName().find(" load]") != std::string::npos) {
IRBuilder<> IRB(F.begin()->begin());
- IRB.CreateCall(AsanInitFunction);
+ IRB.CreateCall(AsanInitFunction, {});
return true;
}
return false;
@@ -1420,22 +1465,21 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// If needed, insert __asan_init before checking for SanitizeAddress attr.
maybeInsertAsanInitAtFunctionEntry(F);
- if (!F.hasFnAttribute(Attribute::SanitizeAddress))
- return false;
+ if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return false;
- if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
- return false;
+ if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false;
// We want to instrument every address only once per basic block (unless there
// are calls between uses).
- SmallSet<Value*, 16> TempsToInstrument;
- SmallVector<Instruction*, 16> ToInstrument;
- SmallVector<Instruction*, 8> NoReturnCalls;
- SmallVector<BasicBlock*, 16> AllBlocks;
- SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
+ SmallSet<Value *, 16> TempsToInstrument;
+ SmallVector<Instruction *, 16> ToInstrument;
+ SmallVector<Instruction *, 8> NoReturnCalls;
+ SmallVector<BasicBlock *, 16> AllBlocks;
+ SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
int NumAllocas = 0;
bool IsWrite;
unsigned Alignment;
+ uint64_t TypeSize;
// Fill the set of memory operations to instrument.
for (auto &BB : F) {
@@ -1444,8 +1488,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
int NumInsnsPerBB = 0;
for (auto &Inst : BB) {
if (LooksLikeCodeInBug11395(&Inst)) return false;
- if (Value *Addr =
- isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
+ if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
+ &Alignment)) {
if (ClOpt && ClOptSameTemp) {
if (!TempsToInstrument.insert(Addr).second)
continue; // We've seen this temp in the current BB.
@@ -1457,21 +1501,18 @@ bool AddressSanitizer::runOnFunction(Function &F) {
} else if (isa<MemIntrinsic>(Inst)) {
// ok, take it.
} else {
- if (isa<AllocaInst>(Inst))
- NumAllocas++;
+ if (isa<AllocaInst>(Inst)) NumAllocas++;
CallSite CS(&Inst);
if (CS) {
// A call inside BB.
TempsToInstrument.clear();
- if (CS.doesNotReturn())
- NoReturnCalls.push_back(CS.getInstruction());
+ if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction());
}
continue;
}
ToInstrument.push_back(&Inst);
NumInsnsPerBB++;
- if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
- break;
+ if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
}
}
@@ -1480,13 +1521,20 @@ bool AddressSanitizer::runOnFunction(Function &F) {
ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
UseCalls = true;
+ const TargetLibraryInfo *TLI =
+ &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(),
+ /*RoundToAlign=*/true);
+
// Instrument.
int NumInstrumented = 0;
for (auto Inst : ToInstrument) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
- if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
- instrumentMop(Inst, UseCalls);
+ if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
+ instrumentMop(ObjSizeVis, Inst, UseCalls,
+ F.getParent()->getDataLayout());
else
instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}
@@ -1500,7 +1548,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
// See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
for (auto CI : NoReturnCalls) {
IRBuilder<> IRB(CI);
- IRB.CreateCall(AsanHandleNoReturnFunc);
+ IRB.CreateCall(AsanHandleNoReturnFunc, {});
}
for (auto Inst : PointerComparisonsOrSubtracts) {
@@ -1531,24 +1579,29 @@ void FunctionStackPoisoner::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
std::string Suffix = itostr(i);
- AsanStackMallocFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
- kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy, nullptr));
- AsanStackFreeFunc[i] = checkInterfaceFunction(
+ AsanStackMallocFunc[i] = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
+ IntptrTy, nullptr));
+ AsanStackFreeFunc[i] = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
}
- AsanPoisonStackMemoryFunc = checkInterfaceFunction(
+ AsanPoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
IntptrTy, IntptrTy, nullptr));
- AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(
+ AsanUnpoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
IntptrTy, IntptrTy, nullptr));
+ AsanAllocaPoisonFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
+ AsanAllocasUnpoisonFunc =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
}
-void
-FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
- IRBuilder<> &IRB, Value *ShadowBase,
- bool DoPoison) {
+void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
+ IRBuilder<> &IRB, Value *ShadowBase,
+ bool DoPoison) {
size_t n = ShadowBytes.size();
size_t i = 0;
// We need to (un)poison n bytes of stack shadow. Poison as many as we can
@@ -1559,7 +1612,7 @@ FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
uint64_t Val = 0;
for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
- if (ASan.DL->isLittleEndian())
+ if (F.getParent()->getDataLayout().isLittleEndian())
Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
else
Val = (Val << 8) | ShadowBytes[i + j];
@@ -1578,9 +1631,8 @@ FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
static int StackMallocSizeClass(uint64_t LocalStackSize) {
assert(LocalStackSize <= kMaxStackMallocSize);
uint64_t MaxSize = kMinStackMallocSize;
- for (int i = 0; ; i++, MaxSize *= 2)
- if (LocalStackSize <= MaxSize)
- return i;
+ for (int i = 0;; i++, MaxSize *= 2)
+ if (LocalStackSize <= MaxSize) return i;
llvm_unreachable("impossible LocalStackSize");
}
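[Reviewer note] The reformatted loop still returns the smallest class i such that
LocalStackSize <= kMinStackMallocSize << i. A worked example with the constants from
the top of the file (kMinStackMallocSize = 64, kMaxStackMallocSize = 64K):

    // Class 0 covers frames <= 64 bytes, class 1 <= 128, class 2 <= 256, ...
    assert(StackMallocSizeClass(64) == 0);
    assert(StackMallocSizeClass(100) == 1);       // 64 < 100 <= 128
    assert(StackMallocSizeClass(1 << 16) == 10);  // 64 * 2^10 == 64K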
@@ -1592,18 +1644,21 @@ static int StackMallocSizeClass(uint64_t LocalStackSize) {
void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
IRBuilder<> &IRB, Value *ShadowBase, int Size) {
assert(!(Size % 8));
- assert(kAsanStackAfterReturnMagic == 0xf5);
+
+ // kAsanStackAfterReturnMagic is 0xf5.
+ const uint64_t kAsanStackAfterReturnMagic64 = 0xf5f5f5f5f5f5f5f5ULL;
+
for (int i = 0; i < Size; i += 8) {
Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
- IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
- IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
+ IRB.CreateStore(
+ ConstantInt::get(IRB.getInt64Ty(), kAsanStackAfterReturnMagic64),
+ IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
}
}
static DebugLoc getFunctionEntryDebugLocation(Function &F) {
for (const auto &Inst : F.getEntryBlock())
- if (!isa<AllocaInst>(Inst))
- return Inst.getDebugLoc();
+ if (!isa<AllocaInst>(Inst)) return Inst.getDebugLoc();
return DebugLoc();
}
@@ -1637,13 +1692,25 @@ Value *FunctionStackPoisoner::createAllocaForLayout(
return IRB.CreatePointerCast(Alloca, IntptrTy);
}
+void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
+ BasicBlock &FirstBB = *F.begin();
+ IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
+ DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
+ IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
+ DynamicAllocaLayout->setAlignment(32);
+}
+
void FunctionStackPoisoner::poisonStack() {
assert(AllocaVec.size() > 0 || DynamicAllocaVec.size() > 0);
- if (ClInstrumentAllocas)
+ if (ClInstrumentAllocas && DynamicAllocaVec.size() > 0) {
// Handle dynamic allocas.
- for (auto &AllocaCall : DynamicAllocaVec)
- handleDynamicAllocaCall(AllocaCall);
+ createDynamicAllocasInitStorage();
+ for (auto &AI : DynamicAllocaVec)
+ handleDynamicAllocaCall(AI);
+
+ unpoisonDynamicAllocas();
+ }
if (AllocaVec.size() == 0) return;
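[Reviewer note] The dynamic-alloca path now leans on the runtime instead of writing
shadow magic inline: one 32-byte-aligned slot (DynamicAllocaLayout) records the most
recent dynamic alloca, each alloca gets a __asan_alloca_poison call, and a single
unpoison call per function exit undoes all of it. A sketch of the per-alloca calls
(argument computation elided; see handleDynamicAllocaCall further down):

    IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
    IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                    DynamicAllocaLayout);
    // At each return, unpoisonDynamicAllocas is expected to emit a matching
    // __asan_allocas_unpoison(<load of DynamicAllocaLayout>, <stack top>).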
@@ -1657,9 +1724,9 @@ void FunctionStackPoisoner::poisonStack() {
SmallVector<ASanStackVariableDescription, 16> SVD;
SVD.reserve(AllocaVec.size());
for (AllocaInst *AI : AllocaVec) {
- ASanStackVariableDescription D = { AI->getName().data(),
- getAllocaSizeInBytes(AI),
- AI->getAlignment(), AI, 0};
+ ASanStackVariableDescription D = {AI->getName().data(),
+ ASan.getAllocaSizeInBytes(AI),
+ AI->getAlignment(), AI, 0};
SVD.push_back(D);
}
// Minimal header size (left redzone) is 4 pointers,
@@ -1671,9 +1738,11 @@ void FunctionStackPoisoner::poisonStack() {
uint64_t LocalStackSize = L.FrameSize;
bool DoStackMalloc =
ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
- // Don't do dynamic alloca in presence of inline asm: too often it
- // makes assumptions on which registers are available.
+ // Don't do dynamic alloca in the presence of inline asm: too often it makes
+ // assumptions on which registers are available. Don't do stack malloc in the
+ // presence of inline asm on 32-bit platforms for the same reason.
bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
+ DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
Value *StaticAlloca =
DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
@@ -1739,7 +1808,7 @@ void FunctionStackPoisoner::poisonStack() {
Value *NewAllocaPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
AI->getType());
- replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
+ replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true);
AI->replaceAllUsesWith(NewAllocaPtr);
}
@@ -1750,19 +1819,19 @@ void FunctionStackPoisoner::poisonStack() {
BasePlus0);
// Write the frame description constant to redzone[1].
Value *BasePlus1 = IRB.CreateIntToPtr(
- IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
- IntptrPtrTy);
+ IRB.CreateAdd(LocalStackBase,
+ ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
+ IntptrPtrTy);
GlobalVariable *StackDescriptionGlobal =
createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
- /*AllowMerging*/true);
- Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
- IntptrTy);
+ /*AllowMerging*/ true);
+ Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
IRB.CreateStore(Description, BasePlus1);
// Write the PC to redzone[2].
Value *BasePlus2 = IRB.CreateIntToPtr(
- IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
- 2 * ASan.LongSize/8)),
- IntptrPtrTy);
+ IRB.CreateAdd(LocalStackBase,
+ ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
+ IntptrPtrTy);
IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
// Poison the stack redzones at the entry.
@@ -1807,8 +1876,9 @@ void FunctionStackPoisoner::poisonStack() {
IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
} else {
// For larger frames call __asan_stack_free_*.
- IRBPoison.CreateCall2(AsanStackFreeFunc[StackMallocIdx], FakeStack,
- ConstantInt::get(IntptrTy, LocalStackSize));
+ IRBPoison.CreateCall(
+ AsanStackFreeFunc[StackMallocIdx],
+ {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
}
IRBuilder<> IRBElse(ElseTerm);
@@ -1822,14 +1892,8 @@ void FunctionStackPoisoner::poisonStack() {
}
}
- if (ClInstrumentAllocas)
- // Unpoison dynamic allocas.
- for (auto &AllocaCall : DynamicAllocaVec)
- unpoisonDynamicAlloca(AllocaCall);
-
// We are done. Remove the old unused alloca instructions.
- for (auto AI : AllocaVec)
- AI->eraseFromParent();
+ for (auto AI : AllocaVec) AI->eraseFromParent();
}
void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
@@ -1837,9 +1901,9 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
// For now just insert the call to ASan runtime.
Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
Value *SizeArg = ConstantInt::get(IntptrTy, Size);
- IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
- : AsanUnpoisonStackMemoryFunc,
- AddrArg, SizeArg);
+ IRB.CreateCall(DoPoison ? AsanPoisonStackMemoryFunc
+ : AsanUnpoisonStackMemoryFunc,
+ {AddrArg, SizeArg});
}
// Handling llvm.lifetime intrinsics for a given %alloca:
@@ -1854,12 +1918,11 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
// We're interested only in allocas we can handle.
- return isInterestingAlloca(*AI) ? AI : nullptr;
+ return ASan.isInterestingAlloca(*AI) ? AI : nullptr;
// See if we've already calculated (or started to calculate) alloca for a
// given value.
AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
- if (I != AllocaForValue.end())
- return I->second;
+ if (I != AllocaForValue.end()) return I->second;
// Store 0 while we're calculating alloca for value V to avoid
// infinite recursion if the value references itself.
AllocaForValue[V] = nullptr;
@@ -1867,8 +1930,7 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (CastInst *CI = dyn_cast<CastInst>(V))
Res = findAllocaForValue(CI->getOperand(0));
else if (PHINode *PN = dyn_cast<PHINode>(V)) {
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- Value *IncValue = PN->getIncomingValue(i);
+ for (Value *IncValue : PN->incoming_values()) {
// Allow self-referencing phi-nodes.
if (IncValue == PN) continue;
AllocaInst *IncValueAI = findAllocaForValue(IncValue);
@@ -1878,82 +1940,29 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
Res = IncValueAI;
}
}
- if (Res)
- AllocaForValue[V] = Res;
+ if (Res) AllocaForValue[V] = Res;
return Res;
}
-// Compute PartialRzMagic for dynamic alloca call. PartialRzMagic is
-// constructed from two separate 32-bit numbers: PartialRzMagic = Val1 | Val2.
-// (1) Val1 is resposible for forming base value for PartialRzMagic, containing
-// only 00 for fully addressable and 0xcb for fully poisoned bytes for each
-// 8-byte chunk of user memory respectively.
-// (2) Val2 forms the value for marking first poisoned byte in shadow memory
-// with appropriate value (0x01 - 0x07 or 0xcb if Padding % 8 == 0).
-
-// Shift = Padding & ~7; // the number of bits we need to shift to access first
-// chunk in shadow memory, containing nonzero bytes.
-// Example:
-// Padding = 21 Padding = 16
-// Shadow: |00|00|05|cb| Shadow: |00|00|cb|cb|
-// ^ ^
-// | |
-// Shift = 21 & ~7 = 16 Shift = 16 & ~7 = 16
-//
-// Val1 = 0xcbcbcbcb << Shift;
-// PartialBits = Padding ? Padding & 7 : 0xcb;
-// Val2 = PartialBits << Shift;
-// Result = Val1 | Val2;
-Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize,
- IRBuilder<> &IRB) {
- PartialSize = IRB.CreateIntCast(PartialSize, IRB.getInt32Ty(), false);
- Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7));
- unsigned Val1Int = kAsanAllocaPartialVal1;
- unsigned Val2Int = kAsanAllocaPartialVal2;
- if (!ASan.DL->isLittleEndian()) {
- Val1Int = sys::getSwappedBytes(Val1Int);
- Val2Int = sys::getSwappedBytes(Val2Int);
- }
- Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift);
- Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7));
- // For BigEndian get 0x000000YZ -> 0xYZ000000.
- if (ASan.DL->isBigEndian())
- PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24));
- Value *Val2 = IRB.getInt32(Val2Int);
- Value *Cond =
- IRB.CreateICmpNE(PartialBits, Constant::getNullValue(IRB.getInt32Ty()));
- Val2 = IRB.CreateSelect(Cond, shiftAllocaMagic(PartialBits, IRB, Shift),
- shiftAllocaMagic(Val2, IRB, Shift));
- return IRB.CreateOr(Val1, Val2);
-}
-
-void FunctionStackPoisoner::handleDynamicAllocaCall(
- DynamicAllocaCall &AllocaCall) {
- AllocaInst *AI = AllocaCall.AI;
- if (!doesDominateAllExits(AI)) {
- // We do not yet handle complex allocas
- AllocaCall.Poison = false;
- return;
- }
-
+void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
IRBuilder<> IRB(AI);
- PointerType *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
Value *Zero = Constant::getNullValue(IntptrTy);
Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
- Value *NotAllocaRzMask = ConstantInt::get(IntptrTy, ~AllocaRedzoneMask);
// Since we need to extend the alloca with additional memory to place
// redzones, and OldSize is the number of allocated blocks of
// ElementSize bytes each, compute the allocated memory size in bytes
// as OldSize * ElementSize.
- unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType());
- Value *OldSize = IRB.CreateMul(AI->getArraySize(),
- ConstantInt::get(IntptrTy, ElementSize));
+ const unsigned ElementSize =
+ F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
+ Value *OldSize =
+ IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
+ ConstantInt::get(IntptrTy, ElementSize));
// PartialSize = OldSize % 32
Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
@@ -1981,41 +1990,35 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(
Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
ConstantInt::get(IntptrTy, Align));
- Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
-
- // LeftRzAddress = NewAddress - kAllocaRzSize
- Value *LeftRzAddress = IRB.CreateSub(NewAddress, AllocaRzSize);
-
- // Poisoning left redzone.
- AllocaCall.LeftRzAddr = ASan.memToShadow(LeftRzAddress, IRB);
- IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaLeftMagic),
- IRB.CreateIntToPtr(AllocaCall.LeftRzAddr, Int32PtrTy));
-
- // PartialRzAligned = PartialRzAddr & ~AllocaRzMask
- Value *PartialRzAddr = IRB.CreateAdd(NewAddress, OldSize);
- Value *PartialRzAligned = IRB.CreateAnd(PartialRzAddr, NotAllocaRzMask);
+ // Insert __asan_alloca_poison call for the newly created alloca.
+ IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
- // Poisoning partial redzone.
- Value *PartialRzMagic = computePartialRzMagic(PartialSize, IRB);
- Value *PartialRzShadowAddr = ASan.memToShadow(PartialRzAligned, IRB);
- IRB.CreateStore(PartialRzMagic,
- IRB.CreateIntToPtr(PartialRzShadowAddr, Int32PtrTy));
+ // Store the last alloca's address to DynamicAllocaLayout. We'll need it
+ // to unpoison the allocas on function exit.
+ IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
- // RightRzAddress
- // = (PartialRzAddr + AllocaRzMask) & ~AllocaRzMask
- Value *RightRzAddress = IRB.CreateAnd(
- IRB.CreateAdd(PartialRzAddr, AllocaRzMask), NotAllocaRzMask);
-
- // Poisoning right redzone.
- AllocaCall.RightRzAddr = ASan.memToShadow(RightRzAddress, IRB);
- IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaRightMagic),
- IRB.CreateIntToPtr(AllocaCall.RightRzAddr, Int32PtrTy));
+ Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
- // Replace all uses of AddessReturnedByAlloca with NewAddress.
+ // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
AI->replaceAllUsesWith(NewAddressPtr);
- // We are done. Erase old alloca and store left, partial and right redzones
- // shadow addresses for future unpoisoning.
+ // We are done. Erase old alloca from parent.
AI->eraseFromParent();
- NumInstrumentedDynamicAllocas++;
+}
+
+// isSafeAccess returns true if Addr is always inbounds with respect to its
+// base object. For example, it is a field access or an array access with
+// constant inbounds index.
+bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
+ Value *Addr, uint64_t TypeSize) const {
+ SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
+ if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
+ uint64_t Size = SizeOffset.first.getZExtValue();
+ int64_t Offset = SizeOffset.second.getSExtValue();
+ // Three checks are required to ensure safety:
+ // . Offset >= 0 (since the offset is given from the base ptr)
+ // . Size >= Offset (unsigned)
+ // . Size - Offset >= NeededSize (unsigned)
+ return Offset >= 0 && Size >= uint64_t(Offset) &&
+ Size - uint64_t(Offset) >= TypeSize / 8;
}
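[Reviewer note] A worked example of the three checks in isSafeAccess: suppose
ObjSizeVis proves Addr points 12 bytes into a 16-byte object and the access is a
4-byte load (TypeSize == 32):

    // Size = 16, Offset = 12, TypeSize / 8 = 4
    //   Offset >= 0                  ->  12 >= 0        ok
    //   Size >= Offset               ->  16 >= 12       ok
    //   Size - Offset >= TypeSize/8  ->  16 - 12 >= 4   ok
    // The access is statically inbounds, so its shadow check can be elided.
    // An 8-byte load at the same offset fails the last check (4 < 8) and
    // keeps its instrumentation.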
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 9a5cea8..f685803 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
@@ -24,7 +25,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
#define DEBUG_TYPE "bounds-checking"
@@ -49,12 +49,10 @@ namespace {
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
- AU.addRequired<TargetLibraryInfo>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
private:
- const DataLayout *DL;
const TargetLibraryInfo *TLI;
ObjectSizeOffsetEvaluator *ObjSizeEval;
BuilderTy *Builder;
@@ -63,7 +61,7 @@ namespace {
BasicBlock *getTrapBB();
void emitBranchToTrap(Value *Cmp = nullptr);
- bool instrument(Value *Ptr, Value *Val);
+ bool instrument(Value *Ptr, Value *Val, const DataLayout &DL);
};
}
@@ -84,7 +82,7 @@ BasicBlock *BoundsChecking::getTrapBB() {
Builder->SetInsertPoint(TrapBB);
llvm::Value *F = Intrinsic::getDeclaration(Fn->getParent(), Intrinsic::trap);
- CallInst *TrapCall = Builder->CreateCall(F);
+ CallInst *TrapCall = Builder->CreateCall(F, {});
TrapCall->setDoesNotReturn();
TrapCall->setDoesNotThrow();
TrapCall->setDebugLoc(Inst->getDebugLoc());
@@ -125,8 +123,9 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
/// result from the load or the value being stored. It is used to determine the
/// size of memory block that is touched.
/// Returns true if any change was made to the IR, false otherwise.
-bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
- uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType());
+bool BoundsChecking::instrument(Value *Ptr, Value *InstVal,
+ const DataLayout &DL) {
+ uint64_t NeededSize = DL.getTypeStoreSize(InstVal->getType());
DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
<< " bytes\n");
@@ -141,7 +140,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
Value *Offset = SizeOffset.second;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
- Type *IntTy = DL->getIntPtrType(Ptr->getType());
+ Type *IntTy = DL.getIntPtrType(Ptr->getType());
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
// three checks are required to ensure safety:
@@ -165,8 +164,8 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
bool BoundsChecking::runOnFunction(Function &F) {
- DL = &getAnalysis<DataLayoutPass>().getDataLayout();
- TLI = &getAnalysis<TargetLibraryInfo>();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TrapBB = nullptr;
BuilderTy TheBuilder(F.getContext(), TargetFolder(DL));
@@ -192,13 +191,16 @@ bool BoundsChecking::runOnFunction(Function &F) {
Builder->SetInsertPoint(Inst);
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- MadeChange |= instrument(LI->getPointerOperand(), LI);
+ MadeChange |= instrument(LI->getPointerOperand(), LI, DL);
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- MadeChange |= instrument(SI->getPointerOperand(), SI->getValueOperand());
+ MadeChange |=
+ instrument(SI->getPointerOperand(), SI->getValueOperand(), DL);
} else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
- MadeChange |= instrument(AI->getPointerOperand(),AI->getCompareOperand());
+ MadeChange |=
+ instrument(AI->getPointerOperand(), AI->getCompareOperand(), DL);
} else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst)) {
- MadeChange |= instrument(AI->getPointerOperand(), AI->getValOperand());
+ MadeChange |=
+ instrument(AI->getPointerOperand(), AI->getValOperand(), DL);
} else {
llvm_unreachable("unknown Instruction type");
}
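[Reviewer note] BoundsChecking follows the same DataLayout migration as the sanitizers
above: the layout now comes from the function's module rather than from the defunct
DataLayoutPass. The guard itself is unchanged; for reference, the trap condition built
from the names in instrument() is roughly (a sketch, not the verbatim function body):

    // Trap unless: Offset >= 0, Size >= Offset, Size - Offset >= NeededSize.
    Value *ObjSize = Builder->CreateSub(Size, Offset);
    Value *Cmp1 = Builder->CreateICmpSLT(Offset, ConstantInt::get(IntTy, 0));
    Value *Cmp2 = Builder->CreateICmpULT(Size, Offset);
    Value *Cmp3 = Builder->CreateICmpULT(ObjSize, NeededSizeVal);
    emitBranchToTrap(Builder->CreateOr(Cmp1, Builder->CreateOr(Cmp2, Cmp3)));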
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 8f24476..2de6e1a 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -83,14 +83,14 @@ static cl::opt<bool> ClPreserveAlignment(
cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
cl::init(false));
-// The ABI list file controls how shadow parameters are passed. The pass treats
+// The ABI list files control how shadow parameters are passed. The pass treats
// every function labelled "uninstrumented" in the ABI list file as conforming
// to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
// additional annotations for those functions, a call to one of those functions
// will produce a warning message, as the labelling behaviour of the function is
// unknown. The other supported annotations are "functional" and "discard",
// which are described below under DataFlowSanitizer::WrapperKind.
-static cl::opt<std::string> ClABIListFile(
+static cl::list<std::string> ClABIListFiles(
"dfsan-abilist",
cl::desc("File listing native ABI functions and how the pass treats them"),
cl::Hidden);
@@ -141,7 +141,9 @@ class DFSanABIList {
std::unique_ptr<SpecialCaseList> SCL;
public:
- DFSanABIList(std::unique_ptr<SpecialCaseList> SCL) : SCL(std::move(SCL)) {}
+ DFSanABIList() {}
+
+ void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
/// Returns whether this function or its source file is listed in the
/// given category.
@@ -215,7 +217,6 @@ class DataFlowSanitizer : public ModulePass {
WK_Custom
};
- const DataLayout *DL;
Module *Mod;
LLVMContext *Ctx;
IntegerType *ShadowTy;
@@ -247,7 +248,7 @@ class DataFlowSanitizer : public ModulePass {
DFSanABIList ABIList;
DenseMap<Value *, Function *> UnwrappedFnMap;
AttributeSet ReadOnlyNoneAttrs;
- DenseMap<const Function *, DISubprogram> FunctionDIs;
+ DenseMap<const Function *, DISubprogram *> FunctionDIs;
Value *getShadowAddress(Value *Addr, Instruction *Pos);
bool isInstrumented(const Function *F);
@@ -264,9 +265,9 @@ class DataFlowSanitizer : public ModulePass {
Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
public:
- DataFlowSanitizer(StringRef ABIListFile = StringRef(),
- void *(*getArgTLS)() = nullptr,
- void *(*getRetValTLS)() = nullptr);
+ DataFlowSanitizer(
+ const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
+ void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
static char ID;
bool doInitialization(Module &M) override;
bool runOnModule(Module &M) override;
@@ -351,25 +352,26 @@ char DataFlowSanitizer::ID;
INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
"DataFlowSanitizer: dynamic data flow analysis.", false, false)
-ModulePass *llvm::createDataFlowSanitizerPass(StringRef ABIListFile,
- void *(*getArgTLS)(),
- void *(*getRetValTLS)()) {
- return new DataFlowSanitizer(ABIListFile, getArgTLS, getRetValTLS);
+ModulePass *
+llvm::createDataFlowSanitizerPass(const std::vector<std::string> &ABIListFiles,
+ void *(*getArgTLS)(),
+ void *(*getRetValTLS)()) {
+ return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS);
}
-DataFlowSanitizer::DataFlowSanitizer(StringRef ABIListFile,
- void *(*getArgTLS)(),
- void *(*getRetValTLS)())
- : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS),
- ABIList(SpecialCaseList::createOrDie(ABIListFile.empty() ? ClABIListFile
- : ABIListFile)) {
+DataFlowSanitizer::DataFlowSanitizer(
+ const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(),
+ void *(*getRetValTLS)())
+ : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS) {
+ std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
+ AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(),
+ ClABIListFiles.end());
+ ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles));
}
FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
- llvm::SmallVector<Type *, 4> ArgTypes;
- std::copy(T->param_begin(), T->param_end(), std::back_inserter(ArgTypes));
- for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
- ArgTypes.push_back(ShadowTy);
+ llvm::SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
+ ArgTypes.append(T->getNumParams(), ShadowTy);
if (T->isVarArg())
ArgTypes.push_back(ShadowPtrTy);
Type *RetType = T->getReturnType();
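[Reviewer note] With ClABIListFiles now a cl::list and the constructor taking a
vector, several ABI lists can be layered; the constructor appends any -dfsan-abilist
command-line files to the programmatic ones before building a single SpecialCaseList.
A minimal usage sketch (the file names and the PM pass manager are hypothetical):

    // Hypothetical ABI-list files in the usual special-case-list syntax.
    ModulePass *DFSan = createDataFlowSanitizerPass(
        {"base-abilist.txt", "project-abilist.txt"});
    PM.add(DFSan);  // files given via -dfsan-abilist are merged in as well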
@@ -382,9 +384,8 @@ FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
assert(!T->isVarArg());
llvm::SmallVector<Type *, 4> ArgTypes;
ArgTypes.push_back(T->getPointerTo());
- std::copy(T->param_begin(), T->param_end(), std::back_inserter(ArgTypes));
- for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
- ArgTypes.push_back(ShadowTy);
+ ArgTypes.append(T->param_begin(), T->param_end());
+ ArgTypes.append(T->getNumParams(), ShadowTy);
Type *RetType = T->getReturnType();
if (!RetType->isVoidTy())
ArgTypes.push_back(ShadowPtrTy);
@@ -420,16 +421,13 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
TargetTriple.getArch() == llvm::Triple::mips64el;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ const DataLayout &DL = M.getDataLayout();
Mod = &M;
Ctx = &M.getContext();
ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
ShadowPtrTy = PointerType::getUnqual(ShadowTy);
- IntptrTy = DL->getIntPtrType(*Ctx);
+ IntptrTy = DL.getIntPtrType(*Ctx);
ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
if (IsX86_64)
@@ -528,9 +526,9 @@ DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
F->getParent());
NewF->copyAttributesFrom(F);
NewF->removeAttributes(
- AttributeSet::ReturnIndex,
- AttributeFuncs::typeIncompatible(NewFT->getReturnType(),
- AttributeSet::ReturnIndex));
+ AttributeSet::ReturnIndex,
+ AttributeSet::get(F->getContext(), AttributeSet::ReturnIndex,
+ AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
if (F->isVarArg()) {
@@ -591,9 +589,6 @@ Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
}
bool DataFlowSanitizer::runOnModule(Module &M) {
- if (!DL)
- return false;
-
if (ABIList.isIn(M, "skip"))
return false;
@@ -708,9 +703,9 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M);
NewF->copyAttributesFrom(&F);
NewF->removeAttributes(
- AttributeSet::ReturnIndex,
- AttributeFuncs::typeIncompatible(NewFT->getReturnType(),
- AttributeSet::ReturnIndex));
+ AttributeSet::ReturnIndex,
+ AttributeSet::get(NewF->getContext(), AttributeSet::ReturnIndex,
+ AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
for (Function::arg_iterator FArg = F.arg_begin(),
NewFArg = NewF->arg_begin(),
FArgEnd = F.arg_end();
@@ -758,7 +753,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
// Patch the pointer to LLVM function in debug info descriptor.
auto DI = FunctionDIs.find(&F);
if (DI != FunctionDIs.end())
- DI->second.replaceFunction(&F);
+ DI->second->replaceFunction(&F);
UnwrappedFnMap[WrappedFnCst] = &F;
*i = NewF;
@@ -855,7 +850,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
IRBuilder<> ThenIRB(BI);
- ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn);
+ ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
}
}
}
@@ -870,7 +865,7 @@ Value *DFSanFunction::getArgTLSPtr() {
return ArgTLSPtr = DFS.ArgTLS;
IRBuilder<> IRB(F->getEntryBlock().begin());
- return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS);
+ return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {});
}
Value *DFSanFunction::getRetvalTLS() {
@@ -880,7 +875,7 @@ Value *DFSanFunction::getRetvalTLS() {
return RetvalTLSPtr = DFS.RetvalTLS;
IRBuilder<> IRB(F->getEntryBlock().begin());
- return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS);
+ return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {});
}
Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
@@ -977,7 +972,7 @@ Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
IRBuilder<> IRB(Pos);
if (AvoidNewBlocks) {
- CallInst *Call = IRB.CreateCall2(DFS.DFSanCheckedUnionFn, V1, V2);
+ CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
Call->addAttribute(1, Attribute::ZExt);
Call->addAttribute(2, Attribute::ZExt);
@@ -990,7 +985,7 @@ Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
IRBuilder<> ThenIRB(BI);
- CallInst *Call = ThenIRB.CreateCall2(DFS.DFSanUnionFn, V1, V2);
+ CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
Call->addAttribute(1, Attribute::ZExt);
Call->addAttribute(2, Attribute::ZExt);
@@ -1054,7 +1049,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
SmallVector<Value *, 2> Objs;
- GetUnderlyingObjects(Addr, Objs, DFS.DL);
+ GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;
for (SmallVector<Value *, 2>::iterator i = Objs.begin(), e = Objs.end();
i != e; ++i) {
@@ -1080,8 +1075,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
case 2: {
IRBuilder<> IRB(Pos);
- Value *ShadowAddr1 =
- IRB.CreateGEP(ShadowAddr, ConstantInt::get(DFS.IntptrTy, 1));
+ Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
+ ConstantInt::get(DFS.IntptrTy, 1));
return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
}
@@ -1092,8 +1087,9 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
// shadow is non-equal.
BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
IRBuilder<> FallbackIRB(FallbackBB);
- CallInst *FallbackCall = FallbackIRB.CreateCall2(
- DFS.DFSanUnionLoadFn, ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size));
+ CallInst *FallbackCall = FallbackIRB.CreateCall(
+ DFS.DFSanUnionLoadFn,
+ {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
// Compare each of the shadows stored in the loaded 64 bits to each other,
@@ -1132,7 +1128,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
DT.addNewBlock(NextBB, LastBr->getParent());
IRBuilder<> NextIRB(NextBB);
- WideAddr = NextIRB.CreateGEP(WideAddr, ConstantInt::get(DFS.IntptrTy, 1));
+ WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
+ ConstantInt::get(DFS.IntptrTy, 1));
Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
LastBr->setSuccessor(0, NextBB);
@@ -1148,14 +1145,15 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
IRBuilder<> IRB(Pos);
- CallInst *FallbackCall = IRB.CreateCall2(
- DFS.DFSanUnionLoadFn, ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size));
+ CallInst *FallbackCall = IRB.CreateCall(
+ DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
return FallbackCall;
}
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
- uint64_t Size = DFSF.DFS.DL->getTypeStoreSize(LI.getType());
+ auto &DL = LI.getModule()->getDataLayout();
+ uint64_t Size = DL.getTypeStoreSize(LI.getType());
if (Size == 0) {
DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
return;
@@ -1165,7 +1163,7 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
if (ClPreserveAlignment) {
Align = LI.getAlignment();
if (Align == 0)
- Align = DFSF.DFS.DL->getABITypeAlignment(LI.getType());
+ Align = DL.getABITypeAlignment(LI.getType());
} else {
Align = 1;
}
@@ -1217,7 +1215,8 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
Value *ShadowVecAddr =
IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
do {
- Value *CurShadowVecAddr = IRB.CreateConstGEP1_32(ShadowVecAddr, Offset);
+ Value *CurShadowVecAddr =
+ IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
Size -= ShadowVecSize;
++Offset;
@@ -1225,7 +1224,8 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
Offset *= ShadowVecSize;
}
while (Size > 0) {
- Value *CurShadowAddr = IRB.CreateConstGEP1_32(ShadowAddr, Offset);
+ Value *CurShadowAddr =
+ IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
--Size;
++Offset;
@@ -1233,8 +1233,8 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
- uint64_t Size =
- DFSF.DFS.DL->getTypeStoreSize(SI.getValueOperand()->getType());
+ auto &DL = SI.getModule()->getDataLayout();
+ uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
if (Size == 0)
return;
@@ -1242,7 +1242,7 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
if (ClPreserveAlignment) {
Align = SI.getAlignment();
if (Align == 0)
- Align = DFSF.DFS.DL->getABITypeAlignment(SI.getValueOperand()->getType());
+ Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
} else {
Align = 1;
}
@@ -1333,10 +1333,10 @@ void DFSanVisitor::visitSelectInst(SelectInst &I) {
void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
IRBuilder<> IRB(&I);
Value *ValShadow = DFSF.getShadow(I.getValue());
- IRB.CreateCall3(
- DFSF.DFS.DFSanSetLabelFn, ValShadow,
- IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(*DFSF.DFS.Ctx)),
- IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy));
+ IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
+ {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
+ *DFSF.DFS.Ctx)),
+ IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
@@ -1358,8 +1358,8 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
- IRB.CreateCall5(I.getCalledValue(), DestShadow, SrcShadow, LenShadow,
- AlignShadow, I.getVolatileCst());
+ IRB.CreateCall(I.getCalledValue(), {DestShadow, SrcShadow, LenShadow,
+ AlignShadow, I.getVolatileCst()});
}
void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
@@ -1473,17 +1473,17 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
Args.push_back(DFSF.getShadow(*i));
if (FT->isVarArg()) {
- auto LabelVAAlloca =
- new AllocaInst(ArrayType::get(DFSF.DFS.ShadowTy,
- CS.arg_size() - FT->getNumParams()),
- "labelva", DFSF.F->getEntryBlock().begin());
+ auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
+ CS.arg_size() - FT->getNumParams());
+ auto *LabelVAAlloca = new AllocaInst(LabelVATy, "labelva",
+ DFSF.F->getEntryBlock().begin());
for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
- auto LabelVAPtr = IRB.CreateStructGEP(LabelVAAlloca, n);
+ auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
}
- Args.push_back(IRB.CreateStructGEP(LabelVAAlloca, 0));
+ Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
}
if (!FT->getReturnType()->isVoidTy()) {
@@ -1532,7 +1532,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
Next = II->getNormalDest()->begin();
} else {
BasicBlock *NewBB =
- SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DFS);
+ SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
Next = NewBB->begin();
}
} else {
@@ -1569,10 +1569,11 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
AllocaInst *VarArgShadow =
new AllocaInst(VarArgArrayTy, "", DFSF.F->getEntryBlock().begin());
- Args.push_back(IRB.CreateConstGEP2_32(VarArgShadow, 0, 0));
+ Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
for (unsigned n = 0; i != e; ++i, ++n) {
- IRB.CreateStore(DFSF.getShadow(*i),
- IRB.CreateConstGEP2_32(VarArgShadow, 0, n));
+ IRB.CreateStore(
+ DFSF.getShadow(*i),
+ IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
Args.push_back(*i);
}
}
@@ -1587,8 +1588,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
NewCS.setCallingConv(CS.getCallingConv());
NewCS.setAttributes(CS.getAttributes().removeAttributes(
*DFSF.DFS.Ctx, AttributeSet::ReturnIndex,
- AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType(),
- AttributeSet::ReturnIndex)));
+ AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType())));
if (Next) {
ExtractValueInst *ExVal =
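[Reviewer note] The other cross-cutting change in this file is explicitly typed GEP
construction: CreateGEP, CreateStructGEP, CreateConstGEP1_32 and CreateConstGEP2_32
now take the pointee type as their first argument, groundwork for the opaque-pointer
transition. The idiom, using names from the hunks above:

    // Old: element type inferred from the pointer operand.
    //   IRB.CreateGEP(ShadowAddr, ConstantInt::get(DFS.IntptrTy, 1));
    // New: element type spelled out explicitly.
    IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
                  ConstantInt::get(DFS.IntptrTy, 1));
    IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0);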
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 60b541f..9a3ed5c 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -57,6 +57,7 @@ GCOVOptions GCOVOptions::getDefault() {
Options.UseCfgChecksum = false;
Options.NoRedZone = false;
Options.FunctionNamesInData = true;
+ Options.ExitBlockBeforeBody = DefaultExitBlockBeforeBody;
if (DefaultGCOVVersion.size() != 4) {
llvm::report_fatal_error(std::string("Invalid -default-gcov-version: ") +
@@ -72,20 +73,10 @@ namespace {
class GCOVProfiler : public ModulePass {
public:
static char ID;
- GCOVProfiler() : ModulePass(ID), Options(GCOVOptions::getDefault()) {
- init();
- }
- GCOVProfiler(const GCOVOptions &Options) : ModulePass(ID), Options(Options){
+ GCOVProfiler() : GCOVProfiler(GCOVOptions::getDefault()) {}
+ GCOVProfiler(const GCOVOptions &Opts) : ModulePass(ID), Options(Opts) {
assert((Options.EmitNotes || Options.EmitData) &&
"GCOVProfiler asked to do nothing?");
- init();
- }
- const char *getPassName() const override {
- return "GCOV Profiler";
- }
-
- private:
- void init() {
ReversedVersion[0] = Options.Version[3];
ReversedVersion[1] = Options.Version[2];
ReversedVersion[2] = Options.Version[1];
@@ -93,6 +84,11 @@ namespace {
ReversedVersion[4] = '\0';
initializeGCOVProfilerPass(*PassRegistry::getPassRegistry());
}
+ const char *getPassName() const override {
+ return "GCOV Profiler";
+ }
+
+ private:
bool runOnModule(Module &M) override;
// Create the .gcno files for the Module based on DebugInfo.
@@ -130,7 +126,7 @@ namespace {
Function *insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> >);
void insertIndirectCounterIncrement();
- std::string mangleName(DICompileUnit CU, const char *NewStem);
+ std::string mangleName(const DICompileUnit *CU, const char *NewStem);
GCOVOptions Options;
@@ -153,10 +149,10 @@ ModulePass *llvm::createGCOVProfilerPass(const GCOVOptions &Options) {
return new GCOVProfiler(Options);
}
-static StringRef getFunctionName(DISubprogram SP) {
- if (!SP.getLinkageName().empty())
- return SP.getLinkageName();
- return SP.getName();
+static StringRef getFunctionName(const DISubprogram *SP) {
+ if (!SP->getLinkageName().empty())
+ return SP->getLinkageName();
+ return SP->getName();
}
namespace {
@@ -167,7 +163,7 @@ namespace {
static const char *const BlockTag;
static const char *const EdgeTag;
- GCOVRecord() {}
+ GCOVRecord() = default;
void writeBytes(const char *Bytes, int Size) {
os->write(Bytes, Size);
@@ -313,13 +309,13 @@ namespace {
// object users can construct, the blocks and lines will be rooted here.
class GCOVFunction : public GCOVRecord {
public:
- GCOVFunction(DISubprogram SP, raw_ostream *os, uint32_t Ident,
+ GCOVFunction(const DISubprogram *SP, raw_ostream *os, uint32_t Ident,
bool UseCfgChecksum, bool ExitBlockBeforeBody)
: SP(SP), Ident(Ident), UseCfgChecksum(UseCfgChecksum), CfgChecksum(0),
ReturnBlock(1, os) {
this->os = os;
- Function *F = SP.getFunction();
+ Function *F = SP->getFunction();
DEBUG(dbgs() << "Function: " << getFunctionName(SP) << "\n");
uint32_t i = 0;
@@ -334,7 +330,7 @@ namespace {
std::string FunctionNameAndLine;
raw_string_ostream FNLOS(FunctionNameAndLine);
- FNLOS << getFunctionName(SP) << SP.getLineNumber();
+ FNLOS << getFunctionName(SP) << SP->getLine();
FNLOS.flush();
FuncChecksum = hash_value(FunctionNameAndLine);
}
@@ -370,7 +366,7 @@ namespace {
void writeOut() {
writeBytes(FunctionTag, 4);
uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(getFunctionName(SP)) +
- 1 + lengthOfGCOVString(SP.getFilename()) + 1;
+ 1 + lengthOfGCOVString(SP->getFilename()) + 1;
if (UseCfgChecksum)
++BlockLen;
write(BlockLen);
@@ -379,8 +375,8 @@ namespace {
if (UseCfgChecksum)
write(CfgChecksum);
writeGCOVString(getFunctionName(SP));
- writeGCOVString(SP.getFilename());
- write(SP.getLineNumber());
+ writeGCOVString(SP->getFilename());
+ write(SP->getLine());
// Emit count of blocks.
writeBytes(BlockTag, 4);
@@ -415,7 +411,7 @@ namespace {
}
private:
- DISubprogram SP;
+ const DISubprogram *SP;
uint32_t Ident;
uint32_t FuncChecksum;
bool UseCfgChecksum;
@@ -425,7 +421,8 @@ namespace {
};
}
-std::string GCOVProfiler::mangleName(DICompileUnit CU, const char *NewStem) {
+std::string GCOVProfiler::mangleName(const DICompileUnit *CU,
+ const char *NewStem) {
if (NamedMDNode *GCov = M->getNamedMetadata("llvm.gcov")) {
for (int i = 0, e = GCov->getNumOperands(); i != e; ++i) {
MDNode *N = GCov->getOperand(i);
@@ -441,12 +438,12 @@ std::string GCOVProfiler::mangleName(DICompileUnit CU, const char *NewStem) {
}
}
- SmallString<128> Filename = CU.getFilename();
+ SmallString<128> Filename = CU->getFilename();
sys::path::replace_extension(Filename, NewStem);
StringRef FName = sys::path::filename(Filename);
SmallString<128> CurPath;
if (sys::fs::current_path(CurPath)) return FName;
- sys::path::append(CurPath, FName.str());
+ sys::path::append(CurPath, FName);
return CurPath.str();
}
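[Reviewer note] mangleName's behavior is unchanged by the pointer-based DICompileUnit:
absent "llvm.gcov" override metadata it still swaps the compile unit's file extension
and resolves the result in the current directory. A worked example (the source path is
hypothetical):

    // CU->getFilename() == "/src/foo.cpp", no "llvm.gcov" named metadata:
    //   mangleName(CU, "gcno") == "<cwd>/foo.gcno"   // notes file
    //   mangleName(CU, "gcda") == "<cwd>/foo.gcda"   // data file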
@@ -470,7 +467,8 @@ static bool functionHasLines(Function *F) {
if (isa<DbgInfoIntrinsic>(I)) continue;
const DebugLoc &Loc = I->getDebugLoc();
- if (Loc.isUnknown()) continue;
+ if (!Loc)
+ continue;
// Artificial lines such as calls to the global constructors.
if (Loc.getLine() == 0) continue;
@@ -490,21 +488,14 @@ void GCOVProfiler::emitProfileNotes() {
// this pass over the original .o's as they're produced, or run it after
// LTO, we'll generate the same .gcno files.
- DICompileUnit CU(CU_Nodes->getOperand(i));
+ auto *CU = cast<DICompileUnit>(CU_Nodes->getOperand(i));
std::error_code EC;
raw_fd_ostream out(mangleName(CU, "gcno"), EC, sys::fs::F_None);
std::string EdgeDestinations;
- DIArray SPs = CU.getSubprograms();
unsigned FunctionIdent = 0;
- for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
- DISubprogram SP(SPs.getElement(i));
- assert((!SP || SP.isSubprogram()) &&
- "A MDNode in subprograms of a CU should be null or a DISubprogram.");
- if (!SP)
- continue;
-
- Function *F = SP.getFunction();
+ for (auto *SP : CU->getSubprograms()) {
+ Function *F = SP->getFunction();
if (!F) continue;
if (!functionHasLines(F)) continue;
@@ -518,7 +509,7 @@ void GCOVProfiler::emitProfileNotes() {
Funcs.push_back(make_unique<GCOVFunction>(SP, &out, FunctionIdent++,
Options.UseCfgChecksum,
- DefaultExitBlockBeforeBody));
+ Options.ExitBlockBeforeBody));
GCOVFunction &Func = *Funcs.back();
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
@@ -540,16 +531,18 @@ void GCOVProfiler::emitProfileNotes() {
if (isa<DbgInfoIntrinsic>(I)) continue;
const DebugLoc &Loc = I->getDebugLoc();
- if (Loc.isUnknown()) continue;
+ if (!Loc)
+ continue;
// Artificial lines such as calls to the global constructors.
if (Loc.getLine() == 0) continue;
if (Line == Loc.getLine()) continue;
Line = Loc.getLine();
- if (SP != getDISubprogram(Loc.getScope(*Ctx))) continue;
+ if (SP != getDISubprogram(Loc.getScope()))
+ continue;
- GCOVLines &Lines = Block.getFile(SP.getFilename());
+ GCOVLines &Lines = Block.getFile(SP->getFilename());
Lines.addLine(Loc.getLine());
}
}
@@ -578,16 +571,10 @@ bool GCOVProfiler::emitProfileArcs() {
bool Result = false;
bool InsertIndCounterIncrCode = false;
for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
- DICompileUnit CU(CU_Nodes->getOperand(i));
- DIArray SPs = CU.getSubprograms();
+ auto *CU = cast<DICompileUnit>(CU_Nodes->getOperand(i));
SmallVector<std::pair<GlobalVariable *, MDNode *>, 8> CountersBySP;
- for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
- DISubprogram SP(SPs.getElement(i));
- assert((!SP || SP.isSubprogram()) &&
- "A MDNode in subprograms of a CU should be null or a DISubprogram.");
- if (!SP)
- continue;
- Function *F = SP.getFunction();
+ for (auto *SP : CU->getSubprograms()) {
+ Function *F = SP->getFunction();
if (!F) continue;
if (!functionHasLines(F)) continue;
if (!Result) Result = true;
@@ -607,7 +594,7 @@ bool GCOVProfiler::emitProfileArcs() {
GlobalValue::InternalLinkage,
Constant::getNullValue(CounterTy),
"__llvm_gcov_ctr");
- CountersBySP.push_back(std::make_pair(Counters, (MDNode*)SP));
+ CountersBySP.push_back(std::make_pair(Counters, SP));
UniqueVector<BasicBlock *> ComplexEdgePreds;
UniqueVector<BasicBlock *> ComplexEdgeSuccs;
@@ -632,7 +619,8 @@ bool GCOVProfiler::emitProfileArcs() {
SmallVector<Value *, 2> Idx;
Idx.push_back(Builder.getInt64(0));
Idx.push_back(Sel);
- Value *Counter = Builder.CreateInBoundsGEP(Counters, Idx);
+ Value *Counter = Builder.CreateInBoundsGEP(Counters->getValueType(),
+ Counters, Idx);
Value *Count = Builder.CreateLoad(Counter);
Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Builder.CreateStore(Count, Counter);
@@ -666,8 +654,8 @@ bool GCOVProfiler::emitProfileArcs() {
// Build code to increment the counter.
InsertIndCounterIncrCode = true;
- Builder.CreateCall2(getIncrementIndirectCounterFunc(),
- EdgeState, CounterPtrArray);
+ Builder.CreateCall(getIncrementIndirectCounterFunc(),
+ {EdgeState, CounterPtrArray});
}
}
}
@@ -700,7 +688,7 @@ bool GCOVProfiler::emitProfileArcs() {
// Initialize the environment and register the local writeout and flush
// functions.
Constant *GCOVInit = M->getOrInsertFunction("llvm_gcov_init", FTy);
- Builder.CreateCall2(GCOVInit, WriteoutF, FlushF);
+ Builder.CreateCall(GCOVInit, {WriteoutF, FlushF});
Builder.CreateRetVoid();
appendToGlobalCtors(*M, F, 0);
@@ -859,34 +847,34 @@ Function *GCOVProfiler::insertCounterWriteout(
NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
if (CU_Nodes) {
for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
- DICompileUnit CU(CU_Nodes->getOperand(i));
+ auto *CU = cast<DICompileUnit>(CU_Nodes->getOperand(i));
std::string FilenameGcda = mangleName(CU, "gcda");
uint32_t CfgChecksum = FileChecksums.empty() ? 0 : FileChecksums[i];
- Builder.CreateCall3(StartFile,
- Builder.CreateGlobalStringPtr(FilenameGcda),
+ Builder.CreateCall(StartFile,
+ {Builder.CreateGlobalStringPtr(FilenameGcda),
Builder.CreateGlobalStringPtr(ReversedVersion),
- Builder.getInt32(CfgChecksum));
+ Builder.getInt32(CfgChecksum)});
for (unsigned j = 0, e = CountersBySP.size(); j != e; ++j) {
- DISubprogram SP(CountersBySP[j].second);
+ auto *SP = cast_or_null<DISubprogram>(CountersBySP[j].second);
uint32_t FuncChecksum = Funcs.empty() ? 0 : Funcs[j]->getFuncChecksum();
- Builder.CreateCall5(
- EmitFunction, Builder.getInt32(j),
- Options.FunctionNamesInData ?
- Builder.CreateGlobalStringPtr(getFunctionName(SP)) :
- Constant::getNullValue(Builder.getInt8PtrTy()),
- Builder.getInt32(FuncChecksum),
- Builder.getInt8(Options.UseCfgChecksum),
- Builder.getInt32(CfgChecksum));
+ Builder.CreateCall(
+ EmitFunction,
+ {Builder.getInt32(j),
+ Options.FunctionNamesInData
+ ? Builder.CreateGlobalStringPtr(getFunctionName(SP))
+ : Constant::getNullValue(Builder.getInt8PtrTy()),
+ Builder.getInt32(FuncChecksum),
+ Builder.getInt8(Options.UseCfgChecksum),
+ Builder.getInt32(CfgChecksum)});
GlobalVariable *GV = CountersBySP[j].first;
unsigned Arcs =
cast<ArrayType>(GV->getType()->getElementType())->getNumElements();
- Builder.CreateCall2(EmitArcs,
- Builder.getInt32(Arcs),
- Builder.CreateConstGEP2_64(GV, 0, 0));
+ Builder.CreateCall(EmitArcs, {Builder.getInt32(Arcs),
+ Builder.CreateConstGEP2_64(GV, 0, 0)});
}
- Builder.CreateCall(SummaryInfo);
- Builder.CreateCall(EndFile);
+ Builder.CreateCall(SummaryInfo, {});
+ Builder.CreateCall(EndFile, {});
}
}
@@ -926,7 +914,7 @@ void GCOVProfiler::insertIndirectCounterIncrement() {
Value *ZExtPred = Builder.CreateZExt(Pred, Builder.getInt64Ty());
Arg = std::next(Fn->arg_begin());
Arg->setName("counters");
- Value *GEP = Builder.CreateGEP(Arg, ZExtPred);
+ Value *GEP = Builder.CreateGEP(Type::getInt64PtrTy(*Ctx), Arg, ZExtPred);
Value *Counter = Builder.CreateLoad(GEP, "counter");
Cond = Builder.CreateICmpEQ(Counter,
Constant::getNullValue(
@@ -966,7 +954,7 @@ insertFlush(ArrayRef<std::pair<GlobalVariable*, MDNode*> > CountersBySP) {
assert(WriteoutF && "Need to create the writeout function first!");
IRBuilder<> Builder(Entry);
- Builder.CreateCall(WriteoutF);
+ Builder.CreateCall(WriteoutF, {});
// Zero out the counters.
for (ArrayRef<std::pair<GlobalVariable *, MDNode *> >::iterator
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 2a3d154..05a9c8a 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -97,7 +97,8 @@ private:
/// Add uses of our data variables and runtime hook.
void emitUses();
- /// Create a static initializer for our data, on platforms that need it.
+ /// Create a static initializer for our data, on platforms that need it,
+ /// and for any profile output file that was specified.
void emitInitialization();
};
@@ -145,8 +146,8 @@ void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
IRBuilder<> Builder(Inc->getParent(), *Inc);
uint64_t Index = Inc->getIndex()->getZExtValue();
- llvm::Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters, 0, Index);
- llvm::Value *Count = Builder.CreateLoad(Addr, "pgocount");
+ Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters, 0, Index);
+ Value *Count = Builder.CreateLoad(Addr, "pgocount");
Count = Builder.CreateAdd(Count, Builder.getInt64(1));
Inc->replaceAllUsesWith(Builder.CreateStore(Count, Addr));
Inc->eraseFromParent();
@@ -195,9 +196,13 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
if (It != RegionCounters.end())
return It->second;
- // Move the name variable to the right section.
+ // Move the name variable to the right section. Make sure it is placed in the
+ // same comdat as its associated function. Otherwise, we may get multiple
+ // counters for the same function in certain cases.
+ Function *Fn = Inc->getParent()->getParent();
Name->setSection(getNameSection());
Name->setAlignment(1);
+ Name->setComdat(Fn->getComdat());
uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
LLVMContext &Ctx = M->getContext();
@@ -210,6 +215,7 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
Counters->setVisibility(Name->getVisibility());
Counters->setSection(getCountersSection());
Counters->setAlignment(8);
+ Counters->setComdat(Fn->getComdat());
RegionCounters[Inc->getName()] = Counters;
@@ -234,6 +240,7 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
Data->setVisibility(Name->getVisibility());
Data->setSection(getDataSection());
Data->setAlignment(8);
+ Data->setComdat(Fn->getComdat());
// Mark the data variable as used so that it isn't stripped out.
UsedVars.push_back(Data);
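
The comdat comment above is the key invariant of this hunk: every profiling global tied to a function must live in that function's comdat, so the linker keeps or drops them as one unit. A condensed C++ sketch of the pattern being applied (the helper name is invented for illustration; it only mirrors the setComdat calls the hunk adds):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/GlobalVariable.h"

    // Tie the name, counter, and data globals to Fn's comdat so an
    // inline function emitted in several TUs keeps exactly one copy.
    static void tieProfilingGlobals(llvm::Function *Fn,
                                    llvm::GlobalVariable *Name,
                                    llvm::GlobalVariable *Counters,
                                    llvm::GlobalVariable *Data) {
      llvm::Comdat *C = Fn->getComdat(); // null for non-comdat functions
      Name->setComdat(C);
      Counters->setComdat(C);
      Data->setComdat(C);
    }
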
@@ -256,7 +263,7 @@ void InstrProfiling::emitRegistration() {
if (Options.NoRedZone)
RegisterF->addFnAttr(Attribute::NoRedZone);
- auto *RuntimeRegisterTy = llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
+ auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false);
auto *RuntimeRegisterF =
Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage,
"__llvm_profile_register_function", M);
@@ -288,6 +295,7 @@ void InstrProfiling::emitRuntimeHook() {
User->addFnAttr(Attribute::NoInline);
if (Options.NoRedZone)
User->addFnAttr(Attribute::NoRedZone);
+ User->setVisibility(GlobalValue::HiddenVisibility);
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
auto *Load = IRB.CreateLoad(Var);
@@ -302,7 +310,7 @@ void InstrProfiling::emitUses() {
return;
GlobalVariable *LLVMUsed = M->getGlobalVariable("llvm.used");
- std::vector<Constant*> MergedVars;
+ std::vector<Constant *> MergedVars;
if (LLVMUsed) {
// Collect the existing members of llvm.used.
ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
@@ -315,20 +323,22 @@ void InstrProfiling::emitUses() {
// Add uses for our data.
for (auto *Value : UsedVars)
MergedVars.push_back(
- ConstantExpr::getBitCast(cast<llvm::Constant>(Value), i8PTy));
+ ConstantExpr::getBitCast(cast<Constant>(Value), i8PTy));
// Recreate llvm.used.
ArrayType *ATy = ArrayType::get(i8PTy, MergedVars.size());
- LLVMUsed = new llvm::GlobalVariable(
- *M, ATy, false, llvm::GlobalValue::AppendingLinkage,
- llvm::ConstantArray::get(ATy, MergedVars), "llvm.used");
+ LLVMUsed =
+ new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage,
+ ConstantArray::get(ATy, MergedVars), "llvm.used");
LLVMUsed->setSection("llvm.metadata");
}
void InstrProfiling::emitInitialization() {
+ std::string InstrProfileOutput = Options.InstrProfileOutput;
+
Constant *RegisterF = M->getFunction("__llvm_profile_register_functions");
- if (!RegisterF)
+ if (!RegisterF && InstrProfileOutput.empty())
return;
// Create the initialization function.
@@ -343,7 +353,24 @@ void InstrProfiling::emitInitialization() {
// Add the basic block and the necessary calls.
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F));
- IRB.CreateCall(RegisterF);
+ if (RegisterF)
+ IRB.CreateCall(RegisterF, {});
+ if (!InstrProfileOutput.empty()) {
+ auto *Int8PtrTy = Type::getInt8PtrTy(M->getContext());
+ auto *SetNameTy = FunctionType::get(VoidTy, Int8PtrTy, false);
+ auto *SetNameF =
+ Function::Create(SetNameTy, GlobalValue::ExternalLinkage,
+ "__llvm_profile_override_default_filename", M);
+
+ // Create variable for profile name
+ Constant *ProfileNameConst =
+ ConstantDataArray::getString(M->getContext(), InstrProfileOutput, true);
+ GlobalVariable *ProfileName =
+ new GlobalVariable(*M, ProfileNameConst->getType(), true,
+ GlobalValue::PrivateLinkage, ProfileNameConst);
+
+ IRB.CreateCall(SetNameF, IRB.CreatePointerCast(ProfileName, Int8PtrTy));
+ }
IRB.CreateRetVoid();
appendToGlobalCtors(*M, F, 0);
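
In effect the generated constructor now does two things: register the functions (when the runtime needs registration) and, when an output file was baked in at compile time, override the default profile filename. A rough C++ rendering of that constructor's behavior; the runtime symbol is the one the hunk declares, while the filename value and the constructor-attribute framing are illustrative:

    // Compiler-rt hook targeted by the patch; the baked-in string
    // lives in a private global in the instrumented module.
    extern "C" void __llvm_profile_override_default_filename(const char *);

    static const char kProfileName[] = "code.profraw"; // illustrative

    __attribute__((constructor)) static void profile_init() {
      __llvm_profile_override_default_filename(kProfileName);
    }
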
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index d7d752f..100824e 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -120,6 +120,7 @@ using namespace llvm;
#define DEBUG_TYPE "msan"
+static const unsigned kOriginSize = 4;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
@@ -190,6 +191,9 @@ static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
cl::desc("Insert checks for constant shadow values"),
cl::Hidden, cl::init(false));
+static const char *const kMsanModuleCtorName = "msan.module_ctor";
+static const char *const kMsanInitName = "__msan_init";
+
namespace {
// Memory map parameters used in application-to-shadow address calculation.
@@ -209,7 +213,7 @@ struct PlatformMemoryMapParams {
};
// i386 Linux
-static const MemoryMapParams LinuxMemoryMapParams32 = {
+static const MemoryMapParams Linux_I386_MemoryMapParams = {
0x000080000000, // AndMask
0, // XorMask (not used)
0, // ShadowBase (not used)
@@ -217,15 +221,23 @@ static const MemoryMapParams LinuxMemoryMapParams32 = {
};
// x86_64 Linux
-static const MemoryMapParams LinuxMemoryMapParams64 = {
+static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
0x400000000000, // AndMask
0, // XorMask (not used)
0, // ShadowBase (not used)
0x200000000000, // OriginBase
};
+// mips64 Linux
+static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
+ 0x004000000000, // AndMask
+ 0, // XorMask (not used)
+ 0, // ShadowBase (not used)
+ 0x002000000000, // OriginBase
+};
+
// i386 FreeBSD
-static const MemoryMapParams FreeBSDMemoryMapParams32 = {
+static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
0x000180000000, // AndMask
0x000040000000, // XorMask
0x000020000000, // ShadowBase
@@ -233,21 +245,26 @@ static const MemoryMapParams FreeBSDMemoryMapParams32 = {
};
// x86_64 FreeBSD
-static const MemoryMapParams FreeBSDMemoryMapParams64 = {
+static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
0xc00000000000, // AndMask
0x200000000000, // XorMask
0x100000000000, // ShadowBase
0x380000000000, // OriginBase
};
-static const PlatformMemoryMapParams LinuxMemoryMapParams = {
- &LinuxMemoryMapParams32,
- &LinuxMemoryMapParams64,
+static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
+ &Linux_I386_MemoryMapParams,
+ &Linux_X86_64_MemoryMapParams,
+};
+
+static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
+  nullptr,
+ &Linux_MIPS64_MemoryMapParams,
};
-static const PlatformMemoryMapParams FreeBSDMemoryMapParams = {
- &FreeBSDMemoryMapParams32,
- &FreeBSDMemoryMapParams64,
+static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
+ &FreeBSD_I386_MemoryMapParams,
+ &FreeBSD_X86_64_MemoryMapParams,
};
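
For readers outside the pass: these four fields drive the application-to-shadow address translation. A simplified C++ restatement of how they are consumed (a sketch of the mapping these tables feed, not pass code; the final 4-byte origin alignment is an assumption carried over from kMinOriginAlignment):

    #include <cstdint>

    struct MemoryMapParams {
      uint64_t AndMask, XorMask, ShadowBase, OriginBase;
    };

    // Shadow address: clear the "application" bit, optionally relocate
    // with XOR, then rebase into the shadow region.
    uint64_t shadowFor(uint64_t Addr, const MemoryMapParams &P) {
      uint64_t Off = Addr & ~P.AndMask;
      if (P.XorMask) Off ^= P.XorMask;
      return Off + P.ShadowBase;
    }

    // Origin address: same translation, rebased on OriginBase and kept
    // 4-byte aligned since origins are 4-byte records.
    uint64_t originFor(uint64_t Addr, const MemoryMapParams &P) {
      uint64_t Off = Addr & ~P.AndMask;
      if (P.XorMask) Off ^= P.XorMask;
      return (Off + P.OriginBase) & ~UINT64_C(3);
    }
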
/// \brief An instrumentation pass implementing detection of uninitialized
@@ -260,7 +277,6 @@ class MemorySanitizer : public FunctionPass {
MemorySanitizer(int TrackOrigins = 0)
: FunctionPass(ID),
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
- DL(nullptr),
WarningFn(nullptr) {}
const char *getPassName() const override { return "MemorySanitizer"; }
bool runOnFunction(Function &F) override;
@@ -273,7 +289,6 @@ class MemorySanitizer : public FunctionPass {
/// \brief Track origins (allocation points) of uninitialized values.
int TrackOrigins;
- const DataLayout *DL;
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
@@ -320,9 +335,11 @@ class MemorySanitizer : public FunctionPass {
MDNode *OriginStoreWeights;
/// \brief An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
+ Function *MsanCtorFunction;
friend struct MemorySanitizerVisitor;
friend struct VarArgAMD64Helper;
+ friend struct VarArgMIPS64Helper;
};
} // namespace
@@ -434,32 +451,43 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ auto &DL = M.getDataLayout();
Triple TargetTriple(M.getTargetTriple());
- const PlatformMemoryMapParams *PlatformMapParams;
- if (TargetTriple.getOS() == Triple::FreeBSD)
- PlatformMapParams = &FreeBSDMemoryMapParams;
- else
- PlatformMapParams = &LinuxMemoryMapParams;
-
- C = &(M.getContext());
- unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
- switch (PtrSize) {
- case 64:
- MapParams = PlatformMapParams->bits64;
+ switch (TargetTriple.getOS()) {
+ case Triple::FreeBSD:
+ switch (TargetTriple.getArch()) {
+ case Triple::x86_64:
+ MapParams = FreeBSD_X86_MemoryMapParams.bits64;
+ break;
+ case Triple::x86:
+ MapParams = FreeBSD_X86_MemoryMapParams.bits32;
+ break;
+ default:
+ report_fatal_error("unsupported architecture");
+ }
break;
- case 32:
- MapParams = PlatformMapParams->bits32;
+ case Triple::Linux:
+ switch (TargetTriple.getArch()) {
+ case Triple::x86_64:
+ MapParams = Linux_X86_MemoryMapParams.bits64;
+ break;
+ case Triple::x86:
+ MapParams = Linux_X86_MemoryMapParams.bits32;
+ break;
+ case Triple::mips64:
+ case Triple::mips64el:
+ MapParams = Linux_MIPS_MemoryMapParams.bits64;
+ break;
+ default:
+ report_fatal_error("unsupported architecture");
+ }
break;
default:
- report_fatal_error("unsupported pointer size");
- break;
+ report_fatal_error("unsupported operating system");
}
+ C = &(M.getContext());
IRBuilder<> IRB(*C);
IntptrTy = IRB.getIntPtrTy(DL);
OriginTy = IRB.getInt32Ty();
@@ -467,9 +495,12 @@ bool MemorySanitizer::doInitialization(Module &M) {
ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
- // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
- appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
- "__msan_init", IRB.getVoidTy(), nullptr)), 0);
+ std::tie(MsanCtorFunction, std::ignore) =
+ createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
+ /*InitArgTypes=*/{},
+ /*InitArgs=*/{});
+
+ appendToGlobalCtors(M, MsanCtorFunction, 0);
if (TrackOrigins)
new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
@@ -555,8 +586,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
: F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
- bool SanitizeFunction = F.getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::SanitizeMemory);
+ bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
InsertChecks = SanitizeFunction;
PropagateShadow = SanitizeFunction;
PoisonStack = SanitizeFunction && ClPoisonStack;
@@ -575,39 +605,86 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return IRB.CreateCall(MS.MsanChainOriginFn, V);
}
+ Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
+ if (IntptrSize == kOriginSize) return Origin;
+ assert(IntptrSize == kOriginSize * 2);
+ Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
+ return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
+ }
+
+ /// \brief Fill memory range with the given origin value.
+ void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
+ unsigned Size, unsigned Alignment) {
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
+ assert(IntptrAlignment >= kMinOriginAlignment);
+ assert(IntptrSize >= kOriginSize);
+
+ unsigned Ofs = 0;
+ unsigned CurrentAlignment = Alignment;
+ if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
+ Value *IntptrOrigin = originToIntptr(IRB, Origin);
+ Value *IntptrOriginPtr =
+ IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
+ for (unsigned i = 0; i < Size / IntptrSize; ++i) {
+ Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
+ : IntptrOriginPtr;
+ IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
+ Ofs += IntptrSize / kOriginSize;
+ CurrentAlignment = IntptrAlignment;
+ }
+ }
+
+ for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
+ Value *GEP =
+ i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
+ IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
+ CurrentAlignment = kMinOriginAlignment;
+ }
+ }
+
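
The widening trick in originToIntptr is worth spelling out: on a 64-bit target the 4-byte origin id is duplicated into both halves of an 8-byte word, so each wide store in paintOrigin paints two origin slots at once. A self-contained C++ sketch of just that step:

    #include <cstdint>

    // Replicate a 32-bit origin into a 64-bit word; the shift amount is
    // kOriginSize * 8 == 32, exactly as the IR above computes it.
    uint64_t replicateOrigin(uint32_t Origin) {
      uint64_t Wide = Origin;
      return Wide | (Wide << 32);
    }
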
void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
unsigned Alignment, bool AsCall) {
+ const DataLayout &DL = F.getParent()->getDataLayout();
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
+ unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
if (isa<StructType>(Shadow->getType())) {
- IRB.CreateAlignedStore(updateOrigin(Origin, IRB),
- getOriginPtr(Addr, IRB, Alignment),
- OriginAlignment);
+ paintOrigin(IRB, updateOrigin(Origin, IRB),
+ getOriginPtr(Addr, IRB, Alignment), StoreSize,
+ OriginAlignment);
} else {
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
- // TODO(eugenis): handle non-zero constant shadow by inserting an
- // unconditional check (can not simply fail compilation as this could
- // be in the dead code).
- if (!ClCheckConstantShadow)
- if (isa<Constant>(ConvertedShadow)) return;
+ Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
+ if (ConstantShadow) {
+ if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
+ paintOrigin(IRB, updateOrigin(Origin, IRB),
+ getOriginPtr(Addr, IRB, Alignment), StoreSize,
+ OriginAlignment);
+ return;
+ }
+
unsigned TypeSizeInBits =
- MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
+ DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes) {
Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
Value *ConvertedShadow2 = IRB.CreateZExt(
ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
- IRB.CreateCall3(Fn, ConvertedShadow2,
- IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- Origin);
+ IRB.CreateCall(Fn, {ConvertedShadow2,
+ IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+ Origin});
} else {
Value *Cmp = IRB.CreateICmpNE(
ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
IRBuilder<> IRBNew(CheckTerm);
- IRBNew.CreateAlignedStore(updateOrigin(Origin, IRBNew),
- getOriginPtr(Addr, IRBNew, Alignment),
- OriginAlignment);
+ paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
+ getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
+ OriginAlignment);
}
}
}
@@ -643,19 +720,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
- // See the comment in storeOrigin().
- if (!ClCheckConstantShadow)
- if (isa<Constant>(ConvertedShadow)) return;
- unsigned TypeSizeInBits =
- MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
+
+ Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
+ if (ConstantShadow) {
+ if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
+ if (MS.TrackOrigins) {
+ IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
+ MS.OriginTLS);
+ }
+ IRB.CreateCall(MS.WarningFn, {});
+ IRB.CreateCall(MS.EmptyAsm, {});
+ // FIXME: Insert UnreachableInst if !ClKeepGoing?
+ // This may invalidate some of the following checks and needs to be done
+ // at the very end.
+ }
+ return;
+ }
+
+ const DataLayout &DL = OrigIns->getModule()->getDataLayout();
+
+ unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes) {
Value *Fn = MS.MaybeWarningFn[SizeIndex];
Value *ConvertedShadow2 =
IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
- IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
+ IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
? Origin
- : (Value *)IRB.getInt32(0));
+ : (Value *)IRB.getInt32(0)});
} else {
Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
getCleanShadow(ConvertedShadow), "_mscmp");
@@ -668,8 +760,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
MS.OriginTLS);
}
- IRB.CreateCall(MS.WarningFn);
- IRB.CreateCall(MS.EmptyAsm);
+ IRB.CreateCall(MS.WarningFn, {});
+ IRB.CreateCall(MS.EmptyAsm, {});
DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
}
}
@@ -687,7 +779,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Add MemorySanitizer instrumentation to a function.
bool runOnFunction() {
MS.initializeCallbacks(*F.getParent());
- if (!MS.DL) return false;
// In the presence of unreachable blocks, we may see Phi nodes with
// incoming nodes from such blocks. Since InstVisitor skips unreachable
@@ -743,8 +834,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// This may return weird-sized types like i1.
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
return IT;
+ const DataLayout &DL = F.getParent()->getDataLayout();
if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
- uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
+ uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
return VectorType::get(IntegerType::get(*MS.C, EltSize),
VT->getNumElements());
}
@@ -760,7 +852,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
return Res;
}
- uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
+ uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
return IntegerType::get(*MS.C, TypeSize);
}
@@ -953,14 +1045,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function *F = A->getParent();
IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
unsigned ArgOffset = 0;
+ const DataLayout &DL = F->getParent()->getDataLayout();
for (auto &FArg : F->args()) {
if (!FArg.getType()->isSized()) {
DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
- unsigned Size = FArg.hasByValAttr()
- ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
- : MS.DL->getTypeAllocSize(FArg.getType());
+ unsigned Size =
+ FArg.hasByValAttr()
+ ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
+ : DL.getTypeAllocSize(FArg.getType());
if (A == &FArg) {
bool Overflow = ArgOffset + Size > kParamTLSSize;
Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
@@ -971,7 +1065,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned ArgAlign = FArg.getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
- ArgAlign = MS.DL->getABITypeAlignment(EltType);
+ ArgAlign = DL.getABITypeAlignment(EltType);
}
if (Overflow) {
// ParamTLS overflow.
@@ -1708,11 +1802,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Similar situation exists for memcpy and memset.
void visitMemMoveInst(MemMoveInst &I) {
IRBuilder<> IRB(&I);
- IRB.CreateCall3(
- MS.MemmoveFn,
- IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ IRB.CreateCall(
+ MS.MemmoveFn,
+ {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
@@ -1722,22 +1816,22 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// alignment.
void visitMemCpyInst(MemCpyInst &I) {
IRBuilder<> IRB(&I);
- IRB.CreateCall3(
- MS.MemcpyFn,
- IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ IRB.CreateCall(
+ MS.MemcpyFn,
+ {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
// Same as memcpy.
void visitMemSetInst(MemSetInst &I) {
IRBuilder<> IRB(&I);
- IRB.CreateCall3(
- MS.MemsetFn,
- IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ IRB.CreateCall(
+ MS.MemsetFn,
+ {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
@@ -2018,8 +2112,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
: Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
Value *V1 = I.getOperand(0);
Value *V2 = I.getOperand(1);
- Value *Shift = IRB.CreateCall2(I.getCalledValue(),
- IRB.CreateBitCast(S1, V1->getType()), V2);
+ Value *Shift = IRB.CreateCall(I.getCalledValue(),
+ {IRB.CreateBitCast(S1, V1->getType()), V2});
Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
setOriginForNaryOp(I);
@@ -2099,7 +2193,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function *ShadowFn = Intrinsic::getDeclaration(
F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
- Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
+ Value *S =
+ IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
@@ -2178,15 +2273,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case llvm::Intrinsic::x86_sse_cvttps2pi:
handleVectorConvertIntrinsic(I, 2);
break;
- case llvm::Intrinsic::x86_avx512_psll_dq:
- case llvm::Intrinsic::x86_avx512_psrl_dq:
case llvm::Intrinsic::x86_avx2_psll_w:
case llvm::Intrinsic::x86_avx2_psll_d:
case llvm::Intrinsic::x86_avx2_psll_q:
case llvm::Intrinsic::x86_avx2_pslli_w:
case llvm::Intrinsic::x86_avx2_pslli_d:
case llvm::Intrinsic::x86_avx2_pslli_q:
- case llvm::Intrinsic::x86_avx2_psll_dq:
case llvm::Intrinsic::x86_avx2_psrl_w:
case llvm::Intrinsic::x86_avx2_psrl_d:
case llvm::Intrinsic::x86_avx2_psrl_q:
@@ -2197,14 +2289,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case llvm::Intrinsic::x86_avx2_psrli_q:
case llvm::Intrinsic::x86_avx2_psrai_w:
case llvm::Intrinsic::x86_avx2_psrai_d:
- case llvm::Intrinsic::x86_avx2_psrl_dq:
case llvm::Intrinsic::x86_sse2_psll_w:
case llvm::Intrinsic::x86_sse2_psll_d:
case llvm::Intrinsic::x86_sse2_psll_q:
case llvm::Intrinsic::x86_sse2_pslli_w:
case llvm::Intrinsic::x86_sse2_pslli_d:
case llvm::Intrinsic::x86_sse2_pslli_q:
- case llvm::Intrinsic::x86_sse2_psll_dq:
case llvm::Intrinsic::x86_sse2_psrl_w:
case llvm::Intrinsic::x86_sse2_psrl_d:
case llvm::Intrinsic::x86_sse2_psrl_q:
@@ -2215,7 +2305,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case llvm::Intrinsic::x86_sse2_psrli_q:
case llvm::Intrinsic::x86_sse2_psrai_w:
case llvm::Intrinsic::x86_sse2_psrai_d:
- case llvm::Intrinsic::x86_sse2_psrl_dq:
case llvm::Intrinsic::x86_mmx_psll_w:
case llvm::Intrinsic::x86_mmx_psll_d:
case llvm::Intrinsic::x86_mmx_psll_q:
@@ -2247,14 +2336,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleVectorShiftIntrinsic(I, /* Variable */ true);
break;
- // Byte shifts are not implemented.
- // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
- // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
- // case llvm::Intrinsic::x86_avx2_psll_dq_bs:
- // case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
- // case llvm::Intrinsic::x86_sse2_psll_dq_bs:
- // case llvm::Intrinsic::x86_sse2_psrl_dq_bs:
-
case llvm::Intrinsic::x86_sse2_packsswb_128:
case llvm::Intrinsic::x86_sse2_packssdw_128:
case llvm::Intrinsic::x86_sse2_packuswb_128:
@@ -2356,10 +2437,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
" Shadow: " << *ArgShadow << "\n");
bool ArgIsInitialized = false;
+ const DataLayout &DL = F.getParent()->getDataLayout();
if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
- Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
+ Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
if (ArgOffset + Size > kParamTLSSize) break;
unsigned ParamAlignment = CS.getParamAlignment(i + 1);
unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
@@ -2367,7 +2449,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
Size, Alignment);
} else {
- Size = MS.DL->getTypeAllocSize(A->getType());
+ Size = DL.getTypeAllocSize(A->getType());
if (ArgOffset + Size > kParamTLSSize) break;
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment);
@@ -2460,11 +2542,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
IRBuilder<> IRB(I.getNextNode());
- uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
if (PoisonStack && ClPoisonStackWithCall) {
- IRB.CreateCall2(MS.MsanPoisonStackFn,
- IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
- ConstantInt::get(MS.IntptrTy, Size));
+ IRB.CreateCall(MS.MsanPoisonStackFn,
+ {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+ ConstantInt::get(MS.IntptrTy, Size)});
} else {
Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
@@ -2484,11 +2567,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
createPrivateNonConstGlobalForString(*F.getParent(),
StackDescription.str());
- IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
- IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+ IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
+ {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
ConstantInt::get(MS.IntptrTy, Size),
IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(&F, MS.IntptrTy));
+ IRB.CreatePointerCast(&F, MS.IntptrTy)});
}
}
@@ -2652,6 +2735,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
unsigned GpOffset = 0;
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
+ const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
@@ -2661,7 +2745,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
// ByVal arguments always go to the overflow area.
assert(A->getType()->isPointerTy());
Type *RealTy = A->getType()->getPointerElementType();
- uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
+ uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
OverflowOffset += RoundUpToAlignment(ArgSize, 8);
IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
@@ -2683,7 +2767,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
FpOffset += 16;
break;
case AK_Memory:
- uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
OverflowOffset += RoundUpToAlignment(ArgSize, 8);
}
@@ -2768,12 +2852,114 @@ struct VarArgAMD64Helper : public VarArgHelper {
Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr =
MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
- Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
+ Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
+ AMD64FpEndOffset);
IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
}
}
};
+/// \brief MIPS64-specific implementation of VarArgHelper.
+struct VarArgMIPS64Helper : public VarArgHelper {
+ Function &F;
+ MemorySanitizer &MS;
+ MemorySanitizerVisitor &MSV;
+ Value *VAArgTLSCopy;
+ Value *VAArgSize;
+
+ SmallVector<CallInst*, 16> VAStartInstrumentationList;
+
+ VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV)
+ : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
+ VAArgSize(nullptr) {}
+
+ void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
+ unsigned VAArgOffset = 0;
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
+ ArgIt != End; ++ArgIt) {
+ Value *A = *ArgIt;
+ Value *Base;
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+#if defined(__MIPSEB__) || defined(MIPSEB)
+      // Adjust the shadow offset for arguments with size < 8 to match the
+      // placement of the bits in a big-endian system.
+ if (ArgSize < 8)
+ VAArgOffset += (8 - ArgSize);
+#endif
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
+ VAArgOffset += ArgSize;
+ VAArgOffset = RoundUpToAlignment(VAArgOffset, 8);
+ IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
+ }
+
+ Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
+    // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating
+    // a new class member; it holds the total size of all VarArgs.
+ IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
+ }
+
+ /// \brief Compute the shadow address for a given va_arg.
+ Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+ int ArgOffset) {
+ Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
+ "_msarg");
+ }
+
+ void visitVAStartInst(VAStartInst &I) override {
+ IRBuilder<> IRB(&I);
+ VAStartInstrumentationList.push_back(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */8, /* alignment */8, false);
+ }
+
+ void visitVACopyInst(VACopyInst &I) override {
+ IRBuilder<> IRB(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
+ // Unpoison the whole __va_list_tag.
+ // FIXME: magic ABI constants.
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */8, /* alignment */8, false);
+ }
+
+ void finalizeInstrumentation() override {
+ assert(!VAArgSize && !VAArgTLSCopy &&
+ "finalizeInstrumentation called twice");
+ IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
+ VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
+ Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
+ VAArgSize);
+
+ if (!VAStartInstrumentationList.empty()) {
+ // If there is a va_start in this function, make a backup copy of
+ // va_arg_tls somewhere in the function entry block.
+ VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+ IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
+ }
+
+ // Instrument va_start.
+ // Copy va_list shadow from the backup copy of the TLS contents.
+ for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
+ CallInst *OrigInst = VAStartInstrumentationList[i];
+ IRBuilder<> IRB(OrigInst->getNextNode());
+ Value *VAListTag = OrigInst->getArgOperand(0);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+ Type::getInt64PtrTy(*MS.C));
+ Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
+ Value *RegSaveAreaShadowPtr =
+ MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
+ IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
+ }
+ }
+};
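
The one subtle piece of VarArgMIPS64Helper is the big-endian adjustment behind the MIPSEB guard: an argument narrower than 8 bytes sits in the high-order end of its 8-byte slot, so its shadow must start (8 - size) bytes into the slot. A small C++ restatement of that arithmetic (the helper name is invented):

    #include <cstdint>

    uint64_t vaArgShadowOffset(uint64_t SlotStart, uint64_t ArgSize,
                               bool BigEndian) {
      uint64_t Off = SlotStart;
      if (BigEndian && ArgSize < 8)
        Off += 8 - ArgSize; // skip the slot's padding bytes
      return Off;
    }
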
+
/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
@@ -2795,6 +2981,9 @@ VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
if (TargetTriple.getArch() == llvm::Triple::x86_64)
return new VarArgAMD64Helper(Func, Msan, Visitor);
+ else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
+ TargetTriple.getArch() == llvm::Triple::mips64el)
+ return new VarArgMIPS64Helper(Func, Msan, Visitor);
else
return new VarArgNoOpHelper(Func, Msan, Visitor);
}
@@ -2802,6 +2991,8 @@ VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
} // namespace
bool MemorySanitizer::runOnFunction(Function &F) {
+ if (&F == MsanCtorFunction)
+ return false;
MemorySanitizerVisitor Visitor(F, *this);
// Clear out readonly/readnone attributes.
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index c048a99..f6ae0c2 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -11,19 +11,17 @@
// and potentially with other Sanitizers.
//
// We create a Guard variable with the same linkage
-// as the function and inject this code into the entry block (CoverageLevel=1)
-// or all blocks (CoverageLevel>=2):
+// as the function and inject this code into the entry block (SCK_Function)
+// or all blocks (SCK_BB):
// if (Guard < 0) {
// __sanitizer_cov(&Guard);
// }
// The accesses to Guard are atomic. The rest of the logic is
// in __sanitizer_cov (it's fine to call it more than once).
//
-// With CoverageLevel>=3 we also split critical edges this effectively
+// With SCK_Edge we also split critical edges, effectively
// instrumenting all edges.
//
-// CoverageLevel>=4 add indirect call profiling implented as a function call.
-//
// This coverage implementation provides very limited data:
// it only tells if a given function (block) was ever executed. No counters.
// But for many use cases this is what we need, and the added slowdown is small.
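
To make the fast path concrete, here is a C++ sketch of what the injected check amounts to at runtime, following the pseudo-code in the header comment above. __sanitizer_cov is the runtime entry point as declared here; the relaxed atomic load stands in for the monotonic load the pass emits:

    #include <cstdint>

    extern "C" void __sanitizer_cov(int32_t *Guard);

    void coverage_hit(int32_t *Guard) {
      // The runtime updates *Guard so later executions usually take the
      // cheap fall-through branch ("fine to call it more than once").
      if (__atomic_load_n(Guard, __ATOMIC_RELAXED) < 0)
        __sanitizer_cov(Guard);
    }
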
@@ -55,11 +53,13 @@ using namespace llvm;
static const char *const kSanCovModuleInitName = "__sanitizer_cov_module_init";
static const char *const kSanCovName = "__sanitizer_cov";
+static const char *const kSanCovWithCheckName = "__sanitizer_cov_with_check";
static const char *const kSanCovIndirCallName = "__sanitizer_cov_indir_call16";
static const char *const kSanCovTraceEnter = "__sanitizer_cov_trace_func_enter";
static const char *const kSanCovTraceBB = "__sanitizer_cov_trace_basic_block";
+static const char *const kSanCovTraceCmp = "__sanitizer_cov_trace_cmp";
static const char *const kSanCovModuleCtorName = "sancov.module_ctor";
-static const uint64_t kSanCtorAndDtorPriority = 1;
+static const uint64_t kSanCtorAndDtorPriority = 2;
static cl::opt<int> ClCoverageLevel("sanitizer-coverage-level",
cl::desc("Sanitizer Coverage. 0: none, 1: entry block, 2: all blocks, "
@@ -67,11 +67,11 @@ static cl::opt<int> ClCoverageLevel("sanitizer-coverage-level",
"4: above plus indirect calls"),
cl::Hidden, cl::init(0));
-static cl::opt<int> ClCoverageBlockThreshold(
+static cl::opt<unsigned> ClCoverageBlockThreshold(
"sanitizer-coverage-block-threshold",
- cl::desc("Add coverage instrumentation only to the entry block if there "
- "are more than this number of blocks."),
- cl::Hidden, cl::init(1500));
+ cl::desc("Use a callback with a guard check inside it if there are"
+ " more than this number of blocks."),
+ cl::Hidden, cl::init(500));
static cl::opt<bool>
ClExperimentalTracing("sanitizer-coverage-experimental-tracing",
@@ -79,13 +79,63 @@ static cl::opt<bool>
"callbacks at every basic block"),
cl::Hidden, cl::init(false));
+static cl::opt<bool>
+ ClExperimentalCMPTracing("sanitizer-coverage-experimental-trace-compares",
+ cl::desc("Experimental tracing of CMP and similar "
+ "instructions"),
+ cl::Hidden, cl::init(false));
+
+// Experimental 8-bit counters used as an additional search heuristic during
+// coverage-guided fuzzing.
+// The counters are not thread-friendly:
+// - contention on these counters may cause significant slowdown;
+// - the counter updates are racy and the results may be inaccurate.
+// They are also inaccurate due to 8-bit integer overflow.
+static cl::opt<bool> ClUse8bitCounters("sanitizer-coverage-8bit-counters",
+ cl::desc("Experimental 8-bit counters"),
+ cl::Hidden, cl::init(false));
+
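
The "not thread-friendly" caveat is easy to see in a C++ sketch of the increment emitted for each counter: it is a plain read-modify-write, so concurrent updates can be lost and the value wraps at 255.

    #include <cstdint>

    void bump_counter(uint8_t *Counter) {
      uint8_t V = *Counter; // racy read
      *Counter = V + 1;     // racy write; wraps past 255
    }
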
namespace {
+SanitizerCoverageOptions getOptions(int LegacyCoverageLevel) {
+ SanitizerCoverageOptions Res;
+ switch (LegacyCoverageLevel) {
+ case 0:
+ Res.CoverageType = SanitizerCoverageOptions::SCK_None;
+ break;
+ case 1:
+ Res.CoverageType = SanitizerCoverageOptions::SCK_Function;
+ break;
+ case 2:
+ Res.CoverageType = SanitizerCoverageOptions::SCK_BB;
+ break;
+ case 3:
+ Res.CoverageType = SanitizerCoverageOptions::SCK_Edge;
+ break;
+ case 4:
+ Res.CoverageType = SanitizerCoverageOptions::SCK_Edge;
+ Res.IndirectCalls = true;
+ break;
+ }
+ return Res;
+}
+
+SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
+ // Sets CoverageType and IndirectCalls.
+ SanitizerCoverageOptions CLOpts = getOptions(ClCoverageLevel);
+ Options.CoverageType = std::max(Options.CoverageType, CLOpts.CoverageType);
+ Options.IndirectCalls |= CLOpts.IndirectCalls;
+ Options.TraceBB |= ClExperimentalTracing;
+ Options.TraceCmp |= ClExperimentalCMPTracing;
+ Options.Use8bitCounters |= ClUse8bitCounters;
+ return Options;
+}
+
class SanitizerCoverageModule : public ModulePass {
public:
- SanitizerCoverageModule(int CoverageLevel = 0)
- : ModulePass(ID),
- CoverageLevel(std::max(CoverageLevel, (int)ClCoverageLevel)) {}
+ SanitizerCoverageModule(
+ const SanitizerCoverageOptions &Options = SanitizerCoverageOptions())
+ : ModulePass(ID), Options(OverrideFromCL(Options)) {}
bool runOnModule(Module &M) override;
bool runOnFunction(Function &F);
static char ID; // Pass identification, replacement for typeid
@@ -93,104 +143,135 @@ class SanitizerCoverageModule : public ModulePass {
return "SanitizerCoverageModule";
}
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
- }
-
private:
void InjectCoverageForIndirectCalls(Function &F,
ArrayRef<Instruction *> IndirCalls);
- bool InjectCoverage(Function &F, ArrayRef<BasicBlock *> AllBlocks,
- ArrayRef<Instruction *> IndirCalls);
- void InjectCoverageAtBlock(Function &F, BasicBlock &BB);
+ void InjectTraceForCmp(Function &F, ArrayRef<Instruction *> CmpTraceTargets);
+ bool InjectCoverage(Function &F, ArrayRef<BasicBlock *> AllBlocks);
+ void SetNoSanitizeMetadata(Instruction *I);
+ void InjectCoverageAtBlock(Function &F, BasicBlock &BB, bool UseCalls);
+ unsigned NumberOfInstrumentedBlocks() {
+ return SanCovFunction->getNumUses() + SanCovWithCheckFunction->getNumUses();
+ }
Function *SanCovFunction;
+ Function *SanCovWithCheckFunction;
Function *SanCovIndirCallFunction;
- Function *SanCovModuleInit;
Function *SanCovTraceEnter, *SanCovTraceBB;
+ Function *SanCovTraceCmpFunction;
InlineAsm *EmptyAsm;
- Type *IntptrTy;
+ Type *IntptrTy, *Int64Ty;
LLVMContext *C;
+ const DataLayout *DL;
GlobalVariable *GuardArray;
+ GlobalVariable *EightBitCounterArray;
- int CoverageLevel;
+ SanitizerCoverageOptions Options;
};
} // namespace
-static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
- if (Function *F = dyn_cast<Function>(FuncOrBitcast))
- return F;
- std::string Err;
- raw_string_ostream Stream(Err);
- Stream << "SanitizerCoverage interface function redefined: "
- << *FuncOrBitcast;
- report_fatal_error(Err);
-}
-
bool SanitizerCoverageModule::runOnModule(Module &M) {
- if (!CoverageLevel) return false;
+ if (Options.CoverageType == SanitizerCoverageOptions::SCK_None)
+ return false;
C = &(M.getContext());
- DataLayoutPass *DLP = &getAnalysis<DataLayoutPass>();
- IntptrTy = Type::getIntNTy(*C, DLP->getDataLayout().getPointerSizeInBits());
+ DL = &M.getDataLayout();
+ IntptrTy = Type::getIntNTy(*C, DL->getPointerSizeInBits());
Type *VoidTy = Type::getVoidTy(*C);
IRBuilder<> IRB(*C);
+ Type *Int8PtrTy = PointerType::getUnqual(IRB.getInt8Ty());
Type *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
+ Int64Ty = IRB.getInt64Ty();
- Function *CtorFunc =
- Function::Create(FunctionType::get(VoidTy, false),
- GlobalValue::InternalLinkage, kSanCovModuleCtorName, &M);
- ReturnInst::Create(*C, BasicBlock::Create(*C, "", CtorFunc));
- appendToGlobalCtors(M, CtorFunc, kSanCtorAndDtorPriority);
-
- SanCovFunction = checkInterfaceFunction(
+ SanCovFunction = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(kSanCovName, VoidTy, Int32PtrTy, nullptr));
- SanCovIndirCallFunction = checkInterfaceFunction(M.getOrInsertFunction(
- kSanCovIndirCallName, VoidTy, IntptrTy, IntptrTy, nullptr));
- SanCovModuleInit = checkInterfaceFunction(
- M.getOrInsertFunction(kSanCovModuleInitName, Type::getVoidTy(*C),
- Int32PtrTy, IntptrTy, nullptr));
- SanCovModuleInit->setLinkage(Function::ExternalLinkage);
+ SanCovWithCheckFunction = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction(kSanCovWithCheckName, VoidTy, Int32PtrTy, nullptr));
+ SanCovIndirCallFunction =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ kSanCovIndirCallName, VoidTy, IntptrTy, IntptrTy, nullptr));
+ SanCovTraceCmpFunction =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ kSanCovTraceCmp, VoidTy, Int64Ty, Int64Ty, Int64Ty, nullptr));
+
// We insert an empty inline asm after cov callbacks to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
/*hasSideEffects=*/true);
- if (ClExperimentalTracing) {
- SanCovTraceEnter = checkInterfaceFunction(
+ if (Options.TraceBB) {
+ SanCovTraceEnter = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(kSanCovTraceEnter, VoidTy, Int32PtrTy, nullptr));
- SanCovTraceBB = checkInterfaceFunction(
+ SanCovTraceBB = checkSanitizerInterfaceFunction(
M.getOrInsertFunction(kSanCovTraceBB, VoidTy, Int32PtrTy, nullptr));
}
// At this point we create a dummy array of guards because we don't
// know how many elements we will need.
Type *Int32Ty = IRB.getInt32Ty();
+ Type *Int8Ty = IRB.getInt8Ty();
+
GuardArray =
new GlobalVariable(M, Int32Ty, false, GlobalValue::ExternalLinkage,
nullptr, "__sancov_gen_cov_tmp");
+ if (Options.Use8bitCounters)
+ EightBitCounterArray =
+ new GlobalVariable(M, Int8Ty, false, GlobalVariable::ExternalLinkage,
+ nullptr, "__sancov_gen_cov_tmp");
for (auto &F : M)
runOnFunction(F);
+ auto N = NumberOfInstrumentedBlocks();
+
// Now we know how many elements we need. Create an array of guards
// with one extra element at the beginning for the size.
- Type *Int32ArrayNTy =
- ArrayType::get(Int32Ty, SanCovFunction->getNumUses() + 1);
+ Type *Int32ArrayNTy = ArrayType::get(Int32Ty, N + 1);
GlobalVariable *RealGuardArray = new GlobalVariable(
M, Int32ArrayNTy, false, GlobalValue::PrivateLinkage,
Constant::getNullValue(Int32ArrayNTy), "__sancov_gen_cov");
+
// Replace the dummy array with the real one.
GuardArray->replaceAllUsesWith(
IRB.CreatePointerCast(RealGuardArray, Int32PtrTy));
GuardArray->eraseFromParent();
- // Call __sanitizer_cov_module_init
- IRB.SetInsertPoint(CtorFunc->getEntryBlock().getTerminator());
- IRB.CreateCall2(SanCovModuleInit,
- IRB.CreatePointerCast(RealGuardArray, Int32PtrTy),
- ConstantInt::get(IntptrTy, SanCovFunction->getNumUses()));
+ GlobalVariable *RealEightBitCounterArray;
+ if (Options.Use8bitCounters) {
+    // Make sure the array is 16-byte aligned.
+ static const int kCounterAlignment = 16;
+ Type *Int8ArrayNTy =
+ ArrayType::get(Int8Ty, RoundUpToAlignment(N, kCounterAlignment));
+ RealEightBitCounterArray = new GlobalVariable(
+ M, Int8ArrayNTy, false, GlobalValue::PrivateLinkage,
+ Constant::getNullValue(Int8ArrayNTy), "__sancov_gen_cov_counter");
+ RealEightBitCounterArray->setAlignment(kCounterAlignment);
+ EightBitCounterArray->replaceAllUsesWith(
+ IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy));
+ EightBitCounterArray->eraseFromParent();
+ }
+
+ // Create variable for module (compilation unit) name
+ Constant *ModNameStrConst =
+ ConstantDataArray::getString(M.getContext(), M.getName(), true);
+ GlobalVariable *ModuleName =
+ new GlobalVariable(M, ModNameStrConst->getType(), true,
+ GlobalValue::PrivateLinkage, ModNameStrConst);
+
+ Function *CtorFunc;
+ std::tie(CtorFunc, std::ignore) = createSanitizerCtorAndInitFunctions(
+ M, kSanCovModuleCtorName, kSanCovModuleInitName,
+ {Int32PtrTy, IntptrTy, Int8PtrTy, Int8PtrTy},
+ {IRB.CreatePointerCast(RealGuardArray, Int32PtrTy),
+ ConstantInt::get(IntptrTy, N),
+ Options.Use8bitCounters
+ ? IRB.CreatePointerCast(RealEightBitCounterArray, Int8PtrTy)
+ : Constant::getNullValue(Int8PtrTy),
+ IRB.CreatePointerCast(ModuleName, Int8PtrTy)});
+
+ appendToGlobalCtors(M, CtorFunc, kSanCtorAndDtorPriority);
+
return true;
}
@@ -198,38 +279,44 @@ bool SanitizerCoverageModule::runOnFunction(Function &F) {
if (F.empty()) return false;
if (F.getName().find(".module_ctor") != std::string::npos)
return false; // Should not instrument sanitizer init functions.
- if (CoverageLevel >= 3)
- SplitAllCriticalEdges(F, this);
+ if (Options.CoverageType >= SanitizerCoverageOptions::SCK_Edge)
+ SplitAllCriticalEdges(F);
SmallVector<Instruction*, 8> IndirCalls;
SmallVector<BasicBlock*, 16> AllBlocks;
+ SmallVector<Instruction*, 8> CmpTraceTargets;
for (auto &BB : F) {
AllBlocks.push_back(&BB);
- if (CoverageLevel >= 4)
- for (auto &Inst : BB) {
+ for (auto &Inst : BB) {
+ if (Options.IndirectCalls) {
CallSite CS(&Inst);
if (CS && !CS.getCalledFunction())
IndirCalls.push_back(&Inst);
}
+ if (Options.TraceCmp && isa<ICmpInst>(&Inst))
+ CmpTraceTargets.push_back(&Inst);
+ }
}
- InjectCoverage(F, AllBlocks, IndirCalls);
+ InjectCoverage(F, AllBlocks);
+ InjectCoverageForIndirectCalls(F, IndirCalls);
+ InjectTraceForCmp(F, CmpTraceTargets);
return true;
}
-bool
-SanitizerCoverageModule::InjectCoverage(Function &F,
- ArrayRef<BasicBlock *> AllBlocks,
- ArrayRef<Instruction *> IndirCalls) {
- if (!CoverageLevel) return false;
-
- if (CoverageLevel == 1 ||
- (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
- InjectCoverageAtBlock(F, F.getEntryBlock());
- } else {
+bool SanitizerCoverageModule::InjectCoverage(Function &F,
+ ArrayRef<BasicBlock *> AllBlocks) {
+ switch (Options.CoverageType) {
+ case SanitizerCoverageOptions::SCK_None:
+ return false;
+ case SanitizerCoverageOptions::SCK_Function:
+ InjectCoverageAtBlock(F, F.getEntryBlock(), false);
+ return true;
+ default: {
+ bool UseCalls = ClCoverageBlockThreshold < AllBlocks.size();
for (auto BB : AllBlocks)
- InjectCoverageAtBlock(F, *BB);
+ InjectCoverageAtBlock(F, *BB, UseCalls);
+ return true;
+ }
}
- InjectCoverageForIndirectCalls(F, IndirCalls);
- return true;
}
// On every indirect call we call a run-time function
@@ -249,19 +336,44 @@ void SanitizerCoverageModule::InjectCoverageForIndirectCalls(
IRBuilder<> IRB(I);
CallSite CS(I);
Value *Callee = CS.getCalledValue();
- if (dyn_cast<InlineAsm>(Callee)) continue;
+ if (isa<InlineAsm>(Callee)) continue;
GlobalVariable *CalleeCache = new GlobalVariable(
*F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
Constant::getNullValue(Ty), "__sancov_gen_callee_cache");
CalleeCache->setAlignment(kCacheAlignment);
- IRB.CreateCall2(SanCovIndirCallFunction,
- IRB.CreatePointerCast(Callee, IntptrTy),
- IRB.CreatePointerCast(CalleeCache, IntptrTy));
+ IRB.CreateCall(SanCovIndirCallFunction,
+ {IRB.CreatePointerCast(Callee, IntptrTy),
+ IRB.CreatePointerCast(CalleeCache, IntptrTy)});
+ }
+}
+
+void SanitizerCoverageModule::InjectTraceForCmp(
+ Function &F, ArrayRef<Instruction *> CmpTraceTargets) {
+ for (auto I : CmpTraceTargets) {
+ if (ICmpInst *ICMP = dyn_cast<ICmpInst>(I)) {
+ IRBuilder<> IRB(ICMP);
+ Value *A0 = ICMP->getOperand(0);
+ Value *A1 = ICMP->getOperand(1);
+ if (!A0->getType()->isIntegerTy()) continue;
+ uint64_t TypeSize = DL->getTypeStoreSizeInBits(A0->getType());
+ // __sanitizer_cov_trace_cmp((type_size << 32) | predicate, A0, A1);
+ IRB.CreateCall(
+ SanCovTraceCmpFunction,
+ {ConstantInt::get(Int64Ty, (TypeSize << 32) | ICMP->getPredicate()),
+ IRB.CreateIntCast(A0, Int64Ty, true),
+ IRB.CreateIntCast(A1, Int64Ty, true)});
+ }
}
}
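
The packing convention noted in the comment inside InjectTraceForCmp ("(type_size << 32) | predicate") is a plain bitfield: operand width in the high 32 bits, ICmp predicate in the low 32. A C++ sketch of the encode/decode pair (the decode side is an assumption about how a runtime would unpack it, not code from this file):

    #include <cstdint>

    uint64_t packCmpInfo(uint64_t TypeSizeInBits, uint32_t Predicate) {
      return (TypeSizeInBits << 32) | Predicate;
    }

    uint64_t unpackTypeSize(uint64_t Packed) { return Packed >> 32; }
    uint32_t unpackPredicate(uint64_t Packed) {
      return static_cast<uint32_t>(Packed);
    }
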
-void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F,
- BasicBlock &BB) {
+void SanitizerCoverageModule::SetNoSanitizeMetadata(Instruction *I) {
+ I->setMetadata(
+ I->getParent()->getParent()->getParent()->getMDKindID("nosanitize"),
+ MDNode::get(*C, None));
+}
+
+void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
+ bool UseCalls) {
BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
// Skip static allocas at the top of the entry block so they don't become
// dynamic when we split the block. If we used our optimized stack layout,
@@ -273,31 +385,48 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F,
}
bool IsEntryBB = &BB == &F.getEntryBlock();
- DebugLoc EntryLoc =
- IsEntryBB ? IP->getDebugLoc().getFnDebugLoc(*C) : IP->getDebugLoc();
+ DebugLoc EntryLoc = IsEntryBB && IP->getDebugLoc()
+ ? IP->getDebugLoc().getFnDebugLoc()
+ : IP->getDebugLoc();
IRBuilder<> IRB(IP);
IRB.SetCurrentDebugLocation(EntryLoc);
SmallVector<Value *, 1> Indices;
Value *GuardP = IRB.CreateAdd(
IRB.CreatePointerCast(GuardArray, IntptrTy),
- ConstantInt::get(IntptrTy, (1 + SanCovFunction->getNumUses()) * 4));
+ ConstantInt::get(IntptrTy, (1 + NumberOfInstrumentedBlocks()) * 4));
Type *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
GuardP = IRB.CreateIntToPtr(GuardP, Int32PtrTy);
- LoadInst *Load = IRB.CreateLoad(GuardP);
- Load->setAtomic(Monotonic);
- Load->setAlignment(4);
- Load->setMetadata(F.getParent()->getMDKindID("nosanitize"),
- MDNode::get(*C, None));
- Value *Cmp = IRB.CreateICmpSGE(Constant::getNullValue(Load->getType()), Load);
- Instruction *Ins = SplitBlockAndInsertIfThen(
- Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
- IRB.SetInsertPoint(Ins);
- IRB.SetCurrentDebugLocation(EntryLoc);
- // __sanitizer_cov gets the PC of the instruction using GET_CALLER_PC.
- IRB.CreateCall(SanCovFunction, GuardP);
- IRB.CreateCall(EmptyAsm); // Avoids callback merge.
+ if (UseCalls) {
+ IRB.CreateCall(SanCovWithCheckFunction, GuardP);
+ } else {
+ LoadInst *Load = IRB.CreateLoad(GuardP);
+ Load->setAtomic(Monotonic);
+ Load->setAlignment(4);
+ SetNoSanitizeMetadata(Load);
+ Value *Cmp = IRB.CreateICmpSGE(Constant::getNullValue(Load->getType()), Load);
+ Instruction *Ins = SplitBlockAndInsertIfThen(
+ Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
+ IRB.SetInsertPoint(Ins);
+ IRB.SetCurrentDebugLocation(EntryLoc);
+ // __sanitizer_cov gets the PC of the instruction using GET_CALLER_PC.
+ IRB.CreateCall(SanCovFunction, GuardP);
+ IRB.CreateCall(EmptyAsm, {}); // Avoids callback merge.
+ }
+
+ if (Options.Use8bitCounters) {
+ IRB.SetInsertPoint(IP);
+ Value *P = IRB.CreateAdd(
+ IRB.CreatePointerCast(EightBitCounterArray, IntptrTy),
+ ConstantInt::get(IntptrTy, NumberOfInstrumentedBlocks() - 1));
+ P = IRB.CreateIntToPtr(P, IRB.getInt8PtrTy());
+ LoadInst *LI = IRB.CreateLoad(P);
+ Value *Inc = IRB.CreateAdd(LI, ConstantInt::get(IRB.getInt8Ty(), 1));
+ StoreInst *SI = IRB.CreateStore(Inc, P);
+ SetNoSanitizeMetadata(LI);
+ SetNoSanitizeMetadata(SI);
+ }
- if (ClExperimentalTracing) {
+ if (Options.TraceBB) {
// Experimental support for tracing.
// Insert a callback with the same guard variable as used for coverage.
IRB.SetInsertPoint(IP);
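
One detail of the guard addressing above: slot 0 of the i32 guard array holds the element count, so the block being instrumented uses element NumberOfInstrumentedBlocks() + 1, which is where the "(1 + NumberOfInstrumentedBlocks()) * 4" byte offset comes from. In plain C++ terms (a restatement with invented names, not pass code):

    #include <cstdint>

    // Element 0 is reserved for the array size, so block N gets
    // element N + 1; the IR does this in bytes, hence the "* 4".
    int32_t *guardSlot(int32_t *GuardArrayBase, unsigned BlockIndex) {
      return GuardArrayBase + (1 + BlockIndex);
    }
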
@@ -309,6 +438,7 @@ char SanitizerCoverageModule::ID = 0;
INITIALIZE_PASS(SanitizerCoverageModule, "sancov",
"SanitizerCoverage: TODO."
"ModulePass", false, false)
-ModulePass *llvm::createSanitizerCoverageModulePass(int CoverageLevel) {
- return new SanitizerCoverageModule(CoverageLevel);
+ModulePass *llvm::createSanitizerCoverageModulePass(
+ const SanitizerCoverageOptions &Options) {
+ return new SanitizerCoverageModule(Options);
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 1b86ae5..1a46bbb 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -25,6 +25,8 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
@@ -68,12 +70,16 @@ STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
"Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
+STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
+
+static const char *const kTsanModuleCtorName = "tsan.module_ctor";
+static const char *const kTsanInitName = "__tsan_init";
namespace {
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
- ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
+ ThreadSanitizer() : FunctionPass(ID) {}
const char *getPassName() const override;
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
@@ -81,15 +87,15 @@ struct ThreadSanitizer : public FunctionPass {
private:
void initializeCallbacks(Module &M);
- bool instrumentLoadOrStore(Instruction *I);
- bool instrumentAtomic(Instruction *I);
+ bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
+ bool instrumentAtomic(Instruction *I, const DataLayout &DL);
bool instrumentMemIntrinsic(Instruction *I);
- void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
- SmallVectorImpl<Instruction*> &All);
+ void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
+ SmallVectorImpl<Instruction *> &All,
+ const DataLayout &DL);
bool addrPointsToConstantData(Value *Addr);
- int getMemoryAccessFuncIndex(Value *Addr);
+ int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
- const DataLayout *DL;
Type *IntptrTy;
IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
@@ -99,6 +105,8 @@ struct ThreadSanitizer : public FunctionPass {
static const size_t kNumberOfAccessSizes = 5;
Function *TsanRead[kNumberOfAccessSizes];
Function *TsanWrite[kNumberOfAccessSizes];
+ Function *TsanUnalignedRead[kNumberOfAccessSizes];
+ Function *TsanUnalignedWrite[kNumberOfAccessSizes];
Function *TsanAtomicLoad[kNumberOfAccessSizes];
Function *TsanAtomicStore[kNumberOfAccessSizes];
Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
@@ -108,6 +116,7 @@ struct ThreadSanitizer : public FunctionPass {
Function *TsanVptrUpdate;
Function *TsanVptrLoad;
Function *MemmoveFn, *MemcpyFn, *MemsetFn;
+ Function *TsanCtorFunction;
};
} // namespace
@@ -124,44 +133,48 @@ FunctionPass *llvm::createThreadSanitizerPass() {
return new ThreadSanitizer();
}
-static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
- if (Function *F = dyn_cast<Function>(FuncOrBitcast))
- return F;
- FuncOrBitcast->dump();
- report_fatal_error("ThreadSanitizer interface function redefined");
-}
-
void ThreadSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(M.getContext());
// Initialize the callbacks.
- TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
+ TsanFuncEntry = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
"__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
- TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
- "__tsan_func_exit", IRB.getVoidTy(), nullptr));
+ TsanFuncExit = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction("__tsan_func_exit", IRB.getVoidTy(), nullptr));
OrdTy = IRB.getInt32Ty();
for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
const size_t ByteSize = 1 << i;
const size_t BitSize = ByteSize * 8;
SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
- TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ TsanRead[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
- TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ TsanWrite[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
+ SmallString<64> UnalignedReadName("__tsan_unaligned_read" +
+ itostr(ByteSize));
+ TsanUnalignedRead[i] =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ UnalignedReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
+
+ SmallString<64> UnalignedWriteName("__tsan_unaligned_write" +
+ itostr(ByteSize));
+ TsanUnalignedWrite[i] =
+ checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ UnalignedWriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
+
Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
"_load");
- TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
- AtomicLoadName, Ty, PtrTy, OrdTy, nullptr));
+ TsanAtomicLoad[i] = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction(AtomicLoadName, Ty, PtrTy, OrdTy, nullptr));
SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
"_store");
- TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
- AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
- nullptr));
+ TsanAtomicStore[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+ AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy, nullptr));
for (int op = AtomicRMWInst::FIRST_BINOP;
op <= AtomicRMWInst::LAST_BINOP; ++op) {
@@ -184,48 +197,44 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
else
continue;
SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
- TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
- RMWName, Ty, PtrTy, Ty, OrdTy, nullptr));
+ TsanAtomicRMW[op][i] = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction(RMWName, Ty, PtrTy, Ty, OrdTy, nullptr));
}
SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
"_compare_exchange_val");
- TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ TsanAtomicCAS[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, nullptr));
}
- TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
- "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), nullptr));
- TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
+ TsanVptrUpdate = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction("__tsan_vptr_update", IRB.getVoidTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), nullptr));
+ TsanVptrLoad = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
"__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
- TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
+ TsanAtomicThreadFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
"__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, nullptr));
- TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
+ TsanAtomicSignalFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
"__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, nullptr));
- MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
- "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy, nullptr));
- MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
- "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IntptrTy, nullptr));
- MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
- "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
- IntptrTy, nullptr));
+ MemmoveFn = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction("memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IntptrTy, nullptr));
+ MemcpyFn = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction("memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IntptrTy, nullptr));
+ MemsetFn = checkSanitizerInterfaceFunction(
+ M.getOrInsertFunction("memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ IRB.getInt32Ty(), IntptrTy, nullptr));
}
bool ThreadSanitizer::doInitialization(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ const DataLayout &DL = M.getDataLayout();
+ IntptrTy = DL.getIntPtrType(M.getContext());
+ std::tie(TsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
+ M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
+ /*InitArgs=*/{});
- // Always insert a call to __tsan_init into the module's CTORs.
- IRBuilder<> IRB(M.getContext());
- IntptrTy = IRB.getIntPtrTy(DL);
- Value *TsanInit = M.getOrInsertFunction("__tsan_init",
- IRB.getVoidTy(), nullptr);
- appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
+ appendToGlobalCtors(M, TsanCtorFunction, 0);
return true;
}
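The helper replaces the previous hand-rolled registration: it synthesizes a module constructor that does nothing but call the runtime initializer, and appendToGlobalCtors then registers it at priority 0. The generated function is roughly:

    // Approximate IR produced by createSanitizerCtorAndInitFunctions:
    //
    //   define internal void @tsan.module_ctor() {
    //     call void @__tsan_init()
    //     ret void
    //   }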
@@ -260,6 +269,7 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
// - read-before-write (within same BB, no calls between)
+// - variables that are not captured (their address never escapes the frame)
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
@@ -269,8 +279,8 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
- SmallVectorImpl<Instruction*> &Local,
- SmallVectorImpl<Instruction*> &All) {
+ SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
+ const DataLayout &DL) {
SmallSet<Value*, 8> WriteTargets;
// Iterate from the end.
for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
@@ -291,6 +301,17 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
continue;
}
}
+ Value *Addr = isa<StoreInst>(*I)
+ ? cast<StoreInst>(I)->getPointerOperand()
+ : cast<LoadInst>(I)->getPointerOperand();
+ if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+ !PointerMayBeCaptured(Addr, true, true)) {
+ // The variable is addressable but not captured, so it cannot be
+ // referenced from a different thread and participate in a data race
+ // (see llvm/Analysis/CaptureTracking.h for details).
+ NumOmittedNonCaptured++;
+ continue;
+ }
All.push_back(I);
}
Local.clear();
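To make the new capture check concrete, here is a hedged C++ example of what it skips, assuming the local stays in memory (its address is used only within the frame) so its accesses survive as loads/stores of a non-captured alloca:

    void example(int *shared) {
      int local = 0;
      for (int i = 0; i < 10; ++i)
        local += i;    // skipped: &local never escapes, so no cross-thread race
      *shared = local; // instrumented: the pointee may be visible to other threads
    }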
@@ -311,7 +332,10 @@ static bool isAtomic(Instruction *I) {
}
bool ThreadSanitizer::runOnFunction(Function &F) {
- if (!DL) return false;
+  // This is required to prevent instrumenting the call to __tsan_init from
+  // within the module constructor.
+ if (&F == TsanCtorFunction)
+ return false;
initializeCallbacks(*F.getParent());
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> AllLoadsAndStores;
@@ -321,6 +345,7 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
bool Res = false;
bool HasCalls = false;
bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
+ const DataLayout &DL = F.getParent()->getDataLayout();
// Traverse all instructions, collect loads/stores/returns, check for calls.
for (auto &BB : F) {
@@ -335,10 +360,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
if (isa<MemIntrinsic>(Inst))
MemIntrinCalls.push_back(&Inst);
HasCalls = true;
- chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
+ DL);
}
}
- chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
}
// We have collected all loads and stores.
@@ -348,14 +374,14 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
// Instrument memory accesses only if we want to report bugs in the function.
if (ClInstrumentMemoryAccesses && SanitizeFunction)
for (auto Inst : AllLoadsAndStores) {
- Res |= instrumentLoadOrStore(Inst);
+ Res |= instrumentLoadOrStore(Inst, DL);
}
// Instrument atomic memory accesses in any case (they can be used to
// implement synchronization).
if (ClInstrumentAtomics)
for (auto Inst : AtomicAccesses) {
- Res |= instrumentAtomic(Inst);
+ Res |= instrumentAtomic(Inst, DL);
}
if (ClInstrumentMemIntrinsics && SanitizeFunction)
@@ -372,20 +398,21 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
IRB.CreateCall(TsanFuncEntry, ReturnAddress);
for (auto RetInst : RetVec) {
IRBuilder<> IRBRet(RetInst);
- IRBRet.CreateCall(TsanFuncExit);
+ IRBRet.CreateCall(TsanFuncExit, {});
}
Res = true;
}
return Res;
}
-bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
+bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
+ const DataLayout &DL) {
IRBuilder<> IRB(I);
bool IsWrite = isa<StoreInst>(*I);
Value *Addr = IsWrite
? cast<StoreInst>(I)->getPointerOperand()
: cast<LoadInst>(I)->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
if (IsWrite && isVtableAccess(I)) {
@@ -400,9 +427,9 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
if (StoredValue->getType()->isIntegerTy())
StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
// Call TsanVptrUpdate.
- IRB.CreateCall2(TsanVptrUpdate,
- IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
+ IRB.CreateCall(TsanVptrUpdate,
+ {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
NumInstrumentedVtableWrites++;
return true;
}
@@ -412,7 +439,16 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
NumInstrumentedVtableReads++;
return true;
}
- Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
+ const unsigned Alignment = IsWrite
+ ? cast<StoreInst>(I)->getAlignment()
+ : cast<LoadInst>(I)->getAlignment();
+ Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
+ const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
+ Value *OnAccessFunc = nullptr;
+ if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
+ OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
+ else
+ OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
if (IsWrite) NumInstrumentedWrites++;
else NumInstrumentedReads++;
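Working through the dispatch above for a few concrete cases (Alignment in bytes, TypeSize in bits, so TypeSize / 8 is the access size):

    // load i32, align 4  -> 4 % 4 == 0      -> __tsan_read4
    // load i32, align 0  -> ABI alignment   -> __tsan_read4
    // load i64, align 2  -> 2 % 8 != 0      -> __tsan_unaligned_read8
    // load i16, align 8  -> Alignment >= 8  -> __tsan_read2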
@@ -445,16 +481,18 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
IRBuilder<> IRB(I);
if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
- IRB.CreateCall3(MemsetFn,
- IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+ IRB.CreateCall(
+ MemsetFn,
+ {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
I->eraseFromParent();
} else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
- IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
- IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+ IRB.CreateCall(
+ isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
+ {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
I->eraseFromParent();
}
return false;
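This rewrite exists because llvm.memset/memcpy/memmove intrinsics would otherwise bypass the runtime; turning them into plain libc calls lets the TSan interceptors observe them, after which the original intrinsic is erased. Schematically:

    // Before: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, ...)
    // After:  call i8* @memcpy(i8* %dst, i8* %src, i64 %n)
    // The plain call is then caught by the memcpy interceptor in the runtime.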
@@ -468,11 +506,11 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
-bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
+bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
IRBuilder<> IRB(I);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Value *Addr = LI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@@ -486,7 +524,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Addr = SI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@@ -500,7 +538,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
ReplaceInstWithInst(I, C);
} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
Value *Addr = RMWI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
@@ -517,7 +555,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
ReplaceInstWithInst(I, C);
} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
Value *Addr = CASI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@@ -547,11 +585,12 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
return true;
}
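Each atomic instruction above is replaced wholesale (via ReplaceInstWithInst) by the matching __tsan_atomic* callback, with the LLVM ordering lowered to an integer constant by createOrdering. As a sketch for an acquire load, assuming createOrdering's usual encoding (relaxed = 0 up to seq_cst = 5):

    // %v = load atomic i32, i32* %p acquire, align 4
    //   becomes, roughly:
    // %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2) ; 2 = acquire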
-int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
+int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
+ const DataLayout &DL) {
Type *OrigPtrTy = Addr->getType();
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
assert(OrigTy->isSized());
- uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
+ uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
if (TypeSize != 8 && TypeSize != 16 &&
TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
NumAccessesWithBadSize++;
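The index is just log2 of the access size in bytes, selecting among the kNumberOfAccessSizes callback tables; the Idx < 0 checks at the call sites above bail out on any other width:

    // TypeSize   8 bits -> Idx 0 (__tsan_read1 / __tsan_write1)
    // TypeSize  16 bits -> Idx 1,  32 -> 2,  64 -> 3,  128 -> 4
    // anything else     -> counted in NumAccessesWithBadSize, no instrumentation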