Diffstat (limited to 'contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp')
-rw-r--r-- | contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 1232
1 file changed, 625 insertions, 607 deletions
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 25f1f02..2dd2fe6 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -24,6 +24,9 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/DIBuilder.h"
 #include "llvm/IR/DataLayout.h"
@@ -43,12 +46,14 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Cloning.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
 #include <algorithm>
 #include <string>
 #include <system_error>
@@ -62,24 +67,24 @@ static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
 static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
 static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000;  // < 2G.
+static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 36;
+static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
+static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
 static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
 
-static const size_t kMinStackMallocSize = 1 << 6;  // 64B
+static const size_t kMinStackMallocSize = 1 << 6;   // 64B
 static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
 
 static const char *const kAsanModuleCtorName = "asan.module_ctor";
 static const char *const kAsanModuleDtorName = "asan.module_dtor";
-static const uint64_t kAsanCtorAndDtorPriority = 1;
+static const uint64_t kAsanCtorAndDtorPriority = 1;
 static const char *const kAsanReportErrorTemplate = "__asan_report_";
-static const char *const kAsanReportLoadN = "__asan_report_load_n";
-static const char *const kAsanReportStoreN = "__asan_report_store_n";
 static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
 static const char *const kAsanUnregisterGlobalsName =
     "__asan_unregister_globals";
@@ -89,7 +94,7 @@ static const char *const kAsanInitName = "__asan_init_v5";
 static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
 static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
 static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
-static const int kMaxAsanStackMallocSizeClass = 10;
+static const int kMaxAsanStackMallocSizeClass = 10;
 static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
 static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
 static const char *const kAsanGenPrefix = "__asan_gen_";
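All of the shadow offsets above feed a single computation: ASan maps every 8 application bytes to one shadow byte at (Mem >> Scale) + Offset, with Scale defaulting to 3. A minimal sketch of that arithmetic, using two offsets copied from this hunk (the sample address is an arbitrary illustration, not from the patch):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t kUserOffset  = 0x7FFF8000;          // kSmallX86_64ShadowOffset
    static const uint64_t kKasanOffset = 0xdffffc0000000000;  // kLinuxKasan_ShadowOffset64

    // Shadow = (Mem >> Scale) + Offset; Scale = 3 is the default mapping scale.
    static uint64_t memToShadow(uint64_t Mem, uint64_t Offset, int Scale = 3) {
      return (Mem >> Scale) + Offset;
    }

    int main() {
      uint64_t Mem = 0x603000000010ULL;  // hypothetical heap address
      printf("user-space shadow:     0x%llx\n",
             (unsigned long long)memToShadow(Mem, kUserOffset));
      printf("kernel (KASan) shadow: 0x%llx\n",
             (unsigned long long)memToShadow(Mem, kKasanOffset));
      return 0;
    }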
@@ -102,101 +107,125 @@ static const char *const kAsanUnpoisonStackMemoryName =
 static const char *const kAsanOptionDetectUAR =
     "__asan_option_detect_stack_use_after_return";
-#ifndef NDEBUG
-static const int kAsanStackAfterReturnMagic = 0xf5;
-#endif
+static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
+static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
 
 // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
 static const size_t kNumberOfAccessSizes = 5;
 
 static const unsigned kAllocaRzSize = 32;
-static const unsigned kAsanAllocaLeftMagic = 0xcacacacaU;
-static const unsigned kAsanAllocaRightMagic = 0xcbcbcbcbU;
-static const unsigned kAsanAllocaPartialVal1 = 0xcbcbcb00U;
-static const unsigned kAsanAllocaPartialVal2 = 0x000000cbU;
 
 // Command-line flags.
+static cl::opt<bool> ClEnableKasan(
+    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
+    cl::Hidden, cl::init(false));
 
 // This flag may need to be replaced with -f[no-]asan-reads.
 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
-       cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
-       cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
-       cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
-       cl::Hidden, cl::init(true));
-static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
-       cl::desc("use instrumentation with slow path for all accesses"),
-       cl::Hidden, cl::init(false));
+                                       cl::desc("instrument read instructions"),
+                                       cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInstrumentWrites(
+    "asan-instrument-writes", cl::desc("instrument write instructions"),
+    cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInstrumentAtomics(
+    "asan-instrument-atomics",
+    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
+    cl::init(true));
+static cl::opt<bool> ClAlwaysSlowPath(
+    "asan-always-slow-path",
+    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
+    cl::init(false));
 
 // This flag limits the number of instructions to be instrumented
 // in any given BB. Normally, this should be set to unlimited (INT_MAX),
 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
 // set it to 10000.
-static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
-       cl::init(10000),
-       cl::desc("maximal number of instructions to instrument in any given BB"),
-       cl::Hidden);
+static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
+    "asan-max-ins-per-bb", cl::init(10000),
+    cl::desc("maximal number of instructions to instrument in any given BB"),
+    cl::Hidden);
 
 // This flag may need to be replaced with -f[no]asan-stack.
-static cl::opt<bool> ClStack("asan-stack",
-       cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
+                             cl::Hidden, cl::init(true));
 static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
-       cl::desc("Check return-after-free"), cl::Hidden, cl::init(true));
+                                      cl::desc("Check return-after-free"),
+                                      cl::Hidden, cl::init(true));
 
 // This flag may need to be replaced with -f[no]asan-globals.
 static cl::opt<bool> ClGlobals("asan-globals",
-       cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
+                               cl::desc("Handle global objects"), cl::Hidden,
+                               cl::init(true));
 static cl::opt<bool> ClInitializers("asan-initialization-order",
-       cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
-       cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
-       cl::Hidden, cl::init(false));
-static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
-       cl::desc("Realign stack to the value of this flag (power of two)"),
-       cl::Hidden, cl::init(32));
+                                    cl::desc("Handle C++ initializer order"),
+                                    cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInvalidPointerPairs(
+    "asan-detect-invalid-pointer-pair",
+    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
+    cl::init(false));
+static cl::opt<unsigned> ClRealignStack(
+    "asan-realign-stack",
+    cl::desc("Realign stack to the value of this flag (power of two)"),
+    cl::Hidden, cl::init(32));
 static cl::opt<int> ClInstrumentationWithCallsThreshold(
     "asan-instrumentation-with-call-threshold",
-    cl::desc("If the function being instrumented contains more than "
-             "this number of memory accesses, use callbacks instead of "
-             "inline checks (-1 means never use callbacks)."),
-    cl::Hidden, cl::init(7000));
+    cl::desc(
+        "If the function being instrumented contains more than "
+        "this number of memory accesses, use callbacks instead of "
+        "inline checks (-1 means never use callbacks)."),
+    cl::Hidden, cl::init(7000));
 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
-    "asan-memory-access-callback-prefix",
-    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
-    cl::init("__asan_"));
+    "asan-memory-access-callback-prefix",
+    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
+    cl::init("__asan_"));
 static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas",
-       cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(false));
+                                         cl::desc("instrument dynamic allocas"),
+                                         cl::Hidden, cl::init(false));
+static cl::opt<bool> ClSkipPromotableAllocas(
+    "asan-skip-promotable-allocas",
+    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
+    cl::init(true));
 
 // These flags allow to change the shadow mapping.
 // The shadow mapping looks like
 //    Shadow = (Mem >> scale) + (1 << offset_log)
 static cl::opt<int> ClMappingScale("asan-mapping-scale",
-       cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
+                                   cl::desc("scale of asan shadow mapping"),
+                                   cl::Hidden, cl::init(0));
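The -asan-instrumentation-with-call-threshold flag above trades speed for code size: below the threshold each access gets an inline shadow check, above it the pass falls back to out-of-line callbacks. A hand-written sketch of the two shapes for a 4-byte load; the callback names are real runtime entry points, but the inline sequence (assuming Scale = 3 and the user-space x86-64 offset) is a simplification of the emitted IR, not the pass itself:

    #include <cstdint>

    extern "C" void __asan_load4(uintptr_t addr);         // check-and-report callback
    extern "C" void __asan_report_load4(uintptr_t addr);  // report-only callback

    // Shape 1: UseCalls == true, one callback per access.
    int load_with_callback(int *p) {
      __asan_load4((uintptr_t)p);
      return *p;
    }

    // Shape 2: inline check; the slow path re-checks partially addressable
    // 8-byte granules (see createSlowPathCmp later in this patch).
    int load_inline(int *p) {
      uintptr_t a = (uintptr_t)p;
      int8_t shadow = *(int8_t *)((a >> 3) + 0x7FFF8000);
      if (shadow != 0 && (int8_t)((a & 7) + 4 - 1) >= shadow)
        __asan_report_load4(a);
      return *p;
    }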
 
 // Optimization flags. Not user visible, used mostly for testing
 // and benchmarking the tool.
-static cl::opt<bool> ClOpt("asan-opt",
-       cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
-       cl::desc("Instrument the same temp just once"), cl::Hidden,
-       cl::init(true));
+static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
+                           cl::Hidden, cl::init(true));
+static cl::opt<bool> ClOptSameTemp(
+    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
+    cl::Hidden, cl::init(true));
 static cl::opt<bool> ClOptGlobals("asan-opt-globals",
-       cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
-
-static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
-       cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
-       cl::Hidden, cl::init(false));
+                                  cl::desc("Don't instrument scalar globals"),
+                                  cl::Hidden, cl::init(true));
+static cl::opt<bool> ClOptStack(
+    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
+    cl::Hidden, cl::init(false));
+
+static cl::opt<bool> ClCheckLifetime(
+    "asan-check-lifetime",
+    cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), cl::Hidden,
+    cl::init(false));
 
 static cl::opt<bool> ClDynamicAllocaStack(
     "asan-stack-dynamic-alloca",
     cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
-    cl::init(false));
+    cl::init(true));
+
+static cl::opt<uint32_t> ClForceExperiment(
+    "asan-force-experiment",
+    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
+    cl::init(0));
 
 // Debug flags.
 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                             cl::init(0));
 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                  cl::Hidden, cl::init(0));
-static cl::opt<std::string> ClDebugFunc("asan-debug-func",
-       cl::Hidden, cl::desc("Debug func"));
+static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
+                                        cl::desc("Debug func"));
 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                                cl::Hidden, cl::init(-1));
 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"),
@@ -204,12 +233,10 @@ static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"),
                                cl::Hidden, cl::init(-1));
 
 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
-STATISTIC(NumInstrumentedDynamicAllocas,
-          "Number of instrumented dynamic allocas");
-STATISTIC(NumOptimizedAccessesToGlobalArray,
-          "Number of optimized accesses to global arrays");
 STATISTIC(NumOptimizedAccessesToGlobalVar,
           "Number of optimized accesses to global vars");
+STATISTIC(NumOptimizedAccessesToStackVar,
+          "Number of optimized accesses to stack vars");
 
 namespace {
 /// Frontend-provided metadata for source location.
@@ -224,8 +251,8 @@ struct LocationMetadata {
 
   void parse(MDNode *MDN) {
     assert(MDN->getNumOperands() == 3);
-    MDString *MDFilename = cast<MDString>(MDN->getOperand(0));
-    Filename = MDFilename->getString();
+    MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
+    Filename = DIFilename->getString();
     LineNo =
         mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
     ColumnNo =
@@ -237,9 +264,7 @@ struct LocationMetadata {
 class GlobalsMetadata {
  public:
   struct Entry {
-    Entry()
-        : SourceLoc(), Name(), IsDynInit(false),
-          IsBlacklisted(false) {}
+    Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {}
     LocationMetadata SourceLoc;
     StringRef Name;
     bool IsDynInit;
@@ -248,19 +273,17 @@ class GlobalsMetadata {
 
   GlobalsMetadata() : inited_(false) {}
 
-  void init(Module& M) {
+  void init(Module &M) {
     assert(!inited_);
     inited_ = true;
     NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
-    if (!Globals)
-      return;
+    if (!Globals) return;
     for (auto MDN : Globals->operands()) {
       // Metadata node contains the global and the fields of "Entry".
       assert(MDN->getNumOperands() == 5);
       auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
       // The optimizer may optimize away a global entirely.
-      if (!GV)
-        continue;
+      if (!GV) continue;
       // We can already have an entry for GV if it was merged with another
       // global.
       Entry &E = Entries[GV];
@@ -285,7 +308,7 @@ class GlobalsMetadata {
 
  private:
   bool inited_;
-  DenseMap<GlobalVariable*, Entry> Entries;
+  DenseMap<GlobalVariable *, Entry> Entries;
 };
 
 /// This struct defines the shadow mapping using the rule:
@@ -296,7 +319,8 @@ struct ShadowMapping {
   bool OrShadowOffset;
 };
 
-static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
+static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
+                                      bool IsKasan) {
   bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
   bool IsIOS = TargetTriple.isiOS();
   bool IsFreeBSD = TargetTriple.isOSFreeBSD();
@@ -308,6 +332,7 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
                  TargetTriple.getArch() == llvm::Triple::mipsel;
   bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
                   TargetTriple.getArch() == llvm::Triple::mips64el;
+  bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64;
   bool IsWindows = TargetTriple.isOSWindows();
 
   ShadowMapping Mapping;
@@ -330,10 +355,15 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
       Mapping.Offset = kPPC64_ShadowOffset64;
     else if (IsFreeBSD)
       Mapping.Offset = kFreeBSD_ShadowOffset64;
-    else if (IsLinux && IsX86_64)
-      Mapping.Offset = kSmallX86_64ShadowOffset;
-    else if (IsMIPS64)
+    else if (IsLinux && IsX86_64) {
+      if (IsKasan)
+        Mapping.Offset = kLinuxKasan_ShadowOffset64;
+      else
+        Mapping.Offset = kSmallX86_64ShadowOffset;
+    } else if (IsMIPS64)
       Mapping.Offset = kMIPS64_ShadowOffset64;
+    else if (IsAArch64)
+      Mapping.Offset = kAArch64_ShadowOffset64;
     else
       Mapping.Offset = kDefaultShadowOffset64;
   }
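ShadowMapping::OrShadowOffset, filled in by getShadowMapping above, records when the offset may be OR-ed in rather than added: if the offset is a lone high bit that no shifted address can reach, OR and ADD produce identical results, and memToShadow later in this diff emits the cheaper OR. A small self-check of that equivalence, assuming the default 64-bit offset 1<<44 from this file:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kOffset = 1ULL << 44;  // single-bit offset (kDefaultShadowOffset64)
      const int kScale = 3;
      // While (Mem >> kScale) stays below the offset bit (Mem < 2^47 here),
      // OR-ing the offset in equals adding it.
      for (uint64_t Mem : {0x1000ULL, 0x7fffffffULL, 1ULL << 46}) {
        uint64_t Shifted = Mem >> kScale;
        assert(Shifted < kOffset);
        assert((Shifted | kOffset) == (Shifted + kOffset));
      }
      return 0;
    }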
@@ -359,7 +389,8 @@ static size_t RedzoneSizeForScale(int MappingScale) {
 
 /// AddressSanitizer: instrument the code in module to find memory bugs.
 struct AddressSanitizer : public FunctionPass {
-  AddressSanitizer() : FunctionPass(ID) {
+  explicit AddressSanitizer(bool CompileKernel = false)
+      : FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan) {
     initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
   }
   const char *getPassName() const override {
@@ -367,17 +398,41 @@ struct AddressSanitizer : public FunctionPass {
   }
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<DominatorTreeWrapperPass>();
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
+  }
+  uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
+    Type *Ty = AI->getAllocatedType();
+    uint64_t SizeInBytes =
+        AI->getModule()->getDataLayout().getTypeAllocSize(Ty);
+    return SizeInBytes;
+  }
+  /// Check if we want (and can) handle this alloca.
+  bool isInterestingAlloca(AllocaInst &AI);
+
+  // Check if we have dynamic alloca.
+  bool isDynamicAlloca(AllocaInst &AI) const {
+    return AI.isArrayAllocation() || !AI.isStaticAlloca();
   }
-  void instrumentMop(Instruction *I, bool UseCalls);
+
+  /// If it is an interesting memory access, return the PointerOperand
+  /// and set IsWrite/Alignment. Otherwise return nullptr.
+  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+                                   uint64_t *TypeSize, unsigned *Alignment);
+  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
+                     bool UseCalls, const DataLayout &DL);
   void instrumentPointerComparisonOrSubtraction(Instruction *I);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                          Value *Addr, uint32_t TypeSize, bool IsWrite,
-                         Value *SizeArgument, bool UseCalls);
+                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
+  void instrumentUnusualSizeOrAlignment(Instruction *I, Value *Addr,
+                                        uint32_t TypeSize, bool IsWrite,
+                                        Value *SizeArgument, bool UseCalls,
+                                        uint32_t Exp);
   Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                            Value *ShadowValue, uint32_t TypeSize);
   Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                  bool IsWrite, size_t AccessSizeIndex,
-                                 Value *SizeArgument);
+                                 Value *SizeArgument, uint32_t Exp);
   void instrumentMemIntrinsic(MemIntrinsic *MI);
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool runOnFunction(Function &F) override;
@@ -392,39 +447,41 @@ struct AddressSanitizer : public FunctionPass {
   bool LooksLikeCodeInBug11395(Instruction *I);
   bool GlobalIsLinkerInitialized(GlobalVariable *G);
+  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
+                    uint64_t TypeSize) const;
 
   LLVMContext *C;
-  const DataLayout *DL;
   Triple TargetTriple;
   int LongSize;
+  bool CompileKernel;
   Type *IntptrTy;
   ShadowMapping Mapping;
   DominatorTree *DT;
-  Function *AsanCtorFunction;
-  Function *AsanInitFunction;
+  Function *AsanCtorFunction = nullptr;
+  Function *AsanInitFunction = nullptr;
   Function *AsanHandleNoReturnFunc;
   Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
-  // This array is indexed by AccessIsWrite and log2(AccessSize).
-  Function *AsanErrorCallback[2][kNumberOfAccessSizes];
-  Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
-  // This array is indexed by AccessIsWrite.
-  Function *AsanErrorCallbackSized[2],
-      *AsanMemoryAccessCallbackSized[2];
+  // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
+  Function *AsanErrorCallback[2][2][kNumberOfAccessSizes];
+  Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
+  // This array is indexed by AccessIsWrite and Experiment.
+  Function *AsanErrorCallbackSized[2][2];
+  Function *AsanMemoryAccessCallbackSized[2][2];
   Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
   InlineAsm *EmptyAsm;
   GlobalsMetadata GlobalsMD;
+  DenseMap<AllocaInst *, bool> ProcessedAllocas;
 
   friend struct FunctionStackPoisoner;
 };
 
 class AddressSanitizerModule : public ModulePass {
  public:
-  AddressSanitizerModule() : ModulePass(ID) {}
+  explicit AddressSanitizerModule(bool CompileKernel = false)
+      : ModulePass(ID), CompileKernel(CompileKernel || ClEnableKasan) {}
   bool runOnModule(Module &M) override;
   static char ID;  // Pass identification, replacement for typeid
-  const char *getPassName() const override {
-    return "AddressSanitizerModule";
-  }
+  const char *getPassName() const override { return "AddressSanitizerModule"; }
 
  private:
   void initializeCallbacks(Module &M);
@@ -438,9 +495,9 @@ class AddressSanitizerModule : public ModulePass {
   }
 
   GlobalsMetadata GlobalsMD;
+  bool CompileKernel;
   Type *IntptrTy;
   LLVMContext *C;
-  const DataLayout *DL;
   Triple TargetTriple;
   ShadowMapping Mapping;
   Function *AsanPoisonGlobals;
@@ -467,13 +524,14 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   Type *IntptrPtrTy;
   ShadowMapping Mapping;
 
-  SmallVector<AllocaInst*, 16> AllocaVec;
-  SmallVector<Instruction*, 8> RetVec;
+  SmallVector<AllocaInst *, 16> AllocaVec;
+  SmallVector<Instruction *, 8> RetVec;
   unsigned StackAlignment;
 
   Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
-           *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
+      *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
   Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
+  Function *AsanAllocaPoisonFunc, *AsanAllocasUnpoisonFunc;
 
   // Stores a place and arguments of poisoning/unpoisoning call for alloca.
   struct AllocaPoisonCall {
@@ -484,42 +542,33 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   };
   SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;
 
-  // Stores left and right redzone shadow addresses for dynamic alloca
-  // and pointer to alloca instruction itself.
-  // LeftRzAddr is a shadow address for alloca left redzone.
-  // RightRzAddr is a shadow address for alloca right redzone.
-  struct DynamicAllocaCall {
-    AllocaInst *AI;
-    Value *LeftRzAddr;
-    Value *RightRzAddr;
-    bool Poison;
-    explicit DynamicAllocaCall(AllocaInst *AI,
-                               Value *LeftRzAddr = nullptr,
-                               Value *RightRzAddr = nullptr)
-        : AI(AI), LeftRzAddr(LeftRzAddr), RightRzAddr(RightRzAddr), Poison(true)
-    {}
-  };
-  SmallVector<DynamicAllocaCall, 1> DynamicAllocaVec;
+  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
+  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
+  AllocaInst *DynamicAllocaLayout = nullptr;
 
   // Maps Value to an AllocaInst from which the Value is originated.
-  typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy;
+  typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
   AllocaForValueMapTy AllocaForValue;
 
   bool HasNonEmptyInlineAsm;
   std::unique_ptr<CallInst> EmptyInlineAsm;
 
   FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
-      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
-        C(ASan.C), IntptrTy(ASan.IntptrTy),
-        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
-        StackAlignment(1 << Mapping.Scale), HasNonEmptyInlineAsm(false),
+      : F(F),
+        ASan(ASan),
+        DIB(*F.getParent(), /*AllowUnresolved*/ false),
+        C(ASan.C),
+        IntptrTy(ASan.IntptrTy),
+        IntptrPtrTy(PointerType::get(IntptrTy, 0)),
+        Mapping(ASan.Mapping),
+        StackAlignment(1 << Mapping.Scale),
+        HasNonEmptyInlineAsm(false),
        EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
 
   bool runOnFunction() {
     if (!ClStack) return false;
     // Collect alloca, ret, lifetime instructions etc.
-    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
-      visit(*BB);
+    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
 
     if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
 
@@ -538,44 +587,30 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   // Then unpoison everything back before the function returns.
   void poisonStack();
 
+  void createDynamicAllocasInitStorage();
+
   // ----------------------- Visitors.
   /// \brief Collect all Ret instructions.
-  void visitReturnInst(ReturnInst &RI) {
-    RetVec.push_back(&RI);
+  void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }
+
+  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
+                                        Value *SavedStack) {
+    IRBuilder<> IRB(InstBefore);
+    IRB.CreateCall(AsanAllocasUnpoisonFunc,
+                   {IRB.CreateLoad(DynamicAllocaLayout),
+                    IRB.CreatePtrToInt(SavedStack, IntptrTy)});
   }
 
   // Unpoison dynamic allocas redzones.
-  void unpoisonDynamicAlloca(DynamicAllocaCall &AllocaCall) {
-    if (!AllocaCall.Poison)
-      return;
-    for (auto Ret : RetVec) {
-      IRBuilder<> IRBRet(Ret);
-      PointerType *Int32PtrTy = PointerType::getUnqual(IRBRet.getInt32Ty());
-      Value *Zero = Constant::getNullValue(IRBRet.getInt32Ty());
-      Value *PartialRzAddr = IRBRet.CreateSub(AllocaCall.RightRzAddr,
-                                              ConstantInt::get(IntptrTy, 4));
-      IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.LeftRzAddr,
-                                                     Int32PtrTy));
-      IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(PartialRzAddr,
-                                                     Int32PtrTy));
-      IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.RightRzAddr,
-                                                     Int32PtrTy));
-    }
-  }
+  void unpoisonDynamicAllocas() {
+    for (auto &Ret : RetVec)
+      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
 
-  // Right shift for BigEndian and left shift for LittleEndian.
-  Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) {
-    return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift)
-                                     : IRB.CreateLShr(Val, Shift);
+    for (auto &StackRestoreInst : StackRestoreVec)
+      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
+                                       StackRestoreInst->getOperand(0));
   }
 
-  // Compute PartialRzMagic for dynamic alloca call. Since we don't know the
-  // size of requested memory until runtime, we should compute it dynamically.
-  // If PartialSize is 0, PartialRzMagic would contain kAsanAllocaRightMagic,
-  // otherwise it would contain the value that we will use to poison the
-  // partial redzone for alloca call.
-  Value *computePartialRzMagic(Value *PartialSize, IRBuilder<> &IRB);
-
   // Deploy and poison redzones around dynamic alloca call. To do this, we
   // should replace this call with another one with changed parameters and
   // replace all its uses with new address, so
@@ -586,20 +621,15 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   //     addr = tmp + 32 (first 32 bytes are for the left redzone).
   // Additional_size is added to make new memory allocation contain not only
   // requested memory, but also left, partial and right redzones.
-  // After that, we should poison redzones:
-  // (1) Left redzone with kAsanAllocaLeftMagic.
-  // (2) Partial redzone with the value, computed in runtime by
-  //     computePartialRzMagic function.
-  // (3) Right redzone with kAsanAllocaRightMagic.
-  void handleDynamicAllocaCall(DynamicAllocaCall &AllocaCall);
+  void handleDynamicAllocaCall(AllocaInst *AI);
 
   /// \brief Collect Alloca instructions we want (and can) handle.
   void visitAllocaInst(AllocaInst &AI) {
-    if (!isInterestingAlloca(AI)) return;
+    if (!ASan.isInterestingAlloca(AI)) return;
 
     StackAlignment = std::max(StackAlignment, AI.getAlignment());
-    if (isDynamicAlloca(AI))
-      DynamicAllocaVec.push_back(DynamicAllocaCall(&AI));
+    if (ASan.isDynamicAlloca(AI))
+      DynamicAllocaVec.push_back(&AI);
     else
       AllocaVec.push_back(&AI);
   }
@@ -607,10 +637,10 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   /// \brief Collect lifetime intrinsic calls to check for use-after-scope
   /// errors.
   void visitIntrinsicInst(IntrinsicInst &II) {
-    if (!ClCheckLifetime) return;
     Intrinsic::ID ID = II.getIntrinsicID();
-    if (ID != Intrinsic::lifetime_start &&
-        ID != Intrinsic::lifetime_end)
+    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
+    if (!ClCheckLifetime) return;
+    if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
       return;
     // Found lifetime intrinsic, add ASan instrumentation if necessary.
     ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
@@ -640,28 +670,11 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
 
   bool doesDominateAllExits(const Instruction *I) const {
     for (auto Ret : RetVec) {
-      if (!ASan.getDominatorTree().dominates(I, Ret))
-        return false;
+      if (!ASan.getDominatorTree().dominates(I, Ret)) return false;
     }
     return true;
   }
 
-  bool isDynamicAlloca(AllocaInst &AI) const {
-    return AI.isArrayAllocation() || !AI.isStaticAlloca();
-  }
-
-  // Check if we want (and can) handle this alloca.
-  bool isInterestingAlloca(AllocaInst &AI) const {
-    return (AI.getAllocatedType()->isSized() &&
-            // alloca() may be called with 0 size, ignore it.
-            getAllocaSizeInBytes(&AI) > 0);
-  }
-
-  uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
-    Type *Ty = AI->getAllocatedType();
-    uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
-    return SizeInBytes;
-  }
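handleDynamicAllocaCall and unpoisonDynamicAllocas above replace the old magic-value stores with two runtime calls. Expressed as a source-level sketch, the rewrite of one dynamic alloca looks roughly like this; the rounding arithmetic is an approximation, and only the two __asan_* entry points are taken from the patch:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    extern "C" void __asan_alloca_poison(uintptr_t addr, uintptr_t size);
    extern "C" void __asan_allocas_unpoison(uintptr_t top, uintptr_t bottom);

    void demo(size_t n) {
      const size_t kRz = 32;  // kAllocaRzSize: the left redzone is 32 bytes
      // Original:      char *p = (char *)__builtin_alloca(n);
      // Instrumented:  over-allocate left + rounded-up n + right redzone,
      size_t padded = kRz + ((n + kRz - 1) & ~(kRz - 1)) + kRz;
      char *tmp = (char *)__builtin_alloca(padded);
      char *p = tmp + kRz;                    // hand out past the left redzone
      __asan_alloca_poison((uintptr_t)p, n);  // poison redzones around [p, p+n)
      memset(p, 0, n);                        // the function body uses p as before
      // Before every ret (and at each llvm.stackrestore) the pass now emits
      // __asan_allocas_unpoison(<dynamic-alloca layout>, <saved stack top>).
    }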
 
   /// Finds alloca where the value comes from.
   AllocaInst *findAllocaForValue(Value *V);
   void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
@@ -679,23 +692,27 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
 };
 
 }  // namespace
 
 char AddressSanitizer::ID = 0;
-INITIALIZE_PASS_BEGIN(AddressSanitizer, "asan",
-    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
-    false, false)
+INITIALIZE_PASS_BEGIN(
+    AddressSanitizer, "asan",
+    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
+    false)
 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(AddressSanitizer, "asan",
-    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
-    false, false)
-FunctionPass *llvm::createAddressSanitizerFunctionPass() {
-  return new AddressSanitizer();
+INITIALIZE_PASS_END(
+    AddressSanitizer, "asan",
+    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
+    false)
+FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel) {
+  return new AddressSanitizer(CompileKernel);
 }
 
 char AddressSanitizerModule::ID = 0;
-INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
+INITIALIZE_PASS(
+    AddressSanitizerModule, "asan-module",
     "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
-    "ModulePass", false, false)
-ModulePass *llvm::createAddressSanitizerModulePass() {
-  return new AddressSanitizerModule();
+    "ModulePass",
+    false, false)
+ModulePass *llvm::createAddressSanitizerModulePass(bool CompileKernel) {
+  return new AddressSanitizerModule(CompileKernel);
 }
 
 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
@@ -705,16 +722,15 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
 }
 
 // \brief Create a constant for Str so that we can pass it to the run-time lib.
-static GlobalVariable *createPrivateGlobalForString(
-    Module &M, StringRef Str, bool AllowMerging) {
+static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
+                                                    bool AllowMerging) {
   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
   // We use private linkage for module-local strings. If they can be merged
   // with another one, we set the unnamed_addr attribute.
   GlobalVariable *GV =
       new GlobalVariable(M, StrConst->getType(), true,
                          GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
-  if (AllowMerging)
-    GV->setUnnamedAddr(true);
+  if (AllowMerging) GV->setUnnamedAddr(true);
   GV->setAlignment(1);  // Strings may not be merged w/o setting align 1.
   return GV;
 }
@@ -743,8 +759,7 @@ static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
   // Shadow >> scale
   Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
-  if (Mapping.Offset == 0)
-    return Shadow;
+  if (Mapping.Offset == 0) return Shadow;
   // (Shadow >> scale) | offset
   if (Mapping.OrShadowOffset)
     return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
@@ -756,53 +771,86 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
 
 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
   IRBuilder<> IRB(MI);
   if (isa<MemTransferInst>(MI)) {
-    IRB.CreateCall3(
+    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
-        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
-        IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
-        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
+        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
+         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
   } else if (isa<MemSetInst>(MI)) {
-    IRB.CreateCall3(
+    IRB.CreateCall(
         AsanMemset,
-        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
-        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
-        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
+        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
+         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
   }
   MI->eraseFromParent();
 }
 
-// If I is an interesting memory access, return the PointerOperand
-// and set IsWrite/Alignment. Otherwise return nullptr.
-static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
-                                        unsigned *Alignment) {
+/// Check if we want (and can) handle this alloca.
+bool AddressSanitizer::isInterestingAlloca(AllocaInst &AI) {
+  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
+
+  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
+    return PreviouslySeenAllocaInfo->getSecond();
+
+  bool IsInteresting =
+      (AI.getAllocatedType()->isSized() &&
+       // alloca() may be called with 0 size, ignore it.
+       getAllocaSizeInBytes(&AI) > 0 &&
+       // We are only interested in allocas not promotable to registers.
+       // Promotable allocas are common under -O0.
+       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI) ||
+        isDynamicAlloca(AI)));
+
+  ProcessedAllocas[&AI] = IsInteresting;
+  return IsInteresting;
+}
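The practical effect of ClSkipPromotableAllocas, illustrated on plain C++ compiled at -O0 (a hypothetical example, not from the patch). Without mem2reg every local still lives in an alloca; isAllocaPromotable separates scalars that can never go out of bounds from allocas that can:

    int f(int i) {
      int x = 1;    // promotable scalar alloca: its loads/stores are skipped,
                    // which is the -O0 speedup the comments here refer to
      int buf[4];   // indexed through a GEP, not promotable: keeps redzones
      buf[i] = x;   // this store is still instrumented
      return buf[i] + x;
    }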
+
+/// If I is an interesting memory access, return the PointerOperand
+/// and set IsWrite/Alignment. Otherwise return nullptr.
+Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
+                                                   bool *IsWrite,
+                                                   uint64_t *TypeSize,
+                                                   unsigned *Alignment) {
   // Skip memory accesses inserted by another instrumentation.
-  if (I->getMetadata("nosanitize"))
-    return nullptr;
+  if (I->getMetadata("nosanitize")) return nullptr;
+
+  Value *PtrOperand = nullptr;
+  const DataLayout &DL = I->getModule()->getDataLayout();
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (!ClInstrumentReads) return nullptr;
     *IsWrite = false;
+    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
     *Alignment = LI->getAlignment();
-    return LI->getPointerOperand();
-  }
-  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+    PtrOperand = LI->getPointerOperand();
+  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites) return nullptr;
     *IsWrite = true;
+    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
     *Alignment = SI->getAlignment();
-    return SI->getPointerOperand();
-  }
-  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
+    PtrOperand = SI->getPointerOperand();
+  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
     *Alignment = 0;
-    return RMW->getPointerOperand();
-  }
-  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
+    PtrOperand = RMW->getPointerOperand();
+  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
     *Alignment = 0;
-    return XCHG->getPointerOperand();
+    PtrOperand = XCHG->getPointerOperand();
   }
-  return nullptr;
+
+  // Treat memory accesses to promotable allocas as non-interesting since they
+  // will not cause memory violations. This greatly speeds up the instrumented
+  // executable at -O0.
+  if (ClSkipPromotableAllocas)
+    if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
+      return isInterestingAlloca(*AI) ? AI : nullptr;
+
+  return PtrOperand;
 }
 
 static bool isPointerOperand(Value *V) {
@@ -814,17 +862,15 @@ static bool isPointerOperand(Value *V) {
 // the frontend.
 static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
   if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
-    if (!Cmp->isRelational())
-      return false;
+    if (!Cmp->isRelational()) return false;
   } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
-    if (BO->getOpcode() != Instruction::Sub)
-      return false;
+    if (BO->getOpcode() != Instruction::Sub) return false;
   } else {
     return false;
   }
   if (!isPointerOperand(I->getOperand(0)) ||
       !isPointerOperand(I->getOperand(1)))
-      return false;
+    return false;
   return true;
 }
 
@@ -835,8 +881,8 @@ bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
   return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
 }
 
-void
-AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
+void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
+    Instruction *I) {
   IRBuilder<> IRB(I);
   Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
   Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
@@ -844,41 +890,50 @@ AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
     if (Param[i]->getType()->isPointerTy())
       Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
   }
-  IRB.CreateCall2(F, Param[0], Param[1]);
+  IRB.CreateCall(F, Param);
 }
 
-void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
+void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
+                                     Instruction *I, bool UseCalls,
+                                     const DataLayout &DL) {
   bool IsWrite = false;
   unsigned Alignment = 0;
-  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
+  uint64_t TypeSize = 0;
+  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment);
   assert(Addr);
+
+  // Optimization experiments.
+  // The experiments can be used to evaluate potential optimizations that remove
+  // instrumentation (assess false negatives). Instead of completely removing
+  // some instrumentation, you set Exp to a non-zero value (mask of optimization
+  // experiments that want to remove instrumentation of this instruction).
+  // If Exp is non-zero, this pass will emit special calls into runtime
+  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
+  // make runtime terminate the program in a special way (with a different
+  // exit status). Then you run the new compiler on a buggy corpus, collect
+  // the special terminations (ideally, you don't see them at all -- no false
+  // negatives) and make the decision on the optimization.
+  uint32_t Exp = ClForceExperiment;
+
   if (ClOpt && ClOptGlobals) {
-    if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
-      // If initialization order checking is disabled, a simple access to a
-      // dynamically initialized global is always valid.
-      if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
-        NumOptimizedAccessesToGlobalVar++;
-        return;
-      }
-    }
-    ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
-    if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
-      if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
-        if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
-          NumOptimizedAccessesToGlobalArray++;
-          return;
-        }
-      }
+    // If initialization order checking is disabled, a simple access to a
+    // dynamically initialized global is always valid.
+    GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
+    if (G != NULL && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
+        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
+      NumOptimizedAccessesToGlobalVar++;
+      return;
     }
   }
-  Type *OrigPtrTy = Addr->getType();
-  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
-
-  assert(OrigTy->isSized());
-  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
-
-  assert((TypeSize % 8) == 0);
+  if (ClOpt && ClOptStack) {
+    // A direct inbounds access to a stack variable is always valid.
+    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
+      NumOptimizedAccessesToStackVar++;
+      return;
    }
  }
 
   if (IsWrite)
     NumInstrumentedWrites++;
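The experiment machinery documented in the comment above only changes which runtime entry point gets called. A sketch of what instrumented code does for a 4-byte load with and without an experiment mask; __asan_exp_load4 follows the naming scheme this patch introduces, and the distinct-exit-status behavior is the runtime's side of the contract:

    #include <cstdint>

    extern "C" void __asan_load4(uintptr_t addr);
    extern "C" void __asan_exp_load4(uintptr_t addr, uint32_t exp);

    void demo(int *p, uint32_t exp_mask) {
      if (exp_mask == 0)
        __asan_load4((uintptr_t)p);  // normal instrumentation
      else
        // Experimental variant: on a bad access the runtime reports through
        // __asan_report_exp_load4 and terminates with a special exit status,
        // so candidate optimizations can be scored for false negatives.
        __asan_exp_load4((uintptr_t)p, exp_mask);
      (void)*p;
    }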
@@ -891,65 +946,57 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
   if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
        TypeSize == 128) &&
       (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
-    return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
-  // Instrument unusual size or unusual alignment.
-  // We can not do it with a single check, so we do 1-byte check for the first
-  // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
-  // to report the actual access size.
-  IRBuilder<> IRB(I);
-  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
-  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
-  if (UseCalls) {
-    IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
-  } else {
-    Value *LastByte = IRB.CreateIntToPtr(
-        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
-        OrigPtrTy);
-    instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
-    instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
-  }
+    return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls,
+                             Exp);
+  instrumentUnusualSizeOrAlignment(I, Addr, TypeSize, IsWrite, nullptr,
+                                   UseCalls, Exp);
 }
 
-// Validate the result of Module::getOrInsertFunction called for an interface
-// function of AddressSanitizer. If the instrumented module defines a function
-// with the same name, their prototypes must match, otherwise
-// getOrInsertFunction returns a bitcast.
-static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
-  if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
-  FuncOrBitcast->dump();
-  report_fatal_error("trying to redefine an AddressSanitizer "
-                     "interface function");
-}
-
-Instruction *AddressSanitizer::generateCrashCode(
-    Instruction *InsertBefore, Value *Addr,
-    bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) {
+Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
+                                                 Value *Addr, bool IsWrite,
+                                                 size_t AccessSizeIndex,
+                                                 Value *SizeArgument,
+                                                 uint32_t Exp) {
   IRBuilder<> IRB(InsertBefore);
-  CallInst *Call = SizeArgument
-      ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
-      : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);
+  Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
+  CallInst *Call = nullptr;
+  if (SizeArgument) {
+    if (Exp == 0)
+      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
+                            {Addr, SizeArgument});
+    else
+      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
+                            {Addr, SizeArgument, ExpVal});
+  } else {
+    if (Exp == 0)
+      Call =
+          IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
+    else
+      Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
+                            {Addr, ExpVal});
+  }
 
   // We don't do Call->setDoesNotReturn() because the BB already has
   // UnreachableInst at the end.
   // This EmptyAsm is required to avoid callback merge.
-  IRB.CreateCall(EmptyAsm);
+  IRB.CreateCall(EmptyAsm, {});
   return Call;
 }
 
 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
-                                            Value *ShadowValue,
-                                            uint32_t TypeSize) {
+                                           Value *ShadowValue,
+                                           uint32_t TypeSize) {
   size_t Granularity = 1 << Mapping.Scale;
   // Addr & (Granularity - 1)
-  Value *LastAccessedByte = IRB.CreateAnd(
-      AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
+  Value *LastAccessedByte =
+      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
   // (Addr & (Granularity - 1)) + size - 1
   if (TypeSize / 8 > 1)
     LastAccessedByte = IRB.CreateAdd(
         LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
   // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
-  LastAccessedByte = IRB.CreateIntCast(
-      LastAccessedByte, ShadowValue->getType(), false);
+  LastAccessedByte =
+      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
   // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
   return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
 }
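createSlowPathCmp encodes the partial-granule rule: a shadow byte k in 1..7 means only the first k bytes of that 8-byte granule are addressable. The same check in plain C++, with a worked case (assuming Scale = 3, as the comments above do):

    #include <cstdint>

    // True when an access of 'size' bytes at 'addr' touches unaddressable
    // memory, given the shadow byte covering addr's granule.
    bool shadowSaysBad(uint64_t addr, uint64_t size, int8_t shadow) {
      if (shadow == 0) return false;                  // whole granule is valid
      int8_t last = (int8_t)((addr & 7) + size - 1);  // last byte's offset in granule
      return last >= shadow;                          // signed >=, as in the IR
    }
    // Example: addr % 8 == 5, size == 2, shadow == 6:
    //   last = 5 + 2 - 1 = 6 >= 6  -> report; only bytes 0..5 are addressable.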
@@ -957,24 +1004,29 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
 void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                          Instruction *InsertBefore, Value *Addr,
                                          uint32_t TypeSize, bool IsWrite,
-                                         Value *SizeArgument, bool UseCalls) {
+                                         Value *SizeArgument, bool UseCalls,
+                                         uint32_t Exp) {
   IRBuilder<> IRB(InsertBefore);
   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
 
   if (UseCalls) {
-    IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
-                   AddrLong);
+    if (Exp == 0)
+      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
+                     AddrLong);
+    else
+      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
+                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
     return;
   }
 
-  Type *ShadowTy = IntegerType::get(
-      *C, std::max(8U, TypeSize >> Mapping.Scale));
+  Type *ShadowTy =
+      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
   Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
   Value *ShadowPtr = memToShadow(AddrLong, IRB);
   Value *CmpVal = Constant::getNullValue(ShadowTy);
-  Value *ShadowValue = IRB.CreateLoad(
-      IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
+  Value *ShadowValue =
+      IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
 
   Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
   size_t Granularity = 1 << Mapping.Scale;
@@ -983,10 +1035,9 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
   if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
     // We use branch weights for the slow path check, to indicate that the slow
     // path is rarely taken. This seems to be the case for SPEC benchmarks.
-    TerminatorInst *CheckTerm =
-        SplitBlockAndInsertIfThen(Cmp, InsertBefore, false,
-            MDBuilder(*C).createBranchWeights(1, 100000));
-    assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
+    TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
+        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
+    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
     BasicBlock *NextBB = CheckTerm->getSuccessor(0);
     IRB.SetInsertPoint(CheckTerm);
     Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
@@ -999,11 +1050,37 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
     CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
   }
 
-  Instruction *Crash = generateCrashCode(
-      CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument);
+  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
+                                         AccessSizeIndex, SizeArgument, Exp);
   Crash->setDebugLoc(OrigIns->getDebugLoc());
 }
 
+// Instrument unusual size or unusual alignment.
+// We can not do it with a single check, so we do 1-byte check for the first
+// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
+// to report the actual access size.
+void AddressSanitizer::instrumentUnusualSizeOrAlignment(
+    Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite,
+    Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+  IRBuilder<> IRB(I);
+  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
+  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+  if (UseCalls) {
+    if (Exp == 0)
+      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
+                     {AddrLong, Size});
+    else
+      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
+                     {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+  } else {
+    Value *LastByte = IRB.CreateIntToPtr(
+        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
+        Addr->getType());
+    instrumentAddress(I, I, Addr, 8, IsWrite, Size, false, Exp);
+    instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false, Exp);
+  }
+}
+
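For a 10-byte access, instrumentUnusualSizeOrAlignment above either emits one sized callback or checks just the first and last byte, passing the real size through so the report names the true access width. A sketch of the resulting shape; the two callback names follow this patch's user-space naming, and the 0x7FFF8000 offset is the x86-64 user-space assumption:

    #include <cstdint>

    extern "C" void __asan_loadN(uintptr_t addr, uintptr_t size);
    extern "C" void __asan_report_load_n(uintptr_t addr, uintptr_t size);

    void check_10_byte_load(const char *p, bool use_calls) {
      const uintptr_t a = (uintptr_t)p, n = 10;
      if (use_calls) {
        __asan_loadN(a, n);  // one sized callback covers the whole range
      } else {
        // Two 1-byte shadow checks, at the first and the last byte, each
        // carrying the real size so a failure reports a 10-byte access.
        for (uintptr_t byte : {a, a + n - 1}) {
          int8_t shadow = *(int8_t *)((byte >> 3) + 0x7FFF8000);
          if (shadow != 0 && (int8_t)(byte & 7) >= shadow)
            __asan_report_load_n(byte, n);
        }
      }
    }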
 void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
                                                   GlobalValue *ModuleName) {
   // Set up the arguments to our poison/unpoison functions.
@@ -1025,12 +1102,11 @@ void AddressSanitizerModule::createInitializerPoisonCalls(
   ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
   for (Use &OP : CA->operands()) {
-    if (isa<ConstantAggregateZero>(OP))
-      continue;
+    if (isa<ConstantAggregateZero>(OP)) continue;
     ConstantStruct *CS = cast<ConstantStruct>(OP);
 
     // Must have a function or null ptr.
-    if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
+    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
       if (F->getName() == kAsanModuleCtorName) continue;
       ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
       // Don't instrument CTORs that will run before asan.module_ctor.
@@ -1055,30 +1131,38 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
       G->getLinkage() != GlobalVariable::PrivateLinkage &&
       G->getLinkage() != GlobalVariable::InternalLinkage)
     return false;
-  if (G->hasComdat())
-    return false;
+  if (G->hasComdat()) return false;
   // Two problems with thread-locals:
   //   - The address of the main thread's copy can't be computed at link-time.
   //   - Need to poison all copies, not just the main thread's one.
-  if (G->isThreadLocal())
-    return false;
+  if (G->isThreadLocal()) return false;
   // For now, just ignore this Global if the alignment is large.
   if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
 
   if (G->hasSection()) {
     StringRef Section(G->getSection());
+
+    // Globals from llvm.metadata aren't emitted, do not instrument them.
+    if (Section == "llvm.metadata") return false;
+
+    // Callbacks put into the CRT initializer/terminator sections
+    // should not be instrumented.
+    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
+    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
+    if (Section.startswith(".CRT")) {
+      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
+      return false;
+    }
+
     if (TargetTriple.isOSBinFormatMachO()) {
       StringRef ParsedSegment, ParsedSection;
       unsigned TAA = 0, StubSize = 0;
       bool TAAParsed;
-      std::string ErrorCode =
-          MCSectionMachO::ParseSectionSpecifier(Section, ParsedSegment,
-                                                ParsedSection, TAA, TAAParsed,
-                                                StubSize);
+      std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
+          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
       if (!ErrorCode.empty()) {
-        report_fatal_error("Invalid section specifier '" + ParsedSection +
-                           "': " + ErrorCode + ".");
+        assert(false && "Invalid section specifier.");
+        return false;
       }
 
       // Ignore the globals from the __OBJC section. The ObjC runtime assumes
@@ -1108,18 +1192,6 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
         return false;
       }
     }
-
-    // Callbacks put into the CRT initializer/terminator sections
-    // should not be instrumented.
-    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
-    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
-    if (Section.startswith(".CRT")) {
-      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
-      return false;
-    }
-
-    // Globals from llvm.metadata aren't emitted, do not instrument them.
-    if (Section == "llvm.metadata") return false;
   }
 
   return true;
@@ -1128,20 +1200,19 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
 void AddressSanitizerModule::initializeCallbacks(Module &M) {
   IRBuilder<> IRB(*C);
 
   // Declare our poisoning and unpoisoning functions.
-  AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+  AsanPoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
       kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr));
   AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
-  AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
+  AsanUnpoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr));
   AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
 
   // Declare functions that register/unregister globals.
-  AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
-      kAsanRegisterGlobalsName, IRB.getVoidTy(),
-      IntptrTy, IntptrTy, nullptr));
+  AsanRegisterGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
   AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
-  AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
-      kAsanUnregisterGlobalsName,
-      IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
+  AsanUnregisterGlobals = checkSanitizerInterfaceFunction(
+      M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(),
+                            IntptrTy, IntptrTy, nullptr));
   AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
 }
 
@@ -1154,8 +1225,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
   SmallVector<GlobalVariable *, 16> GlobalsToChange;
 
   for (auto &G : M.globals()) {
-    if (ShouldInstrumentGlobal(&G))
-      GlobalsToChange.push_back(&G);
+    if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G);
   }
 
   size_t n = GlobalsToChange.size();
@@ -1180,8 +1250,9 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
   // We shouldn't merge same module names, as this string serves as unique
   // module ID in runtime.
   GlobalVariable *ModuleName = createPrivateGlobalForString(
-      M, M.getModuleIdentifier(), /*AllowMerging*/false);
+      M, M.getModuleIdentifier(), /*AllowMerging*/ false);
 
+  auto &DL = M.getDataLayout();
   for (size_t i = 0; i < n; i++) {
     static const uint64_t kMaxGlobalRedzone = 1 << 18;
     GlobalVariable *G = GlobalsToChange[i];
@@ -1195,32 +1266,30 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
 
     PointerType *PtrTy = cast<PointerType>(G->getType());
     Type *Ty = PtrTy->getElementType();
-    uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
+    uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
     uint64_t MinRZ = MinRedzoneSizeForGlobal();
     // MinRZ <= RZ <= kMaxGlobalRedzone
     // and trying to make RZ to be ~ 1/4 of SizeInBytes.
-    uint64_t RZ = std::max(MinRZ,
-                           std::min(kMaxGlobalRedzone,
-                                    (SizeInBytes / MinRZ / 4) * MinRZ));
+    uint64_t RZ = std::max(
+        MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
    uint64_t RightRedzoneSize = RZ;
     // Round up to MinRZ
-    if (SizeInBytes % MinRZ)
-      RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
+    if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
     assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
     Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
 
     StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr);
-    Constant *NewInitializer = ConstantStruct::get(
-        NewTy, G->getInitializer(),
-        Constant::getNullValue(RightRedZoneTy), nullptr);
+    Constant *NewInitializer =
+        ConstantStruct::get(NewTy, G->getInitializer(),
+                            Constant::getNullValue(RightRedZoneTy), nullptr);
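Worked numbers for the redzone sizing just above, assuming the usual MinRZ of 32: the right redzone aims at about a quarter of the object, is clamped to [MinRZ, kMaxGlobalRedzone], and is then padded so object plus redzone is MinRZ-aligned.

    #include <algorithm>
    #include <cstdint>

    uint64_t rightRedzone(uint64_t SizeInBytes, uint64_t MinRZ = 32,
                          uint64_t kMaxGlobalRedzone = 1 << 18) {
      uint64_t RZ = std::max(
          MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
      if (SizeInBytes % MinRZ) RZ += MinRZ - (SizeInBytes % MinRZ);
      return RZ;
    }
    // rightRedzone(100)  == 32 + 28 == 60   (100 + 60 is 32-aligned)
    // rightRedzone(4096) == 1024            (~1/4 of the object, already aligned)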
 
     // Create a new global variable with enough space for a redzone.
     GlobalValue::LinkageTypes Linkage = G->getLinkage();
     if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
       Linkage = GlobalValue::InternalLinkage;
-    GlobalVariable *NewGlobal = new GlobalVariable(
-        M, NewTy, G->isConstant(), Linkage,
-        NewInitializer, "", G, G->getThreadLocalMode());
+    GlobalVariable *NewGlobal =
+        new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
+                           "", G, G->getThreadLocalMode());
     NewGlobal->copyAttributesFrom(G);
     NewGlobal->setAlignment(MinRZ);
 
@@ -1229,7 +1298,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
     Indices2[1] = IRB.getInt32(0);
 
     G->replaceAllUsesWith(
-        ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
+        ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
     NewGlobal->takeName(G);
     G->eraseFromParent();
 
@@ -1249,8 +1318,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
         ConstantExpr::getPointerCast(ModuleName, IntptrTy),
         ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr);
 
-    if (ClInitializers && MD.IsDynInit)
-      HasDynamicallyInitializedGlobals = true;
+    if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
 
     DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
   }
@@ -1263,20 +1331,20 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
   // Create calls for poisoning before initializers run and unpoisoning after.
   if (HasDynamicallyInitializedGlobals)
     createInitializerPoisonCalls(M, ModuleName);
-  IRB.CreateCall2(AsanRegisterGlobals,
-                  IRB.CreatePointerCast(AllGlobals, IntptrTy),
-                  ConstantInt::get(IntptrTy, n));
+  IRB.CreateCall(AsanRegisterGlobals,
+                 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
+                  ConstantInt::get(IntptrTy, n)});
 
   // We also need to unregister globals at the end, e.g. when a shared library
   // gets closed.
-  Function *AsanDtorFunction = Function::Create(
-      FunctionType::get(Type::getVoidTy(*C), false),
-      GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
+  Function *AsanDtorFunction =
+      Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
+                       GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
   BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
   IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
-  IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
-                       IRB.CreatePointerCast(AllGlobals, IntptrTy),
-                       ConstantInt::get(IntptrTy, n));
+  IRB_Dtor.CreateCall(AsanUnregisterGlobals,
+                      {IRB.CreatePointerCast(AllGlobals, IntptrTy),
+                       ConstantInt::get(IntptrTy, n)});
   appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
 
   DEBUG(dbgs() << M);
@@ -1284,25 +1352,22 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
 }
 
 bool AddressSanitizerModule::runOnModule(Module &M) {
-  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
-  if (!DLP)
-    return false;
-  DL = &DLP->getDataLayout();
   C = &(M.getContext());
-  int LongSize = DL->getPointerSizeInBits();
+  int LongSize = M.getDataLayout().getPointerSizeInBits();
   IntptrTy = Type::getIntNTy(*C, LongSize);
   TargetTriple = Triple(M.getTargetTriple());
-  Mapping = getShadowMapping(TargetTriple, LongSize);
+  Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
   initializeCallbacks(M);
 
   bool Changed = false;
 
-  Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
-  assert(CtorFunc);
-  IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
-
-  if (ClGlobals)
+  // TODO(glider): temporarily disabled globals instrumentation for KASan.
+  if (ClGlobals && !CompileKernel) {
+    Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
+    assert(CtorFunc);
+    IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
     Changed |= InstrumentGlobals(IRB, M);
+  }
   return Changed;
 }
@@ -1310,50 +1375,57 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
 void AddressSanitizer::initializeCallbacks(Module &M) {
   IRBuilder<> IRB(*C);
   // Create __asan_report* callbacks.
-  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
-    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
-         AccessSizeIndex++) {
-      // IsWrite and TypeSize are encoded in the function name.
-      std::string Suffix =
-          (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
-      AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
-          checkInterfaceFunction(
-              M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
-                                    IRB.getVoidTy(), IntptrTy, nullptr));
-      AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
-          checkInterfaceFunction(
-              M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
-                                    IRB.getVoidTy(), IntptrTy, nullptr));
+  // IsWrite, TypeSize and Exp are encoded in the function name.
+  for (int Exp = 0; Exp < 2; Exp++) {
+    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
+      const std::string TypeStr = AccessIsWrite ? "store" : "load";
+      const std::string ExpStr = Exp ? "exp_" : "";
+      const std::string SuffixStr = CompileKernel ? "N" : "_n";
+      const std::string EndingStr = CompileKernel ? "_noabort" : "";
+      const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
+      // TODO(glider): for KASan builds add _noabort to error reporting
+      // functions and make them actually noabort (remove the UnreachableInst).
+      AsanErrorCallbackSized[AccessIsWrite][Exp] =
+          checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+              kAsanReportErrorTemplate + ExpStr + TypeStr + SuffixStr,
+              IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
+      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] =
+          checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+              ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
+              IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
+      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
+           AccessSizeIndex++) {
+        const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex);
+        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
+            checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+                kAsanReportErrorTemplate + ExpStr + Suffix,
+                IRB.getVoidTy(), IntptrTy, ExpType, nullptr));
+        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
+            checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+                ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
+                IRB.getVoidTy(), IntptrTy, ExpType, nullptr));
+      }
     }
   }
-  AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
-      kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
-  AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
-      kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
-
-  AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
-      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
-                            IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
-  AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
-      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
-                            IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
-
-  AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
-      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
+
+  const std::string MemIntrinCallbackPrefix =
+      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
+  AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
-  AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
-      ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
+  AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
-  AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
-      ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
+  AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));
-  AsanHandleNoReturnFunc = checkInterfaceFunction(
+  AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr));
-  AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
+  AsanPtrCmpFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
-  AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
+  AsanPtrSubFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
   // We insert an empty inline asm after __asan_report* to avoid callback merge.
   EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
@@ -1364,32 +1436,22 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
 // virtual
 bool AddressSanitizer::doInitialization(Module &M) {
   // Initialize the private fields. No one has accessed them before.
-  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
-  if (!DLP)
-    report_fatal_error("data layout missing");
-  DL = &DLP->getDataLayout();
   GlobalsMD.init(M);
   C = &(M.getContext());
-  LongSize = DL->getPointerSizeInBits();
+  LongSize = M.getDataLayout().getPointerSizeInBits();
   IntptrTy = Type::getIntNTy(*C, LongSize);
   TargetTriple = Triple(M.getTargetTriple());
-  AsanCtorFunction = Function::Create(
-      FunctionType::get(Type::getVoidTy(*C), false),
-      GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
-  BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
-  // call __asan_init in the module ctor.
-  IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
-  AsanInitFunction = checkInterfaceFunction(
-      M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), nullptr));
-  AsanInitFunction->setLinkage(Function::ExternalLinkage);
-  IRB.CreateCall(AsanInitFunction);
-
-  Mapping = getShadowMapping(TargetTriple, LongSize);
-
-  appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
+  if (!CompileKernel) {
+    std::tie(AsanCtorFunction, AsanInitFunction) =
+        createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName,
+                                            /*InitArgTypes=*/{},
+                                            /*InitArgs=*/{});
+    appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
+  }
+  Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
   return true;
 }
@@ -1403,7 +1465,7 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
   // instrumented functions.
   if (F.getName().find(" load]") != std::string::npos) {
     IRBuilder<> IRB(F.begin()->begin());
-    IRB.CreateCall(AsanInitFunction);
+    IRB.CreateCall(AsanInitFunction, {});
     return true;
   }
   return false;
 }
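The loop nest above encodes IsWrite, the access size, and Exp into each callback name, with kernel builds selecting _noabort flavors of the access callbacks. A sketch of the names this scheme produces, assuming the default "__asan_" prefix and the usual five access sizes (1 through 16 bytes):

#include <cstdio>
#include <string>

// Prints the memory-access callback names generated by the scheme above.
// "__asan_" is the assumed default ClMemoryAccessCallbackPrefix; 5 is the
// usual kNumberOfAccessSizes (1, 2, 4, 8 and 16 byte accesses).
void dumpAccessCallbackNames(bool CompileKernel) {
  const std::string Prefix = "__asan_";
  const std::string Ending = CompileKernel ? "_noabort" : "";
  for (int Exp = 0; Exp < 2; Exp++)
    for (int IsWrite = 0; IsWrite <= 1; IsWrite++)
      for (int SizeIndex = 0; SizeIndex < 5; SizeIndex++) {
        std::string Name = Prefix + (Exp ? "exp_" : "") +
                           (IsWrite ? "store" : "load") +
                           std::to_string(1 << SizeIndex) + Ending;
        std::printf("%s\n", Name.c_str());  // e.g. __asan_load8_noabort
      }
}

With CompileKernel set this yields names like __asan_load8_noabort, matching a kernel runtime that must report and keep running rather than abort.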
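createSanitizerCtorAndInitFunctions replaces the hand-rolled constructor above. Judging from the deleted lines, for this pass it boils down to the following shape (a sketch reconstructed from the removed code, not the utility's actual body):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <utility>
using namespace llvm;

// Sketch: the ctor/init pair the utility creates for ASan here, mirroring
// the deleted hand-rolled version (this pass passes no init arguments).
static std::pair<Function *, Function *>
createCtorAndInit(Module &M, StringRef CtorName, StringRef InitName) {
  LLVMContext &C = M.getContext();
  Function *Ctor =
      Function::Create(FunctionType::get(Type::getVoidTy(C), false),
                       GlobalValue::InternalLinkage, CtorName, &M);
  BasicBlock *BB = BasicBlock::Create(C, "", Ctor);
  IRBuilder<> IRB(ReturnInst::Create(C, BB));
  Function *Init = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction(InitName, IRB.getVoidTy(), nullptr));
  Init->setLinkage(Function::ExternalLinkage);
  IRB.CreateCall(Init, {});  // asan.module_ctor calls __asan_init_v5
  return std::make_pair(Ctor, Init);
}

Skipping this entirely for CompileKernel is consistent with the rest of the patch: the kernel sets up its own shadow and has no module constructors from which to call __asan_init.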
@@ -1420,22 +1482,21 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   // If needed, insert __asan_init before checking for SanitizeAddress attr.
   maybeInsertAsanInitAtFunctionEntry(F);
-  if (!F.hasFnAttribute(Attribute::SanitizeAddress))
-    return false;
+  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return false;
-  if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
-    return false;
+  if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false;
   // We want to instrument every address only once per basic block (unless there
   // are calls between uses).
-  SmallSet<Value*, 16> TempsToInstrument;
-  SmallVector<Instruction*, 16> ToInstrument;
-  SmallVector<Instruction*, 8> NoReturnCalls;
-  SmallVector<BasicBlock*, 16> AllBlocks;
-  SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
+  SmallSet<Value *, 16> TempsToInstrument;
+  SmallVector<Instruction *, 16> ToInstrument;
+  SmallVector<Instruction *, 8> NoReturnCalls;
+  SmallVector<BasicBlock *, 16> AllBlocks;
+  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
   int NumAllocas = 0;
   bool IsWrite;
   unsigned Alignment;
+  uint64_t TypeSize;
   // Fill the set of memory operations to instrument.
   for (auto &BB : F) {
@@ -1444,8 +1505,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
     int NumInsnsPerBB = 0;
     for (auto &Inst : BB) {
       if (LooksLikeCodeInBug11395(&Inst)) return false;
-      if (Value *Addr =
-              isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
+      if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
+                                                  &Alignment)) {
         if (ClOpt && ClOptSameTemp) {
           if (!TempsToInstrument.insert(Addr).second)
             continue;  // We've seen this temp in the current BB.
@@ -1457,36 +1518,39 @@ bool AddressSanitizer::runOnFunction(Function &F) {
       } else if (isa<MemIntrinsic>(Inst)) {
         // ok, take it.
       } else {
-        if (isa<AllocaInst>(Inst))
-          NumAllocas++;
+        if (isa<AllocaInst>(Inst)) NumAllocas++;
         CallSite CS(&Inst);
         if (CS) {
           // A call inside BB.
           TempsToInstrument.clear();
-          if (CS.doesNotReturn())
-            NoReturnCalls.push_back(CS.getInstruction());
+          if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction());
         }
         continue;
       }
       ToInstrument.push_back(&Inst);
       NumInsnsPerBB++;
-      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
-        break;
+      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
     }
   }
-  bool UseCalls = false;
-  if (ClInstrumentationWithCallsThreshold >= 0 &&
-      ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
-    UseCalls = true;
+  bool UseCalls =
+      CompileKernel ||
+      (ClInstrumentationWithCallsThreshold >= 0 &&
+       ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold);
+  const TargetLibraryInfo *TLI =
+      &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+  const DataLayout &DL = F.getParent()->getDataLayout();
+  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(),
+                                     /*RoundToAlign=*/true);
   // Instrument.
   int NumInstrumented = 0;
   for (auto Inst : ToInstrument) {
     if (ClDebugMin < 0 || ClDebugMax < 0 ||
         (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
-      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
-        instrumentMop(Inst, UseCalls);
+      if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
+        instrumentMop(ObjSizeVis, Inst, UseCalls,
+                      F.getParent()->getDataLayout());
       else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
     }
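The UseCalls computation above makes kernel builds always go through out-of-line callbacks, while userspace ASan only falls back to them when a function has many interesting accesses. A minimal sketch of that decision (7000 is the assumed default of -asan-instrumentation-with-call-threshold):

#include <cstddef>

// Sketch of the UseCalls decision above: KASan always uses callbacks;
// userspace ASan switches to them only past a per-function threshold.
bool useCalls(bool CompileKernel, int Threshold, size_t NumToInstrument) {
  return CompileKernel ||
         (Threshold >= 0 && NumToInstrument > (size_t)Threshold);
}

Inline shadow checks are faster but add code size; routing every kernel access through a callback keeps the instrumentation simple while KASan support is brought up.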
@@ -1500,7 +1564,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
   for (auto CI : NoReturnCalls) {
     IRBuilder<> IRB(CI);
-    IRB.CreateCall(AsanHandleNoReturnFunc);
+    IRB.CreateCall(AsanHandleNoReturnFunc, {});
   }
   for (auto Inst : PointerComparisonsOrSubtracts) {
@@ -1531,24 +1595,29 @@ void FunctionStackPoisoner::initializeCallbacks(Module &M) {
   IRBuilder<> IRB(*C);
   for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
     std::string Suffix = itostr(i);
-    AsanStackMallocFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
-        kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy, nullptr));
-    AsanStackFreeFunc[i] = checkInterfaceFunction(
+    AsanStackMallocFunc[i] = checkSanitizerInterfaceFunction(
+        M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
+                              IntptrTy, nullptr));
+    AsanStackFreeFunc[i] = checkSanitizerInterfaceFunction(
         M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                               IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
   }
-  AsanPoisonStackMemoryFunc = checkInterfaceFunction(
+  AsanPoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
       M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
                             IntptrTy, IntptrTy, nullptr));
-  AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(
+  AsanUnpoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
+  AsanAllocaPoisonFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
+  AsanAllocasUnpoisonFunc =
+      checkSanitizerInterfaceFunction(M.getOrInsertFunction(
+          kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
 }
-void
-FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
-                                      IRBuilder<> &IRB, Value *ShadowBase,
-                                      bool DoPoison) {
+void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
+                                           IRBuilder<> &IRB, Value *ShadowBase,
+                                           bool DoPoison) {
   size_t n = ShadowBytes.size();
   size_t i = 0;
   // We need to (un)poison n bytes of stack shadow. Poison as many as we can
@@ -1559,7 +1628,7 @@ FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
   for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
     uint64_t Val = 0;
     for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
-      if (ASan.DL->isLittleEndian())
+      if (F.getParent()->getDataLayout().isLittleEndian())
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
@@ -1578,9 +1647,8 @@ FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
 static int StackMallocSizeClass(uint64_t LocalStackSize) {
   assert(LocalStackSize <= kMaxStackMallocSize);
   uint64_t MaxSize = kMinStackMallocSize;
-  for (int i = 0; ; i++, MaxSize *= 2)
-    if (LocalStackSize <= MaxSize)
-      return i;
+  for (int i = 0;; i++, MaxSize *= 2)
+    if (LocalStackSize <= MaxSize) return i;
   llvm_unreachable("impossible LocalStackSize");
 }
@@ -1592,18 +1660,21 @@ static int StackMallocSizeClass(uint64_t LocalStackSize) {
 void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
     IRBuilder<> &IRB, Value *ShadowBase, int Size) {
   assert(!(Size % 8));
-  assert(kAsanStackAfterReturnMagic == 0xf5);
+
+  // kAsanStackAfterReturnMagic is 0xf5.
+  const uint64_t kAsanStackAfterReturnMagic64 = 0xf5f5f5f5f5f5f5f5ULL;
+
   for (int i = 0; i < Size; i += 8) {
     Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
-    IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
-                    IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
+    IRB.CreateStore(
+        ConstantInt::get(IRB.getInt64Ty(), kAsanStackAfterReturnMagic64),
+        IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
   }
 }
 static DebugLoc getFunctionEntryDebugLocation(Function &F) {
   for (const auto &Inst : F.getEntryBlock())
-    if (!isa<AllocaInst>(Inst))
-      return Inst.getDebugLoc();
+    if (!isa<AllocaInst>(Inst)) return Inst.getDebugLoc();
   return DebugLoc();
 }
@@ -1637,13 +1708,24 @@ Value *FunctionStackPoisoner::createAllocaForLayout(
   return IRB.CreatePointerCast(Alloca, IntptrTy);
 }
+void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
+  BasicBlock &FirstBB = *F.begin();
+  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
+  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
+  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
+  DynamicAllocaLayout->setAlignment(32);
+}
+
 void FunctionStackPoisoner::poisonStack() {
   assert(AllocaVec.size() > 0 || DynamicAllocaVec.size() > 0);
-  if (ClInstrumentAllocas)
+  if (ClInstrumentAllocas && DynamicAllocaVec.size() > 0) {
     // Handle dynamic allocas.
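The __asan_stack_malloc_0 .. __asan_stack_malloc_10 callbacks registered above are indexed by a size class that doubles from kMinStackMallocSize; the same mapping appears in StackMallocSizeClass further down. As a standalone sketch:

#include <cassert>
#include <cstdint>

// Size-class mapping behind the __asan_stack_malloc_N callbacks above:
// class i serves frames up to 64B << i, capped at 64K (class 10).
int stackMallocSizeClass(uint64_t LocalStackSize) {
  const uint64_t kMinStackMallocSize = 1 << 6;   // 64B
  const uint64_t kMaxStackMallocSize = 1 << 16;  // 64K
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;  // e.g. a 4096-byte frame -> 6
}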
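poisonRedZones, continued in the hunk above, batches shadow writes into wide stores, so the shadow bytes must be packed in memory order; that is exactly what the endianness branch does. A host-side sketch of the packing for the 64-bit case:

#include <cstddef>
#include <cstdint>

// Packs 8 shadow bytes into the value poisonRedZones stores with a single
// 64-bit write; byte j must land at shadow address base + j either way.
uint64_t packShadow8(const uint8_t Bytes[8], bool LittleEndian) {
  uint64_t Val = 0;
  for (size_t j = 0; j < 8; j++) {
    if (LittleEndian)
      Val |= (uint64_t)Bytes[j] << (8 * j);
    else
      Val = (Val << 8) | Bytes[j];
  }
  return Val;
}

The same idea drives SetShadowToStackAfterReturnInlined above: since every byte is the 0xf5 magic, the packed value is 0xf5f5f5f5f5f5f5f5 regardless of endianness, which is why no endianness check is needed there.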
-    for (auto &AllocaCall : DynamicAllocaVec)
-      handleDynamicAllocaCall(AllocaCall);
+    createDynamicAllocasInitStorage();
+    for (auto &AI : DynamicAllocaVec) handleDynamicAllocaCall(AI);
+
+    unpoisonDynamicAllocas();
+  }
   if (AllocaVec.size() == 0) return;
@@ -1657,9 +1739,9 @@ void FunctionStackPoisoner::poisonStack() {
   SmallVector<ASanStackVariableDescription, 16> SVD;
   SVD.reserve(AllocaVec.size());
   for (AllocaInst *AI : AllocaVec) {
-    ASanStackVariableDescription D = { AI->getName().data(),
-                                   getAllocaSizeInBytes(AI),
-                                   AI->getAlignment(), AI, 0};
+    ASanStackVariableDescription D = {AI->getName().data(),
+                                      ASan.getAllocaSizeInBytes(AI),
+                                      AI->getAlignment(), AI, 0};
     SVD.push_back(D);
   }
   // Minimal header size (left redzone) is 4 pointers,
@@ -1669,11 +1751,13 @@ void FunctionStackPoisoner::poisonStack() {
   ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
   DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
   uint64_t LocalStackSize = L.FrameSize;
-  bool DoStackMalloc =
-      ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
-  // Don't do dynamic alloca in presence of inline asm: too often it
-  // makes assumptions on which registers are available.
+  bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
+                       LocalStackSize <= kMaxStackMallocSize;
+  // Don't do dynamic alloca in presence of inline asm: too often it makes
+  // assumptions on which registers are available. Don't do stack malloc in the
+  // presence of inline asm on 32-bit platforms for the same reason.
   bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
+  DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
   Value *StaticAlloca =
       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
@@ -1739,7 +1823,7 @@ void FunctionStackPoisoner::poisonStack() {
     Value *NewAllocaPtr = IRB.CreateIntToPtr(
         IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
         AI->getType());
-    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
+    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true);
     AI->replaceAllUsesWith(NewAllocaPtr);
   }
@@ -1750,19 +1834,19 @@ void FunctionStackPoisoner::poisonStack() {
                   BasePlus0);
   // Write the frame description constant to redzone[1].
   Value *BasePlus1 = IRB.CreateIntToPtr(
-    IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
-    IntptrPtrTy);
+      IRB.CreateAdd(LocalStackBase,
+                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
+      IntptrPtrTy);
   GlobalVariable *StackDescriptionGlobal =
       createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
-                                   /*AllowMerging*/true);
-  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
-                                             IntptrTy);
+                                   /*AllowMerging*/ true);
+  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
   IRB.CreateStore(Description, BasePlus1);
   // Write the PC to redzone[2].
   Value *BasePlus2 = IRB.CreateIntToPtr(
-    IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
-                                                   2 * ASan.LongSize/8)),
-    IntptrPtrTy);
+      IRB.CreateAdd(LocalStackBase,
+                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
+      IntptrPtrTy);
   IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
   // Poison the stack redzones at the entry.
@@ -1807,8 +1891,9 @@ void FunctionStackPoisoner::poisonStack() {
           IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
     } else {
       // For larger frames call __asan_stack_free_*.
-      IRBPoison.CreateCall2(AsanStackFreeFunc[StackMallocIdx], FakeStack,
-                            ConstantInt::get(IntptrTy, LocalStackSize));
+      IRBPoison.CreateCall(
+          AsanStackFreeFunc[StackMallocIdx],
+          {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
     }
     IRBuilder<> IRBElse(ElseTerm);
@@ -1822,14 +1907,8 @@ void FunctionStackPoisoner::poisonStack() {
     }
   }
-  if (ClInstrumentAllocas)
-    // Unpoison dynamic allocas.
-    for (auto &AllocaCall : DynamicAllocaVec)
-      unpoisonDynamicAlloca(AllocaCall);
-
   // We are done. Remove the old unused alloca instructions.
-  for (auto AI : AllocaVec)
-    AI->eraseFromParent();
+  for (auto AI : AllocaVec) AI->eraseFromParent();
 }
 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
@@ -1837,9 +1916,9 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
   // For now just insert the call to ASan runtime.
   Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
   Value *SizeArg = ConstantInt::get(IntptrTy, Size);
-  IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
-                           : AsanUnpoisonStackMemoryFunc,
-                  AddrArg, SizeArg);
+  IRB.CreateCall(
+      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
+      {AddrArg, SizeArg});
 }
 // Handling llvm.lifetime intrinsics for a given %alloca:
@@ -1854,12 +1933,11 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
 AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
   if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
     // We're intested only in allocas we can handle.
-    return isInterestingAlloca(*AI) ? AI : nullptr;
+    return ASan.isInterestingAlloca(*AI) ? AI : nullptr;
   // See if we've already calculated (or started to calculate) alloca for a
   // given value.
   AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
-  if (I != AllocaForValue.end())
-    return I->second;
+  if (I != AllocaForValue.end()) return I->second;
   // Store 0 while we're calculating alloca for value V to avoid
   // infinite recursion if the value references itself.
   AllocaForValue[V] = nullptr;
@@ -1867,8 +1945,7 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
   if (CastInst *CI = dyn_cast<CastInst>(V))
     Res = findAllocaForValue(CI->getOperand(0));
   else if (PHINode *PN = dyn_cast<PHINode>(V)) {
-    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
-      Value *IncValue = PN->getIncomingValue(i);
+    for (Value *IncValue : PN->incoming_values()) {
       // Allow self-referencing phi-nodes.
       if (IncValue == PN) continue;
       AllocaInst *IncValueAI = findAllocaForValue(IncValue);
@@ -1878,82 +1955,29 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
       Res = IncValueAI;
     }
   }
-  if (Res)
-    AllocaForValue[V] = Res;
+  if (Res) AllocaForValue[V] = Res;
   return Res;
 }
-// Compute PartialRzMagic for dynamic alloca call. PartialRzMagic is
-// constructed from two separate 32-bit numbers: PartialRzMagic = Val1 | Val2.
-// (1) Val1 is resposible for forming base value for PartialRzMagic, containing
-//     only 00 for fully addressable and 0xcb for fully poisoned bytes for each
-//     8-byte chunk of user memory respectively.
-// (2) Val2 forms the value for marking first poisoned byte in shadow memory
-//     with appropriate value (0x01 - 0x07 or 0xcb if Padding % 8 == 0).
-//
-// Shift = Padding & ~7; // the number of bits we need to shift to access first
-//                       // chunk in shadow memory, containing nonzero bytes.
-// Example:
-// Padding = 21                       Padding = 16
-// Shadow:  |00|00|05|cb|             Shadow:  |00|00|cb|cb|
-//                ^                                  ^
-//                |                                  |
-// Shift = 21 & ~7 = 16               Shift = 16 & ~7 = 16
-//
-// Val1 = 0xcbcbcbcb << Shift;
-// PartialBits = Padding ? Padding & 7 : 0xcb;
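The three stores above populate the fake frame header that the runtime and debugger read back: the current-frame magic, a pointer to the frame description string, and the owning function's PC. Viewed as a struct (a 64-bit build assumed; the layout is implied by the offsets 0, LongSize/8 and 2*LongSize/8 in the hunk):

#include <cstdint>

// The redzone[0..2] header written at the frame base above.
struct AsanFrameHeader {
  uint64_t Magic;           // kCurrentStackFrameMagic (0x41B58AB3) while live
  const char *Description;  // frame layout string with variable names/offsets
  uint64_t PC;              // address of the function owning the frame
};

Keeping the description and PC inside the frame lets error reports name the exact stack variable without any extra metadata lookup.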
-// Val2 = PartialBits << Shift;
-// Result = Val1 | Val2;
-Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize,
-                                                    IRBuilder<> &IRB) {
-  PartialSize = IRB.CreateIntCast(PartialSize, IRB.getInt32Ty(), false);
-  Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7));
-  unsigned Val1Int = kAsanAllocaPartialVal1;
-  unsigned Val2Int = kAsanAllocaPartialVal2;
-  if (!ASan.DL->isLittleEndian()) {
-    Val1Int = sys::getSwappedBytes(Val1Int);
-    Val2Int = sys::getSwappedBytes(Val2Int);
-  }
-  Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift);
-  Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7));
-  // For BigEndian get 0x000000YZ -> 0xYZ000000.
-  if (ASan.DL->isBigEndian())
-    PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24));
-  Value *Val2 = IRB.getInt32(Val2Int);
-  Value *Cond =
-      IRB.CreateICmpNE(PartialBits, Constant::getNullValue(IRB.getInt32Ty()));
-  Val2 = IRB.CreateSelect(Cond, shiftAllocaMagic(PartialBits, IRB, Shift),
-                          shiftAllocaMagic(Val2, IRB, Shift));
-  return IRB.CreateOr(Val1, Val2);
-}
-
-void FunctionStackPoisoner::handleDynamicAllocaCall(
-    DynamicAllocaCall &AllocaCall) {
-  AllocaInst *AI = AllocaCall.AI;
-  if (!doesDominateAllExits(AI)) {
-    // We do not yet handle complex allocas
-    AllocaCall.Poison = false;
-    return;
-  }
-
+void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
   IRBuilder<> IRB(AI);
-  PointerType *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
   const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
   const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
   Value *Zero = Constant::getNullValue(IntptrTy);
   Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
   Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
-  Value *NotAllocaRzMask = ConstantInt::get(IntptrTy, ~AllocaRedzoneMask);
   // Since we need to extend alloca with additional memory to locate
   // redzones, and OldSize is number of allocated blocks with
   // ElementSize size, get allocated memory size in bytes by
   // OldSize * ElementSize.
-  unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType());
-  Value *OldSize = IRB.CreateMul(AI->getArraySize(),
-                                 ConstantInt::get(IntptrTy, ElementSize));
+  const unsigned ElementSize =
+      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
+  Value *OldSize =
+      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
+                    ConstantInt::get(IntptrTy, ElementSize));
   // PartialSize = OldSize % 32
   Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
@@ -1981,41 +2005,35 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(
   Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                     ConstantInt::get(IntptrTy, Align));
-  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
-
-  // LeftRzAddress = NewAddress - kAllocaRzSize
-  Value *LeftRzAddress = IRB.CreateSub(NewAddress, AllocaRzSize);
-
-  // Poisoning left redzone.
-  AllocaCall.LeftRzAddr = ASan.memToShadow(LeftRzAddress, IRB);
-  IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaLeftMagic),
-                  IRB.CreateIntToPtr(AllocaCall.LeftRzAddr, Int32PtrTy));
-
-  // PartialRzAligned = PartialRzAddr & ~AllocaRzMask
-  Value *PartialRzAddr = IRB.CreateAdd(NewAddress, OldSize);
-  Value *PartialRzAligned = IRB.CreateAnd(PartialRzAddr, NotAllocaRzMask);
+  // Insert __asan_alloca_poison call for new created alloca.
+  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
-  // Poisoning partial redzone.
-  Value *PartialRzMagic = computePartialRzMagic(PartialSize, IRB);
-  Value *PartialRzShadowAddr = ASan.memToShadow(PartialRzAligned, IRB);
-  IRB.CreateStore(PartialRzMagic,
-                  IRB.CreateIntToPtr(PartialRzShadowAddr, Int32PtrTy));
+  // Store the last alloca's address to DynamicAllocaLayout. We'll need this
+  // for unpoisoning stuff.
+  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
-  // RightRzAddress
-  //   = (PartialRzAddr + AllocaRzMask) & ~AllocaRzMask
-  Value *RightRzAddress = IRB.CreateAnd(
-      IRB.CreateAdd(PartialRzAddr, AllocaRzMask), NotAllocaRzMask);
-
-  // Poisoning right redzone.
-  AllocaCall.RightRzAddr = ASan.memToShadow(RightRzAddress, IRB);
-  IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaRightMagic),
-                  IRB.CreateIntToPtr(AllocaCall.RightRzAddr, Int32PtrTy));
+  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
-  // Replace all uses of AddessReturnedByAlloca with NewAddress.
+  // Replace all uses of AddessReturnedByAlloca with NewAddressPtr.
   AI->replaceAllUsesWith(NewAddressPtr);
-  // We are done. Erase old alloca and store left, partial and right redzones
-  // shadow addresses for future unpoisoning.
+  // We are done. Erase old alloca from parent.
   AI->eraseFromParent();
-  NumInstrumentedDynamicAllocas++;
+}
+
+// isSafeAccess returns true if Addr is always inbounds with respect to its
+// base object. For example, it is a field access or an array access with
+// constant inbounds index.
+bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
+                                    Value *Addr, uint64_t TypeSize) const {
+  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
+  if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
+  uint64_t Size = SizeOffset.first.getZExtValue();
+  int64_t Offset = SizeOffset.second.getSExtValue();
+  // Three checks are required to ensure safety:
+  // . Offset >= 0  (since the offset is given from the base ptr)
+  // . Size >= Offset  (unsigned)
+  // . Size - Offset >= NeededSize  (unsigned)
+  return Offset >= 0 && Size >= uint64_t(Offset) &&
+         Size - uint64_t(Offset) >= TypeSize / 8;
+}
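The three comparisons in isSafeAccess compose into one overflow-safe bounds test. A self-contained sketch of the same arithmetic, with (Size, Offset) standing in for ObjectSizeOffsetVisitor's result and NeededSize for TypeSize / 8:

#include <cstdint>

// An access of NeededSize bytes at base+Offset is statically safe iff:
bool accessProvablyInBounds(uint64_t Size, int64_t Offset,
                            uint64_t NeededSize) {
  return Offset >= 0 &&                          // offset measured from base
         Size >= (uint64_t)Offset &&             // offset inside the object
         Size - (uint64_t)Offset >= NeededSize;  // room left for the access
}

For example, a 4-byte load at offset 12 of a 16-byte object passes (16 - 12 >= 4); at offset 13 it fails, so the instrumentation must keep the shadow check for that access.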