path: root/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
author     dim <dim@FreeBSD.org>    2014-03-21 17:53:59 +0000
committer  dim <dim@FreeBSD.org>    2014-03-21 17:53:59 +0000
commit     9cedb8bb69b89b0f0c529937247a6a80cabdbaec (patch)
tree       c978f0e9ec1ab92dc8123783f30b08a7fd1e2a39 /contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
parent     03fdc2934eb61c44c049a02b02aa974cfdd8a0eb (diff)
MFC 261991:
Upgrade our copy of llvm/clang to 3.4 release. This version supports all of
the features in the current working draft of the upcoming C++ standard,
provisionally named C++1y.

The code generator's performance is greatly increased, and the loop
auto-vectorizer is now enabled at -Os and -O2 in addition to -O3. The PowerPC
backend has made several major improvements to code generation quality and
compile time, and the X86, SPARC, ARM32, Aarch64 and SystemZ backends have
all seen major feature work.

Release notes for llvm and clang can be found here:
<http://llvm.org/releases/3.4/docs/ReleaseNotes.html>
<http://llvm.org/releases/3.4/tools/clang/docs/ReleaseNotes.html>

MFC 262121 (by emaste):

Update lldb for clang/llvm 3.4 import

This commit largely restores the lldb source to the upstream r196259 snapshot
with the addition of threaded inferior support and a few bug fixes.

Specific upstream lldb revisions restored include:

  SVN     git
  181387  779e6ac
  181703  7bef4e2
  182099  b31044e
  182650  f2dcf35
  182683  0d91b80
  183862  15c1774
  183929  99447a6
  184177  0b2934b
  184948  4dc3761
  184954  007e7bc
  186990  eebd175

Sponsored by:   DARPA, AFRL

MFC 262186 (by emaste):

Fix mismerge in r262121

A break statement was lost in the merge. The error had no functional impact,
but restore it to reduce the diff against upstream.

MFC 262303:

Pull in r197521 from upstream clang trunk (by rdivacky):

  Use the integrated assembler by default on FreeBSD/ppc and ppc64.

Requested by:   jhibbits

MFC 262611:

Pull in r196874 from upstream llvm trunk:

  Fix a crash that occurs when PWD is invalid.

  MCJIT needs to be able to run in hostile environments, even when PWD is
  invalid. There's no need to crash MCJIT in this case. The obvious fix is to
  simply leave MCContext's CompilationDir empty when PWD can't be determined.
  This way, MCJIT clients, and other clients that link with LLVM, don't need
  a valid working directory.

  If we do want to guarantee a valid CompilationDir, that should be done only
  for clients of getCompilationDir(). This is as simple as checking for an
  empty string.

  The only current use of getCompilationDir is EmitGenDwarfInfo, which won't
  conceivably run with an invalid working dir. However, in the purely
  hypothetical and untestable case that this happens, the AT_comp_dir will be
  omitted from the compilation_unit DIE.

This should help fix assertions occurring with ports-mgmt/tinderbox, when it
is using jails, and sometimes invalidates clang's current working directory.

Reported by:    decke

MFC 262809:

Pull in r203007 from upstream clang trunk:

  Don't produce an alias between destructors with different calling
  conventions. Fixes pr19007.

(Please note that this is an LLVM PR identifier, not a FreeBSD one.)

This should fix Firefox and/or libxul crashes (due to problems with
regparm/stdcall calling conventions) on i386.

Reported by:    multiple users on freebsd-current
PR:             bin/187103

MFC 263048:

Repair recognition of "CC" as an alias for the C++ compiler, since it was
silently broken by upstream for a Windows-specific use-case. Apparently some
versions of CMake still rely on this archaic feature...

Reported by:    rakuco

MFC 263049:

Garbage collect the old way of adding the libstdc++ include directories in
clang's InitHeaderSearch.cpp. This has been superseded by David Chisnall's
commit in r255321. Moreover, if libc++ is used, the libstdc++ include
directories should not be in the search path at all. These directories are
now only used if you pass -stdlib=libstdc++.
Diffstat (limited to 'contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp')
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp  537
1 file changed, 451 insertions(+), 86 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 4e75904..d547adc 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -66,6 +66,31 @@
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwritting origin of the 4-byte region on
/// a short (i.e. 1 byte) clean store, and it is also good for performance.
+///
+/// Atomic handling.
+///
+/// Ideally, every atomic store of application value should update the
+/// corresponding shadow location in an atomic way. Unfortunately, atomic store
+/// of two disjoint locations can not be done without severe slowdown.
+///
+/// Therefore, we implement an approximation that may err on the safe side.
+/// In this implementation, every atomically accessed location in the program
+/// may only change from (partially) uninitialized to fully initialized, but
+/// not the other way around. We load the shadow _after_ the application load,
+/// and we store the shadow _before_ the app store. Also, we always store clean
+/// shadow (if the application store is atomic). This way, if the store-load
+/// pair constitutes a happens-before arc, shadow store and load are correctly
+/// ordered such that the load will get either the value that was stored, or
+/// some later value (which is always clean).
+///
+/// This does not work very well with Compare-And-Swap (CAS) and
+/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
+/// must store the new shadow before the app operation, and load the shadow
+/// after the app operation. Computers don't work this way. Current
+/// implementation ignores the load aspect of CAS/RMW, always returning a clean
+/// value. It implements the store part as a simple atomic store by storing a
+/// clean shadow.
+
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "msan"
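The atomic-handling scheme described in the comment block added above can be
pictured at the source level. The sketch below is purely illustrative C++ (the
pass rewrites LLVM IR, not C++); the comments describe the shadow operations
the pass emits around the application accesses and the ordering upgrades
performed by addReleaseOrdering/addAcquireOrdering later in this diff.

    #include <atomic>

    std::atomic<int> slot;   // an atomically accessed application location

    void writer(int v) {
      // The pass stores a *clean* shadow for 'slot' before this store and
      // upgrades the ordering to at least release (relaxed -> release), so a
      // reader can never pair the new value with a stale, poisoned shadow.
      slot.store(v, std::memory_order_relaxed);
    }

    int reader() {
      // The pass loads the shadow of 'slot' after this load and upgrades the
      // ordering to at least acquire (relaxed -> acquire); on a happens-before
      // arc the observed shadow is the clean one stored by the writer.
      return slot.load(std::memory_order_relaxed);
    }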
@@ -74,6 +99,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
@@ -90,9 +116,9 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/BlackList.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/Transforms/Utils/SpecialCaseList.h"
using namespace llvm;
@@ -156,6 +182,18 @@ static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
cl::desc("File containing the list of functions where MemorySanitizer "
"should not report bugs"), cl::Hidden);
+// Experimental. Wraps all indirect calls in the instrumented code with
+// a call to the given function. This is needed to assist the dynamic
+// helper tool (MSanDR) to regain control on transition between instrumented and
+// non-instrumented code.
+static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
+ cl::desc("Wrap indirect calls with a given function"),
+ cl::Hidden);
+
+static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
+ cl::desc("Do not wrap indirect calls with target in the same module"),
+ cl::Hidden, cl::init(true));
+
namespace {
/// \brief An instrumentation pass implementing detection of uninitialized
@@ -167,12 +205,12 @@ class MemorySanitizer : public FunctionPass {
public:
MemorySanitizer(bool TrackOrigins = false,
StringRef BlacklistFile = StringRef())
- : FunctionPass(ID),
- TrackOrigins(TrackOrigins || ClTrackOrigins),
- TD(0),
- WarningFn(0),
- BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
- : BlacklistFile) { }
+ : FunctionPass(ID),
+ TrackOrigins(TrackOrigins || ClTrackOrigins),
+ TD(0),
+ WarningFn(0),
+ BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
+ WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
const char *getPassName() const { return "MemorySanitizer"; }
bool runOnFunction(Function &F);
bool doInitialization(Module &M);
@@ -206,13 +244,16 @@ class MemorySanitizer : public FunctionPass {
/// function.
GlobalVariable *OriginTLS;
+ GlobalVariable *MsandrModuleStart;
+ GlobalVariable *MsandrModuleEnd;
+
/// \brief The run-time callback to print a warning.
Value *WarningFn;
/// \brief Run-time helper that copies origin info for a memory range.
Value *MsanCopyOriginFn;
/// \brief Run-time helper that generates a new origin value for a stack
/// allocation.
- Value *MsanSetAllocaOriginFn;
+ Value *MsanSetAllocaOrigin4Fn;
/// \brief Run-time helper that poisons stack on function entry.
Value *MsanPoisonStackFn;
/// \brief MSan runtime replacements for memmove, memcpy and memset.
@@ -228,13 +269,19 @@ class MemorySanitizer : public FunctionPass {
MDNode *ColdCallWeights;
/// \brief Branch weights for origin store.
MDNode *OriginStoreWeights;
- /// \bried Path to blacklist file.
+ /// \brief Path to blacklist file.
SmallString<64> BlacklistFile;
/// \brief The blacklist.
- OwningPtr<BlackList> BL;
+ OwningPtr<SpecialCaseList> BL;
/// \brief An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
+ bool WrapIndirectCalls;
+ /// \brief Run-time wrapper for indirect calls.
+ Value *IndirectCallWrapperFn;
+ // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
+ Type *AnyFunctionPtrTy;
+
friend struct MemorySanitizerVisitor;
friend struct VarArgAMD64Helper;
};
@@ -280,9 +327,9 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
MsanCopyOriginFn = M.getOrInsertFunction(
"__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IntptrTy, NULL);
- MsanSetAllocaOriginFn = M.getOrInsertFunction(
- "__msan_set_alloca_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
- IRB.getInt8PtrTy(), NULL);
+ MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
+ "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
+ IRB.getInt8PtrTy(), IntptrTy, NULL);
MsanPoisonStackFn = M.getOrInsertFunction(
"__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
MemmoveFn = M.getOrInsertFunction(
@@ -299,35 +346,53 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
RetvalTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 8), false,
GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
- GlobalVariable::GeneralDynamicTLSModel);
+ GlobalVariable::InitialExecTLSModel);
RetvalOriginTLS = new GlobalVariable(
M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
- "__msan_retval_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);
+ "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
ParamTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
- GlobalVariable::GeneralDynamicTLSModel);
+ GlobalVariable::InitialExecTLSModel);
ParamOriginTLS = new GlobalVariable(
M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
- 0, "__msan_param_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);
+ 0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
VAArgTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
- GlobalVariable::GeneralDynamicTLSModel);
+ GlobalVariable::InitialExecTLSModel);
VAArgOverflowSizeTLS = new GlobalVariable(
M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
"__msan_va_arg_overflow_size_tls", 0,
- GlobalVariable::GeneralDynamicTLSModel);
+ GlobalVariable::InitialExecTLSModel);
OriginTLS = new GlobalVariable(
M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
- "__msan_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);
+ "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
// We insert an empty inline asm after __msan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
StringRef(""), StringRef(""),
/*hasSideEffects=*/true);
+
+ if (WrapIndirectCalls) {
+ AnyFunctionPtrTy =
+ PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
+ IndirectCallWrapperFn = M.getOrInsertFunction(
+ ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
+ }
+
+ if (ClWrapIndirectCallsFast) {
+ MsandrModuleStart = new GlobalVariable(
+ M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
+ 0, "__executable_start");
+ MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
+ MsandrModuleEnd = new GlobalVariable(
+ M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
+ 0, "_end");
+ MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
+ }
}
/// \brief Module-level initialization.
@@ -337,7 +402,7 @@ bool MemorySanitizer::doInitialization(Module &M) {
TD = getAnalysisIfAvailable<DataLayout>();
if (!TD)
return false;
- BL.reset(new BlackList(BlacklistFile));
+ BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
C = &(M.getContext());
unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
switch (PtrSize) {
@@ -365,11 +430,13 @@ bool MemorySanitizer::doInitialization(Module &M) {
appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
"__msan_init", IRB.getVoidTy(), NULL)), 0);
- new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
- IRB.getInt32(TrackOrigins), "__msan_track_origins");
+ if (TrackOrigins)
+ new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
+ IRB.getInt32(TrackOrigins), "__msan_track_origins");
- new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
- IRB.getInt32(ClKeepGoing), "__msan_keep_going");
+ if (ClKeepGoing)
+ new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
+ IRB.getInt32(ClKeepGoing), "__msan_keep_going");
return true;
}
@@ -420,27 +487,40 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MemorySanitizer &MS;
SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
ValueMap<Value*, Value*> ShadowMap, OriginMap;
+ OwningPtr<VarArgHelper> VAHelper;
+
+ // The following flags disable parts of MSan instrumentation based on
+ // blacklist contents and command-line options.
bool InsertChecks;
bool LoadShadow;
- OwningPtr<VarArgHelper> VAHelper;
+ bool PoisonStack;
+ bool PoisonUndef;
+ bool CheckReturnValue;
struct ShadowOriginAndInsertPoint {
- Instruction *Shadow;
- Instruction *Origin;
+ Value *Shadow;
+ Value *Origin;
Instruction *OrigIns;
- ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
+ ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
: Shadow(S), Origin(O), OrigIns(I) { }
ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
};
SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
SmallVector<Instruction*, 16> StoreList;
+ SmallVector<CallSite, 16> IndirectCallList;
MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
: F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
- LoadShadow = InsertChecks =
- !MS.BL->isIn(F) &&
- F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
- Attribute::SanitizeMemory);
+ bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
+ AttributeSet::FunctionIndex,
+ Attribute::SanitizeMemory);
+ InsertChecks = SanitizeFunction;
+ LoadShadow = SanitizeFunction;
+ PoisonStack = SanitizeFunction && ClPoisonStack;
+ PoisonUndef = SanitizeFunction && ClPoisonUndef;
+ // FIXME: Consider using SpecialCaseList to specify a list of functions that
+ // must always return fully initialized values. For now, we hardcode "main".
+ CheckReturnValue = SanitizeFunction && (F.getName() == "main");
DEBUG(if (!InsertChecks)
dbgs() << "MemorySanitizer is not inserting checks into '"
@@ -454,7 +534,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *Val = I.getValueOperand();
Value *Addr = I.getPointerOperand();
- Value *Shadow = getShadow(Val);
+ Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
StoreInst *NewSI =
@@ -463,7 +543,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
(void)NewSI;
if (ClCheckAccessAddress)
- insertCheck(Addr, &I);
+ insertShadowCheck(Addr, &I);
+
+ if (I.isAtomic())
+ I.setOrdering(addReleaseOrdering(I.getOrdering()));
if (MS.TrackOrigins) {
unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
@@ -473,11 +556,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
} else {
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
- Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
// TODO(eugenis): handle non-zero constant shadow by inserting an
// unconditional check (can not simply fail compilation as this could
// be in the dead code).
- if (Cst)
+ if (isa<Constant>(ConvertedShadow))
continue;
Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
@@ -495,12 +577,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void materializeChecks() {
for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
- Instruction *Shadow = InstrumentationList[i].Shadow;
+ Value *Shadow = InstrumentationList[i].Shadow;
Instruction *OrigIns = InstrumentationList[i].OrigIns;
IRBuilder<> IRB(OrigIns);
DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
+ // See the comment in materializeStores().
+ if (isa<Constant>(ConvertedShadow))
+ continue;
Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
getCleanShadow(ConvertedShadow), "_mscmp");
Instruction *CheckTerm =
@@ -510,7 +595,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.SetInsertPoint(CheckTerm);
if (MS.TrackOrigins) {
- Instruction *Origin = InstrumentationList[i].Origin;
+ Value *Origin = InstrumentationList[i].Origin;
IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
MS.OriginTLS);
}
@@ -522,6 +607,48 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << "DONE:\n" << F);
}
+ void materializeIndirectCalls() {
+ for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
+ CallSite CS = IndirectCallList[i];
+ Instruction *I = CS.getInstruction();
+ BasicBlock *B = I->getParent();
+ IRBuilder<> IRB(I);
+ Value *Fn0 = CS.getCalledValue();
+ Value *Fn = IRB.CreateBitCast(Fn0, MS.AnyFunctionPtrTy);
+
+ if (ClWrapIndirectCallsFast) {
+ // Check that call target is inside this module limits.
+ Value *Start =
+ IRB.CreateBitCast(MS.MsandrModuleStart, MS.AnyFunctionPtrTy);
+ Value *End = IRB.CreateBitCast(MS.MsandrModuleEnd, MS.AnyFunctionPtrTy);
+
+ Value *NotInThisModule = IRB.CreateOr(IRB.CreateICmpULT(Fn, Start),
+ IRB.CreateICmpUGE(Fn, End));
+
+ PHINode *NewFnPhi =
+ IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");
+
+ Instruction *CheckTerm = SplitBlockAndInsertIfThen(
+ cast<Instruction>(NotInThisModule),
+ /* Unreachable */ false, MS.ColdCallWeights);
+
+ IRB.SetInsertPoint(CheckTerm);
+ // Slow path: call wrapper function to possibly transform the call
+ // target.
+ Value *NewFn = IRB.CreateBitCast(
+ IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
+
+ NewFnPhi->addIncoming(Fn0, B);
+ NewFnPhi->addIncoming(NewFn, dyn_cast<Instruction>(NewFn)->getParent());
+ CS.setCalledFunction(NewFnPhi);
+ } else {
+ Value *NewFn = IRB.CreateBitCast(
+ IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
+ CS.setCalledFunction(NewFn);
+ }
+ }
+ }
+
/// \brief Add MemorySanitizer instrumentation to a function.
bool runOnFunction() {
MS.initializeCallbacks(*F.getParent());
@@ -564,6 +691,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Insert shadow value checks.
materializeChecks();
+ // Wrap indirect calls.
+ materializeIndirectCalls();
+
return true;
}
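The fast path emitted by materializeIndirectCalls() above is roughly
equivalent to the following C++ sketch. This is illustrative only:
__executable_start and _end are the linker-provided, hidden module-bounds
symbols referenced by the pass, and 'wrapper' stands for whatever function is
named by -msan-wrap-indirect-calls (its void(*)() -> void(*)() shape matches
IndirectCallWrapperFn).

    #include <cstdint>

    extern "C" char __executable_start, _end;  // module bounds (linker symbols)
    using fn_t = void (*)();
    fn_t wrapper(fn_t target);                 // stand-in for IndirectCallWrapperFn

    void call_indirect(fn_t target) {
      uintptr_t fn = reinterpret_cast<uintptr_t>(target);
      uintptr_t lo = reinterpret_cast<uintptr_t>(&__executable_start);
      uintptr_t hi = reinterpret_cast<uintptr_t>(&_end);
      if (fn < lo || fn >= hi)     // slow path: target lies outside this module,
        target = wrapper(target);  // so let the wrapper (e.g. MSanDR) redirect it
      target();                    // in-module targets are called directly
    }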
@@ -741,7 +871,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return Shadow;
}
if (UndefValue *U = dyn_cast<UndefValue>(V)) {
- Value *AllOnes = ClPoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
+ Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
(void)U;
return AllOnes;
@@ -768,14 +898,21 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (AI->hasByValAttr()) {
// ByVal pointer itself has clean shadow. We copy the actual
// argument shadow to the underlying memory.
+ // Figure out maximal valid memcpy alignment.
+ unsigned ArgAlign = AI->getParamAlignment();
+ if (ArgAlign == 0) {
+ Type *EltType = A->getType()->getPointerElementType();
+ ArgAlign = MS.TD->getABITypeAlignment(EltType);
+ }
+ unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(
- getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
- Base, Size, AI->getParamAlignment());
+ getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
+ CopyAlign);
DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
(void)Cpy;
*ShadowPtr = getCleanShadow(V);
} else {
- *ShadowPtr = EntryIRB.CreateLoad(Base);
+ *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
}
DEBUG(dbgs() << " ARG: " << *AI << " ==> " <<
**ShadowPtr << "\n");
@@ -784,7 +921,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
}
}
- ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
+ ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
}
assert(*ShadowPtr && "Could not find shadow for an argument");
return *ShadowPtr;
@@ -820,20 +957,63 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Remember the place where a shadow check should be inserted.
///
/// This location will be later instrumented with a check that will print a
- /// UMR warning in runtime if the value is not fully defined.
- void insertCheck(Value *Val, Instruction *OrigIns) {
- assert(Val);
+ /// UMR warning in runtime if the shadow value is not 0.
+ void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
+ assert(Shadow);
if (!InsertChecks) return;
- Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
- if (!Shadow) return;
#ifndef NDEBUG
Type *ShadowTy = Shadow->getType();
assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
"Can only insert checks for integer and vector shadow types");
#endif
- Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
InstrumentationList.push_back(
- ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
+ ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
+ }
+
+ /// \brief Remember the place where a shadow check should be inserted.
+ ///
+ /// This location will be later instrumented with a check that will print a
+ /// UMR warning in runtime if the value is not fully defined.
+ void insertShadowCheck(Value *Val, Instruction *OrigIns) {
+ assert(Val);
+ Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
+ if (!Shadow) return;
+ Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
+ insertShadowCheck(Shadow, Origin, OrigIns);
+ }
+
+ AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
+ switch (a) {
+ case NotAtomic:
+ return NotAtomic;
+ case Unordered:
+ case Monotonic:
+ case Release:
+ return Release;
+ case Acquire:
+ case AcquireRelease:
+ return AcquireRelease;
+ case SequentiallyConsistent:
+ return SequentiallyConsistent;
+ }
+ llvm_unreachable("Unknown ordering");
+ }
+
+ AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
+ switch (a) {
+ case NotAtomic:
+ return NotAtomic;
+ case Unordered:
+ case Monotonic:
+ case Acquire:
+ return Acquire;
+ case Release:
+ case AcquireRelease:
+ return AcquireRelease;
+ case SequentiallyConsistent:
+ return SequentiallyConsistent;
+ }
+ llvm_unreachable("Unknown ordering");
}
// ------------------- Visitors.
@@ -844,7 +1024,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Optionally, checks that the load address is fully defined.
void visitLoadInst(LoadInst &I) {
assert(I.getType()->isSized() && "Load type must have size");
- IRBuilder<> IRB(&I);
+ IRBuilder<> IRB(I.getNextNode());
Type *ShadowTy = getShadowTy(&I);
Value *Addr = I.getPointerOperand();
if (LoadShadow) {
@@ -856,7 +1036,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
if (ClCheckAccessAddress)
- insertCheck(I.getPointerOperand(), &I);
+ insertShadowCheck(I.getPointerOperand(), &I);
+
+ if (I.isAtomic())
+ I.setOrdering(addAcquireOrdering(I.getOrdering()));
if (MS.TrackOrigins) {
if (LoadShadow) {
@@ -877,9 +1060,40 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
StoreList.push_back(&I);
}
+ void handleCASOrRMW(Instruction &I) {
+ assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
+
+ IRBuilder<> IRB(&I);
+ Value *Addr = I.getOperand(0);
+ Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);
+
+ if (ClCheckAccessAddress)
+ insertShadowCheck(Addr, &I);
+
+ // Only test the conditional argument of cmpxchg instruction.
+ // The other argument can potentially be uninitialized, but we can not
+ // detect this situation reliably without possible false positives.
+ if (isa<AtomicCmpXchgInst>(I))
+ insertShadowCheck(I.getOperand(1), &I);
+
+ IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
+
+ setShadow(&I, getCleanShadow(&I));
+ }
+
+ void visitAtomicRMWInst(AtomicRMWInst &I) {
+ handleCASOrRMW(I);
+ I.setOrdering(addReleaseOrdering(I.getOrdering()));
+ }
+
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+ handleCASOrRMW(I);
+ I.setOrdering(addReleaseOrdering(I.getOrdering()));
+ }
+
// Vector manipulation.
void visitExtractElementInst(ExtractElementInst &I) {
- insertCheck(I.getOperand(1), &I);
+ insertShadowCheck(I.getOperand(1), &I);
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
"_msprop"));
@@ -887,7 +1101,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void visitInsertElementInst(InsertElementInst &I) {
- insertCheck(I.getOperand(2), &I);
+ insertShadowCheck(I.getOperand(2), &I);
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
I.getOperand(2), "_msprop"));
@@ -895,7 +1109,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void visitShuffleVectorInst(ShuffleVectorInst &I) {
- insertCheck(I.getOperand(2), &I);
+ insertShadowCheck(I.getOperand(2), &I);
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
I.getOperand(2), "_msprop"));
@@ -1094,18 +1308,19 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Cast between two shadow types, extending or truncating as
/// necessary.
- Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
+ Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
+ bool Signed = false) {
Type *srcTy = V->getType();
if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
- return IRB.CreateIntCast(V, dstTy, false);
+ return IRB.CreateIntCast(V, dstTy, Signed);
if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
- return IRB.CreateIntCast(V, dstTy, false);
+ return IRB.CreateIntCast(V, dstTy, Signed);
size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
Value *V2 =
- IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
+ IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
return IRB.CreateBitCast(V2, dstTy);
// TODO: handle struct types.
}
@@ -1130,7 +1345,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void handleDiv(Instruction &I) {
IRBuilder<> IRB(&I);
// Strict on the second argument.
- insertCheck(I.getOperand(1), &I);
+ insertShadowCheck(I.getOperand(1), &I);
setShadow(&I, getShadow(&I, 0));
setOrigin(&I, getOrigin(&I, 0));
}
@@ -1413,7 +1628,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
if (ClCheckAccessAddress)
- insertCheck(Addr, &I);
+ insertShadowCheck(Addr, &I);
// FIXME: use ClStoreCleanOrigin
// FIXME: factor out common code from materializeStores
@@ -1440,9 +1655,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setShadow(&I, getCleanShadow(&I));
}
-
if (ClCheckAccessAddress)
- insertCheck(Addr, &I);
+ insertShadowCheck(Addr, &I);
if (MS.TrackOrigins) {
if (LoadShadow)
@@ -1539,11 +1753,119 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(&I, getOrigin(Op));
}
+ // \brief Instrument vector convert instrinsic.
+ //
+ // This function instruments intrinsics like cvtsi2ss:
+ // %Out = int_xxx_cvtyyy(%ConvertOp)
+ // or
+ // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
+ // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
+ // number \p Out elements, and (if has 2 arguments) copies the rest of the
+ // elements from \p CopyOp.
+ // In most cases conversion involves floating-point value which may trigger a
+ // hardware exception when not fully initialized. For this reason we require
+ // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
+ // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
+ // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
+ // return a fully initialized value.
+ void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
+ IRBuilder<> IRB(&I);
+ Value *CopyOp, *ConvertOp;
+
+ switch (I.getNumArgOperands()) {
+ case 2:
+ CopyOp = I.getArgOperand(0);
+ ConvertOp = I.getArgOperand(1);
+ break;
+ case 1:
+ ConvertOp = I.getArgOperand(0);
+ CopyOp = NULL;
+ break;
+ default:
+ llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
+ }
+
+ // The first *NumUsedElements* elements of ConvertOp are converted to the
+ // same number of output elements. The rest of the output is copied from
+ // CopyOp, or (if not available) filled with zeroes.
+ // Combine shadow for elements of ConvertOp that are used in this operation,
+ // and insert a check.
+ // FIXME: consider propagating shadow of ConvertOp, at least in the case of
+ // int->any conversion.
+ Value *ConvertShadow = getShadow(ConvertOp);
+ Value *AggShadow = 0;
+ if (ConvertOp->getType()->isVectorTy()) {
+ AggShadow = IRB.CreateExtractElement(
+ ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
+ for (int i = 1; i < NumUsedElements; ++i) {
+ Value *MoreShadow = IRB.CreateExtractElement(
+ ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
+ AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
+ }
+ } else {
+ AggShadow = ConvertShadow;
+ }
+ assert(AggShadow->getType()->isIntegerTy());
+ insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
+
+ // Build result shadow by zero-filling parts of CopyOp shadow that come from
+ // ConvertOp.
+ if (CopyOp) {
+ assert(CopyOp->getType() == I.getType());
+ assert(CopyOp->getType()->isVectorTy());
+ Value *ResultShadow = getShadow(CopyOp);
+ Type *EltTy = ResultShadow->getType()->getVectorElementType();
+ for (int i = 0; i < NumUsedElements; ++i) {
+ ResultShadow = IRB.CreateInsertElement(
+ ResultShadow, ConstantInt::getNullValue(EltTy),
+ ConstantInt::get(IRB.getInt32Ty(), i));
+ }
+ setShadow(&I, ResultShadow);
+ setOrigin(&I, getOrigin(CopyOp));
+ } else {
+ setShadow(&I, getCleanShadow(&I));
+ }
+ }
+
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case llvm::Intrinsic::bswap:
handleBswap(I);
break;
+ case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
+ case llvm::Intrinsic::x86_avx512_cvtsd2usi:
+ case llvm::Intrinsic::x86_avx512_cvtss2usi64:
+ case llvm::Intrinsic::x86_avx512_cvtss2usi:
+ case llvm::Intrinsic::x86_avx512_cvttss2usi64:
+ case llvm::Intrinsic::x86_avx512_cvttss2usi:
+ case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
+ case llvm::Intrinsic::x86_avx512_cvttsd2usi:
+ case llvm::Intrinsic::x86_avx512_cvtusi2sd:
+ case llvm::Intrinsic::x86_avx512_cvtusi2ss:
+ case llvm::Intrinsic::x86_avx512_cvtusi642sd:
+ case llvm::Intrinsic::x86_avx512_cvtusi642ss:
+ case llvm::Intrinsic::x86_sse2_cvtsd2si64:
+ case llvm::Intrinsic::x86_sse2_cvtsd2si:
+ case llvm::Intrinsic::x86_sse2_cvtsd2ss:
+ case llvm::Intrinsic::x86_sse2_cvtsi2sd:
+ case llvm::Intrinsic::x86_sse2_cvtsi642sd:
+ case llvm::Intrinsic::x86_sse2_cvtss2sd:
+ case llvm::Intrinsic::x86_sse2_cvttsd2si64:
+ case llvm::Intrinsic::x86_sse2_cvttsd2si:
+ case llvm::Intrinsic::x86_sse_cvtsi2ss:
+ case llvm::Intrinsic::x86_sse_cvtsi642ss:
+ case llvm::Intrinsic::x86_sse_cvtss2si64:
+ case llvm::Intrinsic::x86_sse_cvtss2si:
+ case llvm::Intrinsic::x86_sse_cvttss2si64:
+ case llvm::Intrinsic::x86_sse_cvttss2si:
+ handleVectorConvertIntrinsic(I, 1);
+ break;
+ case llvm::Intrinsic::x86_sse2_cvtdq2pd:
+ case llvm::Intrinsic::x86_sse2_cvtps2pd:
+ case llvm::Intrinsic::x86_sse_cvtps2pi:
+ case llvm::Intrinsic::x86_sse_cvttps2pi:
+ handleVectorConvertIntrinsic(I, 2);
+ break;
default:
if (!handleUnknownIntrinsic(I))
visitInstruction(I);
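The shadow propagation performed by handleVectorConvertIntrinsic() can be
pictured with an SSE C intrinsic that clang may lower to one of the cases
listed above (x86_sse_cvtsi2ss); the sketch is illustrative and not code from
the pass.

    #include <xmmintrin.h>

    __m128 convert(__m128 copy_op, int convert_op) {
      // Handled with NumUsedElements == 1: 'convert_op' must be fully
      // initialized (a shadow check is inserted, otherwise a UMR is reported);
      // element 0 of the result gets a clean shadow, while the shadows of
      // elements 1..3 are propagated unchanged from 'copy_op'.
      return _mm_cvtsi32_ss(copy_op, convert_op);
    }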
@@ -1589,6 +1911,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
}
IRBuilder<> IRB(&I);
+
+ if (MS.WrapIndirectCalls && !CS.getCalledFunction())
+ IndirectCallList.push_back(CS);
+
unsigned ArgOffset = 0;
DEBUG(dbgs() << " CallSite: " << I << "\n");
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
@@ -1632,7 +1958,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << " done with call args\n");
FunctionType *FT =
- cast<FunctionType>(CS.getCalledValue()->getType()-> getContainedType(0));
+ cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
if (FT->isVarArg()) {
VAHelper->visitCallSite(CS, IRB);
}
@@ -1671,12 +1997,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitReturnInst(ReturnInst &I) {
IRBuilder<> IRB(&I);
- if (Value *RetVal = I.getReturnValue()) {
- // Set the shadow for the RetVal.
+ Value *RetVal = I.getReturnValue();
+ if (!RetVal) return;
+ Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
+ if (CheckReturnValue) {
+ insertShadowCheck(RetVal, &I);
+ Value *Shadow = getCleanShadow(RetVal);
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
+ } else {
Value *Shadow = getShadow(RetVal);
- Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
- DEBUG(dbgs() << "Return: " << *Shadow << "\n" << *ShadowPtr << "\n");
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
+ // FIXME: make it conditional if ClStoreCleanOrigin==0
if (MS.TrackOrigins)
IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
}
@@ -1694,20 +2025,19 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitAllocaInst(AllocaInst &I) {
setShadow(&I, getCleanShadow(&I));
- if (!ClPoisonStack) return;
IRBuilder<> IRB(I.getNextNode());
uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
- if (ClPoisonStackWithCall) {
+ if (PoisonStack && ClPoisonStackWithCall) {
IRB.CreateCall2(MS.MsanPoisonStackFn,
IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
ConstantInt::get(MS.IntptrTy, Size));
} else {
Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
- IRB.CreateMemSet(ShadowBase, IRB.getInt8(ClPoisonStackPattern),
- Size, I.getAlignment());
+ Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
+ IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
}
- if (MS.TrackOrigins) {
+ if (PoisonStack && MS.TrackOrigins) {
setOrigin(&I, getCleanOrigin());
SmallString<2048> StackDescriptionStorage;
raw_svector_ostream StackDescription(StackDescriptionStorage);
@@ -1720,18 +2050,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Descr =
createPrivateNonConstGlobalForString(*F.getParent(),
StackDescription.str());
- IRB.CreateCall3(MS.MsanSetAllocaOriginFn,
+
+ IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
ConstantInt::get(MS.IntptrTy, Size),
- IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()));
+ IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(&F, MS.IntptrTy));
}
}
void visitSelectInst(SelectInst& I) {
IRBuilder<> IRB(&I);
- setShadow(&I, IRB.CreateSelect(I.getCondition(),
- getShadow(I.getTrueValue()), getShadow(I.getFalseValue()),
- "_msprop"));
+ // a = select b, c, d
+ Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
+ getShadow(I.getFalseValue()));
+ if (I.getType()->isAggregateType()) {
+ // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
+ // an extra "select". This results in much more compact IR.
+ // Sa = select Sb, poisoned, (select b, Sc, Sd)
+ S = IRB.CreateSelect(getShadow(I.getCondition()),
+ getPoisonedShadow(getShadowTy(I.getType())), S,
+ "_msprop_select_agg");
+ } else {
+ // Sa = (sext Sb) | (select b, Sc, Sd)
+ S = IRB.CreateOr(S, CreateShadowCast(IRB, getShadow(I.getCondition()),
+ S->getType(), true),
+ "_msprop_select");
+ }
+ setShadow(&I, S);
if (MS.TrackOrigins) {
// Origins are always i32, so any vector conditions must be flattened.
// FIXME: consider tracking vector origins for app vectors?
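The scalar select rule introduced above, Sa = (sext Sb) | (select b, Sc, Sd),
amounts to the following minimal sketch in C++ rather than the IR the pass
emits: the result of a = b ? c : d is poisoned if the chosen operand is
poisoned, or if the condition itself is poisoned.

    #include <cstdint>

    uint32_t selectShadow(bool b, bool Sb, uint32_t Sc, uint32_t Sd) {
      uint32_t chosen = b ? Sc : Sd;          // select b, Sc, Sd
      uint32_t condAllOnes = Sb ? ~0u : 0u;   // sext of the condition's shadow
      return condAllOnes | chosen;            // Sa
    }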
@@ -1766,7 +2112,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
setShadow(&I, ResShadow);
- setOrigin(&I, getCleanOrigin());
+ setOriginForNaryOp(I);
}
void visitInsertValueInst(InsertValueInst &I) {
@@ -1779,7 +2125,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
DEBUG(dbgs() << " Res: " << *Res << "\n");
setShadow(&I, Res);
- setOrigin(&I, getCleanOrigin());
+ setOriginForNaryOp(I);
}
void dumpInst(Instruction &I) {
@@ -1802,7 +2148,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
dumpInst(I);
DEBUG(dbgs() << "DEFAULT: " << I << "\n");
for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
- insertCheck(I.getOperand(i), &I);
+ insertShadowCheck(I.getOperand(i), &I);
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
@@ -1956,16 +2302,35 @@ struct VarArgAMD64Helper : public VarArgHelper {
Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr =
MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
- Value *SrcPtr =
- getShadowPtrForVAArgument(VAArgTLSCopy, IRB, AMD64FpEndOffset);
+ Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
}
}
};
-VarArgHelper* CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
+/// \brief A no-op implementation of VarArgHelper.
+struct VarArgNoOpHelper : public VarArgHelper {
+ VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV) {}
+
+ void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {}
+
+ void visitVAStartInst(VAStartInst &I) {}
+
+ void visitVACopyInst(VACopyInst &I) {}
+
+ void finalizeInstrumentation() {}
+};
+
+VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
MemorySanitizerVisitor &Visitor) {
- return new VarArgAMD64Helper(Func, Msan, Visitor);
+ // VarArg handling is only implemented on AMD64. False positives are possible
+ // on other platforms.
+ llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
+ if (TargetTriple.getArch() == llvm::Triple::x86_64)
+ return new VarArgAMD64Helper(Func, Msan, Visitor);
+ else
+ return new VarArgNoOpHelper(Func, Msan, Visitor);
}
} // namespace