author     dim <dim@FreeBSD.org>  2012-08-19 10:31:50 +0000
committer  dim <dim@FreeBSD.org>  2012-08-19 10:31:50 +0000
commit     4dc93743c9d40c29c0a3bec2aae328cac0d289e8 (patch)
tree       e7da40d2f6ef824f7371860826845870e6e1dcd5 /lib/Transforms
parent     721c201bd55ffb73cb2ba8d39e0570fa38c44e15 (diff)
Vendor import of llvm trunk r162107:
http://llvm.org/svn/llvm-project/llvm/trunk@162107
Diffstat (limited to 'lib/Transforms')
 lib/Transforms/InstCombine/InstCombineCalls.cpp     | 10
 lib/Transforms/Instrumentation/AddressSanitizer.cpp | 64
 lib/Transforms/Scalar/CodeGenPrepare.cpp            | 12
 lib/Transforms/Scalar/GVN.cpp                       | 95
 4 files changed, 87 insertions(+), 94 deletions(-)
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index d34fab1..cbe1ca4 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -51,8 +51,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// if the size is something we can handle with a single primitive load/store.
// A single load+store correctly handles overlapping memory in the memmove
// case.
- unsigned Size = MemOpLength->getZExtValue();
- if (Size == 0) return MI; // Delete this mem transfer.
+ uint64_t Size = MemOpLength->getLimitedValue();
+ assert(Size && "0-sized memory transferring should be removed already.");
if (Size > 8 || (Size&(Size-1)))
return 0; // If not 1/2/4/8 bytes, exit.
@@ -133,11 +133,9 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
return 0;
- uint64_t Len = LenC->getZExtValue();
+ uint64_t Len = LenC->getLimitedValue();
Alignment = MI->getAlignment();
-
- // If the length is zero, this is a no-op
- if (Len == 0) return MI; // memset(d,c,0,a) -> noop
+ assert(Len && "0-sized memory setting should be removed already.");
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
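
Taken together, these two hunks assume that zero-length memset/memcpy calls
have already been erased by earlier cleanup, and only lower constant lengths
of 1, 2, 4, or 8 bytes to a single primitive store. A minimal plain-C++
sketch of the two decisions involved (helper names are illustrative, not the
LLVM API):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Mirrors "Size > 8 || (Size & (Size-1))" above: only 1/2/4/8-byte
    // transfers become a single primitive load+store pair.
    static bool fitsSinglePrimitive(uint64_t Size) {
      assert(Size && "zero-sized transfers are deleted before this point");
      return Size <= 8 && (Size & (Size - 1)) == 0;
    }

    // memset(p, c, n) for n in {1,2,4,8} becomes one store of the fill byte
    // replicated across n bytes; multiplying by 0x0101... does the splat.
    static uint64_t splatFillByte(uint8_t Fill, uint64_t Len) {
      uint64_t Wide = Fill * 0x0101010101010101ULL;
      return Len == 8 ? Wide : Wide & ((1ULL << (Len * 8)) - 1);
    }

    int main() {
      assert(fitsSinglePrimitive(4) && !fitsSinglePrimitive(6));
      // memset(p, 0xAB, 4) -> store of the 32-bit value 0xABABABAB
      printf("0x%llx\n", (unsigned long long)splatFillByte(0xAB, 4));
      return 0;
    }

The switch from getZExtValue() to getLimitedValue() matters because the
length operand may be wider than 64 bits: getZExtValue() asserts that the
value fits, while getLimitedValue() saturates instead.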
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index bf35eac..17b83ce 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -86,6 +86,9 @@ static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
cl::Hidden, cl::init(true));
+static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
+ cl::desc("use instrumentation with slow path for all accesses"),
+ cl::Hidden, cl::init(false));
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
@@ -159,7 +162,7 @@ struct AddressSanitizer : public ModulePass {
Value *Addr, uint32_t TypeSize, bool IsWrite);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeSize);
- Instruction *generateCrashCode(BasicBlock *BB, Value *Addr, Value *PC,
+ Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex);
bool instrumentMemIntrinsic(AsanFunctionContext &AFC, MemIntrinsic *MI);
void instrumentMemIntrinsicParam(AsanFunctionContext &AFC,
@@ -251,24 +254,24 @@ static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) {
// ThenBlock
// Tail
//
-// If ThenBlock is zero, a new block is created and its terminator is returned.
-// Otherwize 0 is returned.
-static BranchInst *splitBlockAndInsertIfThen(Value *Cmp,
- BasicBlock *ThenBlock = 0) {
+// A new ThenBlock is created and its terminator is returned.
+// If Unreachable is true, ThenBlock is terminated with an UnreachableInst;
+// otherwise it is terminated with a BranchInst to Tail.
+static TerminatorInst *splitBlockAndInsertIfThen(Value *Cmp, bool Unreachable) {
Instruction *SplitBefore = cast<Instruction>(Cmp)->getNextNode();
BasicBlock *Head = SplitBefore->getParent();
BasicBlock *Tail = Head->splitBasicBlock(SplitBefore);
TerminatorInst *HeadOldTerm = Head->getTerminator();
- BranchInst *CheckTerm = 0;
- if (!ThenBlock) {
- LLVMContext &C = Head->getParent()->getParent()->getContext();
- ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail);
+ LLVMContext &C = Head->getParent()->getParent()->getContext();
+ BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail);
+ TerminatorInst *CheckTerm;
+ if (Unreachable)
+ CheckTerm = new UnreachableInst(C, ThenBlock);
+ else
CheckTerm = BranchInst::Create(Tail, ThenBlock);
- }
BranchInst *HeadNewTerm =
BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp);
ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
-
return CheckTerm;
}
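
For orientation between the hunks: after this rewrite the helper always
creates ThenBlock itself and returns its terminator. A plain-C++ sketch of
the resulting control flow (function and names are illustrative only):

    // Shape of the CFG built by splitBlockAndInsertIfThen(Cmp, Unreachable).
    void sketch(bool Cmp) {
      // Head: everything up to and including the computation of Cmp,
      // now ending in a conditional branch on Cmp.
      if (Cmp) {
        // ThenBlock: created empty; its terminator, which is returned to
        // the caller, is either an 'unreachable' (Unreachable == true, the
        // crash path) or a fall-through branch to Tail (Unreachable == false).
      }
      // Tail: the second half of the original block.
    }

The returned terminator is then used as an insertion point, which is exactly
how generateCrashCode receives its new InsertBefore argument below.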
@@ -320,7 +323,7 @@ bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC,
Value *Cmp = IRB.CreateICmpNE(Length,
Constant::getNullValue(Length->getType()));
- InsertBefore = splitBlockAndInsertIfThen(Cmp);
+ InsertBefore = splitBlockAndInsertIfThen(Cmp, false);
}
instrumentMemIntrinsicParam(AFC, MI, Dst, Length, InsertBefore, true);
@@ -391,15 +394,11 @@ Function *AddressSanitizer::checkInterfaceFunction(Constant *FuncOrBitcast) {
}
Instruction *AddressSanitizer::generateCrashCode(
- BasicBlock *BB, Value *Addr, Value *PC,
+ Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex) {
- IRBuilder<> IRB(BB->getFirstNonPHI());
- CallInst *Call;
- if (PC)
- Call = IRB.CreateCall2(AsanErrorCallback[IsWrite][AccessSizeIndex],
- Addr, PC);
- else
- Call = IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);
+ IRBuilder<> IRB(InsertBefore);
+ CallInst *Call = IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex],
+ Addr);
// We don't do Call->setDoesNotReturn() because the BB already has
// UnreachableInst at the end.
// This EmptyAsm is required to avoid callback merge.
@@ -420,7 +419,7 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
// (uint8_t) ((Addr & (Granularity-1)) + size - 1)
LastAccessedByte = IRB.CreateIntCast(
- LastAccessedByte, IRB.getInt8Ty(), false);
+ LastAccessedByte, ShadowValue->getType(), false);
// ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
@@ -440,26 +439,27 @@ void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC,
IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
-
- BasicBlock *CrashBlock = BasicBlock::Create(*C, "crash_bb", &AFC.F);
- new UnreachableInst(*C, CrashBlock);
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
- Instruction *Crash =
- generateCrashCode(CrashBlock, AddrLong, 0, IsWrite, AccessSizeIndex);
- Crash->setDebugLoc(OrigIns->getDebugLoc());
-
size_t Granularity = 1 << MappingScale;
- if (TypeSize < 8 * Granularity) {
- BranchInst *CheckTerm = splitBlockAndInsertIfThen(Cmp);
- assert(CheckTerm->isUnconditional());
+ TerminatorInst *CrashTerm = 0;
+
+ if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
+ TerminatorInst *CheckTerm = splitBlockAndInsertIfThen(Cmp, false);
+ assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
BasicBlock *NextBB = CheckTerm->getSuccessor(0);
IRB.SetInsertPoint(CheckTerm);
Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
+ BasicBlock *CrashBlock = BasicBlock::Create(*C, "", &AFC.F, NextBB);
+ CrashTerm = new UnreachableInst(*C, CrashBlock);
BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
ReplaceInstWithInst(CheckTerm, NewTerm);
} else {
- splitBlockAndInsertIfThen(Cmp, CrashBlock);
+ CrashTerm = splitBlockAndInsertIfThen(Cmp, true);
}
+
+ Instruction *Crash =
+ generateCrashCode(CrashTerm, AddrLong, IsWrite, AccessSizeIndex);
+ Crash->setDebugLoc(OrigIns->getDebugLoc());
}
// This function replaces all global variables with new variables that have
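
Net effect of the instrumentAddress changes: the fast shadow-byte test is
optionally followed by the partial-granule slow path built by
createSlowPathCmp (forced for every access under -asan-always-slow-path),
and the crash block is now created next to its use. A runnable C++ model of
the predicate being emitted; the mapping scale of 3 is an assumed example
value, and the shadow byte is passed in rather than loaded from real shadow
memory:

    #include <cassert>
    #include <cstdint>

    static const int MappingScale = 3; // assumed: 8-byte shadow granules

    // Returns true when the instrumented branch to the crash block fires.
    static bool shadowCheck(uintptr_t Addr, uint32_t AccessSize,
                            int8_t ShadowValue) {
      uintptr_t Granularity = 1u << MappingScale;
      if (ShadowValue == 0)
        return false; // fast path: the whole granule is addressable
      // Slow path, mirroring createSlowPathCmp:
      //   (int8_t)((Addr & (Granularity-1)) + size - 1) >= ShadowValue
      int8_t LastAccessedByte =
          (int8_t)((Addr & (Granularity - 1)) + AccessSize - 1);
      return LastAccessedByte >= ShadowValue; // signed, as in CreateICmpSGE
    }

    int main() {
      // Shadow value 4: only the first 4 bytes of this granule are valid.
      assert(!shadowCheck(0x1000, 4, 4)); // bytes 0..3 -> allowed
      assert(shadowCheck(0x1002, 4, 4));  // bytes 2..5 -> byte 4 poisoned
      return 0;
    }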
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index bc87106..a3c426a 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -66,11 +66,6 @@ static cl::opt<bool> DisableBranchOpts(
"disable-cgp-branch-opts", cl::Hidden, cl::init(false),
cl::desc("Disable branch optimizations in CodeGenPrepare"));
-// FIXME: Remove this abomination once all of the tests pass without it!
-static cl::opt<bool> DisableDeleteDeadBlocks(
- "disable-cgp-delete-dead-blocks", cl::Hidden, cl::init(false),
- cl::desc("Disable deleting dead blocks in CodeGenPrepare"));
-
static cl::opt<bool> DisableSelectToBranch(
"disable-cgp-select2branch", cl::Hidden, cl::init(false),
cl::desc("Disable select to branch conversion."));
@@ -188,10 +183,9 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
WorkList.insert(*II);
}
- if (!DisableDeleteDeadBlocks)
- for (SmallPtrSet<BasicBlock*, 8>::iterator
- I = WorkList.begin(), E = WorkList.end(); I != E; ++I)
- DeleteDeadBlock(*I);
+ for (SmallPtrSet<BasicBlock*, 8>::iterator
+ I = WorkList.begin(), E = WorkList.end(); I != E; ++I)
+ DeleteDeadBlock(*I);
// Merge pairs of basic blocks with unconditional branches, connected by
// a single edge.
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 120175d..4822fd0 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -613,8 +613,8 @@ namespace {
void verifyRemoved(const Instruction *I) const;
bool splitCriticalEdges();
unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
- const BasicBlock *Root);
- bool propagateEquality(Value *LHS, Value *RHS, const BasicBlock *Root);
+ const BasicBlockEdge &Root);
+ bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root);
};
char GVN::ID = 0;
@@ -2004,22 +2004,13 @@ Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
/// use is dominated by the given basic block. Returns the number of uses that
/// were replaced.
unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
- const BasicBlock *Root) {
+ const BasicBlockEdge &Root) {
unsigned Count = 0;
for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
UI != UE; ) {
Use &U = (UI++).getUse();
- // If From occurs as a phi node operand then the use implicitly lives in the
- // corresponding incoming block. Otherwise it is the block containing the
- // user that must be dominated by Root.
- BasicBlock *UsingBlock;
- if (PHINode *PN = dyn_cast<PHINode>(U.getUser()))
- UsingBlock = PN->getIncomingBlock(U);
- else
- UsingBlock = cast<Instruction>(U.getUser())->getParent();
-
- if (DT->dominates(Root, UsingBlock)) {
+ if (DT->dominates(Root, U)) {
U.set(To);
++Count;
}
@@ -2027,13 +2018,34 @@ unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
return Count;
}
+/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'. Return
+/// true if every path from the entry block to 'Dst' passes via this edge. In
+/// particular 'Dst' must not be reachable via another edge from 'Src'.
+static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
+ DominatorTree *DT) {
+ // While in theory it is interesting to consider the case in which Dst has
+ // more than one predecessor, because Dst might be part of a loop which is
+ // only reachable from Src, in practice it is pointless since at the time
+ // GVN runs all such loops have preheaders, which means that Dst will have
+ // been changed to have only one predecessor, namely Src.
+ const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
+ const BasicBlock *Src = E.getStart();
+ assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
+ (void)Src;
+ return Pred != 0;
+}
+
/// propagateEquality - The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
-bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlock *Root) {
+bool GVN::propagateEquality(Value *LHS, Value *RHS,
+ const BasicBlockEdge &Root) {
SmallVector<std::pair<Value*, Value*>, 4> Worklist;
Worklist.push_back(std::make_pair(LHS, RHS));
bool Changed = false;
+ // For speed, compute a conservative fast approximation to
+ // DT->dominates(Root, Root.getEnd());
+ bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
while (!Worklist.empty()) {
std::pair<Value*, Value*> Item = Worklist.pop_back_val();
@@ -2065,9 +2077,6 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlock *Root) {
LVN = RVN;
}
}
- assert((!isa<Instruction>(RHS) ||
- DT->properlyDominates(cast<Instruction>(RHS)->getParent(), Root)) &&
- "Instruction doesn't dominate scope!");
// If value numbering later sees that an instruction in the scope is equal
// to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
@@ -2076,8 +2085,10 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlock *Root) {
// if RHS is an instruction (if an instruction in the scope is morphed into
// LHS then it will be turned into RHS by the next GVN iteration anyway, so
// using the leader table is about compiling faster, not optimizing better).
- if (!isa<Instruction>(RHS))
- addToLeaderTable(LVN, RHS, Root);
+ // The leader table only tracks basic blocks, not edges. Only add to the
+ // table if we have the simple case where the edge dominates the end.
+ if (RootDominatesEnd && !isa<Instruction>(RHS))
+ addToLeaderTable(LVN, RHS, Root.getEnd());
// Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
// LHS always has at least one use that is not dominated by Root, this will
@@ -2136,7 +2147,7 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlock *Root) {
// If the number we were assigned was brand new then there is no point in
// looking for an instruction realizing it: there cannot be one!
if (Num < NextNum) {
- Value *NotCmp = findLeader(Root, Num);
+ Value *NotCmp = findLeader(Root.getEnd(), Num);
if (NotCmp && isa<Instruction>(NotCmp)) {
unsigned NumReplacements =
replaceAllDominatedUsesWith(NotCmp, NotVal, Root);
@@ -2146,7 +2157,10 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlock *Root) {
}
// Ensure that any instruction in scope that gets the "A < B" value number
// is replaced with false.
- addToLeaderTable(Num, NotVal, Root);
+ // The leader table only tracks basic blocks, not edges. Only add to the
+ // table if we have the simple case where the edge dominates the end.
+ if (RootDominatesEnd)
+ addToLeaderTable(Num, NotVal, Root.getEnd());
continue;
}
@@ -2155,22 +2169,6 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlock *Root) {
return Changed;
}
-/// isOnlyReachableViaThisEdge - There is an edge from 'Src' to 'Dst'. Return
-/// true if every path from the entry block to 'Dst' passes via this edge. In
-/// particular 'Dst' must not be reachable via another edge from 'Src'.
-static bool isOnlyReachableViaThisEdge(BasicBlock *Src, BasicBlock *Dst,
- DominatorTree *DT) {
- // While in theory it is interesting to consider the case in which Dst has
- // more than one predecessor, because Dst might be part of a loop which is
- // only reachable from Src, in practice it is pointless since at the time
- // GVN runs all such loops have preheaders, which means that Dst will have
- // been changed to have only one predecessor, namely Src.
- BasicBlock *Pred = Dst->getSinglePredecessor();
- assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
- (void)Src;
- return Pred != 0;
-}
-
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
bool GVN::processInstruction(Instruction *I) {
@@ -2210,18 +2208,20 @@ bool GVN::processInstruction(Instruction *I) {
BasicBlock *TrueSucc = BI->getSuccessor(0);
BasicBlock *FalseSucc = BI->getSuccessor(1);
+ // Avoid multiple edges early.
+ if (TrueSucc == FalseSucc)
+ return false;
+
BasicBlock *Parent = BI->getParent();
bool Changed = false;
- if (isOnlyReachableViaThisEdge(Parent, TrueSucc, DT))
- Changed |= propagateEquality(BranchCond,
- ConstantInt::getTrue(TrueSucc->getContext()),
- TrueSucc);
+ Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
+ BasicBlockEdge TrueE(Parent, TrueSucc);
+ Changed |= propagateEquality(BranchCond, TrueVal, TrueE);
- if (isOnlyReachableViaThisEdge(Parent, FalseSucc, DT))
- Changed |= propagateEquality(BranchCond,
- ConstantInt::getFalse(FalseSucc->getContext()),
- FalseSucc);
+ Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
+ BasicBlockEdge FalseE(Parent, FalseSucc);
+ Changed |= propagateEquality(BranchCond, FalseVal, FalseE);
return Changed;
}
@@ -2234,8 +2234,9 @@ bool GVN::processInstruction(Instruction *I) {
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
BasicBlock *Dst = i.getCaseSuccessor();
- if (isOnlyReachableViaThisEdge(Parent, Dst, DT))
- Changed |= propagateEquality(SwitchCond, i.getCaseValue(), Dst);
+ BasicBlockEdge E(Parent, Dst);
+ if (E.isSingleEdge())
+ Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E);
}
return Changed;
}
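
The payoff of carrying a BasicBlockEdge instead of a bare block through
propagateEquality is that an equality can be exploited along one edge even
when its target has other predecessors, provided each use is dominated by
that edge. A source-level sketch of the effect (illustrative, not the pass
itself):

    // Before GVN: on the true edge, x is known to equal 42.
    int before(int x) {
      if (x == 42)
        return x + 1; // use dominated by the true edge
      return x;
    }

    // After propagateEquality: the dominated use is rewritten to the
    // constant, so the addition folds to 43 in later simplification.
    int after(int x) {
      if (x == 42)
        return 42 + 1;
      return x;
    }

The new early return for TrueSucc == FalseSucc covers conditional branches
whose two edges target the same block; neither edge then implies anything
about the condition, so there is nothing to propagate.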