author     dim <dim@FreeBSD.org>  2016-01-06 20:19:13 +0000
committer  dim <dim@FreeBSD.org>  2016-01-06 20:19:13 +0000
commit     e06c171d67ab436f270b15f7e364a8d8f77c01f2 (patch)
tree       b7c03c042b220d85a294b0e2e89936b631d3e6ad /contrib/llvm/lib/Transforms/Utils
parent     db873d7452584205dd063528dc8addbf28aa396b (diff)
parent     ff2ba393a56d9d99dcb76ceada542233db28af9a (diff)
download   FreeBSD-src-e06c171d67ab436f270b15f7e364a8d8f77c01f2.zip
           FreeBSD-src-e06c171d67ab436f270b15f7e364a8d8f77c01f2.tar.gz
Update llvm to trunk r256945.
Diffstat (limited to 'contrib/llvm/lib/Transforms/Utils')
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp  100
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/Local.cpp                47
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp          66
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp     78
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp          10
5 files changed, 191 insertions, 110 deletions
diff --git a/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index 0914699..42287d3 100644
--- a/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -74,17 +74,13 @@ namespace llvm {
// insertFastDiv - Substitutes the div/rem instruction with code that checks the
// value of the operands and uses a shorter-faster div/rem instruction when
// possible and the longer-slower div/rem instruction otherwise.
-static bool insertFastDiv(Function &F,
- Function::iterator &I,
- BasicBlock::iterator &J,
- IntegerType *BypassType,
- bool UseDivOp,
- bool UseSignedOp,
+static bool insertFastDiv(Instruction *I, IntegerType *BypassType,
+ bool UseDivOp, bool UseSignedOp,
DivCacheTy &PerBBDivCache) {
+ Function *F = I->getParent()->getParent();
// Get instruction operands
- Instruction *Instr = &*J;
- Value *Dividend = Instr->getOperand(0);
- Value *Divisor = Instr->getOperand(1);
+ Value *Dividend = I->getOperand(0);
+ Value *Divisor = I->getOperand(1);
if (isa<ConstantInt>(Divisor) ||
(isa<ConstantInt>(Dividend) && isa<ConstantInt>(Divisor))) {
@@ -94,13 +90,12 @@ static bool insertFastDiv(Function &F,
}
// Basic Block is split before divide
- BasicBlock *MainBB = &*I;
- BasicBlock *SuccessorBB = I->splitBasicBlock(J);
- ++I; //advance iterator I to successorBB
+ BasicBlock *MainBB = &*I->getParent();
+ BasicBlock *SuccessorBB = MainBB->splitBasicBlock(I);
// Add new basic block for slow divide operation
- BasicBlock *SlowBB = BasicBlock::Create(F.getContext(), "",
- MainBB->getParent(), SuccessorBB);
+ BasicBlock *SlowBB =
+ BasicBlock::Create(F->getContext(), "", MainBB->getParent(), SuccessorBB);
SlowBB->moveBefore(SuccessorBB);
IRBuilder<> SlowBuilder(SlowBB, SlowBB->begin());
Value *SlowQuotientV;
@@ -115,8 +110,8 @@ static bool insertFastDiv(Function &F,
SlowBuilder.CreateBr(SuccessorBB);
// Add new basic block for fast divide operation
- BasicBlock *FastBB = BasicBlock::Create(F.getContext(), "",
- MainBB->getParent(), SuccessorBB);
+ BasicBlock *FastBB =
+ BasicBlock::Create(F->getContext(), "", MainBB->getParent(), SuccessorBB);
FastBB->moveBefore(SlowBB);
IRBuilder<> FastBuilder(FastBB, FastBB->begin());
Value *ShortDivisorV = FastBuilder.CreateCast(Instruction::Trunc, Divisor,
@@ -139,19 +134,19 @@ static bool insertFastDiv(Function &F,
// Phi nodes for result of div and rem
IRBuilder<> SuccessorBuilder(SuccessorBB, SuccessorBB->begin());
- PHINode *QuoPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
+ PHINode *QuoPhi = SuccessorBuilder.CreatePHI(I->getType(), 2);
QuoPhi->addIncoming(SlowQuotientV, SlowBB);
QuoPhi->addIncoming(FastQuotientV, FastBB);
- PHINode *RemPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
+ PHINode *RemPhi = SuccessorBuilder.CreatePHI(I->getType(), 2);
RemPhi->addIncoming(SlowRemainderV, SlowBB);
RemPhi->addIncoming(FastRemainderV, FastBB);
- // Replace Instr with appropriate phi node
+ // Replace I with appropriate phi node
if (UseDivOp)
- Instr->replaceAllUsesWith(QuoPhi);
+ I->replaceAllUsesWith(QuoPhi);
else
- Instr->replaceAllUsesWith(RemPhi);
- Instr->eraseFromParent();
+ I->replaceAllUsesWith(RemPhi);
+ I->eraseFromParent();
// Combine operands into a single value with OR for value testing below
MainBB->getInstList().back().eraseFromParent();
@@ -168,9 +163,6 @@ static bool insertFastDiv(Function &F,
Value *CmpV = MainBuilder.CreateICmpEQ(AndV, ZeroV);
MainBuilder.CreateCondBr(CmpV, FastBB, SlowBB);
- // point iterator J at first instruction of successorBB
- J = I->begin();
-
// Cache phi nodes to be used later in place of other instances
// of div or rem with the same sign, dividend, and divisor
DivOpInfo Key(UseSignedOp, Dividend, Divisor);
@@ -179,57 +171,54 @@ static bool insertFastDiv(Function &F,
return true;
}
-// reuseOrInsertFastDiv - Reuses previously computed dividend or remainder if
-// operands and operation are identical. Otherwise call insertFastDiv to perform
-// the optimization and cache the resulting dividend and remainder.
-static bool reuseOrInsertFastDiv(Function &F,
- Function::iterator &I,
- BasicBlock::iterator &J,
- IntegerType *BypassType,
- bool UseDivOp,
- bool UseSignedOp,
+// reuseOrInsertFastDiv - Reuses previously computed dividend or remainder from
+// the current BB if operands and operation are identical. Otherwise calls
+// insertFastDiv to perform the optimization and caches the resulting dividend
+// and remainder.
+static bool reuseOrInsertFastDiv(Instruction *I, IntegerType *BypassType,
+ bool UseDivOp, bool UseSignedOp,
DivCacheTy &PerBBDivCache) {
// Get instruction operands
- Instruction *Instr = &*J;
- DivOpInfo Key(UseSignedOp, Instr->getOperand(0), Instr->getOperand(1));
+ DivOpInfo Key(UseSignedOp, I->getOperand(0), I->getOperand(1));
DivCacheTy::iterator CacheI = PerBBDivCache.find(Key);
if (CacheI == PerBBDivCache.end()) {
// If previous instance does not exist, insert fast div
- return insertFastDiv(F, I, J, BypassType, UseDivOp, UseSignedOp,
- PerBBDivCache);
+ return insertFastDiv(I, BypassType, UseDivOp, UseSignedOp, PerBBDivCache);
}
// Replace operation value with previously generated phi node
DivPhiNodes &Value = CacheI->second;
if (UseDivOp) {
// Replace all uses of div instruction with quotient phi node
- J->replaceAllUsesWith(Value.Quotient);
+ I->replaceAllUsesWith(Value.Quotient);
} else {
// Replace all uses of rem instruction with remainder phi node
- J->replaceAllUsesWith(Value.Remainder);
+ I->replaceAllUsesWith(Value.Remainder);
}
- // Advance to next operation
- ++J;
-
// Remove redundant operation
- Instr->eraseFromParent();
+ I->eraseFromParent();
return true;
}
-// bypassSlowDivision - This optimization identifies DIV instructions that can
-// be profitably bypassed and carried out with a shorter, faster divide.
-bool llvm::bypassSlowDivision(Function &F,
- Function::iterator &I,
- const DenseMap<unsigned int, unsigned int> &BypassWidths) {
+// bypassSlowDivision - This optimization identifies DIV instructions in a BB
+// that can be profitably bypassed and carried out with a shorter, faster
+// divide.
+bool llvm::bypassSlowDivision(
+ BasicBlock *BB, const DenseMap<unsigned int, unsigned int> &BypassWidths) {
DivCacheTy DivCache;
bool MadeChange = false;
- for (BasicBlock::iterator J = I->begin(); J != I->end(); J++) {
+ Instruction* Next = &*BB->begin();
+ while (Next != nullptr) {
+ // We may add instructions immediately after I, but we want to skip over
+ // them.
+ Instruction* I = Next;
+ Next = Next->getNextNode();
// Get instruction details
- unsigned Opcode = J->getOpcode();
+ unsigned Opcode = I->getOpcode();
bool UseDivOp = Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
bool UseRemOp = Opcode == Instruction::SRem || Opcode == Instruction::URem;
bool UseSignedOp = Opcode == Instruction::SDiv ||
@@ -240,11 +229,11 @@ bool llvm::bypassSlowDivision(Function &F,
continue;
// Skip division on vector types, only optimize integer instructions
- if (!J->getType()->isIntegerTy())
+ if (!I->getType()->isIntegerTy())
continue;
// Get bitwidth of div/rem instruction
- IntegerType *T = cast<IntegerType>(J->getType());
+ IntegerType *T = cast<IntegerType>(I->getType());
unsigned int bitwidth = T->getBitWidth();
// Continue if bitwidth is not bypassed
@@ -253,10 +242,9 @@ bool llvm::bypassSlowDivision(Function &F,
continue;
// Get type for div/rem instruction with bypass bitwidth
- IntegerType *BT = IntegerType::get(J->getContext(), BI->second);
+ IntegerType *BT = IntegerType::get(I->getContext(), BI->second);
- MadeChange |= reuseOrInsertFastDiv(F, I, J, BT, UseDivOp,
- UseSignedOp, DivCache);
+ MadeChange |= reuseOrInsertFastDiv(I, BT, UseDivOp, UseSignedOp, DivCache);
}
return MadeChange;
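
The refactored insertFastDiv above takes the div/rem instruction directly and recovers the enclosing block and function from it, rather than threading Function and iterator references through the call chain; the transformation it emits is unchanged. A minimal sketch of that control flow in plain C++, assuming a 64-bit divide with a 32-bit bypass width (names are illustrative, this is not the pass's own code):

#include <cstdint>

// Sketch only: the pass emits the IR equivalent of this, with the
// quotient/remainder selected through phi nodes in SuccessorBB.
static uint64_t bypassedUDiv(uint64_t Dividend, uint64_t Divisor) {
  // MainBB: OR the operands and test whether any bits above the bypass
  // width are set.
  if (((Dividend | Divisor) >> 32) == 0) {
    // FastBB: shorter-faster divide on the truncated operands,
    // zero-extended back to the original width.
    return uint64_t(uint32_t(Dividend) / uint32_t(Divisor));
  }
  // SlowBB: longer-slower full-width divide.
  return Dividend / Divisor;
}
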
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index e75163f..0e386ac 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -1305,8 +1305,9 @@ static bool markAliveBlocks(Function &F,
}
}
- // Turn invokes that call 'nounwind' functions into ordinary calls.
- if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
+ TerminatorInst *Terminator = BB->getTerminator();
+ if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
+ // Turn invokes that call 'nounwind' functions into ordinary calls.
Value *Callee = II->getCalledValue();
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
changeToUnreachable(II, true);
@@ -1321,6 +1322,44 @@ static bool markAliveBlocks(Function &F,
changeToCall(II);
Changed = true;
}
+ } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
+ // Remove catchpads which cannot be reached.
+ struct CatchPadDenseMapInfo {
+ static CatchPadInst *getEmptyKey() {
+ return DenseMapInfo<CatchPadInst *>::getEmptyKey();
+ }
+ static CatchPadInst *getTombstoneKey() {
+ return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
+ }
+ static unsigned getHashValue(CatchPadInst *CatchPad) {
+ return static_cast<unsigned>(hash_combine_range(
+ CatchPad->value_op_begin(), CatchPad->value_op_end()));
+ }
+ static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
+ if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
+ RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return LHS == RHS;
+ return LHS->isIdenticalTo(RHS);
+ }
+ };
+
+ // Set of unique CatchPads.
+ SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
+ CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
+ HandlerSet;
+ detail::DenseSetEmpty Empty;
+ for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
+ E = CatchSwitch->handler_end();
+ I != E; ++I) {
+ BasicBlock *HandlerBB = *I;
+ auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
+ if (!HandlerSet.insert({CatchPad, Empty}).second) {
+ CatchSwitch->removeHandler(I);
+ --I;
+ --E;
+ Changed = true;
+ }
+ }
}
Changed |= ConstantFoldTerminator(BB, true);
@@ -1514,8 +1553,8 @@ bool llvm::callsGCLeafFunction(ImmutableCallSite CS) {
return true;
// Check if the function is specifically marked as a gc leaf function.
- //
- // TODO: we should be checking the attributes on the call site as well.
+ if (CS.hasFnAttr("gc-leaf-function"))
+ return true;
if (const Function *F = CS.getCalledFunction())
return F->hasFnAttribute("gc-leaf-function");
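
The markAliveBlocks hunk above removes duplicate catchswitch handlers by keying a SmallDenseMap on each handler's catchpad, with a local DenseMapInfo that hashes the pad's operands and compares pads with isIdenticalTo instead of by pointer identity. A minimal sketch of that custom-DenseMapInfo idiom on a plain value type (the Point type and names are illustrative):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include <climits>

struct Point { int X, Y; };

// The empty and tombstone keys are reserved sentinels and must never
// collide with real data.
struct PointDenseMapInfo {
  static Point getEmptyKey() { return {INT_MIN, INT_MIN}; }
  static Point getTombstoneKey() { return {INT_MIN + 1, INT_MIN + 1}; }
  static unsigned getHashValue(const Point &P) {
    return static_cast<unsigned>(llvm::hash_combine(P.X, P.Y));
  }
  static bool isEqual(const Point &LHS, const Point &RHS) {
    return LHS.X == RHS.X && LHS.Y == RHS.Y;
  }
};

// insert().second is false for a structural duplicate, which is how the
// handler loop above detects and drops a repeated catchpad.
llvm::DenseMap<Point, unsigned, PointDenseMapInfo> SeenHandlers;
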
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index d0932f83..3bb3fa5 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -3448,18 +3449,26 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
if (isa<CallInst>(BBI) && !isa<DbgInfoIntrinsic>(BBI)) break;
if (BBI->mayHaveSideEffects()) {
- if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+ if (auto *SI = dyn_cast<StoreInst>(BBI)) {
if (SI->isVolatile())
break;
- } else if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
+ } else if (auto *LI = dyn_cast<LoadInst>(BBI)) {
if (LI->isVolatile())
break;
- } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(BBI)) {
+ } else if (auto *RMWI = dyn_cast<AtomicRMWInst>(BBI)) {
if (RMWI->isVolatile())
break;
- } else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(BBI)) {
+ } else if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(BBI)) {
if (CXI->isVolatile())
break;
+ } else if (isa<CatchPadInst>(BBI)) {
+ // A catchpad may invoke exception object constructors and such, which
+ // in some languages can be arbitrary code, so be conservative by
+ // default.
+ // For CoreCLR, it just involves a type test, so can be removed.
+ if (classifyEHPersonality(BB->getParent()->getPersonalityFn()) !=
+ EHPersonality::CoreCLR)
+ break;
} else if (!isa<FenceInst>(BBI) && !isa<VAArgInst>(BBI) &&
!isa<LandingPadInst>(BBI)) {
break;
@@ -3485,7 +3494,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
TerminatorInst *TI = Preds[i]->getTerminator();
IRBuilder<> Builder(TI);
- if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
+ if (auto *BI = dyn_cast<BranchInst>(TI)) {
if (BI->isUnconditional()) {
if (BI->getSuccessor(0) == BB) {
new UnreachableInst(TI->getContext(), TI);
@@ -3502,7 +3511,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
Changed = true;
}
}
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
+ } else if (auto *SI = dyn_cast<SwitchInst>(TI)) {
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i)
if (i.getCaseSuccessor() == BB) {
@@ -3511,18 +3520,49 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
--i; --e;
Changed = true;
}
- } else if ((isa<InvokeInst>(TI) &&
- cast<InvokeInst>(TI)->getUnwindDest() == BB) ||
- isa<CatchSwitchInst>(TI)) {
- removeUnwindEdge(TI->getParent());
- Changed = true;
+ } else if (auto *II = dyn_cast<InvokeInst>(TI)) {
+ if (II->getUnwindDest() == BB) {
+ removeUnwindEdge(TI->getParent());
+ Changed = true;
+ }
+ } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
+ if (CSI->getUnwindDest() == BB) {
+ removeUnwindEdge(TI->getParent());
+ Changed = true;
+ continue;
+ }
+
+ for (CatchSwitchInst::handler_iterator I = CSI->handler_begin(),
+ E = CSI->handler_end();
+ I != E; ++I) {
+ if (*I == BB) {
+ CSI->removeHandler(I);
+ --I;
+ --E;
+ Changed = true;
+ }
+ }
+ if (CSI->getNumHandlers() == 0) {
+ BasicBlock *CatchSwitchBB = CSI->getParent();
+ if (CSI->hasUnwindDest()) {
+ // Redirect preds to the unwind dest
+ CatchSwitchBB->replaceAllUsesWith(CSI->getUnwindDest());
+ } else {
+ // Rewrite all preds to unwind to caller (or from invoke to call).
+ SmallVector<BasicBlock *, 8> EHPreds(predecessors(CatchSwitchBB));
+ for (BasicBlock *EHPred : EHPreds)
+ removeUnwindEdge(EHPred);
+ }
+ // The catchswitch is no longer reachable.
+ new UnreachableInst(CSI->getContext(), CSI);
+ CSI->eraseFromParent();
+ Changed = true;
+ }
} else if (isa<CleanupReturnInst>(TI)) {
new UnreachableInst(TI->getContext(), TI);
TI->eraseFromParent();
Changed = true;
}
- // TODO: We can remove a catchswitch if all it's catchpads end in
- // unreachable.
}
// If this block is now dead, remove it.
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 81dea6d..dc5fee5 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -57,8 +57,7 @@ static bool ignoreCallingConv(LibFunc::Func Func) {
Func == LibFunc::llabs || Func == LibFunc::strlen;
}
-/// isOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
-/// value is equal or not-equal to zero.
+/// Return true if it only matters that the value is equal or not-equal to zero.
static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
for (User *U : V->users()) {
if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
@@ -72,8 +71,7 @@ static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
return true;
}
-/// isOnlyUsedInEqualityComparison - Return true if it is only used in equality
-/// comparisons with With.
+/// Return true if it is only used in equality comparisons with With.
static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
for (User *U : V->users()) {
if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
@@ -249,12 +247,12 @@ Value *LibCallSimplifier::optimizeStrNCat(CallInst *CI, IRBuilder<> &B) {
!FT->getParamType(2)->isIntegerTy())
return nullptr;
- // Extract some information from the instruction
+ // Extract some information from the instruction.
Value *Dst = CI->getArgOperand(0);
Value *Src = CI->getArgOperand(1);
uint64_t Len;
- // We don't do anything if length is not constant
+ // We don't do anything if length is not constant.
if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Len = LengthArg->getZExtValue();
else
@@ -272,12 +270,12 @@ Value *LibCallSimplifier::optimizeStrNCat(CallInst *CI, IRBuilder<> &B) {
if (SrcLen == 0 || Len == 0)
return Dst;
- // We don't optimize this case
+ // We don't optimize this case.
if (Len < SrcLen)
return nullptr;
// strncat(x, s, c) -> strcat(x, s)
- // s is constant so the strcat can be optimized further
+ // s is constant so the strcat can be optimized further.
return emitStrLenMemCpy(Src, Dst, SrcLen, B);
}
@@ -310,7 +308,8 @@ Value *LibCallSimplifier::optimizeStrChr(CallInst *CI, IRBuilder<> &B) {
StringRef Str;
if (!getConstantStringInfo(SrcStr, Str)) {
if (CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
- return B.CreateGEP(B.getInt8Ty(), SrcStr, EmitStrLen(SrcStr, B, DL, TLI), "strchr");
+ return B.CreateGEP(B.getInt8Ty(), SrcStr, EmitStrLen(SrcStr, B, DL, TLI),
+ "strchr");
return nullptr;
}
@@ -490,8 +489,8 @@ Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilder<> &B) {
Type *PT = Callee->getFunctionType()->getParamType(0);
Value *LenV = ConstantInt::get(DL.getIntPtrType(PT), Len);
- Value *DstEnd =
- B.CreateGEP(B.getInt8Ty(), Dst, ConstantInt::get(DL.getIntPtrType(PT), Len - 1));
+ Value *DstEnd = B.CreateGEP(B.getInt8Ty(), Dst,
+ ConstantInt::get(DL.getIntPtrType(PT), Len - 1));
// We have enough information to now generate the memcpy call to do the
// copy for us. Make a memcpy to copy the nul byte with align = 1.
@@ -599,7 +598,8 @@ Value *LibCallSimplifier::optimizeStrPBrk(CallInst *CI, IRBuilder<> &B) {
if (I == StringRef::npos) // No match.
return Constant::getNullValue(CI->getType());
- return B.CreateGEP(B.getInt8Ty(), CI->getArgOperand(0), B.getInt64(I), "strpbrk");
+ return B.CreateGEP(B.getInt8Ty(), CI->getArgOperand(0), B.getInt64(I),
+ "strpbrk");
}
// strpbrk(s, "a") -> strchr(s, 'a')
@@ -878,8 +878,10 @@ Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilder<> &B) {
Type *RHSPtrTy =
IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
- Value *LHSV = B.CreateLoad(B.CreateBitCast(LHS, LHSPtrTy, "lhsc"), "lhsv");
- Value *RHSV = B.CreateLoad(B.CreateBitCast(RHS, RHSPtrTy, "rhsc"), "rhsv");
+ Value *LHSV =
+ B.CreateLoad(B.CreateBitCast(LHS, LHSPtrTy, "lhsc"), "lhsv");
+ Value *RHSV =
+ B.CreateLoad(B.CreateBitCast(RHS, RHSPtrTy, "rhsc"), "rhsv");
return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
}
@@ -992,6 +994,10 @@ Value *LibCallSimplifier::optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B,
Value *V = valueHasFloatPrecision(CI->getArgOperand(0));
if (V == nullptr)
return nullptr;
+
+ // Propagate fast-math flags from the existing call to the new call.
+ IRBuilder<>::FastMathFlagGuard Guard(B);
+ B.SetFastMathFlags(CI->getFastMathFlags());
// floor((double)floatval) -> (double)floorf(floatval)
if (Callee->isIntrinsic()) {
@@ -1027,6 +1033,10 @@ Value *LibCallSimplifier::optimizeBinaryDoubleFP(CallInst *CI, IRBuilder<> &B) {
if (V2 == nullptr)
return nullptr;
+ // Propagate fast-math flags from the existing call to the new call.
+ IRBuilder<>::FastMathFlagGuard Guard(B);
+ B.SetFastMathFlags(CI->getFastMathFlags());
+
// fmin((double)floatval1, (double)floatval2)
// -> (double)fminf(floatval1, floatval2)
// TODO: Handle intrinsics in the same way as in optimizeUnaryDoubleFP().
@@ -1117,7 +1127,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
Callee->getAttributes());
}
- bool unsafeFPMath = canUseUnsafeFPMath(CI->getParent()->getParent());
+ bool UnsafeFPMath = canUseUnsafeFPMath(CI->getParent()->getParent());
// pow(exp(x), y) -> exp(x*y)
// pow(exp2(x), y) -> exp2(x * y)
@@ -1126,7 +1136,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
// underflow behavior quite dramatically.
// Example: x = 1000, y = 0.001.
// pow(exp(x), y) = pow(inf, 0.001) = inf, whereas exp(x*y) = exp(1).
- if (unsafeFPMath) {
+ if (UnsafeFPMath) {
if (auto *OpC = dyn_cast<CallInst>(Op1)) {
IRBuilder<>::FastMathFlagGuard Guard(B);
FastMathFlags FMF;
@@ -1157,7 +1167,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
LibFunc::fabsl)) {
// In -ffast-math, pow(x, 0.5) -> sqrt(x).
- if (unsafeFPMath)
+ if (UnsafeFPMath)
return EmitUnaryFloatFnCall(Op1, TLI->getName(LibFunc::sqrt), B,
Callee->getAttributes());
@@ -1183,7 +1193,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
return B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0), Op1, "powrecip");
// In -ffast-math, generate repeated fmul instead of generating pow(x, n).
- if (unsafeFPMath) {
+ if (UnsafeFPMath) {
APFloat V = abs(Op2C->getValueAPF());
// We limit to a max of 7 fmul(s). Thus max exponent is 32.
// This transformation applies to integer exponents only.
@@ -1291,12 +1301,9 @@ Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
// function, do that first.
Function *Callee = CI->getCalledFunction();
StringRef Name = Callee->getName();
- if ((Name == "fmin" && hasFloatVersion(Name)) ||
- (Name == "fmax" && hasFloatVersion(Name))) {
- Value *Ret = optimizeBinaryDoubleFP(CI, B);
- if (Ret)
+ if ((Name == "fmin" || Name == "fmax") && hasFloatVersion(Name))
+ if (Value *Ret = optimizeBinaryDoubleFP(CI, B))
return Ret;
- }
// Make sure this has 2 arguments of FP type which match the result type.
FunctionType *FT = Callee->getFunctionType();
@@ -1307,14 +1314,12 @@ Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
IRBuilder<>::FastMathFlagGuard Guard(B);
FastMathFlags FMF;
- Function *F = CI->getParent()->getParent();
- if (canUseUnsafeFPMath(F)) {
+ if (CI->hasUnsafeAlgebra()) {
// Unsafe algebra sets all fast-math-flags to true.
FMF.setUnsafeAlgebra();
} else {
// At a minimum, no-nans-fp-math must be true.
- Attribute Attr = F->getFnAttribute("no-nans-fp-math");
- if (Attr.getValueAsString() != "true")
+ if (!CI->hasNoNaNs())
return nullptr;
// No-signed-zeros is implied by the definitions of fmax/fmin themselves:
// "Ideally, fmax would be sensitive to the sign of zero, for example
@@ -2169,7 +2174,10 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
LibFunc::Func Func;
Function *Callee = CI->getCalledFunction();
StringRef FuncName = Callee->getName();
- IRBuilder<> Builder(CI);
+
+ SmallVector<OperandBundleDef, 2> OpBundles;
+ CI->getOperandBundlesAsDefs(OpBundles);
+ IRBuilder<> Builder(CI, /*FPMathTag=*/nullptr, OpBundles);
bool isCallingConvC = CI->getCallingConv() == llvm::CallingConv::C;
// Command-line parameter overrides function attribute.
@@ -2419,7 +2427,8 @@ bool FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
return false;
}
-Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B) {
+Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
+ IRBuilder<> &B) {
Function *Callee = CI->getCalledFunction();
if (!checkStringCopyLibFuncSignature(Callee, LibFunc::memcpy_chk))
@@ -2433,7 +2442,8 @@ Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI, IRBuilder<> &
return nullptr;
}
-Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B) {
+Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
+ IRBuilder<> &B) {
Function *Callee = CI->getCalledFunction();
if (!checkStringCopyLibFuncSignature(Callee, LibFunc::memmove_chk))
@@ -2447,7 +2457,8 @@ Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI, IRBuilder<>
return nullptr;
}
-Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI, IRBuilder<> &B) {
+Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI,
+ IRBuilder<> &B) {
Function *Callee = CI->getCalledFunction();
if (!checkStringCopyLibFuncSignature(Callee, LibFunc::memset_chk))
@@ -2539,7 +2550,10 @@ Value *FortifiedLibCallSimplifier::optimizeCall(CallInst *CI) {
LibFunc::Func Func;
Function *Callee = CI->getCalledFunction();
StringRef FuncName = Callee->getName();
- IRBuilder<> Builder(CI);
+
+ SmallVector<OperandBundleDef, 2> OpBundles;
+ CI->getOperandBundlesAsDefs(OpBundles);
+ IRBuilder<> Builder(CI, /*FPMathTag=*/nullptr, OpBundles);
bool isCallingConvC = CI->getCallingConv() == llvm::CallingConv::C;
// First, check that this is a known library functions.
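
The optimizeUnaryDoubleFP/optimizeBinaryDoubleFP hunks above copy the original call's fast-math flags onto the IRBuilder before emitting the shrunk float call, scoped by a FastMathFlagGuard. A minimal sketch of that idiom (the helper name and the fmul are illustrative, not the simplifier's own code):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

// Sketch only: emitScaled is a hypothetical helper.
static llvm::Value *emitScaled(llvm::CallInst *CI, llvm::Value *X,
                               llvm::Value *Scale, llvm::IRBuilder<> &B) {
  // The guard saves the builder's current fast-math flags and restores
  // them when it goes out of scope, so the copied flags only affect
  // instructions created inside this function.
  llvm::IRBuilder<>::FastMathFlagGuard Guard(B);
  B.SetFastMathFlags(CI->getFastMathFlags());
  // Floating-point instructions built here inherit CI's flags.
  return B.CreateFMul(X, Scale, "scaled");
}
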
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 1add78e..2e361d3 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -218,12 +218,12 @@ static Metadata *mapMetadataOp(Metadata *Op,
}
/// Resolve uniquing cycles involving the given metadata.
-static void resolveCycles(Metadata *MD, bool MDMaterialized) {
+static void resolveCycles(Metadata *MD, bool AllowTemps) {
if (auto *N = dyn_cast_or_null<MDNode>(MD)) {
- if (!MDMaterialized && N->isTemporary())
+ if (AllowTemps && N->isTemporary())
return;
if (!N->isResolved())
- N->resolveCycles(MDMaterialized);
+ N->resolveCycles(AllowTemps);
}
}
@@ -253,7 +253,7 @@ static bool remapOperands(MDNode &Node,
// Resolve uniquing cycles underneath distinct nodes on the fly so they
// don't infect later operands.
if (IsDistinct)
- resolveCycles(New, !(Flags & RF_HaveUnmaterializedMetadata));
+ resolveCycles(New, Flags & RF_HaveUnmaterializedMetadata);
}
}
@@ -401,7 +401,7 @@ Metadata *llvm::MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
return NewMD;
// Resolve cycles involving the entry metadata.
- resolveCycles(NewMD, !(Flags & RF_HaveUnmaterializedMetadata));
+ resolveCycles(NewMD, Flags & RF_HaveUnmaterializedMetadata);
// Remap the operands of distinct MDNodes.
while (!DistinctWorklist.empty())