author     rdivacky <rdivacky@FreeBSD.org>    2010-07-13 17:19:57 +0000
committer  rdivacky <rdivacky@FreeBSD.org>    2010-07-13 17:19:57 +0000
commit     9112829d76cbb8e0c8ef51bbc2d7d1be48cd7b74 (patch)
tree       9de1c5f67a98cd0e73c60838396486c984f63ac2 /lib/Transforms/Utils
parent     1e3dec662ea18131c495db50caccc57f77b7a5fe (diff)
Update LLVM to r108243.
Diffstat (limited to 'lib/Transforms/Utils')
-rw-r--r--  lib/Transforms/Utils/AddrModeMatcher.cpp          |  21
-rw-r--r--  lib/Transforms/Utils/BasicBlockUtils.cpp          | 118
-rw-r--r--  lib/Transforms/Utils/BreakCriticalEdges.cpp       |  23
-rw-r--r--  lib/Transforms/Utils/BuildLibCalls.cpp            |  67
-rw-r--r--  lib/Transforms/Utils/CloneFunction.cpp            | 100
-rw-r--r--  lib/Transforms/Utils/CloneLoop.cpp                |  33
-rw-r--r--  lib/Transforms/Utils/CloneModule.cpp              |  56
-rw-r--r--  lib/Transforms/Utils/DemoteRegToStack.cpp         |  22
-rw-r--r--  lib/Transforms/Utils/InlineFunction.cpp           |  23
-rw-r--r--  lib/Transforms/Utils/LCSSA.cpp                    |   7
-rw-r--r--  lib/Transforms/Utils/Local.cpp                    | 113
-rw-r--r--  lib/Transforms/Utils/LoopSimplify.cpp             |  49
-rw-r--r--  lib/Transforms/Utils/LoopUnroll.cpp               |  20
-rw-r--r--  lib/Transforms/Utils/LowerInvoke.cpp              | 140
-rw-r--r--  lib/Transforms/Utils/PromoteMemoryToRegister.cpp  |  16
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp              |  15
-rw-r--r--  lib/Transforms/Utils/ValueMapper.cpp              |  10
-rw-r--r--  lib/Transforms/Utils/ValueMapper.h                |   4
18 files changed, 321 insertions(+), 516 deletions(-)
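
A pattern to note before the per-file hunks: much of this import mechanically replaces raw CallInst::getOperand(i) indexing with getArgOperand(i-1), and threads CallInst::ArgOffset into helpers such as isFoldable() that still take raw operand indices. The new accessors hide where the callee pointer sits in the call's operand list. A minimal sketch of the idiom, against the transitional API visible in these hunks:

#include "llvm/Instructions.h"
using namespace llvm;

// Old style: operand 0 was the callee, so argument i lived at operand i+1.
//   Value *A = CI->getOperand(1);
// New style: index arguments directly, independent of the callee's slot.
static Value *firstCallArg(CallInst *CI) {
  return CI->getArgOperand(0);            // first actual argument
}

// For interfaces that still want a raw operand index, ArgOffset converts
// an argument number back (the "3 + CallInst::ArgOffset" calls below).
static unsigned rawOperandIndex(unsigned ArgNo) {
  return ArgNo + CallInst::ArgOffset;
}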
diff --git a/lib/Transforms/Utils/AddrModeMatcher.cpp b/lib/Transforms/Utils/AddrModeMatcher.cpp
index ea9d1c1..4d64c85 100644
--- a/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -381,29 +381,28 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
const TargetLowering &TLI) {
std::vector<InlineAsm::ConstraintInfo>
Constraints = IA->ParseConstraints();
-
- unsigned ArgNo = 1; // ArgNo - The operand of the CallInst.
+
+ unsigned ArgNo = 0; // The argument of the CallInst.
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);
-
+
// Compute the value type for each operand.
switch (OpInfo.Type) {
case InlineAsm::isOutput:
if (OpInfo.isIndirect)
- OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+ OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
break;
case InlineAsm::isInput:
- OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+ OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
break;
case InlineAsm::isClobber:
// Nothing to do.
break;
}
-
+
// Compute the constraint code and ConstraintType to use.
- TLI.ComputeConstraintToUse(OpInfo, SDValue(),
- OpInfo.ConstraintType == TargetLowering::C_Memory);
-
+ TLI.ComputeConstraintToUse(OpInfo, SDValue());
+
// If this asm operand is our Value*, and if it isn't an indirect memory
// operand, we can't fold it!
if (OpInfo.CallOperandVal == OpVal &&
@@ -411,7 +410,7 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
!OpInfo.isIndirect))
return false;
}
-
+
return true;
}
@@ -450,7 +449,7 @@ static bool FindAllMemoryUses(Instruction *I,
if (CallInst *CI = dyn_cast<CallInst>(U)) {
InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
- if (IA == 0) return true;
+ if (!IA) return true;
// If this is a memory operand, we're cool, otherwise bail out.
if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
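
The loop above pairs each parsed inline-asm constraint with the call argument it consumes; indirect constraints are the ones consuming an address. The same pairing, condensed into a freestanding helper (a sketch that reuses only the API already shown in this hunk):

#include <vector>
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
using namespace llvm;

// Collect the call arguments that back indirect (memory) asm operands.
static void collectIndirectAsmOperands(CallInst *CI, InlineAsm *IA,
                                       std::vector<Value*> &Mem) {
  std::vector<InlineAsm::ConstraintInfo> CIV = IA->ParseConstraints();
  unsigned ArgNo = 0;                       // call arguments consumed so far
  for (unsigned i = 0, e = CIV.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &Info = CIV[i];
    if (Info.Type == InlineAsm::isClobber)
      continue;                             // clobbers consume no argument
    if (Info.Type == InlineAsm::isOutput && !Info.isIndirect)
      continue;                             // direct outputs are results
    Value *Op = CI->getArgOperand(ArgNo++); // this constraint's operand
    if (Info.isIndirect)
      Mem.push_back(Op);                    // operand is a memory address
  }
}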
diff --git a/lib/Transforms/Utils/BasicBlockUtils.cpp b/lib/Transforms/Utils/BasicBlockUtils.cpp
index 2f1ae00..ec625b4 100644
--- a/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -558,121 +558,3 @@ void llvm::FindFunctionBackedges(const Function &F,
}
-
-
-
-/// AreEquivalentAddressValues - Test if A and B will obviously have the same
-/// value. This includes recognizing that %t0 and %t1 will have the same
-/// value in code like this:
-/// %t0 = getelementptr @a, 0, 3
-/// store i32 0, i32* %t0
-/// %t1 = getelementptr @a, 0, 3
-/// %t2 = load i32* %t1
-///
-static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
- // Test if the values are trivially equivalent.
- if (A == B) return true;
-
- // Test if the values come from identical arithmetic instructions.
- // Use isIdenticalToWhenDefined instead of isIdenticalTo because
- // this function is only used when one address use dominates the
- // other, which means that they'll always either have the same
- // value or one of them will have an undefined value.
- if (isa<BinaryOperator>(A) || isa<CastInst>(A) ||
- isa<PHINode>(A) || isa<GetElementPtrInst>(A))
- if (const Instruction *BI = dyn_cast<Instruction>(B))
- if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
- return true;
-
- // Otherwise they may not be equivalent.
- return false;
-}
-
-/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
-/// instruction before ScanFrom) checking to see if we have the value at the
-/// memory address *Ptr locally available within a small number of instructions.
-/// If the value is available, return it.
-///
-/// If not, return the iterator for the last validated instruction that the
-/// value would be live through. If we scanned the entire block and didn't find
-/// something that invalidates *Ptr or provides it, ScanFrom would be left at
-/// begin() and this returns null. ScanFrom could also be left
-///
-/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
-/// it is set to 0, it will scan the whole block. You can also optionally
-/// specify an alias analysis implementation, which makes this more precise.
-Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
- BasicBlock::iterator &ScanFrom,
- unsigned MaxInstsToScan,
- AliasAnalysis *AA) {
- if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
-
- // If we're using alias analysis to disambiguate get the size of *Ptr.
- unsigned AccessSize = 0;
- if (AA) {
- const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
- AccessSize = AA->getTypeStoreSize(AccessTy);
- }
-
- while (ScanFrom != ScanBB->begin()) {
- // We must ignore debug info directives when counting (otherwise they
- // would affect codegen).
- Instruction *Inst = --ScanFrom;
- if (isa<DbgInfoIntrinsic>(Inst))
- continue;
-
- // Restore ScanFrom to expected value in case next test succeeds
- ScanFrom++;
-
- // Don't scan huge blocks.
- if (MaxInstsToScan-- == 0) return 0;
-
- --ScanFrom;
- // If this is a load of Ptr, the loaded value is available.
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
- if (AreEquivalentAddressValues(LI->getOperand(0), Ptr))
- return LI;
-
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- // If this is a store through Ptr, the value is available!
- if (AreEquivalentAddressValues(SI->getOperand(1), Ptr))
- return SI->getOperand(0);
-
- // If Ptr is an alloca and this is a store to a different alloca, ignore
- // the store. This is a trivial form of alias analysis that is important
- // for reg2mem'd code.
- if ((isa<AllocaInst>(Ptr) || isa<GlobalVariable>(Ptr)) &&
- (isa<AllocaInst>(SI->getOperand(1)) ||
- isa<GlobalVariable>(SI->getOperand(1))))
- continue;
-
- // If we have alias analysis and it says the store won't modify the loaded
- // value, ignore the store.
- if (AA &&
- (AA->getModRefInfo(SI, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
- continue;
-
- // Otherwise the store may or may not alias the pointer; bail out.
- ++ScanFrom;
- return 0;
- }
-
- // If this is some other instruction that may clobber Ptr, bail out.
- if (Inst->mayWriteToMemory()) {
- // If alias analysis claims that it really won't modify the load,
- // ignore it.
- if (AA &&
- (AA->getModRefInfo(Inst, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
- continue;
-
- // May modify the pointer, bail out.
- ++ScanFrom;
- return 0;
- }
- }
-
- // Got to the start of the block, we didn't find it, but are done for this
- // block.
- return 0;
-}
-
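
FindAvailableLoadedValue and its AreEquivalentAddressValues helper are deleted here rather than edited: in this LLVM import they move out of Transforms/Utils into the Analysis library (llvm/Analysis/Loads.h in later trees; the destination is not part of this diff, so treat it as an assumption). Their typical consumer is redundant-load elimination, roughly:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"   // assumed new home of the helper
#include "llvm/Instructions.h"
using namespace llvm;

// Try to replace LI with a value already available in its own block.
static bool zapRedundantLoad(LoadInst *LI, AliasAnalysis *AA) {
  BasicBlock::iterator ScanFrom = LI;     // scan begins just before LI
  if (Value *V = FindAvailableLoadedValue(LI->getOperand(0), LI->getParent(),
                                          ScanFrom, /*MaxInstsToScan=*/6,
                                          AA)) {
    LI->replaceAllUsesWith(V);            // reuse the earlier load/store value
    LI->eraseFromParent();
    return true;
  }
  return false;
}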
diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 8c25ad1..26f53c0 100644
--- a/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -106,11 +106,12 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
// If AllowIdenticalEdges is true, then we allow this edge to be considered
// non-critical iff all preds come from TI's block.
while (I != E) {
- if (*I != FirstPred)
+ const BasicBlock *P = *I;
+ if (P != FirstPred)
return true;
// Note: leave this as is until no one ever compiles with either gcc 4.0.1
// or Xcode 2. This seems to work around the pred_iterator assert in PR 2207
- E = pred_end(*I);
+ E = pred_end(P);
++I;
}
return false;
@@ -277,11 +278,13 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
OtherPreds.push_back(PN->getIncomingBlock(i));
} else {
for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB);
- I != E; ++I)
- if (*I != NewBB)
- OtherPreds.push_back(*I);
+ I != E; ++I) {
+ BasicBlock *P = *I;
+ if (P != NewBB)
+ OtherPreds.push_back(P);
+ }
}
-
+
bool NewBBDominatesDestBB = true;
// Should we update DominatorTree information?
@@ -400,11 +403,13 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
bool HasPredOutsideOfLoop = false;
BasicBlock *Exit = ExitBlocks[i];
for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit);
- I != E; ++I)
- if (TIL->contains(*I))
- Preds.push_back(*I);
+ I != E; ++I) {
+ BasicBlock *P = *I;
+ if (TIL->contains(P))
+ Preds.push_back(P);
else
HasPredOutsideOfLoop = true;
+ }
// If there are any preds not in the loop, we'll need to split
// the edges. The Preds.empty() check is needed because a block
// may appear multiple times in the list. We can't use
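
The edits above only hoist *I into a named local (the comments blame a pred_iterator assert under old compilers), but the surrounding logic is the textbook critical-edge test: an edge is critical when its source has several successors and its destination has several predecessors. Distilled, ignoring the AllowIdenticalEdges refinement:

#include "llvm/InstrTypes.h"
#include "llvm/Support/CFG.h"
using namespace llvm;

// Edge TI->getSuccessor(SuccNum) is critical iff the source block has more
// than one successor and the destination has more than one predecessor.
static bool isCriticalEdgeSimple(TerminatorInst *TI, unsigned SuccNum) {
  if (TI->getNumSuccessors() == 1)
    return false;                 // sole outgoing edge: never critical
  BasicBlock *Dest = TI->getSuccessor(SuccNum);
  pred_iterator I = pred_begin(Dest), E = pred_end(Dest);
  ++I;                            // Dest has at least one pred (TI's block)
  return I != E;                  // a second pred makes the edge critical
}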
diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp
index 767fa3a..7a9d007 100644
--- a/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -69,6 +69,31 @@ Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
return CI;
}
+/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
+ IRBuilder<> &B, const TargetData *TD) {
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ AttributeWithIndex AWI[3];
+ AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
+ AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
+ AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
+ Attribute::NoUnwind);
+
+ LLVMContext &Context = B.GetInsertBlock()->getContext();
+ Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI, 3),
+ B.getInt32Ty(),
+ B.getInt8PtrTy(),
+ B.getInt8PtrTy(),
+ TD->getIntPtrType(Context), NULL);
+ CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
+ CastToCStr(Ptr2, B), Len, "strncmp");
+
+ if (const Function *F = dyn_cast<Function>(StrNCmp->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+
+ return CI;
+}
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
@@ -112,10 +137,10 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
Value *llvm::EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align,
bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
- const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
- Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
Dst = CastToCStr(Dst, B);
Src = CastToCStr(Src, B);
+ const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
+ Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
return B.CreateCall5(MemCpy, Dst, Src, Len,
ConstantInt::get(B.getInt32Ty(), Align),
ConstantInt::get(B.getInt1Ty(), isVolatile));
@@ -395,11 +420,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- EmitMemCpy(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
1, false, B, TD);
- replaceCall(CI->getOperand(1));
+ replaceCall(CI->getArgOperand(0));
return true;
}
return false;
@@ -418,11 +443,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ EmitMemMove(CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
1, false, B, TD);
- replaceCall(CI->getOperand(1));
+ replaceCall(CI->getArgOperand(0));
return true;
}
return false;
@@ -436,12 +461,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- Value *Val = B.CreateIntCast(CI->getOperand(2), B.getInt8Ty(),
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(),
false);
- EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), false, B, TD);
- replaceCall(CI->getOperand(1));
+ EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), false, B, TD);
+ replaceCall(CI->getArgOperand(0));
return true;
}
return false;
@@ -462,8 +487,8 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
// st[rp]cpy_chk call which may fail at runtime if the size is too long.
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying.
- if (isFoldable(3, 2, true)) {
- Value *Ret = EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B, TD,
+ if (isFoldable(2 + CallInst::ArgOffset, 1 + CallInst::ArgOffset, true)) {
+ Value *Ret = EmitStrCpy(CI->getArgOperand(0), CI->getArgOperand(1), B, TD,
Name.substr(2, 6));
replaceCall(Ret);
return true;
@@ -479,10 +504,10 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
!FT->getParamType(2)->isIntegerTy() ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- Value *Ret = EmitStrNCpy(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), B, TD, Name.substr(2, 7));
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), B, TD, Name.substr(2, 7));
replaceCall(Ret);
return true;
}
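
Two things happen in this file: EmitStrNCmp is added, and the EmitMemCpy hunk fixes an ordering bug (the pointer operands are now cast to i8* before the intrinsic's argument types are computed from them). A hedged usage sketch for the new emitter; the folding condition is illustrative, not the optimizer's real one:

#include "llvm/Constants.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

// Narrow "strcmp(A, B)" to "strncmp(A, B, N)" when only the first N bytes
// can differ; N is a caller-supplied bound here (an assumption).
static Value *foldToStrNCmp(Value *A, Value *B2, uint64_t N,
                            IRBuilder<> &B, const TargetData *TD) {
  LLVMContext &Ctx = B.GetInsertBlock()->getContext();
  Value *Len = ConstantInt::get(TD->getIntPtrType(Ctx), N);
  return EmitStrNCmp(A, B2, Len, B, TD);   // emits the strncmp call
}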
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index 6d4fe4b..1dcfd57 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -32,7 +32,7 @@ using namespace llvm;
// CloneBasicBlock - See comments in Cloning.h
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
const Twine &NameSuffix, Function *F,
ClonedCodeInfo *CodeInfo) {
BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
@@ -47,7 +47,7 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[II] = NewInst; // Add instruction map to value.
+ VMap[II] = NewInst; // Add instruction map to value.
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
@@ -72,7 +72,7 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
// ArgMap values.
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
assert(NameSuffix && "NameSuffix cannot be null!");
@@ -80,17 +80,17 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
#ifndef NDEBUG
for (Function::const_arg_iterator I = OldFunc->arg_begin(),
E = OldFunc->arg_end(); I != E; ++I)
- assert(ValueMap.count(I) && "No mapping from source argument specified!");
+ assert(VMap.count(I) && "No mapping from source argument specified!");
#endif
// Clone any attributes.
if (NewFunc->arg_size() == OldFunc->arg_size())
NewFunc->copyAttributesFrom(OldFunc);
else {
- //Some arguments were deleted with the ValueMap. Copy arguments one by one
+ //Some arguments were deleted with the VMap. Copy arguments one by one
for (Function::const_arg_iterator I = OldFunc->arg_begin(),
E = OldFunc->arg_end(); I != E; ++I)
- if (Argument* Anew = dyn_cast<Argument>(ValueMap[I]))
+ if (Argument* Anew = dyn_cast<Argument>(VMap[I]))
Anew->addAttr( OldFunc->getAttributes()
.getParamAttributes(I->getArgNo() + 1));
NewFunc->setAttributes(NewFunc->getAttributes()
@@ -111,43 +111,43 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
const BasicBlock &BB = *BI;
// Create a new basic block and copy instructions into it!
- BasicBlock *CBB = CloneBasicBlock(&BB, ValueMap, NameSuffix, NewFunc,
+ BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc,
CodeInfo);
- ValueMap[&BB] = CBB; // Add basic block mapping.
+ VMap[&BB] = CBB; // Add basic block mapping.
if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
Returns.push_back(RI);
}
// Loop over all of the instructions in the function, fixing up operand
- // references as we go. This uses ValueMap to do all the hard work.
+ // references as we go. This uses VMap to do all the hard work.
//
- for (Function::iterator BB = cast<BasicBlock>(ValueMap[OldFunc->begin()]),
+ for (Function::iterator BB = cast<BasicBlock>(VMap[OldFunc->begin()]),
BE = NewFunc->end(); BB != BE; ++BB)
// Loop over all instructions, fixing each one as we find it...
for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II)
- RemapInstruction(II, ValueMap);
+ RemapInstruction(II, VMap);
}
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
-/// in the ValueMap are changed to refer to their mapped value instead of the
-/// original one. If any of the arguments to the function are in the ValueMap,
-/// the arguments are deleted from the resultant function. The ValueMap is
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
/// updated to include mappings from all of the instructions and basicblocks in
/// the function from their old to new values.
///
Function *llvm::CloneFunction(const Function *F,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
ClonedCodeInfo *CodeInfo) {
std::vector<const Type*> ArgTypes;
// The user might be deleting arguments to the function by specifying them in
- // the ValueMap. If so, we need to not add the arguments to the arg ty vector
+ // the VMap. If so, we need to not add the arguments to the arg ty vector
//
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
- if (ValueMap.count(I) == 0) // Haven't mapped the argument to anything yet?
+ if (VMap.count(I) == 0) // Haven't mapped the argument to anything yet?
ArgTypes.push_back(I->getType());
// Create a new function type...
@@ -161,13 +161,13 @@ Function *llvm::CloneFunction(const Function *F,
Function::arg_iterator DestI = NewF->arg_begin();
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
- if (ValueMap.count(I) == 0) { // Is this argument preserved?
+ if (VMap.count(I) == 0) { // Is this argument preserved?
DestI->setName(I->getName()); // Copy the name over...
- ValueMap[I] = DestI++; // Add mapping to ValueMap
+ VMap[I] = DestI++; // Add mapping to VMap
}
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
- CloneFunctionInto(NewF, F, ValueMap, Returns, "", CodeInfo);
+ CloneFunctionInto(NewF, F, VMap, Returns, "", CodeInfo);
return NewF;
}
@@ -179,19 +179,19 @@ namespace {
struct PruningFunctionCloner {
Function *NewFunc;
const Function *OldFunc;
- DenseMap<const Value*, Value*> &ValueMap;
+ ValueToValueMapTy &VMap;
SmallVectorImpl<ReturnInst*> &Returns;
const char *NameSuffix;
ClonedCodeInfo *CodeInfo;
const TargetData *TD;
public:
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
- DenseMap<const Value*, Value*> &valueMap,
+ ValueToValueMapTy &valueMap,
SmallVectorImpl<ReturnInst*> &returns,
const char *nameSuffix,
ClonedCodeInfo *codeInfo,
const TargetData *td)
- : NewFunc(newFunc), OldFunc(oldFunc), ValueMap(valueMap), Returns(returns),
+ : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap), Returns(returns),
NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
}
@@ -202,7 +202,7 @@ namespace {
public:
/// ConstantFoldMappedInstruction - Constant fold the specified instruction,
- /// mapping its operands through ValueMap if they are available.
+ /// mapping its operands through VMap if they are available.
Constant *ConstantFoldMappedInstruction(const Instruction *I);
};
}
@@ -211,7 +211,7 @@ namespace {
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
std::vector<const BasicBlock*> &ToClone){
- Value *&BBEntry = ValueMap[BB];
+ Value *&BBEntry = VMap[BB];
// Have we already cloned this block?
if (BBEntry) return;
@@ -230,7 +230,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If this instruction constant folds, don't bother cloning the instruction,
// instead, just add the constant to the value map.
if (Constant *C = ConstantFoldMappedInstruction(II)) {
- ValueMap[II] = C;
+ VMap[II] = C;
continue;
}
@@ -238,7 +238,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[II] = NewInst; // Add instruction map to value.
+ VMap[II] = NewInst; // Add instruction map to value.
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
@@ -258,12 +258,12 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
// Or is a known constant in the caller...
if (Cond == 0)
- Cond = dyn_cast_or_null<ConstantInt>(ValueMap[BI->getCondition()]);
+ Cond = dyn_cast_or_null<ConstantInt>(VMap[BI->getCondition()]);
// Constant fold to uncond branch!
if (Cond) {
BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
- ValueMap[OldTI] = BranchInst::Create(Dest, NewBB);
+ VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
}
@@ -272,10 +272,10 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If switching on a value known constant in the caller.
ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
if (Cond == 0) // Or known constant after constant prop in the callee...
- Cond = dyn_cast_or_null<ConstantInt>(ValueMap[SI->getCondition()]);
+ Cond = dyn_cast_or_null<ConstantInt>(VMap[SI->getCondition()]);
if (Cond) { // Constant fold to uncond branch!
BasicBlock *Dest = SI->getSuccessor(SI->findCaseValue(Cond));
- ValueMap[OldTI] = BranchInst::Create(Dest, NewBB);
+ VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
}
@@ -286,7 +286,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
if (OldTI->hasName())
NewInst->setName(OldTI->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[OldTI] = NewInst; // Add instruction map to value.
+ VMap[OldTI] = NewInst; // Add instruction map to value.
// Recursively clone any reachable successor blocks.
const TerminatorInst *TI = BB->getTerminator();
@@ -307,13 +307,13 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
}
/// ConstantFoldMappedInstruction - Constant fold the specified instruction,
-/// mapping its operands through ValueMap if they are available.
+/// mapping its operands through VMap if they are available.
Constant *PruningFunctionCloner::
ConstantFoldMappedInstruction(const Instruction *I) {
SmallVector<Constant*, 8> Ops;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Constant *Op = dyn_cast_or_null<Constant>(MapValue(I->getOperand(i),
- ValueMap)))
+ VMap)))
Ops.push_back(Op);
else
return 0; // All operands not constant!
@@ -363,7 +363,7 @@ static MDNode *UpdateInlinedAtInfo(MDNode *InsnMD, MDNode *TheCallMD) {
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix,
ClonedCodeInfo *CodeInfo,
@@ -374,10 +374,10 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
#ifndef NDEBUG
for (Function::const_arg_iterator II = OldFunc->arg_begin(),
E = OldFunc->arg_end(); II != E; ++II)
- assert(ValueMap.count(II) && "No mapping from source argument specified!");
+ assert(VMap.count(II) && "No mapping from source argument specified!");
#endif
- PruningFunctionCloner PFC(NewFunc, OldFunc, ValueMap, Returns,
+ PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, Returns,
NameSuffix, CodeInfo, TD);
// Clone the entry block, and anything recursively reachable from it.
@@ -397,14 +397,14 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVector<const PHINode*, 16> PHIToResolve;
for (Function::const_iterator BI = OldFunc->begin(), BE = OldFunc->end();
BI != BE; ++BI) {
- BasicBlock *NewBB = cast_or_null<BasicBlock>(ValueMap[BI]);
+ BasicBlock *NewBB = cast_or_null<BasicBlock>(VMap[BI]);
if (NewBB == 0) continue; // Dead block.
// Add the new block to the new function.
NewFunc->getBasicBlockList().push_back(NewBB);
// Loop over all of the instructions in the block, fixing up operand
- // references as we go. This uses ValueMap to do all the hard work.
+ // references as we go. This uses VMap to do all the hard work.
//
BasicBlock::iterator I = NewBB->begin();
@@ -455,7 +455,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
I->setMetadata(DbgKind, 0);
}
}
- RemapInstruction(I, ValueMap);
+ RemapInstruction(I, VMap);
}
}
@@ -465,19 +465,19 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
const PHINode *OPN = PHIToResolve[phino];
unsigned NumPreds = OPN->getNumIncomingValues();
const BasicBlock *OldBB = OPN->getParent();
- BasicBlock *NewBB = cast<BasicBlock>(ValueMap[OldBB]);
+ BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);
// Map operands for blocks that are live and remove operands for blocks
// that are dead.
for (; phino != PHIToResolve.size() &&
PHIToResolve[phino]->getParent() == OldBB; ++phino) {
OPN = PHIToResolve[phino];
- PHINode *PN = cast<PHINode>(ValueMap[OPN]);
+ PHINode *PN = cast<PHINode>(VMap[OPN]);
for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
if (BasicBlock *MappedBlock =
- cast_or_null<BasicBlock>(ValueMap[PN->getIncomingBlock(pred)])) {
+ cast_or_null<BasicBlock>(VMap[PN->getIncomingBlock(pred)])) {
Value *InVal = MapValue(PN->getIncomingValue(pred),
- ValueMap);
+ VMap);
assert(InVal && "Unknown input value?");
PN->setIncomingValue(pred, InVal);
PN->setIncomingBlock(pred, MappedBlock);
@@ -531,15 +531,15 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
while ((PN = dyn_cast<PHINode>(I++))) {
Value *NV = UndefValue::get(PN->getType());
PN->replaceAllUsesWith(NV);
- assert(ValueMap[OldI] == PN && "ValueMap mismatch");
- ValueMap[OldI] = NV;
+ assert(VMap[OldI] == PN && "VMap mismatch");
+ VMap[OldI] = NV;
PN->eraseFromParent();
++OldI;
}
}
// NOTE: We cannot eliminate single entry phi nodes here, because of
- // ValueMap. Single entry phi nodes can have multiple ValueMap entries
- // pointing at them. Thus, deleting one would require scanning the ValueMap
+ // VMap. Single entry phi nodes can have multiple VMap entries
+ // pointing at them. Thus, deleting one would require scanning the VMap
// to update any entries in it that would require that. This would be
// really slow.
}
@@ -548,14 +548,14 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// and zap unconditional fall-through branches. This happen all the time when
// specializing code: code specialization turns conditional branches into
// uncond branches, and this code folds them.
- Function::iterator I = cast<BasicBlock>(ValueMap[&OldFunc->getEntryBlock()]);
+ Function::iterator I = cast<BasicBlock>(VMap[&OldFunc->getEntryBlock()]);
while (I != NewFunc->end()) {
BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
if (!BI || BI->isConditional()) { ++I; continue; }
// Note that we can't eliminate uncond branches if the destination has
// single-entry PHI nodes. Eliminating the single-entry phi nodes would
- // require scanning the ValueMap to update any entries that point to the phi
+ // require scanning the VMap to update any entries that point to the phi
// node.
BasicBlock *Dest = BI->getSuccessor(0);
if (!Dest->getSinglePredecessor() || isa<PHINode>(Dest->begin())) {
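
The change in this file is mechanical (DenseMap<const Value*, Value*> becomes the ValueToValueMapTy typedef; see the ValueMapper.h entry in the diffstat), but the calling convention it preserves is worth spelling out: the map seeds argument replacements on the way in and collects old-to-new mappings on the way out. A sketch; note that CloneFunction deliberately does not insert the clone into any module, so the caller does:

#include "llvm/Function.h"
#include "llvm/Module.h"
#include "llvm/Transforms/Utils/Cloning.h"
using namespace llvm;

// Clone F, substituting Replacement for one argument; the pre-mapped
// argument is dropped from the clone's signature (see the comment above).
static Function *cloneWithArgReplaced(Function *F, Argument *Old,
                                      Value *Replacement) {
  ValueToValueMapTy VMap;
  VMap[Old] = Replacement;              // seed: this argument disappears
  Function *NewF = CloneFunction(F, VMap, /*CodeInfo=*/0);
  // VMap now also maps every old block and instruction to its clone.
  F->getParent()->getFunctionList().push_back(NewF);
  return NewF;
}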
diff --git a/lib/Transforms/Utils/CloneLoop.cpp b/lib/Transforms/Utils/CloneLoop.cpp
index 38928dc..551b630 100644
--- a/lib/Transforms/Utils/CloneLoop.cpp
+++ b/lib/Transforms/Utils/CloneLoop.cpp
@@ -15,7 +15,6 @@
#include "llvm/BasicBlock.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
-#include "llvm/ADT/DenseMap.h"
using namespace llvm;
@@ -23,13 +22,13 @@ using namespace llvm;
/// CloneDominatorInfo - Clone basicblock's dominator tree and, if available,
/// dominance info. It is expected that basic block is already cloned.
static void CloneDominatorInfo(BasicBlock *BB,
- DenseMap<const Value *, Value *> &ValueMap,
+ ValueMap<const Value *, Value *> &VMap,
DominatorTree *DT,
DominanceFrontier *DF) {
assert (DT && "DominatorTree is not available");
- DenseMap<const Value *, Value*>::iterator BI = ValueMap.find(BB);
- assert (BI != ValueMap.end() && "BasicBlock clone is missing");
+ ValueMap<const Value *, Value*>::iterator BI = VMap.find(BB);
+ assert (BI != VMap.end() && "BasicBlock clone is missing");
BasicBlock *NewBB = cast<BasicBlock>(BI->second);
// NewBB already got dominator info.
@@ -43,11 +42,11 @@ static void CloneDominatorInfo(BasicBlock *BB,
// NewBB's dominator is either BB's dominator or BB's dominator's clone.
BasicBlock *NewBBDom = BBDom;
- DenseMap<const Value *, Value*>::iterator BBDomI = ValueMap.find(BBDom);
- if (BBDomI != ValueMap.end()) {
+ ValueMap<const Value *, Value*>::iterator BBDomI = VMap.find(BBDom);
+ if (BBDomI != VMap.end()) {
NewBBDom = cast<BasicBlock>(BBDomI->second);
if (!DT->getNode(NewBBDom))
- CloneDominatorInfo(BBDom, ValueMap, DT, DF);
+ CloneDominatorInfo(BBDom, VMap, DT, DF);
}
DT->addNewBlock(NewBB, NewBBDom);
@@ -60,8 +59,8 @@ static void CloneDominatorInfo(BasicBlock *BB,
for (DominanceFrontier::DomSetType::iterator I = S.begin(), E = S.end();
I != E; ++I) {
BasicBlock *DB = *I;
- DenseMap<const Value*, Value*>::iterator IDM = ValueMap.find(DB);
- if (IDM != ValueMap.end())
+ ValueMap<const Value*, Value*>::iterator IDM = VMap.find(DB);
+ if (IDM != VMap.end())
NewDFSet.insert(cast<BasicBlock>(IDM->second));
else
NewDFSet.insert(DB);
@@ -71,10 +70,10 @@ static void CloneDominatorInfo(BasicBlock *BB,
}
}
-/// CloneLoop - Clone Loop. Clone dominator info. Populate ValueMap
+/// CloneLoop - Clone Loop. Clone dominator info. Populate VMap
/// using old blocks to new blocks mapping.
Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
- DenseMap<const Value *, Value *> &ValueMap, Pass *P) {
+ ValueMap<const Value *, Value *> &VMap, Pass *P) {
DominatorTree *DT = NULL;
DominanceFrontier *DF = NULL;
@@ -104,8 +103,8 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- BasicBlock *NewBB = CloneBasicBlock(BB, ValueMap, ".clone");
- ValueMap[BB] = NewBB;
+ BasicBlock *NewBB = CloneBasicBlock(BB, VMap, ".clone");
+ VMap[BB] = NewBB;
if (P)
LPM->cloneBasicBlockSimpleAnalysis(BB, NewBB, L);
NewLoop->addBasicBlockToLoop(NewBB, LI->getBase());
@@ -117,7 +116,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- CloneDominatorInfo(BB, ValueMap, DT, DF);
+ CloneDominatorInfo(BB, VMap, DT, DF);
}
// Process sub loops
@@ -125,7 +124,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
LoopNest.push_back(*I);
} while (!LoopNest.empty());
- // Remap instructions to reference operands from ValueMap.
+ // Remap instructions to reference operands from VMap.
for(SmallVector<BasicBlock *, 16>::iterator NBItr = NewBlocks.begin(),
NBE = NewBlocks.end(); NBItr != NBE; ++NBItr) {
BasicBlock *NB = *NBItr;
@@ -135,8 +134,8 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (unsigned index = 0, num_ops = Insn->getNumOperands();
index != num_ops; ++index) {
Value *Op = Insn->getOperand(index);
- DenseMap<const Value *, Value *>::iterator OpItr = ValueMap.find(Op);
- if (OpItr != ValueMap.end())
+ ValueMap<const Value *, Value *>::iterator OpItr = VMap.find(Op);
+ if (OpItr != VMap.end())
Insn->setOperand(index, OpItr->second);
}
}
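
CloneDominatorInfo above encodes a small rule that is easy to get wrong when cloning a region: the clone's immediate dominator is the original idom's clone when that block was cloned too, and the original idom itself otherwise (for dominators outside the region). Distilled, under the same ValueMap-based bookkeeping:

#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/Dominators.h"
using namespace llvm;

// Register a cloned block in DT, given its original's immediate dominator.
static void addClonedBlockToDomTree(BasicBlock *NewBB, BasicBlock *OrigIDom,
                                    ValueMap<const Value*, Value*> &VMap,
                                    DominatorTree *DT) {
  BasicBlock *NewIDom = OrigIDom;                // idom outside cloned region
  ValueMap<const Value*, Value*>::iterator It = VMap.find(OrigIDom);
  if (It != VMap.end())
    NewIDom = cast<BasicBlock>(It->second);      // idom was cloned as well
  DT->addNewBlock(NewBB, NewIDom);
}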
diff --git a/lib/Transforms/Utils/CloneModule.cpp b/lib/Transforms/Utils/CloneModule.cpp
index b87c082..fc603d2 100644
--- a/lib/Transforms/Utils/CloneModule.cpp
+++ b/lib/Transforms/Utils/CloneModule.cpp
@@ -28,12 +28,12 @@ using namespace llvm;
Module *llvm::CloneModule(const Module *M) {
// Create the value map that maps things from the old module over to the new
// module.
- DenseMap<const Value*, Value*> ValueMap;
- return CloneModule(M, ValueMap);
+ ValueToValueMapTy VMap;
+ return CloneModule(M, VMap);
}
Module *llvm::CloneModule(const Module *M,
- DenseMap<const Value*, Value*> &ValueMap) {
+ ValueToValueMapTy &VMap) {
// First off, we need to create the new module...
Module *New = new Module(M->getModuleIdentifier(), M->getContext());
New->setDataLayout(M->getDataLayout());
@@ -51,7 +51,7 @@ Module *llvm::CloneModule(const Module *M,
New->addLibrary(*I);
// Loop over all of the global variables, making corresponding globals in the
- // new module. Here we add them to the ValueMap and to the new Module. We
+ // new module. Here we add them to the VMap and to the new Module. We
// don't worry about attributes or initializers, they will come later.
//
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
@@ -62,7 +62,7 @@ Module *llvm::CloneModule(const Module *M,
GlobalValue::ExternalLinkage, 0,
I->getName());
GV->setAlignment(I->getAlignment());
- ValueMap[I] = GV;
+ VMap[I] = GV;
}
// Loop over the functions in the module, making external functions as before
@@ -71,13 +71,13 @@ Module *llvm::CloneModule(const Module *M,
Function::Create(cast<FunctionType>(I->getType()->getElementType()),
GlobalValue::ExternalLinkage, I->getName(), New);
NF->copyAttributesFrom(I);
- ValueMap[I] = NF;
+ VMap[I] = NF;
}
// Loop over the aliases in the module
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I)
- ValueMap[I] = new GlobalAlias(I->getType(), GlobalAlias::ExternalLinkage,
+ VMap[I] = new GlobalAlias(I->getType(), GlobalAlias::ExternalLinkage,
I->getName(), NULL, New);
// Now that all of the things that global variable initializer can refer to
@@ -86,10 +86,10 @@ Module *llvm::CloneModule(const Module *M,
//
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
I != E; ++I) {
- GlobalVariable *GV = cast<GlobalVariable>(ValueMap[I]);
+ GlobalVariable *GV = cast<GlobalVariable>(VMap[I]);
if (I->hasInitializer())
GV->setInitializer(cast<Constant>(MapValue(I->getInitializer(),
- ValueMap)));
+ VMap)));
GV->setLinkage(I->getLinkage());
GV->setThreadLocal(I->isThreadLocal());
GV->setConstant(I->isConstant());
@@ -98,17 +98,17 @@ Module *llvm::CloneModule(const Module *M,
// Similarly, copy over function bodies now...
//
for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
- Function *F = cast<Function>(ValueMap[I]);
+ Function *F = cast<Function>(VMap[I]);
if (!I->isDeclaration()) {
Function::arg_iterator DestI = F->arg_begin();
for (Function::const_arg_iterator J = I->arg_begin(); J != I->arg_end();
++J) {
DestI->setName(J->getName());
- ValueMap[J] = DestI++;
+ VMap[J] = DestI++;
}
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
- CloneFunctionInto(F, I, ValueMap, Returns);
+ CloneFunctionInto(F, I, VMap, Returns);
}
F->setLinkage(I->getLinkage());
@@ -117,11 +117,37 @@ Module *llvm::CloneModule(const Module *M,
// And aliases
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I) {
- GlobalAlias *GA = cast<GlobalAlias>(ValueMap[I]);
+ GlobalAlias *GA = cast<GlobalAlias>(VMap[I]);
GA->setLinkage(I->getLinkage());
if (const Constant* C = I->getAliasee())
- GA->setAliasee(cast<Constant>(MapValue(C, ValueMap)));
+ GA->setAliasee(cast<Constant>(MapValue(C, VMap)));
}
-
+
+ // And named metadata....
+ for (Module::const_named_metadata_iterator I = M->named_metadata_begin(),
+ E = M->named_metadata_end(); I != E; ++I) {
+ const NamedMDNode &NMD = *I;
+ SmallVector<MDNode*, 4> MDs;
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
+ MDs.push_back(cast<MDNode>(MapValue(NMD.getOperand(i), VMap)));
+ NamedMDNode::Create(New->getContext(), NMD.getName(),
+ MDs.data(), MDs.size(), New);
+ }
+
+  // Update metadata attached to instructions.
+ for (Module::iterator MI = New->begin(), ME = New->end(); MI != ME; ++MI)
+ for (Function::iterator FI = MI->begin(), FE = MI->end();
+ FI != FE; ++FI)
+ for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
+ BI != BE; ++BI) {
+ SmallVector<std::pair<unsigned, MDNode *>, 4 > MDs;
+ BI->getAllMetadata(MDs);
+ for (SmallVector<std::pair<unsigned, MDNode *>, 4>::iterator
+ MDI = MDs.begin(), MDE = MDs.end(); MDI != MDE; ++MDI) {
+ Value *MappedValue = MapValue(MDI->second, VMap);
+ if (MDI->second != MappedValue && MappedValue)
+ BI->setMetadata(MDI->first, cast<MDNode>(MappedValue));
+ }
+ }
return New;
}
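
The named-metadata and instruction-metadata loops are the substantive addition in this file: before this change a cloned module kept MDNodes pointing at values in the old module. The per-instruction step in isolation (a sketch of the same three moves: read, remap through VMap, reattach):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Instructions.h"
#include "llvm/Metadata.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

// Remap all metadata attached to I so it refers to cloned values.
static void remapInstMetadata(Instruction *I, ValueToValueMapTy &VMap) {
  SmallVector<std::pair<unsigned, MDNode*>, 4> MDs;
  I->getAllMetadata(MDs);                        // (kind-id, node) pairs
  for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
    Value *Mapped = MapValue(MDs[i].second, VMap);
    if (Mapped && Mapped != MDs[i].second)       // only reattach on change
      I->setMetadata(MDs[i].first, cast<MDNode>(Mapped));
  }
}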
diff --git a/lib/Transforms/Utils/DemoteRegToStack.cpp b/lib/Transforms/Utils/DemoteRegToStack.cpp
index c908b4a..8e82a02 100644
--- a/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -35,7 +35,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
I.eraseFromParent();
return 0;
}
-
+
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
@@ -46,7 +46,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Slot = new AllocaInst(I.getType(), 0, I.getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Change all of the users of the instruction to read from the stack slot
// instead.
while (!I.use_empty()) {
@@ -67,7 +67,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Value *&V = Loads[PN->getIncomingBlock(i)];
if (V == 0) {
// Insert the load into the predecessor block
- V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
+ V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
}
PN->setIncomingValue(i, V);
@@ -110,8 +110,8 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
/// The phi node is deleted and it returns the pointer to the alloca inserted.
AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
if (P->use_empty()) {
- P->eraseFromParent();
- return 0;
+ P->eraseFromParent();
+ return 0;
}
// Create a stack slot to hold the value.
@@ -124,23 +124,23 @@ AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
Slot = new AllocaInst(P->getType(), 0, P->getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Iterate over each operand, insert store in each predecessor.
for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) {
- assert(II->getParent() != P->getIncomingBlock(i) &&
+ assert(II->getParent() != P->getIncomingBlock(i) &&
"Invoke edge not supported yet"); II=II;
}
- new StoreInst(P->getIncomingValue(i), Slot,
+ new StoreInst(P->getIncomingValue(i), Slot,
P->getIncomingBlock(i)->getTerminator());
}
-
+
// Insert load in place of the phi and replace all uses.
Value *V = new LoadInst(Slot, P->getName()+".reload", P);
P->replaceAllUsesWith(V);
-
+
// Delete phi.
P->eraseFromParent();
-
+
return Slot;
}
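
Every hunk in this file is whitespace-only (trailing blanks stripped, indentation straightened); the demotion logic itself is untouched. For orientation, a pass drives it like this sketch: collect first, then demote, since DemotePHIToStack replaces each phi with a load:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Instructions.h"
#include "llvm/Transforms/Utils/Local.h" // declares Demote*ToStack (assumed)
using namespace llvm;

// Demote every phi in BB to a stack slot (reg2mem style).
static void demoteAllPhis(BasicBlock *BB) {
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
    Phis.push_back(cast<PHINode>(I));      // snapshot before rewriting
  for (unsigned i = 0, e = Phis.size(); i != e; ++i)
    DemotePHIToStack(Phis[i], /*AllocaPoint=*/0); // stores in preds, reload here
}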
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index 91390bc..598e7d2 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -63,7 +63,8 @@ static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
// Next, create the new invoke instruction, inserting it at the end
// of the old basic block.
- SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
+ ImmutableCallSite CS(CI);
+ SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
InvokeInst *II =
InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
InvokeArgs.begin(), InvokeArgs.end(),
@@ -169,7 +170,7 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
Function::iterator FirstNewBlock,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
InlineFunctionInfo &IFI) {
CallGraph &CG = *IFI.CG;
const Function *Caller = CS.getInstruction()->getParent()->getParent();
@@ -192,9 +193,9 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
for (; I != E; ++I) {
const Value *OrigCall = I->first;
- DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
+ ValueMap<const Value*, Value*>::iterator VMI = VMap.find(OrigCall);
// Only copy the edge if the call was inlined!
- if (VMI == ValueMap.end() || VMI->second == 0)
+ if (VMI == VMap.end() || VMI->second == 0)
continue;
// If the call was inlined, but then constant folded, there is no edge to
@@ -285,8 +286,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
ClonedCodeInfo InlinedFunctionInfo;
Function::iterator FirstNewBlock;
- { // Scope to destroy ValueMap after cloning.
- DenseMap<const Value*, Value*> ValueMap;
+ { // Scope to destroy VMap after cloning.
+ ValueMap<const Value*, Value*> VMap;
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
@@ -351,16 +352,20 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// Uses of the argument in the function should use our new alloca
// instead.
ActualArg = NewAlloca;
+
+ // Calls that we inline may use the new alloca, so we need to clear
+ // their 'tail' flags.
+ MustClearTailCallFlags = true;
}
- ValueMap[I] = ActualArg;
+ VMap[I] = ActualArg;
}
// We want the inliner to prune the code as it copies. We would LOVE to
// have no dead or constant instructions leftover after inlining occurs
// (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do.
- CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
+ CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, Returns, ".i",
&InlinedFunctionInfo, IFI.TD, TheCall);
// Remember the first block that is newly cloned over.
@@ -368,7 +373,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// Update the callgraph if requested.
if (IFI.CG)
- UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, IFI);
+ UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
}
// If there are any alloca instructions in the block that used to be the entry
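
Two independent fixes ride along here: the invoke-argument vector is now built from an ImmutableCallSite (again insulating the code from the callee operand's position), and inlining a byval argument now forces tail-call flags to be cleared, since inlined calls may use the new alloca. For context, the inliner's entry point is driven like this (sketch; the InlineFunctionInfo constructor arguments are an assumption about this era's API):

#include "llvm/Instructions.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Cloning.h"
using namespace llvm;

// Inline a single call, with no call-graph or TargetData updates requested.
static bool inlineOneCall(CallInst *CI) {
  InlineFunctionInfo IFI(/*CG=*/0, /*TD=*/0);
  return InlineFunction(CallSite(CI), IFI);   // true iff the call was inlined
}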
diff --git a/lib/Transforms/Utils/LCSSA.cpp b/lib/Transforms/Utils/LCSSA.cpp
index df6e603..e90c30b 100644
--- a/lib/Transforms/Utils/LCSSA.cpp
+++ b/lib/Transforms/Utils/LCSSA.cpp
@@ -190,14 +190,15 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
UI != E; ++UI) {
- BasicBlock *UserBB = cast<Instruction>(*UI)->getParent();
- if (PHINode *PN = dyn_cast<PHINode>(*UI))
+ User *U = *UI;
+ BasicBlock *UserBB = cast<Instruction>(U)->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(U))
UserBB = PN->getIncomingBlock(UI);
if (InstBB != UserBB && !inLoop(UserBB))
UsesToRewrite.push_back(&UI.getUse());
}
-
+
// If there are no uses outside the loop, exit with no change.
if (UsesToRewrite.empty()) return false;
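
The LCSSA hunk keeps a subtle rule intact while hoisting the user into a local: for a phi user, the block that matters is the incoming block of that particular use, not the phi's parent. That is what makes the in-loop/out-of-loop test correct. Condensed:

#include "llvm/Instructions.h"
using namespace llvm;

// Return the block where one particular use of a value takes effect.
static BasicBlock *blockOfUse(Value::use_iterator UI) {
  User *U = *UI;
  if (PHINode *PN = dyn_cast<PHINode>(U))
    return PN->getIncomingBlock(UI);   // the use lives on the incoming edge
  return cast<Instruction>(U)->getParent();
}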
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index d03f7a6..0b48a8f 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -35,111 +35,6 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
-// Local analysis.
-//
-
-/// getUnderlyingObjectWithOffset - Strip off up to MaxLookup GEPs and
-/// bitcasts to get back to the underlying object being addressed, keeping
-/// track of the offset in bytes from the GEPs relative to the result.
-/// This is closely related to Value::getUnderlyingObject but is located
-/// here to avoid making VMCore depend on TargetData.
-static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
- uint64_t &ByteOffset,
- unsigned MaxLookup = 6) {
- if (!V->getType()->isPointerTy())
- return V;
- for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
- if (!GEP->hasAllConstantIndices())
- return V;
- SmallVector<Value*, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
- ByteOffset += TD->getIndexedOffset(GEP->getPointerOperandType(),
- &Indices[0], Indices.size());
- V = GEP->getPointerOperand();
- } else if (Operator::getOpcode(V) == Instruction::BitCast) {
- V = cast<Operator>(V)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (GA->mayBeOverridden())
- return V;
- V = GA->getAliasee();
- } else {
- return V;
- }
- assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- }
- return V;
-}
-
-/// isSafeToLoadUnconditionally - Return true if we know that executing a load
-/// from this value cannot trap. If it is not obviously safe to load from the
-/// specified pointer, we do a quick local scan of the basic block containing
-/// ScanFrom, to determine if the address is already accessed.
-bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD) {
- uint64_t ByteOffset = 0;
- Value *Base = V;
- if (TD)
- Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
-
- const Type *BaseType = 0;
- unsigned BaseAlign = 0;
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
- // An alloca is safe to load from as long as it is suitably aligned.
- BaseType = AI->getAllocatedType();
- BaseAlign = AI->getAlignment();
- } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(Base)) {
- // Global variables are safe to load from but their size cannot be
- // guaranteed if they are overridden.
- if (!isa<GlobalAlias>(GV) && !GV->mayBeOverridden()) {
- BaseType = GV->getType()->getElementType();
- BaseAlign = GV->getAlignment();
- }
- }
-
- if (BaseType && BaseType->isSized()) {
- if (TD && BaseAlign == 0)
- BaseAlign = TD->getPrefTypeAlignment(BaseType);
-
- if (Align <= BaseAlign) {
- if (!TD)
- return true; // Loading directly from an alloca or global is OK.
-
- // Check if the load is within the bounds of the underlying object.
- const PointerType *AddrTy = cast<PointerType>(V->getType());
- uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
- if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
- (Align == 0 || (ByteOffset % Align) == 0))
- return true;
- }
- }
-
- // Otherwise, be a little bit aggressive by scanning the local block where we
- // want to check to see if the pointer is already being loaded or stored
- // from/to. If so, the previous load or store would have already trapped,
- // so there is no harm doing an extra load (also, CSE will later eliminate
- // the load entirely).
- BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
-
- while (BBI != E) {
- --BBI;
-
- // If we see a free or a call which may write to memory (i.e. which might do
- // a free) the pointer could be marked invalid.
- if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
- !isa<DbgInfoIntrinsic>(BBI))
- return false;
-
- if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
- if (LI->getOperand(0) == V) return true;
- } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
- if (SI->getOperand(1) == V) return true;
- }
- }
- return false;
-}
-
-
-//===----------------------------------------------------------------------===//
// Local constant propagation.
//
@@ -537,9 +432,11 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
// Use that list to make another list of common predecessors of BB and Succ
BlockSet CommonPreds;
for (pred_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
- PI != PE; ++PI)
- if (BBPreds.count(*PI))
- CommonPreds.insert(*PI);
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (BBPreds.count(P))
+ CommonPreds.insert(P);
+ }
// Shortcut, if there are no common predecessors, merging is always safe
if (CommonPreds.empty())
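
As with FindAvailableLoadedValue earlier, isSafeToLoadUnconditionally (and its getUnderlyingObjectWithOffset helper) leaves Transforms/Utils for the Analysis library in this import. Its job is to license speculative loads; a sketch of that use, with the signature taken from the deleted code. Note the helper only proves the load cannot trap; a real pass must separately ensure no intervening store changes the loaded value:

#include "llvm/Analysis/Loads.h"   // assumed new home of the helper
#include "llvm/Instructions.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

// Hoist LI to InsertPt if dereferencing its pointer there cannot trap.
static bool speculateLoad(LoadInst *LI, Instruction *InsertPt,
                          const TargetData *TD) {
  if (!isSafeToLoadUnconditionally(LI->getOperand(0), InsertPt,
                                   LI->getAlignment(), TD))
    return false;
  LI->moveBefore(InsertPt);   // non-trapping: safe to execute unconditionally
  return true;
}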
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index 1ef3c32..4f4edf3 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -142,9 +142,11 @@ ReprocessLoop:
if (*BB == L->getHeader()) continue;
SmallPtrSet<BasicBlock *, 4> BadPreds;
- for (pred_iterator PI = pred_begin(*BB), PE = pred_end(*BB); PI != PE; ++PI)
- if (!L->contains(*PI))
- BadPreds.insert(*PI);
+ for (pred_iterator PI = pred_begin(*BB), PE = pred_end(*BB); PI != PE; ++PI){
+ BasicBlock *P = *PI;
+ if (!L->contains(P))
+ BadPreds.insert(P);
+ }
// Delete each unique out-of-loop (and thus dead) predecessor.
for (SmallPtrSet<BasicBlock *, 4>::iterator I = BadPreds.begin(),
@@ -192,7 +194,7 @@ ReprocessLoop:
if (!Preheader) {
Preheader = InsertPreheaderForLoop(L);
if (Preheader) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
}
@@ -215,7 +217,7 @@ ReprocessLoop:
// allowed.
if (!L->contains(*PI)) {
if (RewriteLoopExitBlock(L, ExitBlock)) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
break;
@@ -244,7 +246,7 @@ ReprocessLoop:
// loop header.
LoopLatch = InsertUniqueBackedgeBlock(L, Preheader);
if (LoopLatch) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
}
@@ -353,16 +355,18 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
// Compute the set of predecessors of the loop that are not in the loop.
SmallVector<BasicBlock*, 8> OutsideBlocks;
for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
- PI != PE; ++PI)
- if (!L->contains(*PI)) { // Coming in from outside the loop?
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (!L->contains(P)) { // Coming in from outside the loop?
// If the loop is branched to from an indirect branch, we won't
// be able to fully transform the loop, because it prohibits
// edge splitting.
- if (isa<IndirectBrInst>((*PI)->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return 0;
// Keep track of it.
- OutsideBlocks.push_back(*PI);
+ OutsideBlocks.push_back(P);
}
+ }
// Split out the loop pre-header.
BasicBlock *NewBB =
@@ -385,13 +389,15 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
/// outside of the loop.
BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
SmallVector<BasicBlock*, 8> LoopBlocks;
- for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I)
- if (L->contains(*I)) {
+ for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I) {
+ BasicBlock *P = *I;
+ if (L->contains(P)) {
// Don't do this if the loop is exited via an indirect branch.
- if (isa<IndirectBrInst>((*I)->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return 0;
- LoopBlocks.push_back(*I);
+ LoopBlocks.push_back(P);
}
+ }
assert(!LoopBlocks.empty() && "No edges coming in from outside the loop?");
BasicBlock *NewBB = SplitBlockPredecessors(Exit, &LoopBlocks[0],
@@ -559,10 +565,11 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
// Determine which blocks should stay in L and which should be moved out to
// the Outer loop now.
std::set<BasicBlock*> BlocksInL;
- for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); PI!=E; ++PI)
- if (DT->dominates(Header, *PI))
- AddBlockAndPredsToSet(*PI, Header, BlocksInL);
-
+ for (pred_iterator PI=pred_begin(Header), E = pred_end(Header); PI!=E; ++PI) {
+ BasicBlock *P = *PI;
+ if (DT->dominates(Header, P))
+ AddBlockAndPredsToSet(P, Header, BlocksInL);
+ }
// Scan all of the loop children of L, moving them to OuterLoop if they are
// not part of the inner loop.
@@ -610,8 +617,10 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
// Figure out which basic blocks contain back-edges to the loop header.
std::vector<BasicBlock*> BackedgeBlocks;
- for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I)
- if (*I != Preheader) BackedgeBlocks.push_back(*I);
+ for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
+ BasicBlock *P = *I;
+ if (P != Preheader) BackedgeBlocks.push_back(P);
+ }
// Create and insert the new backedge block...
BasicBlock *BEBlock = BasicBlock::Create(Header->getContext(),
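
Besides the counter-style change (NumInserted++ to ++NumInserted) and the pred_iterator hoisting, note the guard this file repeats: preheader insertion and exit-block rewriting both bail out when a predecessor ends in an indirectbr, because such edges cannot be split. The shape of that collection step, condensed:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Instructions.h"
#include "llvm/Support/CFG.h"
using namespace llvm;

// Gather Header's predecessors outside L; fail if any edge is unsplittable.
static bool collectOutsidePreds(Loop *L, BasicBlock *Header,
                                SmallVectorImpl<BasicBlock*> &Out) {
  for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
       PI != PE; ++PI) {
    BasicBlock *P = *PI;
    if (L->contains(P))
      continue;                                   // backedge: stays in place
    if (isa<IndirectBrInst>(P->getTerminator()))
      return false;                               // cannot split this edge
    Out.push_back(P);
  }
  return true;
}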
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index 84fd1eb..e0e07e7 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -37,13 +37,13 @@ STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
/// RemapInstruction - Convert the instruction operands from referencing the
-/// current values into those specified by ValueMap.
+/// current values into those specified by VMap.
static inline void RemapInstruction(Instruction *I,
- DenseMap<const Value *, Value*> &ValueMap) {
+ ValueMap<const Value *, Value*> &VMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
- DenseMap<const Value *, Value*>::iterator It = ValueMap.find(Op);
- if (It != ValueMap.end())
+ ValueMap<const Value *, Value*>::iterator It = VMap.find(Op);
+ if (It != VMap.end())
I->setOperand(op, It->second);
}
}
@@ -183,7 +183,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
// For the first iteration of the loop, we should use the precloned values for
// PHI nodes. Insert associations now.
- typedef DenseMap<const Value*, Value*> ValueToValueMapTy;
+ typedef ValueMap<const Value*, Value*> ValueToValueMapTy;
ValueToValueMapTy LastValueMap;
std::vector<PHINode*> OrigPHINode;
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
@@ -205,26 +205,26 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
for (std::vector<BasicBlock*>::iterator BB = LoopBlocks.begin(),
E = LoopBlocks.end(); BB != E; ++BB) {
- ValueToValueMapTy ValueMap;
- BasicBlock *New = CloneBasicBlock(*BB, ValueMap, "." + Twine(It));
+ ValueToValueMapTy VMap;
+ BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
Header->getParent()->getBasicBlockList().push_back(New);
// Loop over all of the PHI nodes in the block, changing them to use the
// incoming values from the previous block.
if (*BB == Header)
for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
- PHINode *NewPHI = cast<PHINode>(ValueMap[OrigPHINode[i]]);
+ PHINode *NewPHI = cast<PHINode>(VMap[OrigPHINode[i]]);
Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
if (Instruction *InValI = dyn_cast<Instruction>(InVal))
if (It > 1 && L->contains(InValI))
InVal = LastValueMap[InValI];
- ValueMap[OrigPHINode[i]] = InVal;
+ VMap[OrigPHINode[i]] = InVal;
New->getInstList().erase(NewPHI);
}
// Update our running map of newest clones
LastValueMap[*BB] = New;
- for (ValueToValueMapTy::iterator VI = ValueMap.begin(), VE = ValueMap.end();
+ for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
VI != VE; ++VI)
LastValueMap[VI->first] = VI->second;
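[Note: the unroller's per-iteration cloning step, sketched under the post-patch types; BB, Header, It, and LastValueMap are the variables from the hunk above:

  ValueToValueMapTy VMap;                       // now a ValueMap
  BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
  Header->getParent()->getBasicBlockList().push_back(New);
  // VMap now maps every instruction of *BB to its clone; fold it into the
  // running LastValueMap so later iterations see the newest copies.
  for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
       VI != VE; ++VI)
    LastValueMap[VI->first] = VI->second;]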
diff --git a/lib/Transforms/Utils/LowerInvoke.cpp b/lib/Transforms/Utils/LowerInvoke.cpp
index 0ed8c72..2696e69 100644
--- a/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/lib/Transforms/Utils/LowerInvoke.cpp
@@ -45,6 +45,7 @@
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
@@ -62,10 +63,7 @@ static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
namespace {
class LowerInvoke : public FunctionPass {
// Used for both models.
- Constant *WriteFn;
Constant *AbortFn;
- Value *AbortMessage;
- unsigned AbortMessageLength;
// Used for expensive EH support.
const Type *JBLinkTy;
@@ -92,10 +90,8 @@ namespace {
}
private:
- void createAbortMessage(Module *M);
- void writeAbortMessage(Instruction *IB);
bool insertCheapEHSupport(Function &F);
- void splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes);
+ void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*>&Invokes);
void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
AllocaInst *InvokeNum, AllocaInst *StackPtr,
SwitchInst *CatchSwitch);
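[Note: taking SmallVectorImpl<InvokeInst*>& rather than a concrete SmallVector lets callers pick any inline size without changing the callee's signature. A sketch of the idiom; consume is a hypothetical name:

  void consume(SmallVectorImpl<InvokeInst*> &Invokes);  // size-agnostic callee

  SmallVector<InvokeInst*, 16> Invokes;  // inline size chosen by the caller
  consume(Invokes);                      // binds to the SmallVectorImpl base

This is why the new include of llvm/ADT/SmallVector.h appears at the top of the file.]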
@@ -123,7 +119,6 @@ FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
bool LowerInvoke::doInitialization(Module &M) {
const Type *VoidPtrTy =
Type::getInt8PtrTy(M.getContext());
- AbortMessage = 0;
if (useExpensiveEHSupport) {
// Insert a type for the linked list of jump buffers.
unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
@@ -175,68 +170,14 @@ bool LowerInvoke::doInitialization(Module &M) {
// We need the 'write' and 'abort' functions for both models.
AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
(Type *)0);
-#if 0 // "write" is Unix-specific.. code is going away soon anyway.
- WriteFn = M.getOrInsertFunction("write", Type::VoidTy, Type::Int32Ty,
- VoidPtrTy, Type::Int32Ty, (Type *)0);
-#else
- WriteFn = 0;
-#endif
return true;
}
-void LowerInvoke::createAbortMessage(Module *M) {
- if (useExpensiveEHSupport) {
- // The abort message for expensive EH support tells the user that the
- // program 'unwound' without an 'invoke' instruction.
- Constant *Msg =
- ConstantArray::get(M->getContext(),
- "ERROR: Exception thrown, but not caught!\n");
- AbortMessageLength = Msg->getNumOperands()-1; // don't include \0
-
- GlobalVariable *MsgGV = new GlobalVariable(*M, Msg->getType(), true,
- GlobalValue::InternalLinkage,
- Msg, "abortmsg");
- std::vector<Constant*> GEPIdx(2,
- Constant::getNullValue(Type::getInt32Ty(M->getContext())));
- AbortMessage = ConstantExpr::getGetElementPtr(MsgGV, &GEPIdx[0], 2);
- } else {
- // The abort message for cheap EH support tells the user that EH is not
- // enabled.
- Constant *Msg =
- ConstantArray::get(M->getContext(),
- "Exception handler needed, but not enabled."
- "Recompile program with -enable-correct-eh-support.\n");
- AbortMessageLength = Msg->getNumOperands()-1; // don't include \0
-
- GlobalVariable *MsgGV = new GlobalVariable(*M, Msg->getType(), true,
- GlobalValue::InternalLinkage,
- Msg, "abortmsg");
- std::vector<Constant*> GEPIdx(2, Constant::getNullValue(
- Type::getInt32Ty(M->getContext())));
- AbortMessage = ConstantExpr::getGetElementPtr(MsgGV, &GEPIdx[0], 2);
- }
-}
-
-
-void LowerInvoke::writeAbortMessage(Instruction *IB) {
-#if 0
- if (AbortMessage == 0)
- createAbortMessage(IB->getParent()->getParent()->getParent());
-
- // These are the arguments we WANT...
- Value* Args[3];
- Args[0] = ConstantInt::get(Type::Int32Ty, 2);
- Args[1] = AbortMessage;
- Args[2] = ConstantInt::get(Type::Int32Ty, AbortMessageLength);
- (new CallInst(WriteFn, Args, 3, "", IB))->setTailCall();
-#endif
-}
-
bool LowerInvoke::insertCheapEHSupport(Function &F) {
bool Changed = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
- std::vector<Value*> CallArgs(II->op_begin(), II->op_end() - 3);
+ SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
// Insert a normal call instruction...
CallInst *NewCall = CallInst::Create(II->getCalledValue(),
CallArgs.begin(), CallArgs.end(),
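[Note: the cheap-EH path copies an invoke's call arguments and re-issues them as a plain call. A sketch assuming this revision's invoke operand layout, in which the last three operands are not call arguments; II is the InvokeInst* from the hunk:

  SmallVector<Value*, 16> CallArgs(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                       CallArgs.begin(), CallArgs.end(),
                                       "", II);

Using SmallVector here avoids a heap allocation for the common case of short argument lists, which is the point of the std::vector replacements throughout this file.]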
@@ -257,9 +198,6 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
++NumInvokes; Changed = true;
} else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- // Insert a new call to write(2, AbortMessage, AbortMessageLength);
- writeAbortMessage(UI);
-
// Insert a call to abort()
CallInst::Create(AbortFn, "", UI)->setTailCall();
@@ -320,7 +258,7 @@ void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());
// Insert a normal call instruction.
- std::vector<Value*> CallArgs(II->op_begin(), II->op_end() - 3);
+ SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
CallInst *NewCall = CallInst::Create(II->getCalledValue(),
CallArgs.begin(), CallArgs.end(), "",
II);
@@ -349,7 +287,7 @@ static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
// across the unwind edge. This process also splits all critical edges
// coming out of invoke's.
void LowerInvoke::
-splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
+splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
// First step, split all critical edges from invoke instructions.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
InvokeInst *II = Invokes[i];
@@ -371,16 +309,33 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- // This is always a no-op cast because we're casting AI to AI->getType() so
- // src and destination types are identical. BitCast is the only possibility.
- CastInst *NC = new BitCastInst(
- AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
- AI->replaceAllUsesWith(NC);
- // Normally its is forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However, we're
- // replacing it here with the same value it was constructed with to simply
- // make NC its user.
- NC->setOperand(0, AI);
+ const Type *Ty = AI->getType();
+ // Aggregate types can't be cast, but are legal argument types, so we have
+ // to handle them differently. We use an extract/insert pair as a
+ // lightweight method to achieve the same goal.
+ if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+ Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
+ Instruction *NI = InsertValueInst::Create(AI, EI, 0);
+ NI->insertAfter(EI);
+ AI->replaceAllUsesWith(NI);
+ // Set the operand of the instructions back to the AllocaInst.
+ EI->setOperand(0, AI);
+ NI->setOperand(0, AI);
+ } else {
+ // This is always a no-op cast because we're casting AI to AI->getType()
+ // so src and destination types are identical. BitCast is the only
+ // possibility.
+ CastInst *NC = new BitCastInst(
+ AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
+ AI->replaceAllUsesWith(NC);
+ // Set the operand of the cast instruction back to the AllocaInst.
+ // Normally it's forbidden to replace a CastInst's operand because it
+ // could cause the opcode to reflect an illegal conversion. However,
+ // we're replacing it here with the same value it was constructed with.
+ // We do this because the above replaceAllUsesWith() clobbered the
+ // operand, but we want this one to remain.
+ NC->setOperand(0, AI);
+ }
}
// Finally, scan the code looking for instructions with bad live ranges.
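[Note: an aggregate-typed argument cannot be bitcast to its own type (bitcast is defined only for non-aggregate first-class values), so the hunk above manufactures a no-op definition with an extractvalue/insertvalue pair instead. Sketch, with AI and AfterAllocaInsertPt as in the hunk:

  Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsertPt);
  Instruction *NI = InsertValueInst::Create(AI, EI, 0);
  NI->insertAfter(EI);
  AI->replaceAllUsesWith(NI);  // this also rewrote EI's and NI's operands...
  EI->setOperand(0, AI);       // ...so point the pair back at AI
  NI->setOperand(0, AI);

The pair computes exactly AI's value, so every former use of AI now has a defining instruction after the allocas, which is what the live-range splitting needs.]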
@@ -402,7 +357,7 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
continue;
// Avoid iterator invalidation by copying users to a temporary vector.
- std::vector<Instruction*> Users;
+ SmallVector<Instruction*,16> Users;
for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
UI != E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
@@ -452,9 +407,9 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
}
bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
- std::vector<ReturnInst*> Returns;
- std::vector<UnwindInst*> Unwinds;
- std::vector<InvokeInst*> Invokes;
+ SmallVector<ReturnInst*,16> Returns;
+ SmallVector<UnwindInst*,16> Unwinds;
+ SmallVector<InvokeInst*,16> Invokes;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
@@ -502,12 +457,11 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
new AllocaInst(JBLinkTy, 0, Align,
"jblink", F.begin()->begin());
- std::vector<Value*> Idx;
- Idx.push_back(Constant::getNullValue(Type::getInt32Ty(F.getContext())));
- Idx.push_back(ConstantInt::get(Type::getInt32Ty(F.getContext()), 1));
- OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx.begin(), Idx.end(),
+ Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
+ ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
+ OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
"OldBuf",
- EntryBB->getTerminator());
+ EntryBB->getTerminator());
// Copy the JBListHead to the alloca.
Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
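[Note: the GEP index list is now a two-element stack array passed as a pointer range instead of a heap-backed std::vector. Sketch, reusing the names from the hunk (F, JmpBuf, EntryBB; OldJmpBufPtr is declared elsewhere in the function):

  Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                   ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
  OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
                                           "OldBuf",
                                           EntryBB->getTerminator());

&Idx[0] and &Idx[2] form the half-open begin/end range this Create overload expects; keeping Idx as a mutable array also lets the later hunks overwrite Idx[0] and Idx[1] in place and reuse it.]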
@@ -552,7 +506,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
"setjmp.cont");
Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
- Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx.begin(), Idx.end(),
+ Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
"TheJmpBuf",
EntryBB->getTerminator());
JmpBufPtr = new BitCastInst(JmpBufPtr,
@@ -605,24 +559,20 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// Create the block to do the longjmp.
// Get a pointer to the jmpbuf and longjmp.
- std::vector<Value*> Idx;
- Idx.push_back(Constant::getNullValue(Type::getInt32Ty(F.getContext())));
- Idx.push_back(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0));
- Idx[0] = GetElementPtrInst::Create(BufPtr, Idx.begin(), Idx.end(), "JmpBuf",
+ Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
+ ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
+ Idx[0] = GetElementPtrInst::Create(BufPtr, &Idx[0], &Idx[2], "JmpBuf",
UnwindBlock);
Idx[0] = new BitCastInst(Idx[0],
Type::getInt8PtrTy(F.getContext()),
"tmp", UnwindBlock);
Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
- CallInst::Create(LongJmpFn, Idx.begin(), Idx.end(), "", UnwindBlock);
+ CallInst::Create(LongJmpFn, &Idx[0], &Idx[2], "", UnwindBlock);
new UnreachableInst(F.getContext(), UnwindBlock);
// Set up the term block ("throw without a catch").
new UnreachableInst(F.getContext(), TermBlock);
- // Insert a new call to write(2, AbortMessage, AbortMessageLength);
- writeAbortMessage(TermBlock->getTerminator());
-
// Insert a call to abort()
CallInst::Create(AbortFn, "",
TermBlock->getTerminator())->setTailCall();
diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 13f0a28..c0de193 100644
--- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -69,11 +69,12 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
// Only allow direct and non-volatile loads and stores...
for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE; ++UI) // Loop over all of the uses of the alloca
- if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ UI != UE; ++UI) { // Loop over all of the uses of the alloca
+ const User *U = *UI;
+ if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (LI->isVolatile())
return false;
- } else if (const StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getOperand(0) == AI)
return false; // Don't allow a store OF the AI, only INTO the AI.
if (SI->isVolatile())
@@ -81,6 +82,7 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
} else {
return false;
}
+ }
return true;
}
@@ -603,9 +605,8 @@ ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
// To determine liveness, we must iterate through the predecessors of blocks
// where the def is live. Blocks are added to the worklist if we need to
// check their predecessors. Start with all the using blocks.
- SmallVector<BasicBlock*, 64> LiveInBlockWorklist;
- LiveInBlockWorklist.insert(LiveInBlockWorklist.end(),
- Info.UsingBlocks.begin(), Info.UsingBlocks.end());
+ SmallVector<BasicBlock*, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
+ Info.UsingBlocks.end());
// If any of the using blocks is also a definition block, check to see if the
// definition occurs before or after the use. If it happens before the use,
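[Note: the worklist above is now filled with SmallVector's range constructor rather than default construction followed by insert. Both forms are equivalent; the constructor is simply more direct:

  SmallVector<BasicBlock*, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                   Info.UsingBlocks.end());]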
@@ -897,6 +898,9 @@ void PromoteMem2Reg::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
// Propagate any debug metadata from the store onto the dbg.value.
if (MDNode *SIMD = SI->getMetadata("dbg"))
DbgVal->setMetadata("dbg", SIMD);
+ // Otherwise propagate debug metadata from dbg.declare.
+ else if (MDNode *MD = DDI->getMetadata("dbg"))
+ DbgVal->setMetadata("dbg", MD);
}
// QueuePhiNode - queues a phi-node to be added to a basic-block for a specific
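[Note: with the new else-if, the dbg.value intrinsic still receives a source location when the store carries no "dbg" attachment: the location falls back to the dbg.declare's own metadata. Consolidated sketch, with SI, DDI, and DbgVal as in the hunk:

  if (MDNode *SIMD = SI->getMetadata("dbg"))      // prefer the store's location
    DbgVal->setMetadata("dbg", SIMD);
  else if (MDNode *MD = DDI->getMetadata("dbg"))  // fall back to dbg.declare's
    DbgVal->setMetadata("dbg", MD);]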
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 9f2209d..fd3ed3e 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1513,17 +1513,19 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// Okay, we're going to insert the PHI node. Since PBI is not the only
// predecessor, compute the PHI'd conditional value for all of the preds.
// Any predecessor where the condition is not computable we keep symbolic.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- if ((PBI = dyn_cast<BranchInst>((*PI)->getTerminator())) &&
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if ((PBI = dyn_cast<BranchInst>(P->getTerminator())) &&
PBI != BI && PBI->isConditional() &&
PBI->getCondition() == BI->getCondition() &&
PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
bool CondIsTrue = PBI->getSuccessor(0) == BB;
NewPN->addIncoming(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
- CondIsTrue), *PI);
+ CondIsTrue), P);
} else {
- NewPN->addIncoming(BI->getCondition(), *PI);
+ NewPN->addIncoming(BI->getCondition(), P);
}
+ }
BI->setCondition(NewPN);
return true;
@@ -1697,10 +1699,11 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
SmallVector<BasicBlock*, 8> UncondBranchPreds;
SmallVector<BranchInst*, 8> CondBranchPreds;
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- TerminatorInst *PTI = (*PI)->getTerminator();
+ BasicBlock *P = *PI;
+ TerminatorInst *PTI = P->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) {
if (BI->isUnconditional())
- UncondBranchPreds.push_back(*PI);
+ UncondBranchPreds.push_back(P);
else
CondBranchPreds.push_back(BI);
}
diff --git a/lib/Transforms/Utils/ValueMapper.cpp b/lib/Transforms/Utils/ValueMapper.cpp
index 87ce631..3f6a90c 100644
--- a/lib/Transforms/Utils/ValueMapper.cpp
+++ b/lib/Transforms/Utils/ValueMapper.cpp
@@ -28,7 +28,7 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM) {
// DenseMap. This includes any recursive calls to MapValue.
// Global values and non-function-local metadata do not need to be seeded into
- // the ValueMap if they are using the identity mapping.
+ // the VM if they are using the identity mapping.
if (isa<GlobalValue>(V) || isa<InlineAsm>(V) || isa<MDString>(V) ||
(isa<MDNode>(V) && !cast<MDNode>(V)->isFunctionLocal()))
return VMSlot = const_cast<Value*>(V);
@@ -45,7 +45,7 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM) {
if (isa<ConstantInt>(C) || isa<ConstantFP>(C) ||
isa<ConstantPointerNull>(C) || isa<ConstantAggregateZero>(C) ||
- isa<UndefValue>(C) || isa<MDString>(C))
+ isa<UndefValue>(C))
return VMSlot = C; // Primitive constants map directly
if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
@@ -125,11 +125,11 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM) {
}
/// RemapInstruction - Convert the instruction operands from referencing the
-/// current values into those specified by ValueMap.
+/// current values into those specified by VMap.
///
-void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &ValueMap) {
+void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap) {
for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
- Value *V = MapValue(*op, ValueMap);
+ Value *V = MapValue(*op, VMap);
assert(V && "Referenced value not in value map!");
*op = V;
}
diff --git a/lib/Transforms/Utils/ValueMapper.h b/lib/Transforms/Utils/ValueMapper.h
index d61c24c..f4ff643 100644
--- a/lib/Transforms/Utils/ValueMapper.h
+++ b/lib/Transforms/Utils/ValueMapper.h
@@ -15,12 +15,12 @@
#ifndef VALUEMAPPER_H
#define VALUEMAPPER_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
namespace llvm {
class Value;
class Instruction;
- typedef DenseMap<const Value *, Value *> ValueToValueMapTy;
+ typedef ValueMap<const Value *, Value *> ValueToValueMapTy;
Value *MapValue(const Value *V, ValueToValueMapTy &VM);
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM);
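[Note: this typedef change is the one the LoopUnroll.cpp and ValueMapper.cpp hunks above propagate. A minimal sketch of why ValueMap matters here; OldV, NewV, and ReplV are hypothetical names:

  ValueToValueMapTy VM;     // ValueMap<const Value*, Value*>
  VM[OldV] = NewV;
  OldV->replaceAllUsesWith(ReplV);
  // A ValueMap key is held through a callback value handle, so with the
  // default config the entry is re-keyed to ReplV automatically and is
  // dropped if the key Value is deleted; a plain DenseMap would keep the
  // entry keyed on the stale OldV pointer.

This keeps cloning maps coherent while other transforms mutate the IR underneath them.]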