Diffstat (limited to 'contrib/llvm/lib/IR/Instructions.cpp')
-rw-r--r--  contrib/llvm/lib/IR/Instructions.cpp  |  295
1 file changed, 183 insertions(+), 112 deletions(-)
diff --git a/contrib/llvm/lib/IR/Instructions.cpp b/contrib/llvm/lib/IR/Instructions.cpp
index 8a6b77b..9553252 100644
--- a/contrib/llvm/lib/IR/Instructions.cpp
+++ b/contrib/llvm/lib/IR/Instructions.cpp
@@ -14,14 +14,14 @@
#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
@@ -68,7 +68,7 @@ const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
return "vector select condition element type must be i1";
VectorType *ET = dyn_cast<VectorType>(Op1->getType());
- if (ET == 0)
+ if (!ET)
return "selected values for vector select must be vectors";
if (ET->getNumElements() != VT->getNumElements())
return "vector select requires selected vectors to have "
@@ -76,7 +76,7 @@ const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
} else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
return "select condition must be i1 or <n x i1>";
}
- return 0;
+ return nullptr;
}
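Note: areInvalidOperands returns a diagnostic string only when the operands are malformed and nullptr when they are fine. A minimal usage sketch, assuming Cond, TVal, FVal, and InsertPt are already in scope:
  // Validate, then build the select (names here are illustrative only).
  if (const char *Msg = SelectInst::areInvalidOperands(Cond, TVal, FVal))
    report_fatal_error(Msg);                       // operands are invalid
  Value *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);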
@@ -123,7 +123,7 @@ Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
// Nuke the last value.
- Op<-1>().set(0);
+ Op<-1>().set(nullptr);
--NumOperands;
// If the PHI node is dead, because it has zero entries, nuke it now.
@@ -164,7 +164,7 @@ Value *PHINode::hasConstantValue() const {
for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
if (ConstantValue != this)
- return 0; // Incoming values not all the same.
+ return nullptr; // Incoming values not all the same.
// The case where the first value is this PHI.
ConstantValue = getIncomingValue(i);
}
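Note: hasConstantValue now signals "no single incoming value" with nullptr. A typical caller pattern, sketched under the assumption that PN is a PHINode* in scope:
  if (Value *V = PN->hasConstantValue()) {
    PN->replaceAllUsesWith(V);   // every incoming value is V (or the PHI itself)
    PN->eraseFromParent();
  }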
@@ -180,14 +180,14 @@ Value *PHINode::hasConstantValue() const {
LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
unsigned NumReservedValues, const Twine &NameStr,
Instruction *InsertBefore)
- : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertBefore) {
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
init(PersonalityFn, 1 + NumReservedValues, NameStr);
}
LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
unsigned NumReservedValues, const Twine &NameStr,
BasicBlock *InsertAtEnd)
- : Instruction(RetTy, Instruction::LandingPad, 0, 0, InsertAtEnd) {
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
init(PersonalityFn, 1 + NumReservedValues, NameStr);
}
@@ -248,7 +248,7 @@ void LandingPadInst::growOperands(unsigned Size) {
Use::zap(OldOps, OldOps + e, true);
}
-void LandingPadInst::addClause(Value *Val) {
+void LandingPadInst::addClause(Constant *Val) {
unsigned OpNo = getNumOperands();
growOperands(1);
assert(OpNo < ReservedSpace && "Growing didn't work!");
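Note: addClause now takes a Constant, reflecting that landingpad clauses are always constants in the IR. A hedged sketch, where LP is a LandingPadInst* and TypeInfo is an assumed global holding the catch type info:
  LP->addClause(cast<Constant>(TypeInfo));   // catch clause; cast asserts constness
  LP->setCleanup(true);                      // optionally also run cleanups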
@@ -324,7 +324,7 @@ CallInst::CallInst(const CallInst &CI)
OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(),
CI.getNumOperands()) {
setAttributes(CI.getAttributes());
- setTailCall(CI.isTailCall());
+ setTailCallKind(CI.getTailCallKind());
setCallingConv(CI.getCallingConv());
std::copy(CI.op_begin(), CI.op_end(), op_begin());
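Note: copying the full tail-call kind (none/tail/musttail) rather than a single boolean matters for musttail calls. A small sketch, assuming CI is an existing CallInst*:
  CallInst *Copy = cast<CallInst>(CI->clone());
  assert(Copy->getTailCallKind() == CI->getTailCallKind() &&
         "copy constructor preserves tail/musttail");
  Copy->setTailCallKind(CallInst::TCK_MustTail);   // e.g. force musttail later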
@@ -420,8 +420,8 @@ static Instruction *createMalloc(Instruction *InsertBefore,
// prototype malloc as "void *malloc(size_t)"
MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, NULL);
PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
- CallInst *MCall = NULL;
- Instruction *Result = NULL;
+ CallInst *MCall = nullptr;
+ Instruction *Result = nullptr;
if (InsertBefore) {
MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall", InsertBefore);
Result = MCall;
@@ -458,7 +458,7 @@ Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
Value *AllocSize, Value *ArraySize,
Function * MallocF,
const Twine &Name) {
- return createMalloc(InsertBefore, NULL, IntPtrTy, AllocTy, AllocSize,
+ return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
ArraySize, MallocF, Name);
}
@@ -474,7 +474,7 @@ Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
Type *IntPtrTy, Type *AllocTy,
Value *AllocSize, Value *ArraySize,
Function *MallocF, const Twine &Name) {
- return createMalloc(NULL, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
+ return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
ArraySize, MallocF, Name);
}
@@ -492,7 +492,7 @@ static Instruction* createFree(Value* Source, Instruction *InsertBefore,
Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
// prototype free as "void free(void*)"
Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, NULL);
- CallInst* Result = NULL;
+ CallInst* Result = nullptr;
Value *PtrCast = Source;
if (InsertBefore) {
if (Source->getType() != IntPtrTy)
@@ -512,14 +512,14 @@ static Instruction* createFree(Value* Source, Instruction *InsertBefore,
/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
- return createFree(Source, InsertBefore, NULL);
+ return createFree(Source, InsertBefore, nullptr);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction* CallInst::CreateFree(Value* Source, BasicBlock *InsertAtEnd) {
- Instruction* FreeCall = createFree(Source, NULL, InsertAtEnd);
+ Instruction* FreeCall = createFree(Source, nullptr, InsertAtEnd);
assert(FreeCall && "CreateFree did not create a CallInst");
return FreeCall;
}
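Note: these helpers synthesize the malloc/free calls plus any needed casts. A minimal sketch, assuming IntPtrTy, Ty, an IntPtrTy-sized AllocSize, and InsertPt are in scope:
  Instruction *Obj = CallInst::CreateMalloc(InsertPt, IntPtrTy, Ty, AllocSize,
                                            nullptr, nullptr, "obj");
  // ... use Obj ...
  CallInst::CreateFree(Obj, InsertPt);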
@@ -699,11 +699,11 @@ BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
UnreachableInst::UnreachableInst(LLVMContext &Context,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
- 0, 0, InsertBefore) {
+ nullptr, 0, InsertBefore) {
}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
- 0, 0, InsertAtEnd) {
+ nullptr, 0, InsertAtEnd) {
}
unsigned UnreachableInst::getNumSuccessorsV() const {
@@ -732,7 +732,7 @@ BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
OperandTraits<BranchInst>::op_end(this) - 1,
1, InsertBefore) {
- assert(IfTrue != 0 && "Branch destination may not be null!");
+ assert(IfTrue && "Branch destination may not be null!");
Op<-1>() = IfTrue;
}
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
@@ -752,7 +752,7 @@ BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
OperandTraits<BranchInst>::op_end(this) - 1,
1, InsertAtEnd) {
- assert(IfTrue != 0 && "Branch destination may not be null!");
+ assert(IfTrue && "Branch destination may not be null!");
Op<-1>() = IfTrue;
}
@@ -852,7 +852,7 @@ AllocaInst::AllocaInst(Type *Ty, Value *ArraySize,
AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
Instruction *InsertBefore)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
- getAISize(Ty->getContext(), 0), InsertBefore) {
+ getAISize(Ty->getContext(), nullptr), InsertBefore) {
setAlignment(0);
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
@@ -861,7 +861,7 @@ AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
AllocaInst::AllocaInst(Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
: UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
- getAISize(Ty->getContext(), 0), InsertAtEnd) {
+ getAISize(Ty->getContext(), nullptr), InsertAtEnd) {
setAlignment(0);
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
@@ -893,7 +893,8 @@ void AllocaInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData(Log2_32(Align) + 1);
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
+ (Log2_32(Align) + 1));
assert(getAlignment() == Align && "Alignment representation error!");
}
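Note: the added masking preserves the other subclass bits (notably the inalloca flag) instead of clobbering them. A hypothetical pair of helpers mirroring the assumed encoding, where the low five bits hold Log2(Align)+1:
  unsigned encodeAlign(unsigned SubclassData, unsigned Align) {
    return (SubclassData & ~31u) | (Log2_32(Align) + 1);   // keep high bits intact
  }
  unsigned decodeAlign(unsigned SubclassData) {
    return (1u << (SubclassData & 31)) >> 1;               // 0 => no alignment set
  }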
@@ -916,7 +917,7 @@ bool AllocaInst::isStaticAlloca() const {
// Must be in the entry block.
const BasicBlock *Parent = getParent();
- return Parent == &Parent->getParent()->front();
+ return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}
//===----------------------------------------------------------------------===//
@@ -1083,7 +1084,7 @@ void StoreInst::AssertOK() {
cast<PointerType>(getOperand(1)->getType())->getElementType()
&& "Ptr must be a pointer to Val type!");
assert(!(isAtomic() && getAlignment() == 0) &&
- "Alignment required for atomic load");
+ "Alignment required for atomic store");
}
@@ -1215,12 +1216,14 @@ void StoreInst::setAlignment(unsigned Align) {
//===----------------------------------------------------------------------===//
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
- setOrdering(Ordering);
+ setSuccessOrdering(SuccessOrdering);
+ setFailureOrdering(FailureOrdering);
setSynchScope(SynchScope);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
@@ -1233,32 +1236,42 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
assert(getOperand(2)->getType() ==
cast<PointerType>(getOperand(0)->getType())->getElementType()
&& "Ptr must be a pointer to NewVal type!");
- assert(Ordering != NotAtomic &&
+ assert(SuccessOrdering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+ assert(FailureOrdering != NotAtomic &&
"AtomicCmpXchg instructions must be atomic!");
+ assert(SuccessOrdering >= FailureOrdering &&
+ "AtomicCmpXchg success ordering must be at least as strong as fail");
+ assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
+ "AtomicCmpXchg failure ordering cannot include release semantics");
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
Instruction *InsertBefore)
- : Instruction(Cmp->getType(), AtomicCmpXchg,
- OperandTraits<AtomicCmpXchgInst>::op_begin(this),
- OperandTraits<AtomicCmpXchgInst>::operands(this),
- InsertBefore) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
+ nullptr),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering Ordering,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
- : Instruction(Cmp->getType(), AtomicCmpXchg,
- OperandTraits<AtomicCmpXchgInst>::op_begin(this),
- OperandTraits<AtomicCmpXchgInst>::operands(this),
- InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
+ nullptr),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}
-
+
//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//
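Note: with this change cmpxchg carries separate success and failure orderings and yields a { value, i1 } pair. A hedged construction sketch, assuming Ptr, Expected, Desired, and InsertPt exist:
  AtomicCmpXchgInst *CAS =
      new AtomicCmpXchgInst(Ptr, Expected, Desired,
                            SequentiallyConsistent,   // success ordering
                            Monotonic,                // failure ordering
                            CrossThread, InsertPt);
  Value *Loaded  = ExtractValueInst::Create(CAS, 0, "loaded", InsertPt);
  Value *Success = ExtractValueInst::Create(CAS, 1, "success", InsertPt);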
@@ -1312,7 +1325,7 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SynchronizationScope SynchScope,
Instruction *InsertBefore)
- : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertBefore) {
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}
@@ -1320,7 +1333,7 @@ FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
- : Instruction(Type::getVoidTy(C), Fence, 0, 0, InsertAtEnd) {
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}
@@ -1358,7 +1371,7 @@ GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
PointerType *PTy = dyn_cast<PointerType>(Ptr->getScalarType());
- if (!PTy) return 0; // Type isn't a pointer type!
+ if (!PTy) return nullptr; // Type isn't a pointer type!
Type *Agg = PTy->getElementType();
// Handle the special case of the empty set index set, which is always valid.
@@ -1368,17 +1381,17 @@ static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
// If there is at least one index, the top level type must be sized, otherwise
// it cannot be 'stepped over'.
if (!Agg->isSized())
- return 0;
+ return nullptr;
unsigned CurIdx = 1;
for (; CurIdx != IdxList.size(); ++CurIdx) {
CompositeType *CT = dyn_cast<CompositeType>(Agg);
- if (!CT || CT->isPointerTy()) return 0;
+ if (!CT || CT->isPointerTy()) return nullptr;
IndexTy Index = IdxList[CurIdx];
- if (!CT->indexValid(Index)) return 0;
+ if (!CT->indexValid(Index)) return nullptr;
Agg = CT->getTypeAtIndex(Index);
}
- return CurIdx == IdxList.size() ? Agg : 0;
+ return CurIdx == IdxList.size() ? Agg : nullptr;
}
Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<Value *> IdxList) {
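Note: getIndexedType is the usual way to pre-validate GEP indices; a null result means the index list does not type-check against the pointer. A sketch assuming BasePtr and IdxList (an ArrayRef<Value*>) are in scope:
  if (!GetElementPtrInst::getIndexedType(BasePtr->getType(), IdxList))
    report_fatal_error("invalid GEP indices");
  Value *GEP = GetElementPtrInst::Create(BasePtr, IdxList, "gep", InsertPt);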
@@ -1468,7 +1481,7 @@ ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
- if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy(32))
+ if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
return false;
return true;
}
@@ -1515,7 +1528,7 @@ bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
return false;// Second operand of insertelement must be vector element type.
- if (!Index->getType()->isIntegerTy(32))
+ if (!Index->getType()->isIntegerTy())
return false; // Third operand of insertelement must be i32.
return true;
}
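Note: the index width restriction is relaxed here so that any integer type is accepted, not just i32. A small sketch assuming Vec is a vector-typed Value* and Ctx/InsertPt are in scope:
  Value *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), 3);   // i64 index now valid
  assert(ExtractElementInst::isValidOperands(Vec, Idx));
  Value *Elt = ExtractElementInst::Create(Vec, Idx, "elt", InsertPt);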
@@ -1568,7 +1581,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
// Mask must be vector of i32.
VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
- if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32))
+ if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
return false;
// Check to see if Mask is valid.
@@ -1577,11 +1590,11 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
- for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
+ for (Value *Op : MV->operands()) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
if (CI->uge(V1Size*2))
return false;
- } else if (!isa<UndefValue>(MV->getOperand(i))) {
+ } else if (!isa<UndefValue>(Op)) {
return false;
}
}
@@ -1701,8 +1714,7 @@ ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
ArrayRef<unsigned> Idxs) {
- for (unsigned CurIdx = 0; CurIdx != Idxs.size(); ++CurIdx) {
- unsigned Index = Idxs[CurIdx];
+ for (unsigned Index : Idxs) {
// We can't use CompositeType::indexValid(Index) here.
// indexValid() always returns true for arrays because getelementptr allows
// out-of-bounds indices. Since we don't allow those for extractvalue and
@@ -1711,13 +1723,13 @@ Type *ExtractValueInst::getIndexedType(Type *Agg,
// as easy to check those manually as well.
if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
if (Index >= AT->getNumElements())
- return 0;
+ return nullptr;
} else if (StructType *ST = dyn_cast<StructType>(Agg)) {
if (Index >= ST->getNumElements())
- return 0;
+ return nullptr;
} else {
// Not a valid type to index into.
- return 0;
+ return nullptr;
}
Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
@@ -2114,8 +2126,27 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}
-/// This function determines if a pair of casts can be eliminated and what
-/// opcode should be used in the elimination. This assumes that there are two
+bool CastInst::isNoopCast(const DataLayout *DL) const {
+ if (!DL) {
+ // Assume maximum pointer size.
+ return isNoopCast(Type::getInt64Ty(getContext()));
+ }
+
+ Type *PtrOpTy = nullptr;
+ if (getOpcode() == Instruction::PtrToInt)
+ PtrOpTy = getOperand(0)->getType();
+ else if (getOpcode() == Instruction::IntToPtr)
+ PtrOpTy = getType();
+
+ Type *IntPtrTy = PtrOpTy
+ ? DL->getIntPtrType(PtrOpTy)
+ : DL->getIntPtrType(getContext(), 0);
+
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
+}
+
+/// This function determines if a pair of casts can be eliminated and what
+/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
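Note: the new DataLayout-aware isNoopCast overload added above picks the correct integer width for ptrtoint/inttoptr and falls back to an i64 assumption when no layout is available. A hedged caller sketch, where DL may legitimately be null:
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->isNoopCast(DL))
      V = CI->getOperand(0);   // strip a cast that changes no bits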
@@ -2206,7 +2237,7 @@ unsigned CastInst::isEliminableCastPair(
case 3:
// No-op cast in second op implies firstOp as long as the DestTy
// is integer and we are not converting between a vector and a
- // non vector type.
+ // non-vector type.
if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
return firstOp;
return 0;
@@ -2302,18 +2333,12 @@ unsigned CastInst::isEliminableCastPair(
// Allowed, use first cast's opcode
return firstOp;
case 14:
- // FIXME: this state can be merged with (2), but the following assert
- // is useful to check the correcteness of the sequence due to semantic
- // change of bitcast.
- assert(
- SrcTy->isPtrOrPtrVectorTy() &&
- MidTy->isPtrOrPtrVectorTy() &&
- DstTy->isPtrOrPtrVectorTy() &&
- SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
- MidTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace() &&
- "Illegal bitcast, addrspacecast sequence!");
- // Allowed, use second cast's opcode
- return secondOp;
+ // bitcast, addrspacecast -> addrspacecast if the element type of
+ // bitcast's source is the same as that of addrspacecast's destination.
+ if (SrcTy->getPointerElementType() == DstTy->getPointerElementType())
+ return Instruction::AddrSpaceCast;
+ return 0;
+
case 15:
// FIXME: this state can be merged with (1), but the following assert
// is useful to check the correcteness of the sequence due to semantic
@@ -2453,11 +2478,7 @@ CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
if (Ty->isIntOrIntVectorTy())
return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
- Type *STy = S->getType();
- if (STy->getPointerAddressSpace() != Ty->getPointerAddressSpace())
- return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
-
- return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}
/// @brief Create a BitCast or a PtrToInt cast instruction
@@ -2475,14 +2496,36 @@ CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
if (Ty->isIntOrIntVectorTy())
return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
- Type *STy = S->getType();
- if (STy->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
bool isSigned, const Twine &Name,
Instruction *InsertBefore) {
assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
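Note: the new CreatePointerBitCastOrAddrSpaceCast entry points choose between bitcast and addrspacecast based on the address spaces involved. A usage sketch, assuming SrcPtr and DstPtrTy are a pointer value and pointer type in scope:
  Value *Cast = CastInst::CreatePointerBitCastOrAddrSpaceCast(SrcPtr, DstPtrTy,
                                                              "p.cast", InsertPt);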
@@ -2817,30 +2860,55 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
return false;
return SrcTy->getScalarType()->isIntegerTy() &&
DstTy->getScalarType()->isPointerTy();
- case Instruction::BitCast:
+ case Instruction::BitCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+
// BitCast implies a no-op cast of type only. No bits change.
// However, you can't cast pointers to anything but pointers.
- if (SrcTy->isPtrOrPtrVectorTy() != DstTy->isPtrOrPtrVectorTy())
+ if (!SrcPtrTy != !DstPtrTy)
return false;
- // For non pointer cases, the cast is okay if the source and destination bit
+ // For non-pointer cases, the cast is okay if the source and destination bit
// widths are identical.
- if (!SrcTy->isPtrOrPtrVectorTy())
+ if (!SrcPtrTy)
return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
- // If both are pointers then the address spaces must match and vector of
- // pointers must have the same number of elements.
- return SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
- SrcTy->isVectorTy() == DstTy->isVectorTy() &&
- (!SrcTy->isVectorTy() ||
- SrcTy->getVectorNumElements() == SrcTy->getVectorNumElements());
-
- case Instruction::AddrSpaceCast:
- return SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
- SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace() &&
- SrcTy->isVectorTy() == DstTy->isVectorTy() &&
- (!SrcTy->isVectorTy() ||
- SrcTy->getVectorNumElements() == SrcTy->getVectorNumElements());
+ // If both are pointers then the address spaces must match.
+ if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
+ return false;
+
+ // A vector of pointers must have the same number of elements.
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
+ return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
+
+ return false;
+ }
+
+ return true;
+ }
+ case Instruction::AddrSpaceCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ if (!SrcPtrTy)
+ return false;
+
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+ if (!DstPtrTy)
+ return false;
+
+ if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
+ return false;
+
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
+ return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
+
+ return false;
+ }
+
+ return true;
+ }
}
}
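Note: the reworked checks make castIsValid usable as a cheap sanity gate before building either kind of pointer cast. A hedged sketch with assumed SrcPtr/DstPtrTy:
  assert(CastInst::castIsValid(Instruction::AddrSpaceCast, SrcPtr, DstPtrTy) &&
         "operands must be pointers in different address spaces");
  CastInst *ASC = CastInst::Create(Instruction::AddrSpaceCast, SrcPtr, DstPtrTy,
                                   "asc", InsertPt);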
@@ -3307,7 +3375,7 @@ void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
- 0, 0, InsertBefore) {
+ nullptr, 0, InsertBefore) {
init(Value, Default, 2+NumCases*2);
}
@@ -3318,12 +3386,12 @@ SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
- 0, 0, InsertAtEnd) {
+ nullptr, 0, InsertAtEnd) {
init(Value, Default, 2+NumCases*2);
}
SwitchInst::SwitchInst(const SwitchInst &SI)
- : TerminatorInst(SI.getType(), Instruction::Switch, 0, 0) {
+ : TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) {
init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
NumOperands = SI.getNumOperands();
Use *OL = OperandList, *InOL = SI.OperandList;
@@ -3371,8 +3439,8 @@ void SwitchInst::removeCase(CaseIt i) {
}
// Nuke the last value.
- OL[NumOps-2].set(0);
- OL[NumOps-2+1].set(0);
+ OL[NumOps-2].set(nullptr);
+ OL[NumOps-2+1].set(nullptr);
NumOperands = NumOps-2;
}
@@ -3438,14 +3506,14 @@ void IndirectBrInst::growOperands() {
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
- 0, 0, InsertBefore) {
+ nullptr, 0, InsertBefore) {
init(Address, NumCases);
}
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
- 0, 0, InsertAtEnd) {
+ nullptr, 0, InsertAtEnd) {
init(Address, NumCases);
}
@@ -3487,7 +3555,7 @@ void IndirectBrInst::removeDestination(unsigned idx) {
OL[idx+1] = OL[NumOps-1];
// Nuke the last value.
- OL[NumOps-1].set(0);
+ OL[NumOps-1].set(nullptr);
NumOperands = NumOps-1;
}
@@ -3533,9 +3601,10 @@ InsertValueInst *InsertValueInst::clone_impl() const {
}
AllocaInst *AllocaInst::clone_impl() const {
- return new AllocaInst(getAllocatedType(),
- (Value*)getOperand(0),
- getAlignment());
+ AllocaInst *Result = new AllocaInst(getAllocatedType(),
+ (Value *)getOperand(0), getAlignment());
+ Result->setUsedWithInAlloca(isUsedWithInAlloca());
+ return Result;
}
LoadInst *LoadInst::clone_impl() const {
@@ -3552,8 +3621,10 @@ StoreInst *StoreInst::clone_impl() const {
AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
- getOrdering(), getSynchScope());
+ getSuccessOrdering(), getFailureOrdering(),
+ getSynchScope());
Result->setVolatile(isVolatile());
+ Result->setWeak(isWeak());
return Result;
}