Diffstat (limited to 'contrib/llvm/lib/Transforms')
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp |  11
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp       |   2
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp         |  41
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp        |   2
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp                        |   3
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp                 |  10
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/Local.cpp                            |   2
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp                      | 110
-rw-r--r--  contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp                |  22
9 files changed, 160 insertions(+), 43 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index af1694d..6230c00 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -330,11 +330,17 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
- case LLVMContext::MD_nonnull:
// All of these directly apply.
NewLoad->setMetadata(ID, N);
break;
+ case LLVMContext::MD_nonnull:
+ // FIXME: We should translate this into range metadata for integer types
+ // and vice versa.
+ if (NewTy->isPointerTy())
+ NewLoad->setMetadata(ID, N);
+ break;
+
case LLVMContext::MD_range:
// FIXME: It would be nice to propagate this in some way, but the type
// conversions make it hard.
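Note on the MD_nonnull FIXME above: when a pointer load is rewritten as an integer load of the same width, !nonnull could be translated into !range metadata that excludes zero instead of being dropped. A minimal sketch of that translation, assuming the 3.6-era metadata C++ API; translateNonNullToRange is a hypothetical helper, not part of this patch:

    // Hypothetical: turn !nonnull on a pointer load into the equivalent
    // !range on the replacement integer load (assumes NewTy is an integer).
    static void translateNonNullToRange(LoadInst *NewLoad) {
      auto *ITy = cast<IntegerType>(NewLoad->getType());
      // !range pairs are half-open [Lo, Hi); the wrapped range [1, 0)
      // means "any value except zero", which is what !nonnull promised.
      Metadata *Ops[] = {
          ConstantAsMetadata::get(ConstantInt::get(ITy, 1)),
          ConstantAsMetadata::get(ConstantInt::get(ITy, 0))};
      NewLoad->setMetadata(LLVMContext::MD_range,
                           MDNode::get(NewLoad->getContext(), Ops));
    }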
@@ -548,13 +554,14 @@ static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
- case LLVMContext::MD_nonnull:
// All of these directly apply.
NewStore->setMetadata(ID, N);
break;
case LLVMContext::MD_invariant_load:
+ case LLVMContext::MD_nonnull:
case LLVMContext::MD_range:
+ // These don't apply to stores.
break;
}
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 745c85a..25f1f02 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -67,7 +67,7 @@ static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
-static const uint64_t kWindowsShadowOffset32 = 1ULL << 30;
+static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const size_t kMinStackMallocSize = 1 << 6; // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
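For context: these constants feed ASan's shadow mapping, Shadow = (Addr >> Scale) + Offset, with Scale = 3 on these targets. A sketch of how an offset is consumed (illustrative names, not the pass's actual helper):

    // One shadow byte describes eight application bytes.
    static uint64_t memToShadow(uint64_t Addr, uint64_t ShadowOffset) {
      const uint64_t kShadowScale = 3;
      return (Addr >> kShadowScale) + ShadowOffset;
    }

The change moves the 32-bit Windows shadow base from 1ULL << 30 to 3ULL << 28, presumably so the shadow region lands in address space less likely to already be occupied on 32-bit Windows.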
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 5f73b89..2a3d154 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -71,9 +71,17 @@ private:
return isMachO() ? "__DATA,__llvm_prf_data" : "__llvm_prf_data";
}
+ /// Get the section name for the coverage mapping data.
+ StringRef getCoverageSection() const {
+ return isMachO() ? "__DATA,__llvm_covmap" : "__llvm_covmap";
+ }
+
/// Replace instrprof_increment with an increment of the appropriate value.
void lowerIncrement(InstrProfIncrementInst *Inc);
+ /// Set up the section and uses for coverage data and its references.
+ void lowerCoverageData(GlobalVariable *CoverageData);
+
/// Get the region counters for an increment, creating them if necessary.
///
/// If the counter array doesn't yet exist, the profile data variables
@@ -118,6 +126,10 @@ bool InstrProfiling::runOnModule(Module &M) {
lowerIncrement(Inc);
MadeChange = true;
}
+ if (GlobalVariable *Coverage = M.getNamedGlobal("__llvm_coverage_mapping")) {
+ lowerCoverageData(Coverage);
+ MadeChange = true;
+ }
if (!MadeChange)
return false;
@@ -140,6 +152,35 @@ void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
Inc->eraseFromParent();
}
+void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageData) {
+ CoverageData->setSection(getCoverageSection());
+ CoverageData->setAlignment(8);
+
+ Constant *Init = CoverageData->getInitializer();
+ // We're expecting { i32, i32, i32, i32, [n x { i8*, i32, i32 }], [m x i8] }
+ // for some n and m. If not, the frontend's given us something broken.
+ assert(Init->getNumOperands() == 6 && "bad number of fields in coverage map");
+ assert(isa<ConstantArray>(Init->getAggregateElement(4)) &&
+ "invalid function list in coverage map");
+ ConstantArray *Records = cast<ConstantArray>(Init->getAggregateElement(4));
+ for (unsigned I = 0, E = Records->getNumOperands(); I < E; ++I) {
+ Constant *Record = Records->getOperand(I);
+ Value *V = const_cast<Value *>(Record->getOperand(0))->stripPointerCasts();
+
+ assert(isa<GlobalVariable>(V) && "Missing reference to function name");
+ GlobalVariable *Name = cast<GlobalVariable>(V);
+
+ // If we have region counters for this name, we've already handled it.
+ auto It = RegionCounters.find(Name);
+ if (It != RegionCounters.end())
+ continue;
+
+ // Move the name variable to the right section.
+ Name->setSection(getNameSection());
+ Name->setAlignment(1);
+ }
+}
+
/// Get the name of a profiling variable for a particular function.
static std::string getVarName(InstrProfIncrementInst *Inc, StringRef VarName) {
auto *Arr = cast<ConstantDataArray>(Inc->getName()->getInitializer());
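For reference, here is the initializer shape that the assertions in lowerCoverageData check, restated as an illustrative C++ struct. Only the shape comes from the code above; the field names are guesses added for readability:

    struct CoverageMapping {           // { i32, i32, i32, i32, ... }
      uint32_t Header[4];              // four i32s: counts/sizes/version,
                                       // exact meaning defined by the frontend
      struct FunctionRecord {          // operand 4: [n x { i8*, i32, i32 }]
        const char *NamePtr;           // points into __llvm_prf_names
        uint32_t NameSize;
        uint32_t DataSize;
      } Records[1];                    // 'n' entries in the real constant
      uint8_t EncodedData[1];          // operand 5: [m x i8] mapping data
    };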
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 9f00d3d..d7d752f 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -631,7 +631,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
- if (MS.TrackOrigins)
+ if (MS.TrackOrigins && !SI.isAtomic())
storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
InstrumentWithCalls);
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 394b0d3d..969b9a8 100644
--- a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -480,6 +480,9 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// Ignore volatile loads.
if (!LI->isSimple()) {
LastStore = nullptr;
+ // Don't CSE across synchronization boundaries.
+ if (Inst->mayWriteToMemory())
+ ++CurrentGeneration;
continue;
}
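Why the generation bump matters: a non-simple (volatile or ordered) load can be a synchronization boundary, and values read before it must not be forwarded past it. An illustration in source terms, not taken from the patch:

    #include <atomic>

    int g;                       // set by another thread before 'flag'
    std::atomic<bool> flag;

    int consume() {
      int a = g;                                        // load #1
      while (!flag.load(std::memory_order_acquire)) {}  // non-simple load
      return a + g;  // load #2 must re-read g; reusing 'a' would be wrong
    }

Bumping CurrentGeneration at the acquire load keeps EarlyCSE from treating load #2 as available from load #1.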
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 33b5f9d..3eea3d4 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -750,6 +750,16 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// its dependence information by changing its parameter.
MD->removeInstruction(C);
+ // Update AA metadata
+ // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
+ // handled here, but combineMetadata doesn't support them yet
+ unsigned KnownIDs[] = {
+ LLVMContext::MD_tbaa,
+ LLVMContext::MD_alias_scope,
+ LLVMContext::MD_noalias,
+ };
+ combineMetadata(C, cpy, KnownIDs);
+
// Remove the memcpy.
MD->removeInstruction(cpy);
++NumMemCpyInstr;
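For context, performCallSlotOptzn rewrites the pattern sketched below; once the call writes the destination directly, the surviving instruction stands for both memory operations, so their AA metadata must be merged conservatively, which is what the new combineMetadata call does. A source-level illustration with hypothetical names:

    #include <cstring>
    struct S { int x[8]; };
    void producer(S *out);       // assumed to fully initialize *out

    void before(S *dst) {
      S tmp;
      producer(&tmp);                       // the call 'C' above
      std::memcpy(dst, &tmp, sizeof tmp);   // the memcpy 'cpy' above
    }

    void after(S *dst) {         // what the pass produces
      producer(dst);
    }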
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index 08a4b3f..2a84d7e 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -1328,6 +1328,8 @@ void llvm::combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsign
K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
break;
case LLVMContext::MD_alias_scope:
+ K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
+ break;
case LLVMContext::MD_noalias:
K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
break;
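The asymmetry being fixed here: when one instruction comes to stand for two, !alias.scope describes where the access might be, so the scope lists must be unioned (what getMostGenericAliasScope computes), while !noalias promises only hold where both instructions made them, hence MDNode::intersect. A sketch of the union semantics, assuming the 3.6 metadata API; this is not the exact upstream implementation:

    static MDNode *unionAliasScopes(MDNode *A, MDNode *B) {
      if (!A || !B)
        return nullptr; // unknown scope list: safest to drop the metadata
      SmallPtrSet<Metadata *, 8> Seen;
      SmallVector<Metadata *, 8> Ops;
      for (MDNode *N : {A, B})
        for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
          if (Seen.insert(N->getOperand(I)).second)
            Ops.push_back(N->getOperand(I));
      return MDNode::get(A->getContext(), Ops);
    }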
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 477fba4..65f2ae2 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -154,19 +154,21 @@ static Metadata *mapToSelf(ValueToValueMapTy &VM, const Metadata *MD) {
return mapToMetadata(VM, MD, const_cast<Metadata *>(MD));
}
-static Metadata *MapMetadataImpl(const Metadata *MD, ValueToValueMapTy &VM,
- RemapFlags Flags,
+static Metadata *MapMetadataImpl(const Metadata *MD,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
+ ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer);
-static Metadata *mapMetadataOp(Metadata *Op, ValueToValueMapTy &VM,
- RemapFlags Flags,
- ValueMapTypeRemapper *TypeMapper,
- ValueMaterializer *Materializer) {
+static Metadata *mapMetadataOp(Metadata *Op,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
+ ValueToValueMapTy &VM, RemapFlags Flags,
+ ValueMapTypeRemapper *TypeMapper,
+ ValueMaterializer *Materializer) {
if (!Op)
return nullptr;
if (Metadata *MappedOp =
- MapMetadataImpl(Op, VM, Flags, TypeMapper, Materializer))
+ MapMetadataImpl(Op, Cycles, VM, Flags, TypeMapper, Materializer))
return MappedOp;
// Use identity map if MappedOp is null and we can ignore missing entries.
if (Flags & RF_IgnoreMissingEntries)
@@ -180,8 +182,9 @@ static Metadata *mapMetadataOp(Metadata *Op, ValueToValueMapTy &VM,
return nullptr;
}
-static Metadata *cloneMDTuple(const MDTuple *Node, ValueToValueMapTy &VM,
- RemapFlags Flags,
+static Metadata *cloneMDTuple(const MDTuple *Node,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
+ ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer,
bool IsDistinct) {
@@ -192,41 +195,57 @@ static Metadata *cloneMDTuple(const MDTuple *Node, ValueToValueMapTy &VM,
SmallVector<Metadata *, 4> Elts;
Elts.reserve(Node->getNumOperands());
for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I)
- Elts.push_back(mapMetadataOp(Node->getOperand(I), VM, Flags, TypeMapper,
- Materializer));
+ Elts.push_back(mapMetadataOp(Node->getOperand(I), Cycles, VM, Flags,
+ TypeMapper, Materializer));
return MDTuple::get(Node->getContext(), Elts);
}
-static Metadata *cloneMDLocation(const MDLocation *Node, ValueToValueMapTy &VM,
- RemapFlags Flags,
+static Metadata *cloneMDLocation(const MDLocation *Node,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
+ ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer,
bool IsDistinct) {
return (IsDistinct ? MDLocation::getDistinct : MDLocation::get)(
Node->getContext(), Node->getLine(), Node->getColumn(),
- mapMetadataOp(Node->getScope(), VM, Flags, TypeMapper, Materializer),
- mapMetadataOp(Node->getInlinedAt(), VM, Flags, TypeMapper, Materializer));
+ mapMetadataOp(Node->getScope(), Cycles, VM, Flags, TypeMapper,
+ Materializer),
+ mapMetadataOp(Node->getInlinedAt(), Cycles, VM, Flags, TypeMapper,
+ Materializer));
}
-static Metadata *cloneMDNode(const UniquableMDNode *Node, ValueToValueMapTy &VM,
- RemapFlags Flags, ValueMapTypeRemapper *TypeMapper,
+static Metadata *cloneMDNode(const UniquableMDNode *Node,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
+ ValueToValueMapTy &VM, RemapFlags Flags,
+ ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer, bool IsDistinct) {
switch (Node->getMetadataID()) {
default:
llvm_unreachable("Invalid UniquableMDNode subclass");
#define HANDLE_UNIQUABLE_LEAF(CLASS) \
case Metadata::CLASS##Kind: \
- return clone##CLASS(cast<CLASS>(Node), VM, Flags, TypeMapper, \
+ return clone##CLASS(cast<CLASS>(Node), Cycles, VM, Flags, TypeMapper, \
Materializer, IsDistinct);
#include "llvm/IR/Metadata.def"
}
}
+static void
+trackCyclesUnderDistinct(const UniquableMDNode *Node,
+ SmallVectorImpl<UniquableMDNode *> &Cycles) {
+ // Track any cycles beneath this node.
+ for (Metadata *Op : Node->operands())
+ if (auto *N = dyn_cast_or_null<UniquableMDNode>(Op))
+ if (!N->isResolved())
+ Cycles.push_back(N);
+}
+
/// \brief Map a distinct MDNode.
///
/// Distinct nodes are not uniqued, so they must always be recreated.
static Metadata *mapDistinctNode(const UniquableMDNode *Node,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
@@ -241,9 +260,11 @@ static Metadata *mapDistinctNode(const UniquableMDNode *Node,
// Fix the operands.
for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I)
- NewMD->replaceOperandWith(I, mapMetadataOp(Node->getOperand(I), VM, Flags,
- TypeMapper, Materializer));
+ NewMD->replaceOperandWith(I,
+ mapMetadataOp(Node->getOperand(I), Cycles, VM,
+ Flags, TypeMapper, Materializer));
+ trackCyclesUnderDistinct(NewMD, Cycles);
return NewMD;
}
@@ -252,9 +273,11 @@ static Metadata *mapDistinctNode(const UniquableMDNode *Node,
std::unique_ptr<MDNodeFwdDecl> Dummy(
MDNode::getTemporary(Node->getContext(), None));
mapToMetadata(VM, Node, Dummy.get());
- Metadata *NewMD = cloneMDNode(Node, VM, Flags, TypeMapper, Materializer,
- /* IsDistinct */ true);
+ auto *NewMD = cast<UniquableMDNode>(cloneMDNode(Node, Cycles, VM, Flags,
+ TypeMapper, Materializer,
+ /* IsDistinct */ true));
Dummy->replaceAllUsesWith(NewMD);
+ trackCyclesUnderDistinct(NewMD, Cycles);
return mapToMetadata(VM, Node, NewMD);
}
@@ -263,13 +286,14 @@ static Metadata *mapDistinctNode(const UniquableMDNode *Node,
/// Check whether a uniqued node needs to be remapped (due to any operands
/// changing).
static bool shouldRemapUniquedNode(const UniquableMDNode *Node,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
// Check all operands to see if any need to be remapped.
for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I) {
Metadata *Op = Node->getOperand(I);
- if (Op != mapMetadataOp(Op, VM, Flags, TypeMapper, Materializer))
+ if (Op != mapMetadataOp(Op, Cycles, VM, Flags, TypeMapper, Materializer))
return true;
}
return false;
@@ -279,9 +303,10 @@ static bool shouldRemapUniquedNode(const UniquableMDNode *Node,
///
/// Uniqued nodes may not need to be recreated (they may map to themselves).
static Metadata *mapUniquedNode(const UniquableMDNode *Node,
- ValueToValueMapTy &VM, RemapFlags Flags,
- ValueMapTypeRemapper *TypeMapper,
- ValueMaterializer *Materializer) {
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
+ ValueToValueMapTy &VM, RemapFlags Flags,
+ ValueMapTypeRemapper *TypeMapper,
+ ValueMaterializer *Materializer) {
assert(!Node->isDistinct() && "Expected uniqued node");
// Create a dummy node in case we have a metadata cycle.
@@ -289,7 +314,8 @@ static Metadata *mapUniquedNode(const UniquableMDNode *Node,
mapToMetadata(VM, Node, Dummy);
// Check all operands to see if any need to be remapped.
- if (!shouldRemapUniquedNode(Node, VM, Flags, TypeMapper, Materializer)) {
+ if (!shouldRemapUniquedNode(Node, Cycles, VM, Flags, TypeMapper,
+ Materializer)) {
// Use an identity mapping.
mapToSelf(VM, Node);
MDNode::deleteTemporary(Dummy);
@@ -297,15 +323,17 @@ static Metadata *mapUniquedNode(const UniquableMDNode *Node,
}
// At least one operand needs remapping.
- Metadata *NewMD = cloneMDNode(Node, VM, Flags, TypeMapper, Materializer,
- /* IsDistinct */ false);
+ Metadata *NewMD =
+ cloneMDNode(Node, Cycles, VM, Flags, TypeMapper, Materializer,
+ /* IsDistinct */ false);
Dummy->replaceAllUsesWith(NewMD);
MDNode::deleteTemporary(Dummy);
return mapToMetadata(VM, Node, NewMD);
}
-static Metadata *MapMetadataImpl(const Metadata *MD, ValueToValueMapTy &VM,
- RemapFlags Flags,
+static Metadata *MapMetadataImpl(const Metadata *MD,
+ SmallVectorImpl<UniquableMDNode *> &Cycles,
+ ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
// If the value already exists in the map, use it.
@@ -345,18 +373,30 @@ static Metadata *MapMetadataImpl(const Metadata *MD, ValueToValueMapTy &VM,
return mapToSelf(VM, MD);
if (Node->isDistinct())
- return mapDistinctNode(Node, VM, Flags, TypeMapper, Materializer);
+ return mapDistinctNode(Node, Cycles, VM, Flags, TypeMapper, Materializer);
- return mapUniquedNode(Node, VM, Flags, TypeMapper, Materializer);
+ return mapUniquedNode(Node, Cycles, VM, Flags, TypeMapper, Materializer);
}
Metadata *llvm::MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
RemapFlags Flags, ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
- Metadata *NewMD = MapMetadataImpl(MD, VM, Flags, TypeMapper, Materializer);
- if (NewMD && NewMD != MD)
+ SmallVector<UniquableMDNode *, 8> Cycles;
+ Metadata *NewMD =
+ MapMetadataImpl(MD, Cycles, VM, Flags, TypeMapper, Materializer);
+
+ // Resolve cycles underneath MD.
+ if (NewMD && NewMD != MD) {
if (auto *N = dyn_cast<UniquableMDNode>(NewMD))
N->resolveCycles();
+
+ for (UniquableMDNode *N : Cycles)
+ N->resolveCycles();
+ } else {
+ // Shouldn't get unresolved cycles if nothing was remapped.
+ assert(Cycles.empty() && "Expected no unresolved cycles");
+ }
+
return NewMD;
}
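The net effect of threading Cycles through every helper: uniqued nodes that participate in cycles reachable only through a distinct node are now collected and resolved before MapMetadata returns, instead of being left as unresolved forward references. A minimal usage sketch, where RootNode stands for whatever metadata graph is being remapped:

    ValueToValueMapTy VM;
    // ... VM seeded by CloneFunctionInto or by hand ...
    Metadata *NewMD = MapMetadata(RootNode, VM, RF_None,
                                  /*TypeMapper=*/nullptr,
                                  /*Materializer=*/nullptr);
    // Every UniquableMDNode recorded in Cycles has resolveCycles() called
    // on it internally, so NewMD's graph is fully resolved on return.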
diff --git a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 4834782..bd8a4b3 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -75,6 +75,18 @@ static const unsigned MinVecRegSize = 128;
static const unsigned RecursionMaxDepth = 12;
+/// \brief Predicate for the element types that the SLP vectorizer supports.
+///
+/// The most important things to filter here are types which are invalid in
+/// LLVM vectors. We also filter target-specific types which have absolutely no
+/// meaningful vectorization path, such as x86_fp80 and ppc_fp128. This just
+/// avoids spending time checking the cost model only to find that they will
+/// inevitably be scalarized.
+static bool isValidElementType(Type *Ty) {
+ return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
+ !Ty->isPPC_FP128Ty();
+}
+
/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
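A few illustrative values for the new predicate, assuming an LLVMContext C:

    assert(isValidElementType(Type::getFloatTy(C)));     // vectorizable
    assert(isValidElementType(Type::getInt32Ty(C)));
    assert(!isValidElementType(Type::getX86_FP80Ty(C))); // filtered explicitly
    assert(!isValidElementType(VectorType::get(Type::getInt32Ty(C), 4)));
    // Aggregates are already rejected by VectorType::isValidElementType,
    // which is why the isAggregateType()/isVectorTy() checks below go away.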
@@ -208,6 +220,8 @@ static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
MD = MDNode::getMostGenericTBAA(MD, IMD);
break;
case LLVMContext::MD_alias_scope:
+ MD = MDNode::getMostGenericAliasScope(MD, IMD);
+ break;
case LLVMContext::MD_noalias:
MD = MDNode::intersect(MD, IMD);
break;
@@ -1214,7 +1228,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
Type *SrcTy = VL0->getOperand(0)->getType();
for (unsigned i = 0; i < VL.size(); ++i) {
Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
- if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
+ if (Ty != SrcTy || !isValidElementType(Ty)) {
BS.cancelScheduling(VL);
newTreeEntry(VL, false);
DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
@@ -3128,7 +3142,7 @@ unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
// Check that the pointer points to scalars.
Type *Ty = SI->getValueOperand()->getType();
- if (Ty->isAggregateType() || Ty->isVectorTy())
+ if (!isValidElementType(Ty))
continue;
// Find the base pointer.
@@ -3169,7 +3183,7 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
for (int i = 0, e = VL.size(); i < e; ++i) {
Type *Ty = VL[i]->getType();
- if (Ty->isAggregateType() || Ty->isVectorTy())
+ if (!isValidElementType(Ty))
return false;
Instruction *Inst = dyn_cast<Instruction>(VL[i]);
if (!Inst || Inst->getOpcode() != Opcode0)
@@ -3389,7 +3403,7 @@ public:
return false;
Type *Ty = B->getType();
- if (Ty->isVectorTy())
+ if (!isValidElementType(Ty))
return false;
ReductionOpcode = B->getOpcode();