path: root/contrib/llvm/include
author     dim <dim@FreeBSD.org>    2016-01-06 20:19:13 +0000
committer  dim <dim@FreeBSD.org>    2016-01-06 20:19:13 +0000
commit     e06c171d67ab436f270b15f7e364a8d8f77c01f2 (patch)
tree       b7c03c042b220d85a294b0e2e89936b631d3e6ad /contrib/llvm/include
parent     db873d7452584205dd063528dc8addbf28aa396b (diff)
parent     ff2ba393a56d9d99dcb76ceada542233db28af9a (diff)
Update llvm to trunk r256945.
Diffstat (limited to 'contrib/llvm/include')
-rw-r--r--  contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h             |   5
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstr.h                |  27
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h         |   5
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h          |   2
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h               |   4
-rw-r--r--  contrib/llvm/include/llvm/IR/CallSite.h                         |   5
-rw-r--r--  contrib/llvm/include/llvm/IR/IRBuilder.h                        |  60
-rw-r--r--  contrib/llvm/include/llvm/IR/Instructions.h                     |  21
-rw-r--r--  contrib/llvm/include/llvm/IR/IntrinsicsX86.td                   | 152
-rw-r--r--  contrib/llvm/include/llvm/IR/Metadata.h                         |  17
-rw-r--r--  contrib/llvm/include/llvm/IR/Statepoint.h                       |  56
-rw-r--r--  contrib/llvm/include/llvm/MC/SubtargetFeature.h                 |  22
-rw-r--r--  contrib/llvm/include/llvm/ProfileData/InstrProf.h               |  48
-rw-r--r--  contrib/llvm/include/llvm/ProfileData/InstrProfData.inc         |  21
-rw-r--r--  contrib/llvm/include/llvm/Support/ARMTargetParser.def           |   1
-rw-r--r--  contrib/llvm/include/llvm/Support/Program.h                     |   2
-rw-r--r--  contrib/llvm/include/llvm/Support/YAMLParser.h                  |  23
-rw-r--r--  contrib/llvm/include/llvm/TableGen/Record.h                     |  35
-rw-r--r--  contrib/llvm/include/llvm/Target/Target.td                      |   4
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLowering.h               |   6
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h |  10
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h          |   3
22 files changed, 417 insertions, 112 deletions
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 87fb3ef..493a99a 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -59,11 +59,6 @@ bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
-/// \brief Tests if a value is a call or invoke to a library function that
-/// allocates memory and never returns null (such as operator new).
-bool isOperatorNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
- bool LookThroughBitCast = false);
-
//===----------------------------------------------------------------------===//
// malloc Call Utility Functions.
//
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
index 978864e..05c9a9e0 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -97,7 +97,7 @@ private:
// of memory operands required to be precise exceeds the maximum value of
// NumMemRefs - currently 256 - we remove the operands entirely. Note also
// that this is a non-owning reference to a shared copy on write buffer owned
- // by the MachineFunction and created via MF.allocateMemRefsArray.
+ // by the MachineFunction and created via MF.allocateMemRefsArray.
mmo_iterator MemRefs;
DebugLoc debugLoc; // Source line information.
@@ -354,7 +354,7 @@ public:
mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
/// Return true if we don't have any memory operands which described the the
/// memory access done by this instruction. If this is true, calling code
- /// must be conservative.
+ /// must be conservative.
bool memoperands_empty() const { return NumMemRefs == 0; }
iterator_range<mmo_iterator> memoperands() {
@@ -774,7 +774,7 @@ public:
bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
- bool isMSInlineAsm() const {
+ bool isMSInlineAsm() const {
return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect();
}
bool isStackAligningInlineAsm() const;
@@ -1180,11 +1180,26 @@ public:
/// Assign this MachineInstr's memory reference descriptor list.
/// This does not transfer ownership.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
- MemRefs = NewMemRefs;
- NumMemRefs = uint8_t(NewMemRefsEnd - NewMemRefs);
- assert(NumMemRefs == NewMemRefsEnd - NewMemRefs && "Too many memrefs");
+ setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
}
+ /// Assign this MachineInstr's memory reference descriptor list. First
+ /// element in the pair is the begin iterator/pointer to the array; the
+ /// second is the number of MemoryOperands. This does not transfer ownership
+ /// of the underlying memory.
+ void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
+ MemRefs = NewMemRefs.first;
+ NumMemRefs = uint8_t(NewMemRefs.second);
+ assert(NumMemRefs == NewMemRefs.second &&
+ "Too many memrefs - must drop memory operands");
+ }
+
+ /// Return a set of memrefs (begin iterator, size) which conservatively
+ /// describe the memory behavior of both MachineInstrs. This is appropriate
+ /// for use when merging two MachineInstrs into one. This routine does not
+ /// modify the memrefs of the this MachineInstr.
+ std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
+
/// Clear this MachineInstr's memory reference descriptor list. This resets
/// the memrefs to their most conservative state. This should be used only
/// as a last resort since it greatly pessimizes our knowledge of the memory
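Usage sketch (not part of this patch): the pair-based setMemRefs overload is meant to be fed directly from mergeMemRefsWith() when two memory-accessing instructions are combined. NewMI, MIa and MIb below are hypothetical placeholders.

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Sketch only: give a newly combined instruction memory operands that
// conservatively cover both instructions it replaces. NewMI, MIa and MIb
// are hypothetical placeholders, not names taken from this patch.
static void transferMergedMemRefs(MachineInstr *NewMI, MachineInstr &MIa,
                                  MachineInstr &MIb) {
  // mergeMemRefsWith() yields a (begin iterator, size) pair describing the
  // combined memory behavior; the pair-taking setMemRefs() accepts it as is.
  NewMI->setMemRefs(MIa.mergeMemRefsWith(MIb));
}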
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index aa5f4b2..8fe9b28 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -162,6 +162,11 @@ public:
return *this;
}
+ const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
+ unsigned> MemOperandsRef) const {
+ MI->setMemRefs(MemOperandsRef);
+ return *this;
+ }
const MachineInstrBuilder &addOperand(const MachineOperand &MO) const {
MI->addOperand(*MF, MO);
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
index 4fbe206..4e88606 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
@@ -178,7 +178,7 @@ public:
/// register.
bool FullyDefined;
- /// Reg or ont of its aliases is read. The register may only be read
+ /// Reg or one of its aliases is read. The register may only be read
/// partially.
bool Read;
/// Reg or a super-register is read. The full register is read.
diff --git a/contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h b/contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h
index 70d558f..f6ad7a8 100644
--- a/contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h
@@ -83,7 +83,9 @@ enum class ClrHandlerType { Catch, Finally, Fault, Filter };
struct ClrEHUnwindMapEntry {
MBBOrBasicBlock Handler;
uint32_t TypeToken;
- int Parent;
+ int HandlerParentState; ///< Outer handler enclosing this entry's handler
+ int TryParentState; ///< Outer try region enclosing this entry's try region,
+ ///< treating later catches on same try as "outer"
ClrHandlerType HandlerType;
};
diff --git a/contrib/llvm/include/llvm/IR/CallSite.h b/contrib/llvm/include/llvm/IR/CallSite.h
index f4b8a8a..f7bfb47 100644
--- a/contrib/llvm/include/llvm/IR/CallSite.h
+++ b/contrib/llvm/include/llvm/IR/CallSite.h
@@ -310,6 +310,11 @@ public:
CALLSITE_DELEGATE_GETTER(hasFnAttr(A));
}
+ /// \brief Return true if this function has the given attribute.
+ bool hasFnAttr(StringRef A) const {
+ CALLSITE_DELEGATE_GETTER(hasFnAttr(A));
+ }
+
/// \brief Return true if the call or the callee has the given attribute.
bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
CALLSITE_DELEGATE_GETTER(paramHasAttr(i, A));
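Usage sketch (not from the patch): the new string overload lets string function attributes be queried without caring whether the site is a call or an invoke; the attribute key below is purely illustrative.

#include "llvm/IR/CallSite.h"
using namespace llvm;

// Sketch only: "no-nans-fp-math" is just an illustrative string attribute.
static bool siteAssumesNoNaNs(ImmutableCallSite CS) {
  return CS.hasFnAttr("no-nans-fp-math");
}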
diff --git a/contrib/llvm/include/llvm/IR/IRBuilder.h b/contrib/llvm/include/llvm/IR/IRBuilder.h
index 7fe04f2..a305054 100644
--- a/contrib/llvm/include/llvm/IR/IRBuilder.h
+++ b/contrib/llvm/include/llvm/IR/IRBuilder.h
@@ -61,9 +61,13 @@ protected:
MDNode *DefaultFPMathTag;
FastMathFlags FMF;
+ ArrayRef<OperandBundleDef> DefaultOperandBundles;
+
public:
- IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr)
- : Context(context), DefaultFPMathTag(FPMathTag), FMF() {
+ IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : Context(context), DefaultFPMathTag(FPMathTag), FMF(),
+ DefaultOperandBundles(OpBundles) {
ClearInsertionPoint();
}
@@ -538,37 +542,44 @@ class IRBuilder : public IRBuilderBase, public Inserter {
public:
IRBuilder(LLVMContext &C, const T &F, Inserter I = Inserter(),
- MDNode *FPMathTag = nullptr)
- : IRBuilderBase(C, FPMathTag), Inserter(std::move(I)), Folder(F) {}
-
- explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr)
- : IRBuilderBase(C, FPMathTag), Folder() {
- }
-
- explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr)
- : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(C, FPMathTag, OpBundles), Inserter(std::move(I)),
+ Folder(F) {}
+
+ explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(C, FPMathTag, OpBundles), Folder() {}
+
+ explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
SetInsertPoint(TheBB);
}
- explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr)
- : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
+ explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder() {
SetInsertPoint(TheBB);
}
- explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr)
- : IRBuilderBase(IP->getContext(), FPMathTag), Folder() {
+ explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(IP->getContext(), FPMathTag, OpBundles), Folder() {
SetInsertPoint(IP);
}
- IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F,
- MDNode *FPMathTag = nullptr)
- : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T &F,
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder(F) {
SetInsertPoint(TheBB, IP);
}
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
- MDNode *FPMathTag = nullptr)
- : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = None)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag, OpBundles), Folder() {
SetInsertPoint(TheBB, IP);
}
@@ -1529,8 +1540,11 @@ public:
CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
ArrayRef<OperandBundleDef> OpBundles = None,
- const Twine &Name = "") {
- return Insert(CallInst::Create(Callee, Args, OpBundles), Name);
+ const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+ CallInst *CI = CallInst::Create(Callee, Args, OpBundles);
+ if (isa<FPMathOperator>(CI))
+ CI = cast<CallInst>(AddFPMathAttributes(CI, FPMathTag, FMF));
+ return Insert(CI, Name);
}
CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
@@ -1543,7 +1557,7 @@ public:
CallInst *CreateCall(llvm::FunctionType *FTy, Value *Callee,
ArrayRef<Value *> Args, const Twine &Name = "",
MDNode *FPMathTag = nullptr) {
- CallInst *CI = CallInst::Create(FTy, Callee, Args);
+ CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
if (isa<FPMathOperator>(CI))
CI = cast<CallInst>(AddFPMathAttributes(CI, FPMathTag, FMF));
return Insert(CI, Name);
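Usage sketch (not from the patch); OpBundles is assumed to have been prepared by the caller, for example a "deopt" bundle.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Sketch only: a builder that carries default operand bundles. Calls created
// through the FunctionType-taking CreateCall pick those bundles up.
static CallInst *emitCallWithDefaultBundles(BasicBlock *BB, Function *Callee,
                                            ArrayRef<Value *> Args,
                                            ArrayRef<OperandBundleDef> OpBundles) {
  IRBuilder<> Builder(BB, /*FPMathTag=*/nullptr, OpBundles);
  return Builder.CreateCall(Callee->getFunctionType(), Callee, Args);
}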
diff --git a/contrib/llvm/include/llvm/IR/Instructions.h b/contrib/llvm/include/llvm/IR/Instructions.h
index d781c7a..aba48ca 100644
--- a/contrib/llvm/include/llvm/IR/Instructions.h
+++ b/contrib/llvm/include/llvm/IR/Instructions.h
@@ -3550,6 +3550,11 @@ public:
return hasFnAttrImpl(A);
}
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(StringRef A) const {
+ return hasFnAttrImpl(A);
+ }
+
/// \brief Determine whether the call or the callee has the given attributes.
bool paramHasAttr(unsigned i, Attribute::AttrKind A) const;
@@ -3734,7 +3739,19 @@ private:
unsigned getNumSuccessorsV() const override;
void setSuccessorV(unsigned idx, BasicBlock *B) override;
- bool hasFnAttrImpl(Attribute::AttrKind A) const;
+ template <typename AttrKind> bool hasFnAttrImpl(AttrKind A) const {
+ if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A))
+ return true;
+
+ // Operand bundles override attributes on the called function, but don't
+ // override attributes directly present on the invoke instruction.
+ if (isFnAttrDisallowedByOpBundle(A))
+ return false;
+
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, A);
+ return false;
+ }
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
@@ -3966,6 +3983,8 @@ public:
/// point to the added handler.
void addHandler(BasicBlock *Dest);
+ void removeHandler(handler_iterator HI);
+
unsigned getNumSuccessors() const { return getNumOperands() - 1; }
BasicBlock *getSuccessor(unsigned Idx) const {
assert(Idx < getNumSuccessors() &&
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsX86.td b/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
index 18390f8..54bcbd8 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -33,6 +33,19 @@ let TargetPrefix = "x86" in {
}
//===----------------------------------------------------------------------===//
+// FLAGS.
+let TargetPrefix = "x86" in {
+ def int_x86_flags_read_u32 : GCCBuiltin<"__builtin_ia32_readeflags_u32">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+ def int_x86_flags_read_u64 : GCCBuiltin<"__builtin_ia32_readeflags_u64">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+ def int_x86_flags_write_u32 : GCCBuiltin<"__builtin_ia32_writeeflags_u32">,
+ Intrinsic<[], [llvm_i32_ty], []>;
+ def int_x86_flags_write_u64 : GCCBuiltin<"__builtin_ia32_writeeflags_u64">,
+ Intrinsic<[], [llvm_i64_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
// Read Time Stamp Counter.
let TargetPrefix = "x86" in {
def int_x86_rdtsc : GCCBuiltin<"__builtin_ia32_rdtsc">,
@@ -2211,6 +2224,25 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
llvm_i8_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_w_128 : GCCBuiltin<"__builtin_ia32_psraw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_w_256 : GCCBuiltin<"__builtin_ia32_psraw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_w_512 : GCCBuiltin<"__builtin_ia32_psraw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_wi_128 : GCCBuiltin<"__builtin_ia32_psrawi128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_i8_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_wi_256 : GCCBuiltin<"__builtin_ia32_psrawi256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i8_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_wi_512 : GCCBuiltin<"__builtin_ia32_psrawi512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_i8_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
def int_x86_avx512_mask_psll_d : GCCBuiltin<"__builtin_ia32_pslld512_mask">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
@@ -2229,6 +2261,69 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_mask_psra_q : GCCBuiltin<"__builtin_ia32_psraq512_mask">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_psra_d_128 : GCCBuiltin<"__builtin_ia32_psrad128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_d_256 : GCCBuiltin<"__builtin_ia32_psrad256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_di_128 : GCCBuiltin<"__builtin_ia32_psradi128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_i8_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_di_256 : GCCBuiltin<"__builtin_ia32_psradi256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i8_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_di_512 : GCCBuiltin<"__builtin_ia32_psradi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_i8_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_q_128 : GCCBuiltin<"__builtin_ia32_psraq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_q_256 : GCCBuiltin<"__builtin_ia32_psraq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_qi_128 : GCCBuiltin<"__builtin_ia32_psraqi128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_i8_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_qi_256 : GCCBuiltin<"__builtin_ia32_psraqi256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i8_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_qi_512 : GCCBuiltin<"__builtin_ia32_psraqi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i8_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_psrl_d_128: GCCBuiltin<"__builtin_ia32_psrld128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [ llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty ], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_d_256: GCCBuiltin<"__builtin_ia32_psrld256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [ llvm_v8i32_ty,
+ llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty ], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_di_128: GCCBuiltin<"__builtin_ia32_psrldi128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [ llvm_v4i32_ty,
+ llvm_i8_ty, llvm_v4i32_ty, llvm_i8_ty ], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_di_256: GCCBuiltin<"__builtin_ia32_psrldi256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [ llvm_v8i32_ty,
+ llvm_i8_ty, llvm_v8i32_ty, llvm_i8_ty ], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_di_512: GCCBuiltin<"__builtin_ia32_psrldi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [ llvm_v16i32_ty,
+ llvm_i8_ty, llvm_v16i32_ty, llvm_i16_ty ], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_psrl_q_128: GCCBuiltin<"__builtin_ia32_psrlq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_q_256: GCCBuiltin<"__builtin_ia32_psrlq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_qi_128: GCCBuiltin<"__builtin_ia32_psrlqi128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_i8_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_qi_256: GCCBuiltin<"__builtin_ia32_psrlqi256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i8_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_qi_512: GCCBuiltin<"__builtin_ia32_psrlqi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i8_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
}
// Pack ops.
@@ -2696,6 +2791,59 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_psrl_dq_512 : GCCBuiltin<"__builtin_ia32_psrldq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_i32_ty],
[IntrNoMem]>;
+
+ def int_x86_avx512_mask_psll_d_128 : GCCBuiltin<"__builtin_ia32_pslld128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_d_256 : GCCBuiltin<"__builtin_ia32_pslld256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_di_128 : GCCBuiltin<"__builtin_ia32_pslldi128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_i8_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_di_256 : GCCBuiltin<"__builtin_ia32_pslldi256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i8_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_di_512 : GCCBuiltin<"__builtin_ia32_pslldi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_i8_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_q_128 : GCCBuiltin<"__builtin_ia32_psllq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_q_256 : GCCBuiltin<"__builtin_ia32_psllq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_qi_128 : GCCBuiltin<"__builtin_ia32_psllqi128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_i8_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_qi_256 : GCCBuiltin<"__builtin_ia32_psllqi256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i8_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_qi_512 : GCCBuiltin<"__builtin_ia32_psllqi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i8_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_psrlv16_hi : GCCBuiltin<"__builtin_ia32_psrlv16hi_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv2_di : GCCBuiltin<"__builtin_ia32_psrlv2di_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv32hi : GCCBuiltin<"__builtin_ia32_psrlv32hi_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv4_di : GCCBuiltin<"__builtin_ia32_psrlv4di_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv4_si : GCCBuiltin<"__builtin_ia32_psrlv4si_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv8_hi : GCCBuiltin<"__builtin_ia32_psrlv8hi_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv8_si : GCCBuiltin<"__builtin_ia32_psrlv8si_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
}
// Gather ops
@@ -3919,9 +4067,9 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Support protection key
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_rdpkru : GCCBuiltin <"__builtin_ia32_rdpkru">,
- Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [], []>;
def int_x86_wrpkru : GCCBuiltin<"__builtin_ia32_wrpkru">,
- Intrinsic<[], [llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[], [llvm_i32_ty], []>;
}
//===----------------------------------------------------------------------===//
// Half float conversion
diff --git a/contrib/llvm/include/llvm/IR/Metadata.h b/contrib/llvm/include/llvm/IR/Metadata.h
index 2ea5913..4a8557d 100644
--- a/contrib/llvm/include/llvm/IR/Metadata.h
+++ b/contrib/llvm/include/llvm/IR/Metadata.h
@@ -283,14 +283,20 @@ private:
LLVMContext &Context;
uint64_t NextIndex;
SmallDenseMap<void *, std::pair<OwnerTy, uint64_t>, 4> UseMap;
+ /// Flag that can be set to false if this metadata should not be
+ /// RAUW'ed, e.g. if it is used as the key of a map.
+ bool CanReplace;
public:
ReplaceableMetadataImpl(LLVMContext &Context)
- : Context(Context), NextIndex(0) {}
+ : Context(Context), NextIndex(0), CanReplace(true) {}
~ReplaceableMetadataImpl() {
assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
}
+ /// Set the CanReplace flag to the given value.
+ void setCanReplace(bool Replaceable) { CanReplace = Replaceable; }
+
LLVMContext &getContext() const { return Context; }
/// \brief Replace all uses of this with MD.
@@ -901,14 +907,19 @@ public:
Context.getReplaceableUses()->replaceAllUsesWith(MD);
}
+ /// Set the CanReplace flag to the given value.
+ void setCanReplace(bool Replaceable) {
+ Context.getReplaceableUses()->setCanReplace(Replaceable);
+ }
+
/// \brief Resolve cycles.
///
/// Once all forward declarations have been resolved, force cycles to be
- /// resolved. If \p MDMaterialized is true, then any temporary metadata
+ /// resolved. If \p AllowTemps is true, then any temporary metadata
/// is ignored, otherwise it asserts when encountering temporary metadata.
///
/// \pre No operands (or operands' operands, etc.) have \a isTemporary().
- void resolveCycles(bool MDMaterialized = true);
+ void resolveCycles(bool AllowTemps = false);
/// \brief Replace a temporary node with a permanent one.
///
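Usage sketch of the new flag (not from the patch), assuming a temporary node that is being used as a map key.

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

// Sketch only: pin a temporary node while it serves as a map key so it cannot
// be RAUW'ed out from under the map, then unpin it afterwards.
static void useTempNodeAsKey(LLVMContext &Ctx) {
  TempMDTuple Temp = MDTuple::getTemporary(Ctx, None);
  Temp->setCanReplace(false); // e.g. while the node is a DenseMap key
  // ... look-ups keyed on Temp.get() would go here ...
  Temp->setCanReplace(true);
}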
diff --git a/contrib/llvm/include/llvm/IR/Statepoint.h b/contrib/llvm/include/llvm/IR/Statepoint.h
index 7310c56..51a0951 100644
--- a/contrib/llvm/include/llvm/IR/Statepoint.h
+++ b/contrib/llvm/include/llvm/IR/Statepoint.h
@@ -22,6 +22,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Compiler.h"
@@ -36,14 +37,13 @@ enum class StatepointFlags {
MaskAll = GCTransition ///< A bitmask that includes all valid flags.
};
-class GCRelocateOperands;
+class GCRelocateInst;
class ImmutableStatepoint;
bool isStatepoint(const ImmutableCallSite &CS);
bool isStatepoint(const Value *V);
bool isStatepoint(const Value &V);
-bool isGCRelocate(const Value *V);
bool isGCRelocate(const ImmutableCallSite &CS);
bool isGCResult(const Value *V);
@@ -247,7 +247,7 @@ public:
/// May contain several relocations for the same base/derived pair.
/// For example this could happen due to relocations on unwinding
/// path of invoke.
- std::vector<GCRelocateOperands> getRelocates() const;
+ std::vector<const GCRelocateInst *> getRelocates() const;
/// Get the experimental_gc_result call tied to this statepoint. Can be
/// nullptr if there isn't a gc_result tied to this statepoint. Guaranteed to
@@ -305,33 +305,27 @@ public:
explicit Statepoint(CallSite CS) : Base(CS) {}
};
-/// Wraps a call to a gc.relocate and provides access to it's operands.
-/// TODO: This should likely be refactored to resememble the wrappers in
-/// InstrinsicInst.h.
-class GCRelocateOperands {
- ImmutableCallSite RelocateCS;
-
+/// This represents the gc.relocate intrinsic.
+class GCRelocateInst : public IntrinsicInst {
public:
- GCRelocateOperands(const User *U) : RelocateCS(U) { assert(isGCRelocate(U)); }
- GCRelocateOperands(const Instruction *inst) : RelocateCS(inst) {
- assert(isGCRelocate(inst));
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::experimental_gc_relocate;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
- GCRelocateOperands(CallSite CS) : RelocateCS(CS) { assert(isGCRelocate(CS)); }
/// Return true if this relocate is tied to the invoke statepoint.
/// This includes relocates which are on the unwinding path.
bool isTiedToInvoke() const {
- const Value *Token = RelocateCS.getArgument(0);
+ const Value *Token = getArgOperand(0);
return isa<LandingPadInst>(Token) || isa<InvokeInst>(Token);
}
- /// Get enclosed relocate intrinsic
- ImmutableCallSite getUnderlyingCallSite() { return RelocateCS; }
-
/// The statepoint with which this gc.relocate is associated.
- const Instruction *getStatepoint() {
- const Value *Token = RelocateCS.getArgument(0);
+ const Instruction *getStatepoint() const {
+ const Value *Token = getArgOperand(0);
// This takes care both of relocates for call statepoints and relocates
// on normal path of invoke statepoint.
@@ -354,22 +348,22 @@ public:
/// The index into the associate statepoint's argument list
/// which contains the base pointer of the pointer whose
/// relocation this gc.relocate describes.
- unsigned getBasePtrIndex() {
- return cast<ConstantInt>(RelocateCS.getArgument(1))->getZExtValue();
+ unsigned getBasePtrIndex() const {
+ return cast<ConstantInt>(getArgOperand(1))->getZExtValue();
}
/// The index into the associate statepoint's argument list which
/// contains the pointer whose relocation this gc.relocate describes.
- unsigned getDerivedPtrIndex() {
- return cast<ConstantInt>(RelocateCS.getArgument(2))->getZExtValue();
+ unsigned getDerivedPtrIndex() const {
+ return cast<ConstantInt>(getArgOperand(2))->getZExtValue();
}
- Value *getBasePtr() {
+ Value *getBasePtr() const {
ImmutableCallSite CS(getStatepoint());
return *(CS.arg_begin() + getBasePtrIndex());
}
- Value *getDerivedPtr() {
+ Value *getDerivedPtr() const {
ImmutableCallSite CS(getStatepoint());
return *(CS.arg_begin() + getDerivedPtrIndex());
}
@@ -377,11 +371,11 @@ public:
template <typename FunTy, typename InstructionTy, typename ValueTy,
typename CallSiteTy>
-std::vector<GCRelocateOperands>
+std::vector<const GCRelocateInst *>
StatepointBase<FunTy, InstructionTy, ValueTy, CallSiteTy>::getRelocates()
const {
- std::vector<GCRelocateOperands> Result;
+ std::vector<const GCRelocateInst *> Result;
CallSiteTy StatepointCS = getCallSite();
@@ -389,8 +383,8 @@ StatepointBase<FunTy, InstructionTy, ValueTy, CallSiteTy>::getRelocates()
// gc_relocates ensures that we only get pairs which are actually relocated
// and used after the statepoint.
for (const User *U : getInstruction()->users())
- if (isGCRelocate(U))
- Result.push_back(GCRelocateOperands(U));
+ if (auto *Relocate = dyn_cast<GCRelocateInst>(U))
+ Result.push_back(Relocate);
if (!StatepointCS.isInvoke())
return Result;
@@ -401,8 +395,8 @@ StatepointBase<FunTy, InstructionTy, ValueTy, CallSiteTy>::getRelocates()
// Search for gc relocates that are attached to this landingpad.
for (const User *LandingPadUser : LandingPad->users()) {
- if (isGCRelocate(LandingPadUser))
- Result.push_back(GCRelocateOperands(LandingPadUser));
+ if (auto *Relocate = dyn_cast<GCRelocateInst>(LandingPadUser))
+ Result.push_back(Relocate);
}
return Result;
}
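The dyn_cast-based pattern used above works anywhere relocates need to be collected; a small sketch (not from the patch):

#include "llvm/IR/Statepoint.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Sketch only: list the statepoint argument indices referenced by the
// gc.relocates hanging off a call-form statepoint instruction.
static void printRelocateIndices(const Instruction *StatepointCall) {
  for (const User *U : StatepointCall->users())
    if (auto *Relocate = dyn_cast<GCRelocateInst>(U))
      errs() << "base index " << Relocate->getBasePtrIndex()
             << ", derived index " << Relocate->getDerivedPtrIndex() << "\n";
}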
diff --git a/contrib/llvm/include/llvm/MC/SubtargetFeature.h b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
index 0d97b22..75d1e79 100644
--- a/contrib/llvm/include/llvm/MC/SubtargetFeature.h
+++ b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
@@ -39,8 +39,8 @@ public:
FeatureBitset(const bitset<MAX_SUBTARGET_FEATURES>& B) : bitset(B) {}
FeatureBitset(std::initializer_list<unsigned> Init) : bitset() {
- for (auto I = Init.begin() , E = Init.end(); I != E; ++I)
- set(*I);
+ for (auto I : Init)
+ set(I);
}
};
@@ -59,6 +59,11 @@ struct SubtargetFeatureKV {
bool operator<(StringRef S) const {
return StringRef(Key) < S;
}
+
+ // Compare routine for std::is_sorted.
+ bool operator<(const SubtargetFeatureKV &Other) const {
+ return StringRef(Key) < StringRef(Other.Key);
+ }
};
//===----------------------------------------------------------------------===//
@@ -98,14 +103,13 @@ public:
/// Adding Features.
void AddFeature(StringRef String, bool Enable = true);
- /// ToggleFeature - Toggle a feature and returns the newly updated feature
- /// bits.
- FeatureBitset ToggleFeature(FeatureBitset Bits, StringRef String,
- ArrayRef<SubtargetFeatureKV> FeatureTable);
+ /// ToggleFeature - Toggle a feature and update the feature bits.
+ static void ToggleFeature(FeatureBitset &Bits, StringRef String,
+ ArrayRef<SubtargetFeatureKV> FeatureTable);
- /// Apply the feature flag and return the newly updated feature bits.
- FeatureBitset ApplyFeatureFlag(FeatureBitset Bits, StringRef Feature,
- ArrayRef<SubtargetFeatureKV> FeatureTable);
+ /// Apply the feature flag and update the feature bits.
+ static void ApplyFeatureFlag(FeatureBitset &Bits, StringRef Feature,
+ ArrayRef<SubtargetFeatureKV> FeatureTable);
/// Get feature bits of a CPU.
FeatureBitset getFeatureBits(StringRef CPU,
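Usage sketch (not from the patch); FeatureTable stands for a target's tablegen-generated SubtargetFeatureKV array and "crc" is only an illustrative feature name.

#include "llvm/MC/SubtargetFeature.h"
using namespace llvm;

// Sketch only: both routines are now static and update Bits in place instead
// of returning a fresh FeatureBitset.
static void adjustFeatures(FeatureBitset &Bits,
                           ArrayRef<SubtargetFeatureKV> FeatureTable) {
  SubtargetFeatures::ApplyFeatureFlag(Bits, "+crc", FeatureTable);
  SubtargetFeatures::ToggleFeature(Bits, "crc", FeatureTable);
}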
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProf.h b/contrib/llvm/include/llvm/ProfileData/InstrProf.h
index 4688759..49569d8 100644
--- a/contrib/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProf.h
@@ -155,11 +155,36 @@ GlobalVariable *createPGOFuncNameVar(Function &F, StringRef FuncName);
GlobalVariable *createPGOFuncNameVar(Module &M,
GlobalValue::LinkageTypes Linkage,
StringRef FuncName);
+/// Return the initializer in string of the PGO name var \c NameVar.
+StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar);
/// Given a PGO function name, remove the filename prefix and return
/// the original (static) function name.
StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName);
+/// Given a vector of strings (function PGO names) \c NameStrs, the
+/// method generates a combined string \c Result thatis ready to be
+/// serialized. The \c Result string is comprised of three fields:
+/// The first field is the legnth of the uncompressed strings, and the
+/// the second field is the length of the zlib-compressed string.
+/// Both fields are encoded in ULEB128. If \c doCompress is false, the
+/// third field is the uncompressed strings; otherwise it is the
+/// compressed string. When the string compression is off, the
+/// second field will have value zero.
+int collectPGOFuncNameStrings(const std::vector<std::string> &NameStrs,
+ bool doCompression, std::string &Result);
+/// Produce \c Result string with the same format described above. The input
+/// is vector of PGO function name variables that are referenced.
+int collectPGOFuncNameStrings(const std::vector<GlobalVariable *> &NameVars,
+ std::string &Result);
+class InstrProfSymtab;
+/// \c NameStrings is a string composed of one of more sub-strings encoded in
+/// the
+/// format described above. The substrings are seperated by 0 or more zero
+/// bytes.
+/// This method decodes the string and populates the \c Symtab.
+int readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab);
+
const std::error_category &instrprof_category();
enum class instrprof_error {
@@ -235,6 +260,11 @@ public:
/// This interface is used by reader of CoverageMapping test
/// format.
inline std::error_code create(StringRef D, uint64_t BaseAddr);
+ /// \c NameStrings is a string composed of one of more sub-strings
+ /// encoded in the format described above. The substrings are
+ /// seperated by 0 or more zero bytes. This method decodes the
+ /// string and populates the \c Symtab.
+ inline std::error_code create(StringRef NameStrings);
/// Create InstrProfSymtab from a set of names iteratable from
/// \p IterRange. This interface is used by IndexedProfReader.
template <typename NameIterRange> void create(const NameIterRange &IterRange);
@@ -255,8 +285,8 @@ public:
AddrToMD5Map.push_back(std::make_pair(Addr, MD5Val));
}
AddrHashMap &getAddrHashMap() { return AddrToMD5Map; }
- /// Return function's PGO name from the function name's symabol
- /// address in the object file. If an error occurs, Return
+ /// Return function's PGO name from the function name's symbol
+ /// address in the object file. If an error occurs, return
/// an empty string.
StringRef getFuncName(uint64_t FuncNameAddress, size_t NameSize);
/// Return function's PGO name from the name's md5 hash value.
@@ -270,6 +300,12 @@ std::error_code InstrProfSymtab::create(StringRef D, uint64_t BaseAddr) {
return std::error_code();
}
+std::error_code InstrProfSymtab::create(StringRef NameStrings) {
+ if (readPGOFuncNameStrings(NameStrings, *this))
+ return make_error_code(instrprof_error::malformed);
+ return std::error_code();
+}
+
template <typename NameIterRange>
void InstrProfSymtab::create(const NameIterRange &IterRange) {
for (auto Name : IterRange)
@@ -576,8 +612,14 @@ template <class IntPtrT> struct CovMapFunctionRecord {
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Type Name;
#include "llvm/ProfileData/InstrProfData.inc"
};
-LLVM_PACKED_END
+// Per module coverage mapping data header, i.e. CoverageMapFileHeader
+// documented above.
+struct CovMapHeader {
+#define COVMAP_HEADER(Type, LLVMType, Name, Init) Type Name;
+#include "llvm/ProfileData/InstrProfData.inc"
+};
+LLVM_PACKED_END
}
} // end namespace llvm
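Usage sketch (not from the patch); NameStrings is assumed to hold a blob produced by collectPGOFuncNameStrings.

#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

// Sketch only: decode a serialized PGO name blob into a symbol table.
static void populateSymtab(StringRef NameStrings, InstrProfSymtab &Symtab) {
  if (std::error_code EC = Symtab.create(NameStrings))
    report_fatal_error("malformed PGO name data: " + EC.message());
}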
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProfData.inc b/contrib/llvm/include/llvm/ProfileData/InstrProfData.inc
index 48dae50..3a7c0c5 100644
--- a/contrib/llvm/include/llvm/ProfileData/InstrProfData.inc
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProfData.inc
@@ -1,4 +1,4 @@
-/*===-- InstrProfData.inc - instr profiling runtime structures -----------=== *\
+/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
|*
|* The LLVM Compiler Infrastructure
|*
@@ -167,6 +167,25 @@ COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
#undef COVMAP_FUNC_RECORD
/* COVMAP_FUNC_RECORD end. */
+/* COVMAP_HEADER start */
+/* Definition of member fields of coverage map header.
+ */
+#ifndef COVMAP_HEADER
+#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
+ llvm::ConstantInt::get(Int32Ty, FunctionRecords.size()))
+COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
+ llvm::ConstantInt::get(Int32Ty, FilenamesSize))
+COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
+ llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
+COVMAP_HEADER(uint32_t, Int32Ty, Version, \
+ llvm::ConstantInt::get(Int32Ty, CoverageMappingVersion1))
+#undef COVMAP_HEADER
+/* COVMAP_HEADER end. */
+
#ifdef INSTR_PROF_VALUE_PROF_DATA
#define INSTR_PROF_DATA_DEFINED
diff --git a/contrib/llvm/include/llvm/Support/ARMTargetParser.def b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
index 2f99b07..c895b09 100644
--- a/contrib/llvm/include/llvm/Support/ARMTargetParser.def
+++ b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
@@ -213,6 +213,7 @@ ARM_CPU_NAME("cortex-a53", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, true, AEK_CRC)
ARM_CPU_NAME("cortex-a57", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
ARM_CPU_NAME("cortex-a72", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
ARM_CPU_NAME("cyclone", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
+ARM_CPU_NAME("exynos-m1", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
// Non-standard Arch names.
ARM_CPU_NAME("iwmmxt", AK_IWMMXT, FK_NONE, true, AEK_NONE)
ARM_CPU_NAME("xscale", AK_XSCALE, FK_NONE, true, AEK_NONE)
diff --git a/contrib/llvm/include/llvm/Support/Program.h b/contrib/llvm/include/llvm/Support/Program.h
index 4330210..727864d 100644
--- a/contrib/llvm/include/llvm/Support/Program.h
+++ b/contrib/llvm/include/llvm/Support/Program.h
@@ -130,7 +130,7 @@ struct ProcessInfo {
/// Return true if the given arguments fit within system-specific
/// argument length limits.
- bool argumentsFitWithinSystemLimits(ArrayRef<const char*> Args);
+ bool commandLineFitsWithinSystemLimits(StringRef Program, ArrayRef<const char*> Args);
/// File encoding options when writing contents that a non-UTF8 tool will
/// read (on Windows systems). For UNIX, we always use UTF-8.
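Usage sketch (not from the patch); Program and Args are placeholders for an executable path and its argument vector.

#include "llvm/Support/Program.h"
using namespace llvm;

// Sketch only: decide whether to fall back to a response file before spawning
// a subprocess, now that the program path is part of the length check.
static bool needsResponseFile(StringRef Program, ArrayRef<const char *> Args) {
  return !sys::commandLineFitsWithinSystemLimits(Program, Args);
}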
diff --git a/contrib/llvm/include/llvm/Support/YAMLParser.h b/contrib/llvm/include/llvm/Support/YAMLParser.h
index b056ab6..a5addfa 100644
--- a/contrib/llvm/include/llvm/Support/YAMLParser.h
+++ b/contrib/llvm/include/llvm/Support/YAMLParser.h
@@ -305,7 +305,7 @@ private:
/// increment() which must set CurrentEntry to 0 to create an end iterator.
template <class BaseT, class ValueT>
class basic_collection_iterator
- : public std::iterator<std::forward_iterator_tag, ValueT> {
+ : public std::iterator<std::input_iterator_tag, ValueT> {
public:
basic_collection_iterator() : Base(nullptr) {}
basic_collection_iterator(BaseT *B) : Base(B) {}
@@ -326,11 +326,24 @@ public:
return Base->CurrentEntry;
}
+ /// Note on EqualityComparable:
+ ///
+ /// The iterator is not re-entrant,
+ /// it is meant to be used for parsing YAML on-demand
+ /// Once iteration started - it can point only to one entry at a time
+ /// hence Base.CurrentEntry and Other.Base.CurrentEntry are equal
+ /// iff Base and Other.Base are equal.
+ bool operator==(const basic_collection_iterator &Other) const {
+ if (Base && (Base == Other.Base)) {
+ assert((Base->CurrentEntry == Other.Base->CurrentEntry)
+ && "Equal Bases expected to point to equal Entries");
+ }
+
+ return Base == Other.Base;
+ }
+
bool operator!=(const basic_collection_iterator &Other) const {
- if (Base != Other.Base)
- return true;
- return (Base && Other.Base) &&
- Base->CurrentEntry != Other.Base->CurrentEntry;
+ return !(Base == Other.Base);
}
basic_collection_iterator &operator++() {
diff --git a/contrib/llvm/include/llvm/TableGen/Record.h b/contrib/llvm/include/llvm/TableGen/Record.h
index eb1c5c7..4c1ef40 100644
--- a/contrib/llvm/include/llvm/TableGen/Record.h
+++ b/contrib/llvm/include/llvm/TableGen/Record.h
@@ -232,7 +232,7 @@ protected:
/// We could pack these a bit tighter by not having the IK_FirstXXXInit
/// and IK_LastXXXInit be their own values, but that would degrade
/// readability for really no benefit.
- enum InitKind {
+ enum InitKind : uint8_t {
IK_BitInit,
IK_FirstTypedInit,
IK_BitsInit,
@@ -256,6 +256,9 @@ protected:
private:
const InitKind Kind;
+protected:
+ uint8_t Opc; // Used by UnOpInit, BinOpInit, and TernOpInit
+private:
Init(const Init &) = delete;
Init &operator=(const Init &) = delete;
virtual void anchor();
@@ -264,7 +267,7 @@ public:
InitKind getKind() const { return Kind; }
protected:
- explicit Init(InitKind K) : Kind(K) {}
+ explicit Init(InitKind K, uint8_t Opc = 0) : Kind(K), Opc(Opc) {}
public:
virtual ~Init() {}
@@ -365,7 +368,8 @@ class TypedInit : public Init {
TypedInit &operator=(const TypedInit &Other) = delete;
protected:
- explicit TypedInit(InitKind K, RecTy *T) : Init(K), Ty(T) {}
+ explicit TypedInit(InitKind K, RecTy *T, uint8_t Opc = 0)
+ : Init(K, Opc), Ty(T) {}
~TypedInit() override {
// If this is a DefInit we need to delete the RecordRecTy.
if (getKind() == IK_DefInit)
@@ -650,7 +654,8 @@ class OpInit : public TypedInit {
OpInit &operator=(OpInit &Other) = delete;
protected:
- explicit OpInit(InitKind K, RecTy *Type) : TypedInit(K, Type) {}
+ explicit OpInit(InitKind K, RecTy *Type, uint8_t Opc)
+ : TypedInit(K, Type, Opc) {}
public:
static bool classof(const Init *I) {
@@ -677,14 +682,13 @@ public:
///
class UnOpInit : public OpInit {
public:
- enum UnaryOp { CAST, HEAD, TAIL, EMPTY };
+ enum UnaryOp : uint8_t { CAST, HEAD, TAIL, EMPTY };
private:
- UnaryOp Opc;
Init *LHS;
UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type)
- : OpInit(IK_UnOpInit, Type), Opc(opc), LHS(lhs) {}
+ : OpInit(IK_UnOpInit, Type, opc), LHS(lhs) {}
UnOpInit(const UnOpInit &Other) = delete;
UnOpInit &operator=(const UnOpInit &Other) = delete;
@@ -708,7 +712,7 @@ public:
return getOperand();
}
- UnaryOp getOpcode() const { return Opc; }
+ UnaryOp getOpcode() const { return (UnaryOp)Opc; }
Init *getOperand() const { return LHS; }
// Fold - If possible, fold this to a simpler init. Return this if not
@@ -724,14 +728,14 @@ public:
///
class BinOpInit : public OpInit {
public:
- enum BinaryOp { ADD, AND, SHL, SRA, SRL, LISTCONCAT, STRCONCAT, CONCAT, EQ };
+ enum BinaryOp : uint8_t { ADD, AND, SHL, SRA, SRL, LISTCONCAT,
+ STRCONCAT, CONCAT, EQ };
private:
- BinaryOp Opc;
Init *LHS, *RHS;
BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) :
- OpInit(IK_BinOpInit, Type), Opc(opc), LHS(lhs), RHS(rhs) {}
+ OpInit(IK_BinOpInit, Type, opc), LHS(lhs), RHS(rhs) {}
BinOpInit(const BinOpInit &Other) = delete;
BinOpInit &operator=(const BinOpInit &Other) = delete;
@@ -759,7 +763,7 @@ public:
}
}
- BinaryOp getOpcode() const { return Opc; }
+ BinaryOp getOpcode() const { return (BinaryOp)Opc; }
Init *getLHS() const { return LHS; }
Init *getRHS() const { return RHS; }
@@ -776,15 +780,14 @@ public:
///
class TernOpInit : public OpInit {
public:
- enum TernaryOp { SUBST, FOREACH, IF };
+ enum TernaryOp : uint8_t { SUBST, FOREACH, IF };
private:
- TernaryOp Opc;
Init *LHS, *MHS, *RHS;
TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs,
RecTy *Type) :
- OpInit(IK_TernOpInit, Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
+ OpInit(IK_TernOpInit, Type, opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
TernOpInit(const TernOpInit &Other) = delete;
TernOpInit &operator=(const TernOpInit &Other) = delete;
@@ -815,7 +818,7 @@ public:
}
}
- TernaryOp getOpcode() const { return Opc; }
+ TernaryOp getOpcode() const { return (TernaryOp)Opc; }
Init *getLHS() const { return LHS; }
Init *getMHS() const { return MHS; }
Init *getRHS() const { return RHS; }
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index 79046b2..c869341 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -936,6 +936,10 @@ class AsmParser {
// ShouldEmitMatchRegisterName - Set to false if the target needs a hand
// written register name matcher
bit ShouldEmitMatchRegisterName = 1;
+
+ // HasMnemonicFirst - Set to false if target instructions don't always
+ // start with a mnemonic as the first token.
+ bit HasMnemonicFirst = 1;
}
def DefaultAsmParser : AsmParser;
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
index 140c3659..863b7cd 100644
--- a/contrib/llvm/include/llvm/Target/TargetLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -2269,6 +2269,12 @@ public:
return false;
}
+ /// Return true if the MachineFunction contains a COPY which would imply
+ /// HasOpaqueSPAdjustment.
+ virtual bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const {
+ return false;
+ }
+
/// Perform necessary initialization to handle a subset of CSRs explicitly
/// via copies. This function is called at the beginning of instruction
/// selection.
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h b/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
index 0d081c0..af0d60b 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -23,11 +23,13 @@
namespace llvm {
-/// This optimization identifies DIV instructions that can be
+/// This optimization identifies DIV instructions in a BB that can be
/// profitably bypassed and carried out with a shorter, faster divide.
-bool bypassSlowDivision(Function &F,
- Function::iterator &I,
- const DenseMap<unsigned int, unsigned int> &BypassWidth);
+///
+/// This optimization may add basic blocks immediately after BB; for obvious
+/// reasons, you shouldn't pass those blocks to bypassSlowDivision.
+bool bypassSlowDivision(
+ BasicBlock *BB, const DenseMap<unsigned int, unsigned int> &BypassWidth);
} // End llvm namespace
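Driver sketch for the per-block interface (not from the patch); the 64-to-32 bit mapping is only an example of a bypass width table.

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
using namespace llvm;

// Sketch only: walk the function block by block, capturing the next block
// before transforming so blocks inserted after BB are not revisited.
static bool bypassSlowDivInFunction(Function &F) {
  DenseMap<unsigned int, unsigned int> BypassWidths;
  BypassWidths[64] = 32; // try a 32-bit divide for 64-bit operands when safe
  bool Changed = false;
  for (BasicBlock *BB = F.empty() ? nullptr : &F.front(); BB;) {
    BasicBlock *Next = BB->getNextNode();
    Changed |= bypassSlowDivision(BB, BypassWidths);
    BB = Next;
  }
  return Changed;
}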
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
index 17aaee0..2cfacb6 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
@@ -39,6 +40,8 @@ struct LICMSafetyInfo {
bool MayThrow; // The current loop contains an instruction which
// may throw.
bool HeaderMayThrow; // Same as previous, but specific to loop header
+ // Used to update funclet bundle operands.
+ DenseMap<BasicBlock *, ColorVector> BlockColors;
LICMSafetyInfo() : MayThrow(false), HeaderMayThrow(false)
{}
};