Diffstat (limited to 'contrib/llvm/lib/IR')
-rw-r--r--  contrib/llvm/lib/IR/AsmWriter.cpp  3456
-rw-r--r--  contrib/llvm/lib/IR/AttributeImpl.h  284
-rw-r--r--  contrib/llvm/lib/IR/Attributes.cpp  1512
-rw-r--r--  contrib/llvm/lib/IR/AttributesCompatFunc.td  1
-rw-r--r--  contrib/llvm/lib/IR/AutoUpgrade.cpp  904
-rw-r--r--  contrib/llvm/lib/IR/BasicBlock.cpp  433
-rw-r--r--  contrib/llvm/lib/IR/Comdat.cpp  25
-rw-r--r--  contrib/llvm/lib/IR/ConstantFold.cpp  2266
-rw-r--r--  contrib/llvm/lib/IR/ConstantFold.h  60
-rw-r--r--  contrib/llvm/lib/IR/ConstantRange.cpp  824
-rw-r--r--  contrib/llvm/lib/IR/Constants.cpp  3040
-rw-r--r--  contrib/llvm/lib/IR/ConstantsContext.h  671
-rw-r--r--  contrib/llvm/lib/IR/Core.cpp  2952
-rw-r--r--  contrib/llvm/lib/IR/DIBuilder.cpp  907
-rw-r--r--  contrib/llvm/lib/IR/DataLayout.cpp  793
-rw-r--r--  contrib/llvm/lib/IR/DebugInfo.cpp  365
-rw-r--r--  contrib/llvm/lib/IR/DebugInfoMetadata.cpp  580
-rw-r--r--  contrib/llvm/lib/IR/DebugLoc.cpp  102
-rw-r--r--  contrib/llvm/lib/IR/DiagnosticInfo.cpp  243
-rw-r--r--  contrib/llvm/lib/IR/DiagnosticPrinter.cpp  117
-rw-r--r--  contrib/llvm/lib/IR/Dominators.cpp  356
-rw-r--r--  contrib/llvm/lib/IR/Function.cpp  991
-rw-r--r--  contrib/llvm/lib/IR/FunctionInfo.cpp  67
-rw-r--r--  contrib/llvm/lib/IR/GCOV.cpp  796
-rw-r--r--  contrib/llvm/lib/IR/GVMaterializer.cpp  18
-rw-r--r--  contrib/llvm/lib/IR/Globals.cpp  285
-rw-r--r--  contrib/llvm/lib/IR/IRBuilder.cpp  403
-rw-r--r--  contrib/llvm/lib/IR/IRPrintingPasses.cpp  139
-rw-r--r--  contrib/llvm/lib/IR/InlineAsm.cpp  295
-rw-r--r--  contrib/llvm/lib/IR/Instruction.cpp  586
-rw-r--r--  contrib/llvm/lib/IR/Instructions.cpp  3977
-rw-r--r--  contrib/llvm/lib/IR/IntrinsicInst.cpp  81
-rw-r--r--  contrib/llvm/lib/IR/LLVMContext.cpp  322
-rw-r--r--  contrib/llvm/lib/IR/LLVMContextImpl.cpp  259
-rw-r--r--  contrib/llvm/lib/IR/LLVMContextImpl.h  1046
-rw-r--r--  contrib/llvm/lib/IR/LegacyPassManager.cpp  1934
-rw-r--r--  contrib/llvm/lib/IR/MDBuilder.cpp  175
-rw-r--r--  contrib/llvm/lib/IR/Mangler.cpp  174
-rw-r--r--  contrib/llvm/lib/IR/Metadata.cpp  1322
-rw-r--r--  contrib/llvm/lib/IR/MetadataImpl.h  59
-rw-r--r--  contrib/llvm/lib/IR/Module.cpp  487
-rw-r--r--  contrib/llvm/lib/IR/Operator.cpp  44
-rw-r--r--  contrib/llvm/lib/IR/Pass.cpp  290
-rw-r--r--  contrib/llvm/lib/IR/PassManager.cpp  43
-rw-r--r--  contrib/llvm/lib/IR/PassRegistry.cpp  130
-rw-r--r--  contrib/llvm/lib/IR/Statepoint.cpp  61
-rw-r--r--  contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h  114
-rw-r--r--  contrib/llvm/lib/IR/Type.cpp  719
-rw-r--r--  contrib/llvm/lib/IR/TypeFinder.cpp  174
-rw-r--r--  contrib/llvm/lib/IR/Use.cpp  127
-rw-r--r--  contrib/llvm/lib/IR/User.cpp  205
-rw-r--r--  contrib/llvm/lib/IR/Value.cpp  767
-rw-r--r--  contrib/llvm/lib/IR/ValueSymbolTable.cpp  107
-rw-r--r--  contrib/llvm/lib/IR/ValueTypes.cpp  316
-rw-r--r--  contrib/llvm/lib/IR/Verifier.cpp  4268
-rw-r--r--  contrib/llvm/lib/IR/module.modulemap  1
56 files changed, 40673 insertions, 0 deletions
diff --git a/contrib/llvm/lib/IR/AsmWriter.cpp b/contrib/llvm/lib/IR/AsmWriter.cpp
new file mode 100644
index 0000000..0ce44e1
--- /dev/null
+++ b/contrib/llvm/lib/IR/AsmWriter.cpp
@@ -0,0 +1,3456 @@
+//===-- AsmWriter.cpp - Printing LLVM as an assembly file -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This library implements the functionality defined in llvm/IR/Writer.h
+//
+// Note that these routines must be extremely tolerant of various errors in the
+// LLVM code, because they can be used for debugging transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/AssemblyAnnotationWriter.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSlotTracker.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/TypeFinder.h"
+#include "llvm/IR/UseListOrder.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cctype>
+using namespace llvm;
+
+// Make virtual table appear in this compilation unit.
+AssemblyAnnotationWriter::~AssemblyAnnotationWriter() {}
+
+//===----------------------------------------------------------------------===//
+// Helper Functions
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct OrderMap {
+ DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
+
+ unsigned size() const { return IDs.size(); }
+ std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; }
+ std::pair<unsigned, bool> lookup(const Value *V) const {
+ return IDs.lookup(V);
+ }
+ void index(const Value *V) {
+ // Explicitly sequence get-size and insert-value operations to avoid UB.
+ unsigned ID = IDs.size() + 1;
+ IDs[V].first = ID;
+ }
+};
+}
+
+static void orderValue(const Value *V, OrderMap &OM) {
+ if (OM.lookup(V).first)
+ return;
+
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (C->getNumOperands() && !isa<GlobalValue>(C))
+ for (const Value *Op : C->operands())
+ if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
+ orderValue(Op, OM);
+
+ // Note: we cannot cache this lookup above, since inserting into the map
+ // changes the map's size, and thus affects the other IDs.
+ OM.index(V);
+}
+
+static OrderMap orderModule(const Module *M) {
+ // This needs to match the order used by ValueEnumerator::ValueEnumerator()
+ // and ValueEnumerator::incorporateFunction().
+ OrderMap OM;
+
+ for (const GlobalVariable &G : M->globals()) {
+ if (G.hasInitializer())
+ if (!isa<GlobalValue>(G.getInitializer()))
+ orderValue(G.getInitializer(), OM);
+ orderValue(&G, OM);
+ }
+ for (const GlobalAlias &A : M->aliases()) {
+ if (!isa<GlobalValue>(A.getAliasee()))
+ orderValue(A.getAliasee(), OM);
+ orderValue(&A, OM);
+ }
+ for (const Function &F : *M) {
+ for (const Use &U : F.operands())
+ if (!isa<GlobalValue>(U.get()))
+ orderValue(U.get(), OM);
+
+ orderValue(&F, OM);
+
+ if (F.isDeclaration())
+ continue;
+
+ for (const Argument &A : F.args())
+ orderValue(&A, OM);
+ for (const BasicBlock &BB : F) {
+ orderValue(&BB, OM);
+ for (const Instruction &I : BB) {
+ for (const Value *Op : I.operands())
+ if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
+ isa<InlineAsm>(*Op))
+ orderValue(Op, OM);
+ orderValue(&I, OM);
+ }
+ }
+ }
+ return OM;
+}
+
+static void predictValueUseListOrderImpl(const Value *V, const Function *F,
+ unsigned ID, const OrderMap &OM,
+ UseListOrderStack &Stack) {
+ // Predict use-list order for this one.
+ typedef std::pair<const Use *, unsigned> Entry;
+ SmallVector<Entry, 64> List;
+ for (const Use &U : V->uses())
+ // Check if this user will be serialized.
+ if (OM.lookup(U.getUser()).first)
+ List.push_back(std::make_pair(&U, List.size()));
+
+ if (List.size() < 2)
+ // We may have lost some users.
+ return;
+
+ bool GetsReversed =
+ !isa<GlobalVariable>(V) && !isa<Function>(V) && !isa<BasicBlock>(V);
+ if (auto *BA = dyn_cast<BlockAddress>(V))
+ ID = OM.lookup(BA->getBasicBlock()).first;
+ std::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+ const Use *LU = L.first;
+ const Use *RU = R.first;
+ if (LU == RU)
+ return false;
+
+ auto LID = OM.lookup(LU->getUser()).first;
+ auto RID = OM.lookup(RU->getUser()).first;
+
+ // If ID is 4, then expect: 7 6 5 1 2 3, i.e. users with IDs above ID in
+ // descending order, followed by users with IDs at or below ID in
+ // ascending order.
+ if (LID < RID) {
+ if (GetsReversed)
+ if (RID <= ID)
+ return true;
+ return false;
+ }
+ if (RID < LID) {
+ if (GetsReversed)
+ if (LID <= ID)
+ return false;
+ return true;
+ }
+
+ // LID and RID are equal, so we have different operands of the same user.
+ // Assume operands are added in order for all instructions.
+ if (GetsReversed)
+ if (LID <= ID)
+ return LU->getOperandNo() < RU->getOperandNo();
+ return LU->getOperandNo() > RU->getOperandNo();
+ });
+
+ if (std::is_sorted(
+ List.begin(), List.end(),
+ [](const Entry &L, const Entry &R) { return L.second < R.second; }))
+ // Order is already correct.
+ return;
+
+ // Store the shuffle.
+ Stack.emplace_back(V, F, List.size());
+ assert(List.size() == Stack.back().Shuffle.size() && "Wrong size");
+ for (size_t I = 0, E = List.size(); I != E; ++I)
+ Stack.back().Shuffle[I] = List[I].second;
+}
+
+static void predictValueUseListOrder(const Value *V, const Function *F,
+ OrderMap &OM, UseListOrderStack &Stack) {
+ auto &IDPair = OM[V];
+ assert(IDPair.first && "Unmapped value");
+ if (IDPair.second)
+ // Already predicted.
+ return;
+
+ // Do the actual prediction.
+ IDPair.second = true;
+ if (!V->use_empty() && std::next(V->use_begin()) != V->use_end())
+ predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);
+
+ // Recursive descent into constants.
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (C->getNumOperands()) // Visit GlobalValues.
+ for (const Value *Op : C->operands())
+ if (isa<Constant>(Op)) // Visit GlobalValues.
+ predictValueUseListOrder(Op, F, OM, Stack);
+}
+
+static UseListOrderStack predictUseListOrder(const Module *M) {
+ OrderMap OM = orderModule(M);
+
+ // Use-list orders need to be serialized after all the users have been added
+ // to a value, or else the shuffles will be incomplete. Store them per
+ // function in a stack.
+ //
+ // Aside from function order, the order of values doesn't matter much here.
+ UseListOrderStack Stack;
+
+ // We want to visit the functions backward now so we can list function-local
+ // constants in the last Function they're used in. Module-level constants
+ // have already been visited above.
+ for (const Function &F : make_range(M->rbegin(), M->rend())) {
+ if (F.isDeclaration())
+ continue;
+ for (const BasicBlock &BB : F)
+ predictValueUseListOrder(&BB, &F, OM, Stack);
+ for (const Argument &A : F.args())
+ predictValueUseListOrder(&A, &F, OM, Stack);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ for (const Value *Op : I.operands())
+ if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
+ predictValueUseListOrder(Op, &F, OM, Stack);
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ predictValueUseListOrder(&I, &F, OM, Stack);
+ }
+
+ // Visit globals last.
+ for (const GlobalVariable &G : M->globals())
+ predictValueUseListOrder(&G, nullptr, OM, Stack);
+ for (const Function &F : *M)
+ predictValueUseListOrder(&F, nullptr, OM, Stack);
+ for (const GlobalAlias &A : M->aliases())
+ predictValueUseListOrder(&A, nullptr, OM, Stack);
+ for (const GlobalVariable &G : M->globals())
+ if (G.hasInitializer())
+ predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack);
+ for (const GlobalAlias &A : M->aliases())
+ predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack);
+ for (const Function &F : *M)
+ for (const Use &U : F.operands())
+ predictValueUseListOrder(U.get(), nullptr, OM, Stack);
+
+ return Stack;
+}
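+
+// The shuffles recorded here are ultimately printed as uselistorder
+// directives in the assembly output, e.g. (illustrative syntax and names,
+// not taken from this file):
+//   uselistorder i32 %x, { 1, 0, 2 }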
+
+static const Module *getModuleFromVal(const Value *V) {
+ if (const Argument *MA = dyn_cast<Argument>(V))
+ return MA->getParent() ? MA->getParent()->getParent() : nullptr;
+
+ if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+ return BB->getParent() ? BB->getParent()->getParent() : nullptr;
+
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ const Function *M = I->getParent() ? I->getParent()->getParent() : nullptr;
+ return M ? M->getParent() : nullptr;
+ }
+
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+ return GV->getParent();
+
+ if (const auto *MAV = dyn_cast<MetadataAsValue>(V)) {
+ for (const User *U : MAV->users())
+ if (isa<Instruction>(U))
+ if (const Module *M = getModuleFromVal(U))
+ return M;
+ return nullptr;
+ }
+
+ return nullptr;
+}
+
+static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
+ switch (cc) {
+ default: Out << "cc" << cc; break;
+ case CallingConv::Fast: Out << "fastcc"; break;
+ case CallingConv::Cold: Out << "coldcc"; break;
+ case CallingConv::WebKit_JS: Out << "webkit_jscc"; break;
+ case CallingConv::AnyReg: Out << "anyregcc"; break;
+ case CallingConv::PreserveMost: Out << "preserve_mostcc"; break;
+ case CallingConv::PreserveAll: Out << "preserve_allcc"; break;
+ case CallingConv::CXX_FAST_TLS: Out << "cxx_fast_tlscc"; break;
+ case CallingConv::GHC: Out << "ghccc"; break;
+ case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break;
+ case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break;
+ case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break;
+ case CallingConv::X86_VectorCall:Out << "x86_vectorcallcc"; break;
+ case CallingConv::Intel_OCL_BI: Out << "intel_ocl_bicc"; break;
+ case CallingConv::ARM_APCS: Out << "arm_apcscc"; break;
+ case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break;
+ case CallingConv::ARM_AAPCS_VFP: Out << "arm_aapcs_vfpcc"; break;
+ case CallingConv::MSP430_INTR: Out << "msp430_intrcc"; break;
+ case CallingConv::PTX_Kernel: Out << "ptx_kernel"; break;
+ case CallingConv::PTX_Device: Out << "ptx_device"; break;
+ case CallingConv::X86_64_SysV: Out << "x86_64_sysvcc"; break;
+ case CallingConv::X86_64_Win64: Out << "x86_64_win64cc"; break;
+ case CallingConv::SPIR_FUNC: Out << "spir_func"; break;
+ case CallingConv::SPIR_KERNEL: Out << "spir_kernel"; break;
+ case CallingConv::X86_INTR: Out << "x86_intrcc"; break;
+ case CallingConv::HHVM: Out << "hhvmcc"; break;
+ case CallingConv::HHVM_C: Out << "hhvm_ccc"; break;
+ }
+}
+
+// PrintEscapedString - Print each character of the specified string, escaping
+// it if it is not printable or if it is an escape char.
+static void PrintEscapedString(StringRef Name, raw_ostream &Out) {
+ for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+ unsigned char C = Name[i];
+ if (isprint(C) && C != '\\' && C != '"')
+ Out << C;
+ else
+ Out << '\\' << hexdigit(C >> 4) << hexdigit(C & 0x0F);
+ }
+}
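+
+// Illustrative sketch of the escaping above (hypothetical input): given the
+// name `hi "there"\`, PrintEscapedString emits
+//   hi \22there\22\5C
+// because '"' (0x22) and '\' (0x5C) are rewritten as a backslash plus two
+// uppercase hex digits, while printable characters pass through unchanged.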
+
+enum PrefixType {
+ GlobalPrefix,
+ ComdatPrefix,
+ LabelPrefix,
+ LocalPrefix,
+ NoPrefix
+};
+
+void llvm::printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name) {
+ assert(!Name.empty() && "Cannot get empty name!");
+
+ // Scan the name to see if it needs quotes first.
+ bool NeedsQuotes = isdigit(static_cast<unsigned char>(Name[0]));
+ if (!NeedsQuotes) {
+ for (unsigned i = 0, e = Name.size(); i != e; ++i) {
+ // By making this unsigned, the value passed in to isalnum will always be
+ // in the range 0-255. This is important when building with MSVC because
+ // its implementation will assert. This situation can arise when dealing
+ // with UTF-8 multibyte characters.
+ unsigned char C = Name[i];
+ if (!isalnum(static_cast<unsigned char>(C)) && C != '-' && C != '.' &&
+ C != '_') {
+ NeedsQuotes = true;
+ break;
+ }
+ }
+ }
+
+ // If we didn't need any quotes, just write out the name in one blast.
+ if (!NeedsQuotes) {
+ OS << Name;
+ return;
+ }
+
+ // Okay, we need quotes. Output the quotes and escape any scary characters as
+ // needed.
+ OS << '"';
+ PrintEscapedString(Name, OS);
+ OS << '"';
+}
+
+/// Turn the specified name into an 'LLVM name', which is either prefixed with %
+/// (if the string only contains simple characters) or is surrounded with ""'s
+/// (if it has special chars in it). Print it out.
+static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
+ switch (Prefix) {
+ case NoPrefix:
+ break;
+ case GlobalPrefix:
+ OS << '@';
+ break;
+ case ComdatPrefix:
+ OS << '$';
+ break;
+ case LabelPrefix:
+ break;
+ case LocalPrefix:
+ OS << '%';
+ break;
+ }
+ printLLVMNameWithoutPrefix(OS, Name);
+}
+
+/// Turn the specified name into an 'LLVM name', which is either prefixed with %
+/// (if the string only contains simple characters) or is surrounded with ""'s
+/// (if it has special chars in it). Print it out.
+static void PrintLLVMName(raw_ostream &OS, const Value *V) {
+ PrintLLVMName(OS, V->getName(),
+ isa<GlobalValue>(V) ? GlobalPrefix : LocalPrefix);
+}
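+
+// For example (hypothetical names): a local value named "tmp" prints as
+// %tmp and a global named "main" prints as @main, while a name starting
+// with a digit or containing other special characters is quoted, e.g.
+// %"2x" or @"has spaces".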
+
+
+namespace {
+class TypePrinting {
+ TypePrinting(const TypePrinting &) = delete;
+ void operator=(const TypePrinting&) = delete;
+public:
+
+ /// NamedTypes - The named types that are used by the current module.
+ TypeFinder NamedTypes;
+
+ /// NumberedTypes - The numbered types, along with their value.
+ DenseMap<StructType*, unsigned> NumberedTypes;
+
+ TypePrinting() = default;
+
+ void incorporateTypes(const Module &M);
+
+ void print(Type *Ty, raw_ostream &OS);
+
+ void printStructBody(StructType *Ty, raw_ostream &OS);
+};
+} // namespace
+
+void TypePrinting::incorporateTypes(const Module &M) {
+ NamedTypes.run(M, false);
+
+ // The list of struct types we got back includes all the struct types. Split
+ // the unnamed ones out to a numbering and remove the literal (anonymous)
+ // structs.
+ unsigned NextNumber = 0;
+
+ std::vector<StructType*>::iterator NextToUse = NamedTypes.begin(), I, E;
+ for (I = NamedTypes.begin(), E = NamedTypes.end(); I != E; ++I) {
+ StructType *STy = *I;
+
+ // Ignore anonymous types.
+ if (STy->isLiteral())
+ continue;
+
+ if (STy->getName().empty())
+ NumberedTypes[STy] = NextNumber++;
+ else
+ *NextToUse++ = STy;
+ }
+
+ NamedTypes.erase(NextToUse, NamedTypes.end());
+}
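+
+// For example (hypothetical module): given
+//   %struct.Pair = type { i32, i32 }
+// the named struct stays in NamedTypes, while a non-literal struct without
+// a name is assigned the next number and later prints as %0, %1, ...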
+
+
+/// CalcTypeName - Write the specified type to the specified raw_ostream, making
+/// use of type names or up references to shorten the type name where possible.
+void TypePrinting::print(Type *Ty, raw_ostream &OS) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: OS << "void"; return;
+ case Type::HalfTyID: OS << "half"; return;
+ case Type::FloatTyID: OS << "float"; return;
+ case Type::DoubleTyID: OS << "double"; return;
+ case Type::X86_FP80TyID: OS << "x86_fp80"; return;
+ case Type::FP128TyID: OS << "fp128"; return;
+ case Type::PPC_FP128TyID: OS << "ppc_fp128"; return;
+ case Type::LabelTyID: OS << "label"; return;
+ case Type::MetadataTyID: OS << "metadata"; return;
+ case Type::X86_MMXTyID: OS << "x86_mmx"; return;
+ case Type::TokenTyID: OS << "token"; return;
+ case Type::IntegerTyID:
+ OS << 'i' << cast<IntegerType>(Ty)->getBitWidth();
+ return;
+
+ case Type::FunctionTyID: {
+ FunctionType *FTy = cast<FunctionType>(Ty);
+ print(FTy->getReturnType(), OS);
+ OS << " (";
+ for (FunctionType::param_iterator I = FTy->param_begin(),
+ E = FTy->param_end(); I != E; ++I) {
+ if (I != FTy->param_begin())
+ OS << ", ";
+ print(*I, OS);
+ }
+ if (FTy->isVarArg()) {
+ if (FTy->getNumParams()) OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+ return;
+ }
+ case Type::StructTyID: {
+ StructType *STy = cast<StructType>(Ty);
+
+ if (STy->isLiteral())
+ return printStructBody(STy, OS);
+
+ if (!STy->getName().empty())
+ return PrintLLVMName(OS, STy->getName(), LocalPrefix);
+
+ DenseMap<StructType*, unsigned>::iterator I = NumberedTypes.find(STy);
+ if (I != NumberedTypes.end())
+ OS << '%' << I->second;
+ else // Not enumerated, print the hex address.
+ OS << "%\"type " << STy << '\"';
+ return;
+ }
+ case Type::PointerTyID: {
+ PointerType *PTy = cast<PointerType>(Ty);
+ print(PTy->getElementType(), OS);
+ if (unsigned AddressSpace = PTy->getAddressSpace())
+ OS << " addrspace(" << AddressSpace << ')';
+ OS << '*';
+ return;
+ }
+ case Type::ArrayTyID: {
+ ArrayType *ATy = cast<ArrayType>(Ty);
+ OS << '[' << ATy->getNumElements() << " x ";
+ print(ATy->getElementType(), OS);
+ OS << ']';
+ return;
+ }
+ case Type::VectorTyID: {
+ VectorType *PTy = cast<VectorType>(Ty);
+ OS << "<" << PTy->getNumElements() << " x ";
+ print(PTy->getElementType(), OS);
+ OS << '>';
+ return;
+ }
+ }
+ llvm_unreachable("Invalid TypeID");
+}
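+
+// A few renderings this switch produces (illustrative):
+//   i32                (IntegerTyID, 32-bit)
+//   [4 x float]        (ArrayTyID)
+//   <8 x i16>          (VectorTyID)
+//   i8 addrspace(1)*   (PointerTyID with a non-zero address space)
+//   void (i32, ...)    (vararg FunctionTyID)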
+
+void TypePrinting::printStructBody(StructType *STy, raw_ostream &OS) {
+ if (STy->isOpaque()) {
+ OS << "opaque";
+ return;
+ }
+
+ if (STy->isPacked())
+ OS << '<';
+
+ if (STy->getNumElements() == 0) {
+ OS << "{}";
+ } else {
+ StructType::element_iterator I = STy->element_begin();
+ OS << "{ ";
+ print(*I++, OS);
+ for (StructType::element_iterator E = STy->element_end(); I != E; ++I) {
+ OS << ", ";
+ print(*I, OS);
+ }
+
+ OS << " }";
+ }
+ if (STy->isPacked())
+ OS << '>';
+}
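+
+// For example, printStructBody renders "{ i32, i8* }" for an ordinary body,
+// "<{ i32, i8 }>" when the struct is packed, "{}" when it has no elements,
+// and "opaque" for opaque structs.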
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+// SlotTracker Class: Enumerate slot numbers for unnamed values
+//===----------------------------------------------------------------------===//
+/// This class provides computation of slot numbers for LLVM Assembly writing.
+///
+class SlotTracker {
+public:
+ /// ValueMap - A mapping of Values to slot numbers.
+ typedef DenseMap<const Value*, unsigned> ValueMap;
+
+private:
+ /// TheModule - The module for which we are holding slot numbers.
+ const Module* TheModule;
+
+ /// TheFunction - The function for which we are holding slot numbers.
+ const Function* TheFunction;
+ bool FunctionProcessed;
+ bool ShouldInitializeAllMetadata;
+
+ /// mMap - The slot map for the module level data.
+ ValueMap mMap;
+ unsigned mNext;
+
+ /// fMap - The slot map for the function level data.
+ ValueMap fMap;
+ unsigned fNext;
+
+ /// mdnMap - Map for MDNodes.
+ DenseMap<const MDNode*, unsigned> mdnMap;
+ unsigned mdnNext;
+
+ /// asMap - The slot map for attribute sets.
+ DenseMap<AttributeSet, unsigned> asMap;
+ unsigned asNext;
+public:
+ /// Construct from a module.
+ ///
+ /// If \c ShouldInitializeAllMetadata, initializes all metadata in all
+ /// functions, giving correct numbering for metadata referenced only from
+ /// within a function (even if no functions have been initialized).
+ explicit SlotTracker(const Module *M,
+ bool ShouldInitializeAllMetadata = false);
+ /// Construct from a function, starting out in the incorporated state.
+ ///
+ /// If \c ShouldInitializeAllMetadata, initializes all metadata in all
+ /// functions, giving correct numbering for metadata referenced only from
+ /// within a function (even if no functions have been initialized).
+ explicit SlotTracker(const Function *F,
+ bool ShouldInitializeAllMetadata = false);
+
+ /// Return the slot number of the specified value in its type
+ /// plane. If something is not in the SlotTracker, return -1.
+ int getLocalSlot(const Value *V);
+ int getGlobalSlot(const GlobalValue *V);
+ int getMetadataSlot(const MDNode *N);
+ int getAttributeGroupSlot(AttributeSet AS);
+
+ /// If you'd like to deal with a function instead of just a module, use
+ /// this method to get its data into the SlotTracker.
+ void incorporateFunction(const Function *F) {
+ TheFunction = F;
+ FunctionProcessed = false;
+ }
+
+ const Function *getFunction() const { return TheFunction; }
+
+ /// After calling incorporateFunction, use this method to remove the
+ /// most recently incorporated function from the SlotTracker. This
+ /// will reset the state of the machine back to just the module contents.
+ void purgeFunction();
+
+ /// MDNode map iterators.
+ typedef DenseMap<const MDNode*, unsigned>::iterator mdn_iterator;
+ mdn_iterator mdn_begin() { return mdnMap.begin(); }
+ mdn_iterator mdn_end() { return mdnMap.end(); }
+ unsigned mdn_size() const { return mdnMap.size(); }
+ bool mdn_empty() const { return mdnMap.empty(); }
+
+ /// AttributeSet map iterators.
+ typedef DenseMap<AttributeSet, unsigned>::iterator as_iterator;
+ as_iterator as_begin() { return asMap.begin(); }
+ as_iterator as_end() { return asMap.end(); }
+ unsigned as_size() const { return asMap.size(); }
+ bool as_empty() const { return asMap.empty(); }
+
+ /// This function does the actual initialization.
+ inline void initialize();
+
+ // Implementation Details
+private:
+ /// CreateModuleSlot - Insert the specified GlobalValue* into the slot table.
+ void CreateModuleSlot(const GlobalValue *V);
+
+ /// CreateMetadataSlot - Insert the specified MDNode* into the slot table.
+ void CreateMetadataSlot(const MDNode *N);
+
+ /// CreateFunctionSlot - Insert the specified Value* into the slot table.
+ void CreateFunctionSlot(const Value *V);
+
+ /// \brief Insert the specified AttributeSet into the slot table.
+ void CreateAttributeSetSlot(AttributeSet AS);
+
+ /// Add all of the module level global variables (and their initializers)
+ /// and function declarations, but not the contents of those functions.
+ void processModule();
+
+ /// Add all of the functions arguments, basic blocks, and instructions.
+ void processFunction();
+
+ /// Add all of the metadata from a function.
+ void processFunctionMetadata(const Function &F);
+
+ /// Add all of the metadata from an instruction.
+ void processInstructionMetadata(const Instruction &I);
+
+ SlotTracker(const SlotTracker &) = delete;
+ void operator=(const SlotTracker &) = delete;
+};
+} // namespace llvm
+
+ModuleSlotTracker::ModuleSlotTracker(SlotTracker &Machine, const Module *M,
+ const Function *F)
+ : M(M), F(F), Machine(&Machine) {}
+
+ModuleSlotTracker::ModuleSlotTracker(const Module *M,
+ bool ShouldInitializeAllMetadata)
+ : MachineStorage(M ? new SlotTracker(M, ShouldInitializeAllMetadata)
+ : nullptr),
+ M(M), Machine(MachineStorage.get()) {}
+
+ModuleSlotTracker::~ModuleSlotTracker() {}
+
+void ModuleSlotTracker::incorporateFunction(const Function &F) {
+ if (!Machine)
+ return;
+
+ // Nothing to do if this is the right function already.
+ if (this->F == &F)
+ return;
+ if (this->F)
+ Machine->purgeFunction();
+ Machine->incorporateFunction(&F);
+ this->F = &F;
+}
+
+int ModuleSlotTracker::getLocalSlot(const Value *V) {
+ assert(F && "No function incorporated");
+ return Machine->getLocalSlot(V);
+}
+
+static SlotTracker *createSlotTracker(const Value *V) {
+ if (const Argument *FA = dyn_cast<Argument>(V))
+ return new SlotTracker(FA->getParent());
+
+ if (const Instruction *I = dyn_cast<Instruction>(V))
+ if (I->getParent())
+ return new SlotTracker(I->getParent()->getParent());
+
+ if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+ return new SlotTracker(BB->getParent());
+
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ return new SlotTracker(GV->getParent());
+
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+ return new SlotTracker(GA->getParent());
+
+ if (const Function *Func = dyn_cast<Function>(V))
+ return new SlotTracker(Func);
+
+ return nullptr;
+}
+
+#if 0
+#define ST_DEBUG(X) dbgs() << X
+#else
+#define ST_DEBUG(X)
+#endif
+
+// Module level constructor. Causes the contents of the Module (sans functions)
+// to be added to the slot table.
+SlotTracker::SlotTracker(const Module *M, bool ShouldInitializeAllMetadata)
+ : TheModule(M), TheFunction(nullptr), FunctionProcessed(false),
+ ShouldInitializeAllMetadata(ShouldInitializeAllMetadata), mNext(0),
+ fNext(0), mdnNext(0), asNext(0) {}
+
+// Function level constructor. Causes the contents of the Module and the one
+// function provided to be added to the slot table.
+SlotTracker::SlotTracker(const Function *F, bool ShouldInitializeAllMetadata)
+ : TheModule(F ? F->getParent() : nullptr), TheFunction(F),
+ FunctionProcessed(false),
+ ShouldInitializeAllMetadata(ShouldInitializeAllMetadata), mNext(0),
+ fNext(0), mdnNext(0), asNext(0) {}
+
+inline void SlotTracker::initialize() {
+ if (TheModule) {
+ processModule();
+ TheModule = nullptr; ///< Prevent re-processing next time we're called.
+ }
+
+ if (TheFunction && !FunctionProcessed)
+ processFunction();
+}
+
+// Iterate through all the global variables, functions, and global
+// variable initializers and create slots for them.
+void SlotTracker::processModule() {
+ ST_DEBUG("begin processModule!\n");
+
+ // Add all of the unnamed global variables to the value table.
+ for (const GlobalVariable &Var : TheModule->globals()) {
+ if (!Var.hasName())
+ CreateModuleSlot(&Var);
+ }
+
+ for (const GlobalAlias &A : TheModule->aliases()) {
+ if (!A.hasName())
+ CreateModuleSlot(&A);
+ }
+
+ // Add metadata used by named metadata.
+ for (const NamedMDNode &NMD : TheModule->named_metadata()) {
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
+ CreateMetadataSlot(NMD.getOperand(i));
+ }
+
+ for (const Function &F : *TheModule) {
+ if (!F.hasName())
+ // Add all the unnamed functions to the table.
+ CreateModuleSlot(&F);
+
+ if (ShouldInitializeAllMetadata)
+ processFunctionMetadata(F);
+
+ // Add all the function attributes to the table.
+ // FIXME: Add attributes of other objects?
+ AttributeSet FnAttrs = F.getAttributes().getFnAttributes();
+ if (FnAttrs.hasAttributes(AttributeSet::FunctionIndex))
+ CreateAttributeSetSlot(FnAttrs);
+ }
+
+ ST_DEBUG("end processModule!\n");
+}
+
+// Process the arguments, basic blocks, and instructions of a function.
+void SlotTracker::processFunction() {
+ ST_DEBUG("begin processFunction!\n");
+ fNext = 0;
+
+ // Process function metadata if it wasn't hit at the module-level.
+ if (!ShouldInitializeAllMetadata)
+ processFunctionMetadata(*TheFunction);
+
+ // Add all the function arguments with no names.
+ for (Function::const_arg_iterator AI = TheFunction->arg_begin(),
+ AE = TheFunction->arg_end(); AI != AE; ++AI)
+ if (!AI->hasName())
+ CreateFunctionSlot(&*AI);
+
+ ST_DEBUG("Inserting Instructions:\n");
+
+ // Add all of the basic blocks and instructions with no names.
+ for (auto &BB : *TheFunction) {
+ if (!BB.hasName())
+ CreateFunctionSlot(&BB);
+
+ for (auto &I : BB) {
+ if (!I.getType()->isVoidTy() && !I.hasName())
+ CreateFunctionSlot(&I);
+
+ // We allow direct calls to any llvm.foo function here, because the
+ // target may not be linked into the optimizer.
+ if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ // Add all the call attributes to the table.
+ AttributeSet Attrs = CI->getAttributes().getFnAttributes();
+ if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ CreateAttributeSetSlot(Attrs);
+ } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+ // Add all the call attributes to the table.
+ AttributeSet Attrs = II->getAttributes().getFnAttributes();
+ if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ CreateAttributeSetSlot(Attrs);
+ }
+ }
+ }
+
+ FunctionProcessed = true;
+
+ ST_DEBUG("end processFunction!\n");
+}
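+
+// Illustrative numbering (hypothetical function): in
+//   define i32 @f(i32, i32) {
+//     %3 = add i32 %0, %1
+//     ret i32 %3
+//   }
+// the two unnamed arguments take local slots 0 and 1, the unnamed entry
+// block takes slot 2, and the add instruction takes slot 3; the void-typed
+// ret needs no slot.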
+
+void SlotTracker::processFunctionMetadata(const Function &F) {
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ F.getAllMetadata(MDs);
+ for (auto &MD : MDs)
+ CreateMetadataSlot(MD.second);
+
+ for (auto &BB : F) {
+ for (auto &I : BB)
+ processInstructionMetadata(I);
+ }
+}
+
+void SlotTracker::processInstructionMetadata(const Instruction &I) {
+ // Process metadata used directly by intrinsics.
+ if (const CallInst *CI = dyn_cast<CallInst>(&I))
+ if (Function *F = CI->getCalledFunction())
+ if (F->isIntrinsic())
+ for (auto &Op : I.operands())
+ if (auto *V = dyn_cast_or_null<MetadataAsValue>(Op))
+ if (MDNode *N = dyn_cast<MDNode>(V->getMetadata()))
+ CreateMetadataSlot(N);
+
+ // Process metadata attached to this instruction.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ I.getAllMetadata(MDs);
+ for (auto &MD : MDs)
+ CreateMetadataSlot(MD.second);
+}
+
+/// Clean up after incorporating a function. This is the only way to get out of
+/// the function incorporation state that affects get*Slot/Create*Slot. Function
+/// incorporation state is indicated by TheFunction != 0.
+void SlotTracker::purgeFunction() {
+ ST_DEBUG("begin purgeFunction!\n");
+ fMap.clear(); // Simply discard the function level map
+ TheFunction = nullptr;
+ FunctionProcessed = false;
+ ST_DEBUG("end purgeFunction!\n");
+}
+
+/// getGlobalSlot - Get the slot number of a global value.
+int SlotTracker::getGlobalSlot(const GlobalValue *V) {
+ // Check for uninitialized state and do lazy initialization.
+ initialize();
+
+ // Find the value in the module map
+ ValueMap::iterator MI = mMap.find(V);
+ return MI == mMap.end() ? -1 : (int)MI->second;
+}
+
+/// getMetadataSlot - Get the slot number of a MDNode.
+int SlotTracker::getMetadataSlot(const MDNode *N) {
+ // Check for uninitialized state and do lazy initialization.
+ initialize();
+
+ // Find the MDNode in the module map
+ mdn_iterator MI = mdnMap.find(N);
+ return MI == mdnMap.end() ? -1 : (int)MI->second;
+}
+
+
+/// getLocalSlot - Get the slot number for a value that is local to a function.
+int SlotTracker::getLocalSlot(const Value *V) {
+ assert(!isa<Constant>(V) && "Can't get a constant or global slot with this!");
+
+ // Check for uninitialized state and do lazy initialization.
+ initialize();
+
+ ValueMap::iterator FI = fMap.find(V);
+ return FI == fMap.end() ? -1 : (int)FI->second;
+}
+
+int SlotTracker::getAttributeGroupSlot(AttributeSet AS) {
+ // Check for uninitialized state and do lazy initialization.
+ initialize();
+
+ // Find the AttributeSet in the module map.
+ as_iterator AI = asMap.find(AS);
+ return AI == asMap.end() ? -1 : (int)AI->second;
+}
+
+/// CreateModuleSlot - Insert the specified GlobalValue* into the slot table.
+void SlotTracker::CreateModuleSlot(const GlobalValue *V) {
+ assert(V && "Can't insert a null Value into SlotTracker!");
+ assert(!V->getType()->isVoidTy() && "Doesn't need a slot!");
+ assert(!V->hasName() && "Doesn't need a slot!");
+
+ unsigned DestSlot = mNext++;
+ mMap[V] = DestSlot;
+
+ ST_DEBUG(" Inserting value [" << V->getType() << "] = " << V << " slot=" <<
+ DestSlot << " [");
+ // G = Global, F = Function, A = Alias, o = other
+ ST_DEBUG((isa<GlobalVariable>(V) ? 'G' :
+ (isa<Function>(V) ? 'F' :
+ (isa<GlobalAlias>(V) ? 'A' : 'o'))) << "]\n");
+}
+
+/// CreateFunctionSlot - Create a new slot for the specified value if it has
+/// no name.
+void SlotTracker::CreateFunctionSlot(const Value *V) {
+ assert(!V->getType()->isVoidTy() && !V->hasName() && "Doesn't need a slot!");
+
+ unsigned DestSlot = fNext++;
+ fMap[V] = DestSlot;
+
+ // G = Global, F = Function, o = other
+ ST_DEBUG(" Inserting value [" << V->getType() << "] = " << V << " slot=" <<
+ DestSlot << " [o]\n");
+}
+
+/// CreateMetadataSlot - Insert the specified MDNode* into the slot table.
+void SlotTracker::CreateMetadataSlot(const MDNode *N) {
+ assert(N && "Can't insert a null Value into SlotTracker!");
+
+ unsigned DestSlot = mdnNext;
+ if (!mdnMap.insert(std::make_pair(N, DestSlot)).second)
+ return;
+ ++mdnNext;
+
+ // Recursively add any MDNodes referenced by operands.
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ if (const MDNode *Op = dyn_cast_or_null<MDNode>(N->getOperand(i)))
+ CreateMetadataSlot(Op);
+}
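+
+// For example (hypothetical metadata): requesting a slot for !0 in
+//   !0 = !{!1}
+//   !1 = !{}
+// assigns !0 its slot first, then the operand walk above assigns !1, so
+// operands get later numbers unless they were already slotted.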
+
+void SlotTracker::CreateAttributeSetSlot(AttributeSet AS) {
+ assert(AS.hasAttributes(AttributeSet::FunctionIndex) &&
+ "Doesn't need a slot!");
+
+ as_iterator I = asMap.find(AS);
+ if (I != asMap.end())
+ return;
+
+ unsigned DestSlot = asNext++;
+ asMap[AS] = DestSlot;
+}
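+
+// These slots back the #N attribute-group references in the output, with
+// definitions printed elsewhere as, e.g. (illustrative):
+//   attributes #0 = { nounwind readnone }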
+
+//===----------------------------------------------------------------------===//
+// AsmWriter Implementation
+//===----------------------------------------------------------------------===//
+
+static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context);
+
+static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context,
+ bool FromValue = false);
+
+static const char *getPredicateText(unsigned predicate) {
+ const char *pred = "unknown";
+ switch (predicate) {
+ case FCmpInst::FCMP_FALSE: pred = "false"; break;
+ case FCmpInst::FCMP_OEQ: pred = "oeq"; break;
+ case FCmpInst::FCMP_OGT: pred = "ogt"; break;
+ case FCmpInst::FCMP_OGE: pred = "oge"; break;
+ case FCmpInst::FCMP_OLT: pred = "olt"; break;
+ case FCmpInst::FCMP_OLE: pred = "ole"; break;
+ case FCmpInst::FCMP_ONE: pred = "one"; break;
+ case FCmpInst::FCMP_ORD: pred = "ord"; break;
+ case FCmpInst::FCMP_UNO: pred = "uno"; break;
+ case FCmpInst::FCMP_UEQ: pred = "ueq"; break;
+ case FCmpInst::FCMP_UGT: pred = "ugt"; break;
+ case FCmpInst::FCMP_UGE: pred = "uge"; break;
+ case FCmpInst::FCMP_ULT: pred = "ult"; break;
+ case FCmpInst::FCMP_ULE: pred = "ule"; break;
+ case FCmpInst::FCMP_UNE: pred = "une"; break;
+ case FCmpInst::FCMP_TRUE: pred = "true"; break;
+ case ICmpInst::ICMP_EQ: pred = "eq"; break;
+ case ICmpInst::ICMP_NE: pred = "ne"; break;
+ case ICmpInst::ICMP_SGT: pred = "sgt"; break;
+ case ICmpInst::ICMP_SGE: pred = "sge"; break;
+ case ICmpInst::ICMP_SLT: pred = "slt"; break;
+ case ICmpInst::ICMP_SLE: pred = "sle"; break;
+ case ICmpInst::ICMP_UGT: pred = "ugt"; break;
+ case ICmpInst::ICMP_UGE: pred = "uge"; break;
+ case ICmpInst::ICMP_ULT: pred = "ult"; break;
+ case ICmpInst::ICMP_ULE: pred = "ule"; break;
+ }
+ return pred;
+}
+
+static void writeAtomicRMWOperation(raw_ostream &Out,
+ AtomicRMWInst::BinOp Op) {
+ switch (Op) {
+ default: Out << " <unknown operation " << Op << ">"; break;
+ case AtomicRMWInst::Xchg: Out << " xchg"; break;
+ case AtomicRMWInst::Add: Out << " add"; break;
+ case AtomicRMWInst::Sub: Out << " sub"; break;
+ case AtomicRMWInst::And: Out << " and"; break;
+ case AtomicRMWInst::Nand: Out << " nand"; break;
+ case AtomicRMWInst::Or: Out << " or"; break;
+ case AtomicRMWInst::Xor: Out << " xor"; break;
+ case AtomicRMWInst::Max: Out << " max"; break;
+ case AtomicRMWInst::Min: Out << " min"; break;
+ case AtomicRMWInst::UMax: Out << " umax"; break;
+ case AtomicRMWInst::UMin: Out << " umin"; break;
+ }
+}
+
+static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
+ if (const FPMathOperator *FPO = dyn_cast<const FPMathOperator>(U)) {
+ // Unsafe algebra implies all the others, no need to write them all out
+ if (FPO->hasUnsafeAlgebra())
+ Out << " fast";
+ else {
+ if (FPO->hasNoNaNs())
+ Out << " nnan";
+ if (FPO->hasNoInfs())
+ Out << " ninf";
+ if (FPO->hasNoSignedZeros())
+ Out << " nsz";
+ if (FPO->hasAllowReciprocal())
+ Out << " arcp";
+ }
+ }
+
+ if (const OverflowingBinaryOperator *OBO =
+ dyn_cast<OverflowingBinaryOperator>(U)) {
+ if (OBO->hasNoUnsignedWrap())
+ Out << " nuw";
+ if (OBO->hasNoSignedWrap())
+ Out << " nsw";
+ } else if (const PossiblyExactOperator *Div =
+ dyn_cast<PossiblyExactOperator>(U)) {
+ if (Div->isExact())
+ Out << " exact";
+ } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+ if (GEP->isInBounds())
+ Out << " inbounds";
+ }
+}
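+
+// For example, WriteOptimizationInfo supplies the flag keywords in lines
+// such as (hypothetical IR):
+//   %a = add nuw nsw i32 %x, %y
+//   %f = fadd fast float %p, %q
+//   %g = getelementptr inbounds i32, i32* %base, i64 1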
+
+static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
+ TypePrinting &TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+ if (CI->getType()->isIntegerTy(1)) {
+ Out << (CI->getZExtValue() ? "true" : "false");
+ return;
+ }
+ Out << CI->getValue();
+ return;
+ }
+
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+ if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle ||
+ &CFP->getValueAPF().getSemantics() == &APFloat::IEEEdouble) {
+ // We would like to output the FP constant value in exponential notation,
+ // but we cannot do this if doing so will lose precision. Check here to
+ // make sure that we only output it in exponential format if we can parse
+ // the value back and get the same value.
+ //
+ bool ignored;
+ bool isDouble = &CFP->getValueAPF().getSemantics()==&APFloat::IEEEdouble;
+ bool isInf = CFP->getValueAPF().isInfinity();
+ bool isNaN = CFP->getValueAPF().isNaN();
+ if (!isInf && !isNaN) {
+ double Val = isDouble ? CFP->getValueAPF().convertToDouble() :
+ CFP->getValueAPF().convertToFloat();
+ SmallString<128> StrVal;
+ raw_svector_ostream(StrVal) << Val;
+
+ // Check to make sure that the stringized number is not some string like
+ // "Inf" or NaN, that atof will accept, but the lexer will not. Check
+ // that the string matches the "[-+]?[0-9]" regex.
+ //
+ if ((StrVal[0] >= '0' && StrVal[0] <= '9') ||
+ ((StrVal[0] == '-' || StrVal[0] == '+') &&
+ (StrVal[1] >= '0' && StrVal[1] <= '9'))) {
+ // Reparse stringized version!
+ if (APFloat(APFloat::IEEEdouble, StrVal).convertToDouble() == Val) {
+ Out << StrVal;
+ return;
+ }
+ }
+ }
+ // Otherwise we could not reparse it to exactly the same value, so we must
+ // output the string in hexadecimal format! Note that loading and storing
+ // floating point types changes the bits of NaNs on some hosts, notably
+ // x86, so we must not use these types.
+ static_assert(sizeof(double) == sizeof(uint64_t),
+ "assuming that double is 64 bits!");
+ APFloat apf = CFP->getValueAPF();
+ // Floats are represented in ASCII IR as double, convert.
+ if (!isDouble)
+ apf.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
+ &ignored);
+ Out << format_hex(apf.bitcastToAPInt().getZExtValue(), 0, /*Upper=*/true);
+ return;
+ }
+
+ // Either half, or some form of long double.
+ // These appear as a magic letter identifying the type, then a
+ // fixed number of hex digits.
+ Out << "0x";
+ APInt API = CFP->getValueAPF().bitcastToAPInt();
+ if (&CFP->getValueAPF().getSemantics() == &APFloat::x87DoubleExtended) {
+ Out << 'K';
+ Out << format_hex_no_prefix(API.getHiBits(16).getZExtValue(), 4,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ return;
+ } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEquad) {
+ Out << 'L';
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ } else if (&CFP->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble) {
+ Out << 'M';
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEhalf) {
+ Out << 'H';
+ Out << format_hex_no_prefix(API.getZExtValue(), 4,
+ /*Upper=*/true);
+ } else
+ llvm_unreachable("Unsupported floating point type");
+ return;
+ }
+
+ if (isa<ConstantAggregateZero>(CV)) {
+ Out << "zeroinitializer";
+ return;
+ }
+
+ if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
+ Out << "blockaddress(";
+ WriteAsOperandInternal(Out, BA->getFunction(), &TypePrinter, Machine,
+ Context);
+ Out << ", ";
+ WriteAsOperandInternal(Out, BA->getBasicBlock(), &TypePrinter, Machine,
+ Context);
+ Out << ")";
+ return;
+ }
+
+ if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
+ Type *ETy = CA->getType()->getElementType();
+ Out << '[';
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getOperand(0),
+ &TypePrinter, Machine,
+ Context);
+ for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) {
+ Out << ", ";
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getOperand(i), &TypePrinter, Machine,
+ Context);
+ }
+ Out << ']';
+ return;
+ }
+
+ if (const ConstantDataArray *CA = dyn_cast<ConstantDataArray>(CV)) {
+ // As a special case, print the array as a string if it is an array of
+ // i8 with ConstantInt values.
+ if (CA->isString()) {
+ Out << "c\"";
+ PrintEscapedString(CA->getAsString(), Out);
+ Out << '"';
+ return;
+ }
+
+ Type *ETy = CA->getType()->getElementType();
+ Out << '[';
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getElementAsConstant(0),
+ &TypePrinter, Machine,
+ Context);
+ for (unsigned i = 1, e = CA->getNumElements(); i != e; ++i) {
+ Out << ", ";
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getElementAsConstant(i), &TypePrinter,
+ Machine, Context);
+ }
+ Out << ']';
+ return;
+ }
+
+
+ if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
+ if (CS->getType()->isPacked())
+ Out << '<';
+ Out << '{';
+ unsigned N = CS->getNumOperands();
+ if (N) {
+ Out << ' ';
+ TypePrinter.print(CS->getOperand(0)->getType(), Out);
+ Out << ' ';
+
+ WriteAsOperandInternal(Out, CS->getOperand(0), &TypePrinter, Machine,
+ Context);
+
+ for (unsigned i = 1; i < N; i++) {
+ Out << ", ";
+ TypePrinter.print(CS->getOperand(i)->getType(), Out);
+ Out << ' ';
+
+ WriteAsOperandInternal(Out, CS->getOperand(i), &TypePrinter, Machine,
+ Context);
+ }
+ Out << ' ';
+ }
+
+ Out << '}';
+ if (CS->getType()->isPacked())
+ Out << '>';
+ return;
+ }
+
+ if (isa<ConstantVector>(CV) || isa<ConstantDataVector>(CV)) {
+ Type *ETy = CV->getType()->getVectorElementType();
+ Out << '<';
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CV->getAggregateElement(0U), &TypePrinter,
+ Machine, Context);
+ for (unsigned i = 1, e = CV->getType()->getVectorNumElements(); i != e;++i){
+ Out << ", ";
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CV->getAggregateElement(i), &TypePrinter,
+ Machine, Context);
+ }
+ Out << '>';
+ return;
+ }
+
+ if (isa<ConstantPointerNull>(CV)) {
+ Out << "null";
+ return;
+ }
+
+ if (isa<ConstantTokenNone>(CV)) {
+ Out << "none";
+ return;
+ }
+
+ if (isa<UndefValue>(CV)) {
+ Out << "undef";
+ return;
+ }
+
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+ Out << CE->getOpcodeName();
+ WriteOptimizationInfo(Out, CE);
+ if (CE->isCompare())
+ Out << ' ' << getPredicateText(CE->getPredicate());
+ Out << " (";
+
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
+ TypePrinter.print(GEP->getSourceElementType(), Out);
+ Out << ", ";
+ }
+
+ for (User::const_op_iterator OI=CE->op_begin(); OI != CE->op_end(); ++OI) {
+ TypePrinter.print((*OI)->getType(), Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, *OI, &TypePrinter, Machine, Context);
+ if (OI+1 != CE->op_end())
+ Out << ", ";
+ }
+
+ if (CE->hasIndices()) {
+ ArrayRef<unsigned> Indices = CE->getIndices();
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ Out << ", " << Indices[i];
+ }
+
+ if (CE->isCast()) {
+ Out << " to ";
+ TypePrinter.print(CE->getType(), Out);
+ }
+
+ Out << ')';
+ return;
+ }
+
+ Out << "<placeholder or erroneous Constant>";
+}
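+
+// Sample constant renderings (illustrative): "i1 true" prints as true;
+// float 1.5 reparses exactly and prints as 1.500000e+00; float 0.1 does
+// not, so it prints as the hex of its double widening, 0x3FB99999A0000000;
+// and half 1.0 uses the magic-letter form 0xH3C00.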
+
+static void writeMDTuple(raw_ostream &Out, const MDTuple *Node,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!{";
+ for (unsigned mi = 0, me = Node->getNumOperands(); mi != me; ++mi) {
+ const Metadata *MD = Node->getOperand(mi);
+ if (!MD)
+ Out << "null";
+ else if (auto *MDV = dyn_cast<ValueAsMetadata>(MD)) {
+ Value *V = MDV->getValue();
+ TypePrinter->print(V->getType(), Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, V, TypePrinter, Machine, Context);
+ } else {
+ WriteAsOperandInternal(Out, MD, TypePrinter, Machine, Context);
+ }
+ if (mi + 1 != me)
+ Out << ", ";
+ }
+
+ Out << "}";
+}
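+
+// For example (hypothetical tuple): a node whose operands are the value
+// i32 7, an MDString, and a null operand prints as
+//   !{i32 7, !"name", null}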
+
+namespace {
+struct FieldSeparator {
+ bool Skip;
+ const char *Sep;
+ FieldSeparator(const char *Sep = ", ") : Skip(true), Sep(Sep) {}
+};
+raw_ostream &operator<<(raw_ostream &OS, FieldSeparator &FS) {
+ if (FS.Skip) {
+ FS.Skip = false;
+ return OS;
+ }
+ return OS << FS.Sep;
+}
+struct MDFieldPrinter {
+ raw_ostream &Out;
+ FieldSeparator FS;
+ TypePrinting *TypePrinter;
+ SlotTracker *Machine;
+ const Module *Context;
+
+ explicit MDFieldPrinter(raw_ostream &Out)
+ : Out(Out), TypePrinter(nullptr), Machine(nullptr), Context(nullptr) {}
+ MDFieldPrinter(raw_ostream &Out, TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context)
+ : Out(Out), TypePrinter(TypePrinter), Machine(Machine), Context(Context) {
+ }
+ void printTag(const DINode *N);
+ void printMacinfoType(const DIMacroNode *N);
+ void printString(StringRef Name, StringRef Value,
+ bool ShouldSkipEmpty = true);
+ void printMetadata(StringRef Name, const Metadata *MD,
+ bool ShouldSkipNull = true);
+ template <class IntTy>
+ void printInt(StringRef Name, IntTy Int, bool ShouldSkipZero = true);
+ void printBool(StringRef Name, bool Value);
+ void printDIFlags(StringRef Name, unsigned Flags);
+ template <class IntTy, class Stringifier>
+ void printDwarfEnum(StringRef Name, IntTy Value, Stringifier toString,
+ bool ShouldSkipZero = true);
+};
+} // end namespace
+
+void MDFieldPrinter::printTag(const DINode *N) {
+ Out << FS << "tag: ";
+ if (const char *Tag = dwarf::TagString(N->getTag()))
+ Out << Tag;
+ else
+ Out << N->getTag();
+}
+
+void MDFieldPrinter::printMacinfoType(const DIMacroNode *N) {
+ Out << FS << "type: ";
+ if (const char *Type = dwarf::MacinfoString(N->getMacinfoType()))
+ Out << Type;
+ else
+ Out << N->getMacinfoType();
+}
+
+void MDFieldPrinter::printString(StringRef Name, StringRef Value,
+ bool ShouldSkipEmpty) {
+ if (ShouldSkipEmpty && Value.empty())
+ return;
+
+ Out << FS << Name << ": \"";
+ PrintEscapedString(Value, Out);
+ Out << "\"";
+}
+
+static void writeMetadataAsOperand(raw_ostream &Out, const Metadata *MD,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
+ if (!MD) {
+ Out << "null";
+ return;
+ }
+ WriteAsOperandInternal(Out, MD, TypePrinter, Machine, Context);
+}
+
+void MDFieldPrinter::printMetadata(StringRef Name, const Metadata *MD,
+ bool ShouldSkipNull) {
+ if (ShouldSkipNull && !MD)
+ return;
+
+ Out << FS << Name << ": ";
+ writeMetadataAsOperand(Out, MD, TypePrinter, Machine, Context);
+}
+
+template <class IntTy>
+void MDFieldPrinter::printInt(StringRef Name, IntTy Int, bool ShouldSkipZero) {
+ if (ShouldSkipZero && !Int)
+ return;
+
+ Out << FS << Name << ": " << Int;
+}
+
+void MDFieldPrinter::printBool(StringRef Name, bool Value) {
+ Out << FS << Name << ": " << (Value ? "true" : "false");
+}
+
+void MDFieldPrinter::printDIFlags(StringRef Name, unsigned Flags) {
+ if (!Flags)
+ return;
+
+ Out << FS << Name << ": ";
+
+ SmallVector<unsigned, 8> SplitFlags;
+ unsigned Extra = DINode::splitFlags(Flags, SplitFlags);
+
+ FieldSeparator FlagsFS(" | ");
+ for (unsigned F : SplitFlags) {
+ const char *StringF = DINode::getFlagString(F);
+ assert(StringF && "Expected valid flag");
+ Out << FlagsFS << StringF;
+ }
+ if (Extra || SplitFlags.empty())
+ Out << FlagsFS << Extra;
+}
+
+template <class IntTy, class Stringifier>
+void MDFieldPrinter::printDwarfEnum(StringRef Name, IntTy Value,
+ Stringifier toString, bool ShouldSkipZero) {
+ if (!Value)
+ return;
+
+ Out << FS << Name << ": ";
+ if (const char *S = toString(Value))
+ Out << S;
+ else
+ Out << Value;
+}
+
+static void writeGenericDINode(raw_ostream &Out, const GenericDINode *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!GenericDINode(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printTag(N);
+ Printer.printString("header", N->getHeader());
+ if (N->getNumDwarfOperands()) {
+ Out << Printer.FS << "operands: {";
+ FieldSeparator IFS;
+ for (auto &I : N->dwarf_operands()) {
+ Out << IFS;
+ writeMetadataAsOperand(Out, I, TypePrinter, Machine, Context);
+ }
+ Out << "}";
+ }
+ Out << ")";
+}
+
+static void writeDILocation(raw_ostream &Out, const DILocation *DL,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DILocation(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ // Always output the line, since 0 is a relevant and important value for it.
+ Printer.printInt("line", DL->getLine(), /* ShouldSkipZero */ false);
+ Printer.printInt("column", DL->getColumn());
+ Printer.printMetadata("scope", DL->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("inlinedAt", DL->getRawInlinedAt());
+ Out << ")";
+}
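+
+// For example, a location with column 0 and no inlinedAt prints as
+//   !DILocation(line: 2, scope: !7)
+// while line is emitted even when it is 0, since 0 is meaningful there.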
+
+static void writeDISubrange(raw_ostream &Out, const DISubrange *N,
+ TypePrinting *, SlotTracker *, const Module *) {
+ Out << "!DISubrange(";
+ MDFieldPrinter Printer(Out);
+ Printer.printInt("count", N->getCount(), /* ShouldSkipZero */ false);
+ Printer.printInt("lowerBound", N->getLowerBound());
+ Out << ")";
+}
+
+static void writeDIEnumerator(raw_ostream &Out, const DIEnumerator *N,
+ TypePrinting *, SlotTracker *, const Module *) {
+ Out << "!DIEnumerator(";
+ MDFieldPrinter Printer(Out);
+ Printer.printString("name", N->getName(), /* ShouldSkipEmpty */ false);
+ Printer.printInt("value", N->getValue(), /* ShouldSkipZero */ false);
+ Out << ")";
+}
+
+static void writeDIBasicType(raw_ostream &Out, const DIBasicType *N,
+ TypePrinting *, SlotTracker *, const Module *) {
+ Out << "!DIBasicType(";
+ MDFieldPrinter Printer(Out);
+ if (N->getTag() != dwarf::DW_TAG_base_type)
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printInt("size", N->getSizeInBits());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printDwarfEnum("encoding", N->getEncoding(),
+ dwarf::AttributeEncodingString);
+ Out << ")";
+}
+
+static void writeDIDerivedType(raw_ostream &Out, const DIDerivedType *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DIDerivedType(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("baseType", N->getRawBaseType(),
+ /* ShouldSkipNull */ false);
+ Printer.printInt("size", N->getSizeInBits());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printInt("offset", N->getOffsetInBits());
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printMetadata("extraData", N->getRawExtraData());
+ Out << ")";
+}
+
+static void writeDICompositeType(raw_ostream &Out, const DICompositeType *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context) {
+ Out << "!DICompositeType(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("baseType", N->getRawBaseType());
+ Printer.printInt("size", N->getSizeInBits());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printInt("offset", N->getOffsetInBits());
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printMetadata("elements", N->getRawElements());
+ Printer.printDwarfEnum("runtimeLang", N->getRuntimeLang(),
+ dwarf::LanguageString);
+ Printer.printMetadata("vtableHolder", N->getRawVTableHolder());
+ Printer.printMetadata("templateParams", N->getRawTemplateParams());
+ Printer.printString("identifier", N->getIdentifier());
+ Out << ")";
+}
+
+static void writeDISubroutineType(raw_ostream &Out, const DISubroutineType *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context) {
+ Out << "!DISubroutineType(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printMetadata("types", N->getRawTypeArray(),
+ /* ShouldSkipNull */ false);
+ Out << ")";
+}
+
+static void writeDIFile(raw_ostream &Out, const DIFile *N, TypePrinting *,
+ SlotTracker *, const Module *) {
+ Out << "!DIFile(";
+ MDFieldPrinter Printer(Out);
+ Printer.printString("filename", N->getFilename(),
+ /* ShouldSkipEmpty */ false);
+ Printer.printString("directory", N->getDirectory(),
+ /* ShouldSkipEmpty */ false);
+ Out << ")";
+}
+
+static void writeDICompileUnit(raw_ostream &Out, const DICompileUnit *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DICompileUnit(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printDwarfEnum("language", N->getSourceLanguage(),
+ dwarf::LanguageString, /* ShouldSkipZero */ false);
+ Printer.printMetadata("file", N->getRawFile(), /* ShouldSkipNull */ false);
+ Printer.printString("producer", N->getProducer());
+ Printer.printBool("isOptimized", N->isOptimized());
+ Printer.printString("flags", N->getFlags());
+ Printer.printInt("runtimeVersion", N->getRuntimeVersion(),
+ /* ShouldSkipZero */ false);
+ Printer.printString("splitDebugFilename", N->getSplitDebugFilename());
+ Printer.printInt("emissionKind", N->getEmissionKind(),
+ /* ShouldSkipZero */ false);
+ Printer.printMetadata("enums", N->getRawEnumTypes());
+ Printer.printMetadata("retainedTypes", N->getRawRetainedTypes());
+ Printer.printMetadata("subprograms", N->getRawSubprograms());
+ Printer.printMetadata("globals", N->getRawGlobalVariables());
+ Printer.printMetadata("imports", N->getRawImportedEntities());
+ Printer.printMetadata("macros", N->getRawMacros());
+ Printer.printInt("dwoId", N->getDWOId());
+ Out << ")";
+}
+
+static void writeDISubprogram(raw_ostream &Out, const DISubprogram *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DISubprogram(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printString("name", N->getName());
+ Printer.printString("linkageName", N->getLinkageName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printBool("isLocal", N->isLocalToUnit());
+ Printer.printBool("isDefinition", N->isDefinition());
+ Printer.printInt("scopeLine", N->getScopeLine());
+ Printer.printMetadata("containingType", N->getRawContainingType());
+ Printer.printDwarfEnum("virtuality", N->getVirtuality(),
+ dwarf::VirtualityString);
+ Printer.printInt("virtualIndex", N->getVirtualIndex());
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printBool("isOptimized", N->isOptimized());
+ Printer.printMetadata("templateParams", N->getRawTemplateParams());
+ Printer.printMetadata("declaration", N->getRawDeclaration());
+ Printer.printMetadata("variables", N->getRawVariables());
+ Out << ")";
+}
+
+static void writeDILexicalBlock(raw_ostream &Out, const DILexicalBlock *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DILexicalBlock(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printInt("column", N->getColumn());
+ Out << ")";
+}
+
+static void writeDILexicalBlockFile(raw_ostream &Out,
+ const DILexicalBlockFile *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DILexicalBlockFile(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("discriminator", N->getDiscriminator(),
+ /* ShouldSkipZero */ false);
+ Out << ")";
+}
+
+static void writeDINamespace(raw_ostream &Out, const DINamespace *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DINamespace(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Out << ")";
+}
+
+static void writeDIMacro(raw_ostream &Out, const DIMacro *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DIMacro(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printMacinfoType(N);
+ Printer.printInt("line", N->getLine());
+ Printer.printString("name", N->getName());
+ Printer.printString("value", N->getValue());
+ Out << ")";
+}
+
+static void writeDIMacroFile(raw_ostream &Out, const DIMacroFile *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DIMacroFile(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("file", N->getRawFile(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("nodes", N->getRawElements());
+ Out << ")";
+}
+
+static void writeDIModule(raw_ostream &Out, const DIModule *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DIModule(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printString("name", N->getName());
+ Printer.printString("configMacros", N->getConfigurationMacros());
+ Printer.printString("includePath", N->getIncludePath());
+ Printer.printString("isysroot", N->getISysRoot());
+ Out << ")";
+}
+
+static void writeDITemplateTypeParameter(raw_ostream &Out,
+ const DITemplateTypeParameter *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DITemplateTypeParameter(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("type", N->getRawType(), /* ShouldSkipNull */ false);
+ Out << ")";
+}
+
+static void writeDITemplateValueParameter(raw_ostream &Out,
+ const DITemplateValueParameter *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DITemplateValueParameter(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ if (N->getTag() != dwarf::DW_TAG_template_value_parameter)
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printMetadata("value", N->getValue(), /* ShouldSkipNull */ false);
+ Out << ")";
+}
+
+static void writeDIGlobalVariable(raw_ostream &Out, const DIGlobalVariable *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context) {
+ Out << "!DIGlobalVariable(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printString("name", N->getName());
+ Printer.printString("linkageName", N->getLinkageName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printBool("isLocal", N->isLocalToUnit());
+ Printer.printBool("isDefinition", N->isDefinition());
+ Printer.printMetadata("variable", N->getRawVariable());
+ Printer.printMetadata("declaration", N->getRawStaticDataMemberDeclaration());
+ Out << ")";
+}
+
+static void writeDILocalVariable(raw_ostream &Out, const DILocalVariable *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context) {
+ Out << "!DILocalVariable(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printString("name", N->getName());
+ Printer.printInt("arg", N->getArg());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printDIFlags("flags", N->getFlags());
+ Out << ")";
+}
+
+static void writeDIExpression(raw_ostream &Out, const DIExpression *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DIExpression(";
+ FieldSeparator FS;
+ if (N->isValid()) {
+ for (auto I = N->expr_op_begin(), E = N->expr_op_end(); I != E; ++I) {
+ const char *OpStr = dwarf::OperationEncodingString(I->getOp());
+ assert(OpStr && "Expected valid opcode");
+
+ Out << FS << OpStr;
+ for (unsigned A = 0, AE = I->getNumArgs(); A != AE; ++A)
+ Out << FS << I->getArg(A);
+ }
+ } else {
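+    // The expression is malformed; print the raw elements so that even
+    // broken IR can still be dumped.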
+ for (const auto &I : N->getElements())
+ Out << FS << I;
+ }
+ Out << ")";
+}
+
+static void writeDIObjCProperty(raw_ostream &Out, const DIObjCProperty *N,
+ TypePrinting *TypePrinter, SlotTracker *Machine,
+ const Module *Context) {
+ Out << "!DIObjCProperty(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printString("setter", N->getSetterName());
+ Printer.printString("getter", N->getGetterName());
+ Printer.printInt("attributes", N->getAttributes());
+ Printer.printMetadata("type", N->getRawType());
+ Out << ")";
+}
+
+static void writeDIImportedEntity(raw_ostream &Out, const DIImportedEntity *N,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context) {
+ Out << "!DIImportedEntity(";
+ MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("entity", N->getRawEntity());
+ Printer.printInt("line", N->getLine());
+ Out << ")";
+}
+
+static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
+ if (Node->isDistinct())
+ Out << "distinct ";
+ else if (Node->isTemporary())
+ Out << "<temporary!> "; // Handle broken code.
+
+ switch (Node->getMetadataID()) {
+ default:
+ llvm_unreachable("Expected uniquable MDNode");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ write##CLASS(Out, cast<CLASS>(Node), TypePrinter, Machine, Context); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+// Full implementation of printing a Value as an operand with support for
+// TypePrinting, etc.
+static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
+ if (V->hasName()) {
+ PrintLLVMName(Out, V);
+ return;
+ }
+
+ const Constant *CV = dyn_cast<Constant>(V);
+ if (CV && !isa<GlobalValue>(CV)) {
+ assert(TypePrinter && "Constants require TypePrinting!");
+ WriteConstantInternal(Out, CV, *TypePrinter, Machine, Context);
+ return;
+ }
+
+ if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
+ Out << "asm ";
+ if (IA->hasSideEffects())
+ Out << "sideeffect ";
+ if (IA->isAlignStack())
+ Out << "alignstack ";
+ // We don't emit the AD_ATT dialect as it's the assumed default.
+ if (IA->getDialect() == InlineAsm::AD_Intel)
+ Out << "inteldialect ";
+ Out << '"';
+ PrintEscapedString(IA->getAsmString(), Out);
+ Out << "\", \"";
+ PrintEscapedString(IA->getConstraintString(), Out);
+ Out << '"';
+ return;
+ }
+
+ if (auto *MD = dyn_cast<MetadataAsValue>(V)) {
+ WriteAsOperandInternal(Out, MD->getMetadata(), TypePrinter, Machine,
+ Context, /* FromValue */ true);
+ return;
+ }
+
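+  // Otherwise print the unnamed value as a numbered slot: '%' for locals,
+  // '@' for globals.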
+ char Prefix = '%';
+ int Slot;
+ // If we have a SlotTracker, use it.
+ if (Machine) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ Slot = Machine->getGlobalSlot(GV);
+ Prefix = '@';
+ } else {
+ Slot = Machine->getLocalSlot(V);
+
+      // If the local value lookup didn't succeed, we may be referring to a
+      // value from a different function. Translate it, as this can happen
+      // when taking the address of a block defined in another function.
+ if (Slot == -1)
+ if ((Machine = createSlotTracker(V))) {
+ Slot = Machine->getLocalSlot(V);
+ delete Machine;
+ }
+ }
+ } else if ((Machine = createSlotTracker(V))) {
+ // Otherwise, create one to get the # and then destroy it.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ Slot = Machine->getGlobalSlot(GV);
+ Prefix = '@';
+ } else {
+ Slot = Machine->getLocalSlot(V);
+ }
+ delete Machine;
+ Machine = nullptr;
+ } else {
+ Slot = -1;
+ }
+
+ if (Slot != -1)
+ Out << Prefix << Slot;
+ else
+ Out << "<badref>";
+}
+
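+// Print a Metadata operand. FromValue is true when the metadata is printed
+// from within a MetadataAsValue wrapper, the only position where
+// function-local metadata is allowed.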
+static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
+ TypePrinting *TypePrinter,
+ SlotTracker *Machine, const Module *Context,
+ bool FromValue) {
+ if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+ std::unique_ptr<SlotTracker> MachineStorage;
+ if (!Machine) {
+ MachineStorage = make_unique<SlotTracker>(Context);
+ Machine = MachineStorage.get();
+ }
+ int Slot = Machine->getMetadataSlot(N);
+ if (Slot == -1)
+ // Give the pointer value instead of "badref", since this comes up all
+ // the time when debugging.
+ Out << "<" << N << ">";
+ else
+ Out << '!' << Slot;
+ return;
+ }
+
+ if (const MDString *MDS = dyn_cast<MDString>(MD)) {
+ Out << "!\"";
+ PrintEscapedString(MDS->getString(), Out);
+ Out << '"';
+ return;
+ }
+
+ auto *V = cast<ValueAsMetadata>(MD);
+ assert(TypePrinter && "TypePrinter required for metadata values");
+ assert((FromValue || !isa<LocalAsMetadata>(V)) &&
+ "Unexpected function-local metadata outside of value argument");
+
+ TypePrinter->print(V->getValue()->getType(), Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, V->getValue(), TypePrinter, Machine, Context);
+}
+
+namespace {
+class AssemblyWriter {
+ formatted_raw_ostream &Out;
+ const Module *TheModule;
+ std::unique_ptr<SlotTracker> SlotTrackerStorage;
+ SlotTracker &Machine;
+ TypePrinting TypePrinter;
+ AssemblyAnnotationWriter *AnnotationWriter;
+ SetVector<const Comdat *> Comdats;
+ bool IsForDebug;
+ bool ShouldPreserveUseListOrder;
+ UseListOrderStack UseListOrders;
+ SmallVector<StringRef, 8> MDNames;
+
+public:
+ /// Construct an AssemblyWriter with an external SlotTracker
+ AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac, const Module *M,
+ AssemblyAnnotationWriter *AAW, bool IsForDebug,
+ bool ShouldPreserveUseListOrder = false);
+
+ void printMDNodeBody(const MDNode *MD);
+ void printNamedMDNode(const NamedMDNode *NMD);
+
+ void printModule(const Module *M);
+
+ void writeOperand(const Value *Op, bool PrintType);
+  void writeParamOperand(const Value *Operand, AttributeSet Attrs,
+                         unsigned Idx);
+ void writeOperandBundles(ImmutableCallSite CS);
+ void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
+
+ void writeAllMDNodes();
+ void writeMDNode(unsigned Slot, const MDNode *Node);
+ void writeAllAttributeGroups();
+
+ void printTypeIdentities();
+ void printGlobal(const GlobalVariable *GV);
+ void printAlias(const GlobalAlias *GV);
+ void printComdat(const Comdat *C);
+ void printFunction(const Function *F);
+ void printArgument(const Argument *FA, AttributeSet Attrs, unsigned Idx);
+ void printBasicBlock(const BasicBlock *BB);
+ void printInstructionLine(const Instruction &I);
+ void printInstruction(const Instruction &I);
+
+ void printUseListOrder(const UseListOrder &Order);
+ void printUseLists(const Function *F);
+
+private:
+ /// \brief Print out metadata attachments.
+ void printMetadataAttachments(
+ const SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs,
+ StringRef Separator);
+
+ // printInfoComment - Print a little comment after the instruction indicating
+ // which slot it occupies.
+ void printInfoComment(const Value &V);
+
+ // printGCRelocateComment - print comment after call to the gc.relocate
+ // intrinsic indicating base and derived pointer names.
+ void printGCRelocateComment(const GCRelocateInst &Relocate);
+};
+} // namespace
+
+AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac,
+ const Module *M, AssemblyAnnotationWriter *AAW,
+ bool IsForDebug, bool ShouldPreserveUseListOrder)
+ : Out(o), TheModule(M), Machine(Mac), AnnotationWriter(AAW),
+ IsForDebug(IsForDebug),
+ ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {
+ if (!TheModule)
+ return;
+ TypePrinter.incorporateTypes(*TheModule);
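+  // Collect the comdats referenced by functions and globals up front so
+  // printModule can emit them before their users.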
+ for (const Function &F : *TheModule)
+ if (const Comdat *C = F.getComdat())
+ Comdats.insert(C);
+ for (const GlobalVariable &GV : TheModule->globals())
+ if (const Comdat *C = GV.getComdat())
+ Comdats.insert(C);
+}
+
+void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
+ if (!Operand) {
+ Out << "<null operand!>";
+ return;
+ }
+ if (PrintType) {
+ TypePrinter.print(Operand->getType(), Out);
+ Out << ' ';
+ }
+ WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
+}
+
+void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ if (Ordering == NotAtomic)
+ return;
+
+ switch (SynchScope) {
+ case SingleThread: Out << " singlethread"; break;
+ case CrossThread: break;
+ }
+
+ switch (Ordering) {
+ default: Out << " <bad ordering " << int(Ordering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+}
+
+void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope) {
+ assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
+
+ switch (SynchScope) {
+ case SingleThread: Out << " singlethread"; break;
+ case CrossThread: break;
+ }
+
+ switch (SuccessOrdering) {
+ default: Out << " <bad ordering " << int(SuccessOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+
+ switch (FailureOrdering) {
+ default: Out << " <bad ordering " << int(FailureOrdering) << ">"; break;
+ case Unordered: Out << " unordered"; break;
+ case Monotonic: Out << " monotonic"; break;
+ case Acquire: Out << " acquire"; break;
+ case Release: Out << " release"; break;
+ case AcquireRelease: Out << " acq_rel"; break;
+ case SequentiallyConsistent: Out << " seq_cst"; break;
+ }
+}
+
+void AssemblyWriter::writeParamOperand(const Value *Operand,
+ AttributeSet Attrs, unsigned Idx) {
+ if (!Operand) {
+ Out << "<null operand!>";
+ return;
+ }
+
+ // Print the type
+ TypePrinter.print(Operand->getType(), Out);
+ // Print parameter attributes list
+ if (Attrs.hasAttributes(Idx))
+ Out << ' ' << Attrs.getAsString(Idx);
+ Out << ' ';
+ // Print the operand
+ WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
+}
+
+void AssemblyWriter::writeOperandBundles(ImmutableCallSite CS) {
+ if (!CS.hasOperandBundles())
+ return;
+
+ Out << " [ ";
+
+ bool FirstBundle = true;
+ for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
+ OperandBundleUse BU = CS.getOperandBundleAt(i);
+
+ if (!FirstBundle)
+ Out << ", ";
+ FirstBundle = false;
+
+ Out << '"';
+ PrintEscapedString(BU.getTagName(), Out);
+ Out << '"';
+
+ Out << '(';
+
+ bool FirstInput = true;
+ for (const auto &Input : BU.Inputs) {
+ if (!FirstInput)
+ Out << ", ";
+ FirstInput = false;
+
+ TypePrinter.print(Input->getType(), Out);
+ Out << " ";
+ WriteAsOperandInternal(Out, Input, &TypePrinter, &Machine, TheModule);
+ }
+
+ Out << ')';
+ }
+
+ Out << " ]";
+}
+
+void AssemblyWriter::printModule(const Module *M) {
+ Machine.initialize();
+
+ if (ShouldPreserveUseListOrder)
+ UseListOrders = predictUseListOrder(M);
+
+ if (!M->getModuleIdentifier().empty() &&
+ // Don't print the ID if it will start a new line (which would
+ // require a comment char before it).
+ M->getModuleIdentifier().find('\n') == std::string::npos)
+ Out << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
+
+ const std::string &DL = M->getDataLayoutStr();
+ if (!DL.empty())
+ Out << "target datalayout = \"" << DL << "\"\n";
+ if (!M->getTargetTriple().empty())
+ Out << "target triple = \"" << M->getTargetTriple() << "\"\n";
+
+ if (!M->getModuleInlineAsm().empty()) {
+ Out << '\n';
+
+ // Split the string into lines, to make it easier to read the .ll file.
+ StringRef Asm = M->getModuleInlineAsm();
+ do {
+ StringRef Front;
+ std::tie(Front, Asm) = Asm.split('\n');
+
+ // We found a newline, print the portion of the asm string from the
+ // last newline up to this newline.
+ Out << "module asm \"";
+ PrintEscapedString(Front, Out);
+ Out << "\"\n";
+ } while (!Asm.empty());
+ }
+
+ printTypeIdentities();
+
+ // Output all comdats.
+ if (!Comdats.empty())
+ Out << '\n';
+ for (const Comdat *C : Comdats) {
+ printComdat(C);
+ if (C != Comdats.back())
+ Out << '\n';
+ }
+
+ // Output all globals.
+ if (!M->global_empty()) Out << '\n';
+ for (const GlobalVariable &GV : M->globals()) {
+ printGlobal(&GV); Out << '\n';
+ }
+
+ // Output all aliases.
+ if (!M->alias_empty()) Out << "\n";
+ for (const GlobalAlias &GA : M->aliases())
+ printAlias(&GA);
+
+ // Output global use-lists.
+ printUseLists(nullptr);
+
+ // Output all of the functions.
+ for (const Function &F : *M)
+ printFunction(&F);
+ assert(UseListOrders.empty() && "All use-lists should have been consumed");
+
+ // Output all attribute groups.
+ if (!Machine.as_empty()) {
+ Out << '\n';
+ writeAllAttributeGroups();
+ }
+
+ // Output named metadata.
+ if (!M->named_metadata_empty()) Out << '\n';
+
+ for (const NamedMDNode &Node : M->named_metadata())
+ printNamedMDNode(&Node);
+
+ // Output metadata.
+ if (!Machine.mdn_empty()) {
+ Out << '\n';
+ writeAllMDNodes();
+ }
+}
+
+static void printMetadataIdentifier(StringRef Name,
+ formatted_raw_ostream &Out) {
+ if (Name.empty()) {
+ Out << "<empty name> ";
+ } else {
+ if (isalpha(static_cast<unsigned char>(Name[0])) || Name[0] == '-' ||
+ Name[0] == '$' || Name[0] == '.' || Name[0] == '_')
+ Out << Name[0];
+ else
+ Out << '\\' << hexdigit(Name[0] >> 4) << hexdigit(Name[0] & 0x0F);
+ for (unsigned i = 1, e = Name.size(); i != e; ++i) {
+ unsigned char C = Name[i];
+ if (isalnum(static_cast<unsigned char>(C)) || C == '-' || C == '$' ||
+ C == '.' || C == '_')
+ Out << C;
+ else
+ Out << '\\' << hexdigit(C >> 4) << hexdigit(C & 0x0F);
+ }
+ }
+}
+
+void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) {
+ Out << '!';
+ printMetadataIdentifier(NMD->getName(), Out);
+ Out << " = !{";
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ if (i)
+ Out << ", ";
+ int Slot = Machine.getMetadataSlot(NMD->getOperand(i));
+ if (Slot == -1)
+ Out << "<badref>";
+ else
+ Out << '!' << Slot;
+ }
+ Out << "}\n";
+}
+
+static void PrintLinkage(GlobalValue::LinkageTypes LT,
+ formatted_raw_ostream &Out) {
+ switch (LT) {
+ case GlobalValue::ExternalLinkage: break;
+ case GlobalValue::PrivateLinkage: Out << "private "; break;
+ case GlobalValue::InternalLinkage: Out << "internal "; break;
+ case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break;
+ case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break;
+ case GlobalValue::WeakAnyLinkage: Out << "weak "; break;
+ case GlobalValue::WeakODRLinkage: Out << "weak_odr "; break;
+ case GlobalValue::CommonLinkage: Out << "common "; break;
+ case GlobalValue::AppendingLinkage: Out << "appending "; break;
+ case GlobalValue::ExternalWeakLinkage: Out << "extern_weak "; break;
+ case GlobalValue::AvailableExternallyLinkage:
+ Out << "available_externally ";
+ break;
+ }
+}
+
+static void PrintVisibility(GlobalValue::VisibilityTypes Vis,
+ formatted_raw_ostream &Out) {
+ switch (Vis) {
+ case GlobalValue::DefaultVisibility: break;
+ case GlobalValue::HiddenVisibility: Out << "hidden "; break;
+ case GlobalValue::ProtectedVisibility: Out << "protected "; break;
+ }
+}
+
+static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT,
+ formatted_raw_ostream &Out) {
+ switch (SCT) {
+ case GlobalValue::DefaultStorageClass: break;
+ case GlobalValue::DLLImportStorageClass: Out << "dllimport "; break;
+ case GlobalValue::DLLExportStorageClass: Out << "dllexport "; break;
+ }
+}
+
+static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM,
+ formatted_raw_ostream &Out) {
+ switch (TLM) {
+ case GlobalVariable::NotThreadLocal:
+ break;
+ case GlobalVariable::GeneralDynamicTLSModel:
+ Out << "thread_local ";
+ break;
+ case GlobalVariable::LocalDynamicTLSModel:
+ Out << "thread_local(localdynamic) ";
+ break;
+ case GlobalVariable::InitialExecTLSModel:
+ Out << "thread_local(initialexec) ";
+ break;
+ case GlobalVariable::LocalExecTLSModel:
+ Out << "thread_local(localexec) ";
+ break;
+ }
+}
+
+static void maybePrintComdat(formatted_raw_ostream &Out,
+ const GlobalObject &GO) {
+ const Comdat *C = GO.getComdat();
+ if (!C)
+ return;
+
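+  // Global variable trailers are comma-separated; function trailers are
+  // space-separated.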
+ if (isa<GlobalVariable>(GO))
+ Out << ',';
+ Out << " comdat";
+
+ if (GO.getName() == C->getName())
+ return;
+
+ Out << '(';
+ PrintLLVMName(Out, C->getName(), ComdatPrefix);
+ Out << ')';
+}
+
+void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
+ if (GV->isMaterializable())
+ Out << "; Materializable\n";
+
+ WriteAsOperandInternal(Out, GV, &TypePrinter, &Machine, GV->getParent());
+ Out << " = ";
+
+ if (!GV->hasInitializer() && GV->hasExternalLinkage())
+ Out << "external ";
+
+ PrintLinkage(GV->getLinkage(), Out);
+ PrintVisibility(GV->getVisibility(), Out);
+ PrintDLLStorageClass(GV->getDLLStorageClass(), Out);
+ PrintThreadLocalModel(GV->getThreadLocalMode(), Out);
+ if (GV->hasUnnamedAddr())
+ Out << "unnamed_addr ";
+
+ if (unsigned AddressSpace = GV->getType()->getAddressSpace())
+ Out << "addrspace(" << AddressSpace << ") ";
+ if (GV->isExternallyInitialized()) Out << "externally_initialized ";
+ Out << (GV->isConstant() ? "constant " : "global ");
+ TypePrinter.print(GV->getType()->getElementType(), Out);
+
+ if (GV->hasInitializer()) {
+ Out << ' ';
+ writeOperand(GV->getInitializer(), false);
+ }
+
+ if (GV->hasSection()) {
+ Out << ", section \"";
+ PrintEscapedString(GV->getSection(), Out);
+ Out << '"';
+ }
+ maybePrintComdat(Out, *GV);
+ if (GV->getAlignment())
+ Out << ", align " << GV->getAlignment();
+
+ printInfoComment(*GV);
+}
+
+void AssemblyWriter::printAlias(const GlobalAlias *GA) {
+ if (GA->isMaterializable())
+ Out << "; Materializable\n";
+
+ WriteAsOperandInternal(Out, GA, &TypePrinter, &Machine, GA->getParent());
+ Out << " = ";
+
+ PrintLinkage(GA->getLinkage(), Out);
+ PrintVisibility(GA->getVisibility(), Out);
+ PrintDLLStorageClass(GA->getDLLStorageClass(), Out);
+ PrintThreadLocalModel(GA->getThreadLocalMode(), Out);
+ if (GA->hasUnnamedAddr())
+ Out << "unnamed_addr ";
+
+ Out << "alias ";
+
+ TypePrinter.print(GA->getValueType(), Out);
+
+ Out << ", ";
+
+ const Constant *Aliasee = GA->getAliasee();
+
+ if (!Aliasee) {
+ TypePrinter.print(GA->getType(), Out);
+ Out << " <<NULL ALIASEE>>";
+ } else {
+ writeOperand(Aliasee, !isa<ConstantExpr>(Aliasee));
+ }
+
+ printInfoComment(*GA);
+ Out << '\n';
+}
+
+void AssemblyWriter::printComdat(const Comdat *C) {
+ C->print(Out);
+}
+
+void AssemblyWriter::printTypeIdentities() {
+ if (TypePrinter.NumberedTypes.empty() &&
+ TypePrinter.NamedTypes.empty())
+ return;
+
+ Out << '\n';
+
+  // We know the number assigned to each type and that the numbering is
+  // dense, so convert the map to an index table.
+ std::vector<StructType*> NumberedTypes(TypePrinter.NumberedTypes.size());
+ for (DenseMap<StructType*, unsigned>::iterator I =
+ TypePrinter.NumberedTypes.begin(), E = TypePrinter.NumberedTypes.end();
+ I != E; ++I) {
+ assert(I->second < NumberedTypes.size() && "Didn't get a dense numbering?");
+ NumberedTypes[I->second] = I->first;
+ }
+
+ // Emit all numbered types.
+ for (unsigned i = 0, e = NumberedTypes.size(); i != e; ++i) {
+ Out << '%' << i << " = type ";
+
+ // Make sure we print out at least one level of the type structure, so
+ // that we do not get %2 = type %2
+ TypePrinter.printStructBody(NumberedTypes[i], Out);
+ Out << '\n';
+ }
+
+ for (unsigned i = 0, e = TypePrinter.NamedTypes.size(); i != e; ++i) {
+ PrintLLVMName(Out, TypePrinter.NamedTypes[i]->getName(), LocalPrefix);
+ Out << " = type ";
+
+ // Make sure we print out at least one level of the type structure, so
+ // that we do not get %FILE = type %FILE
+ TypePrinter.printStructBody(TypePrinter.NamedTypes[i], Out);
+ Out << '\n';
+ }
+}
+
+/// printFunction - Print all aspects of a function.
+///
+void AssemblyWriter::printFunction(const Function *F) {
+ // Print out the return type and name.
+ Out << '\n';
+
+ if (AnnotationWriter) AnnotationWriter->emitFunctionAnnot(F, Out);
+
+ if (F->isMaterializable())
+ Out << "; Materializable\n";
+
+ const AttributeSet &Attrs = F->getAttributes();
+ if (Attrs.hasAttributes(AttributeSet::FunctionIndex)) {
+ AttributeSet AS = Attrs.getFnAttributes();
+ std::string AttrStr;
+
+ unsigned Idx = 0;
+ for (unsigned E = AS.getNumSlots(); Idx != E; ++Idx)
+ if (AS.getSlotIndex(Idx) == AttributeSet::FunctionIndex)
+ break;
+
+ for (AttributeSet::iterator I = AS.begin(Idx), E = AS.end(Idx);
+ I != E; ++I) {
+ Attribute Attr = *I;
+ if (!Attr.isStringAttribute()) {
+ if (!AttrStr.empty()) AttrStr += ' ';
+ AttrStr += Attr.getAsString();
+ }
+ }
+
+ if (!AttrStr.empty())
+ Out << "; Function Attrs: " << AttrStr << '\n';
+ }
+
+ if (F->isDeclaration())
+ Out << "declare ";
+ else
+ Out << "define ";
+
+ PrintLinkage(F->getLinkage(), Out);
+ PrintVisibility(F->getVisibility(), Out);
+ PrintDLLStorageClass(F->getDLLStorageClass(), Out);
+
+ // Print the calling convention.
+ if (F->getCallingConv() != CallingConv::C) {
+ PrintCallingConv(F->getCallingConv(), Out);
+ Out << " ";
+ }
+
+ FunctionType *FT = F->getFunctionType();
+ if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
+ Out << Attrs.getAsString(AttributeSet::ReturnIndex) << ' ';
+ TypePrinter.print(F->getReturnType(), Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent());
+ Out << '(';
+ Machine.incorporateFunction(F);
+
+ // Loop over the arguments, printing them...
+ if (F->isDeclaration() && !IsForDebug) {
+ // We're only interested in the type here - don't print argument names.
+ for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) {
+ // Insert commas as we go... the first arg doesn't get a comma
+ if (I)
+ Out << ", ";
+ // Output type...
+ TypePrinter.print(FT->getParamType(I), Out);
+
+ if (Attrs.hasAttributes(I + 1))
+ Out << ' ' << Attrs.getAsString(I + 1);
+ }
+ } else {
+ // The arguments are meaningful here, print them in detail.
+ unsigned Idx = 1;
+ for (const Argument &Arg : F->args()) {
+ // Insert commas as we go... the first arg doesn't get a comma
+ if (Idx != 1)
+ Out << ", ";
+ printArgument(&Arg, Attrs, Idx++);
+ }
+ }
+
+ // Finish printing arguments...
+ if (FT->isVarArg()) {
+ if (FT->getNumParams()) Out << ", ";
+ Out << "..."; // Output varargs portion of signature!
+ }
+ Out << ')';
+ if (F->hasUnnamedAddr())
+ Out << " unnamed_addr";
+ if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ Out << " #" << Machine.getAttributeGroupSlot(Attrs.getFnAttributes());
+ if (F->hasSection()) {
+ Out << " section \"";
+ PrintEscapedString(F->getSection(), Out);
+ Out << '"';
+ }
+ maybePrintComdat(Out, *F);
+ if (F->getAlignment())
+ Out << " align " << F->getAlignment();
+ if (F->hasGC())
+ Out << " gc \"" << F->getGC() << '"';
+ if (F->hasPrefixData()) {
+ Out << " prefix ";
+ writeOperand(F->getPrefixData(), true);
+ }
+ if (F->hasPrologueData()) {
+ Out << " prologue ";
+ writeOperand(F->getPrologueData(), true);
+ }
+ if (F->hasPersonalityFn()) {
+ Out << " personality ";
+ writeOperand(F->getPersonalityFn(), /*PrintType=*/true);
+ }
+
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ F->getAllMetadata(MDs);
+ printMetadataAttachments(MDs, " ");
+
+ if (F->isDeclaration()) {
+ Out << '\n';
+ } else {
+ Out << " {";
+ // Output all of the function's basic blocks.
+ for (Function::const_iterator I = F->begin(), E = F->end(); I != E; ++I)
+ printBasicBlock(&*I);
+
+ // Output the function's use-lists.
+ printUseLists(F);
+
+ Out << "}\n";
+ }
+
+ Machine.purgeFunction();
+}
+
+/// printArgument - This member is called for every argument that is passed
+/// into the function. Simply print it out.
+///
+void AssemblyWriter::printArgument(const Argument *Arg,
+ AttributeSet Attrs, unsigned Idx) {
+ // Output type...
+ TypePrinter.print(Arg->getType(), Out);
+
+ // Output parameter attributes list
+ if (Attrs.hasAttributes(Idx))
+ Out << ' ' << Attrs.getAsString(Idx);
+
+ // Output name, if available...
+ if (Arg->hasName()) {
+ Out << ' ';
+ PrintLLVMName(Out, Arg);
+ }
+}
+
+/// printBasicBlock - This member is called for each basic block in a
+/// function.
+///
+void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
+ if (BB->hasName()) { // Print out the label if it exists...
+ Out << "\n";
+ PrintLLVMName(Out, BB->getName(), LabelPrefix);
+ Out << ':';
+ } else if (!BB->use_empty()) { // Don't print block # of no uses...
+ Out << "\n; <label>:";
+ int Slot = Machine.getLocalSlot(BB);
+ if (Slot != -1)
+ Out << Slot;
+ else
+ Out << "<badref>";
+ }
+
+ if (!BB->getParent()) {
+ Out.PadToColumn(50);
+ Out << "; Error: Block without parent!";
+ } else if (BB != &BB->getParent()->getEntryBlock()) { // Not the entry block?
+ // Output predecessors for the block.
+ Out.PadToColumn(50);
+ Out << ";";
+ const_pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
+
+ if (PI == PE) {
+ Out << " No predecessors!";
+ } else {
+ Out << " preds = ";
+ writeOperand(*PI, false);
+ for (++PI; PI != PE; ++PI) {
+ Out << ", ";
+ writeOperand(*PI, false);
+ }
+ }
+ }
+
+ Out << "\n";
+
+ if (AnnotationWriter) AnnotationWriter->emitBasicBlockStartAnnot(BB, Out);
+
+ // Output all of the instructions in the basic block...
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+ printInstructionLine(*I);
+ }
+
+ if (AnnotationWriter) AnnotationWriter->emitBasicBlockEndAnnot(BB, Out);
+}
+
+/// printInstructionLine - Print an instruction and a newline character.
+void AssemblyWriter::printInstructionLine(const Instruction &I) {
+ printInstruction(I);
+ Out << '\n';
+}
+
+/// printGCRelocateComment - print comment after call to the gc.relocate
+/// intrinsic indicating base and derived pointer names.
+void AssemblyWriter::printGCRelocateComment(const GCRelocateInst &Relocate) {
+ Out << " ; (";
+ writeOperand(Relocate.getBasePtr(), false);
+ Out << ", ";
+ writeOperand(Relocate.getDerivedPtr(), false);
+ Out << ")";
+}
+
+/// printInfoComment - Print a little comment after the instruction indicating
+/// which slot it occupies.
+///
+void AssemblyWriter::printInfoComment(const Value &V) {
+ if (const auto *Relocate = dyn_cast<GCRelocateInst>(&V))
+ printGCRelocateComment(*Relocate);
+
+ if (AnnotationWriter)
+ AnnotationWriter->printInfoComment(V, Out);
+}
+
+// This member is called for each Instruction in a function.
+void AssemblyWriter::printInstruction(const Instruction &I) {
+ if (AnnotationWriter) AnnotationWriter->emitInstructionAnnot(&I, Out);
+
+ // Print out indentation for an instruction.
+ Out << " ";
+
+ // Print out name if it exists...
+ if (I.hasName()) {
+ PrintLLVMName(Out, &I);
+ Out << " = ";
+ } else if (!I.getType()->isVoidTy()) {
+ // Print out the def slot taken.
+ int SlotNum = Machine.getLocalSlot(&I);
+ if (SlotNum == -1)
+ Out << "<badref> = ";
+ else
+ Out << '%' << SlotNum << " = ";
+ }
+
+ if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ if (CI->isMustTailCall())
+ Out << "musttail ";
+ else if (CI->isTailCall())
+ Out << "tail ";
+ else if (CI->isNoTailCall())
+ Out << "notail ";
+ }
+
+ // Print out the opcode...
+ Out << I.getOpcodeName();
+
+ // If this is an atomic load or store, print out the atomic marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+ Out << " atomic";
+
+ if (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isWeak())
+ Out << " weak";
+
+ // If this is a volatile operation, print out the volatile marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) ||
+ (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||
+ (isa<AtomicRMWInst>(I) && cast<AtomicRMWInst>(I).isVolatile()))
+ Out << " volatile";
+
+ // Print out optimization information.
+ WriteOptimizationInfo(Out, &I);
+
+ // Print out the compare instruction predicates
+ if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
+ Out << ' ' << getPredicateText(CI->getPredicate());
+
+ // Print out the atomicrmw operation
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+ writeAtomicRMWOperation(Out, RMWI->getOperation());
+
+ // Print out the type of the operands...
+ const Value *Operand = I.getNumOperands() ? I.getOperand(0) : nullptr;
+
+ // Special case conditional branches to swizzle the condition out to the front
+ if (isa<BranchInst>(I) && cast<BranchInst>(I).isConditional()) {
+ const BranchInst &BI(cast<BranchInst>(I));
+ Out << ' ';
+ writeOperand(BI.getCondition(), true);
+ Out << ", ";
+ writeOperand(BI.getSuccessor(0), true);
+ Out << ", ";
+ writeOperand(BI.getSuccessor(1), true);
+
+ } else if (isa<SwitchInst>(I)) {
+ const SwitchInst& SI(cast<SwitchInst>(I));
+ // Special case switch instruction to get formatting nice and correct.
+ Out << ' ';
+ writeOperand(SI.getCondition(), true);
+ Out << ", ";
+ writeOperand(SI.getDefaultDest(), true);
+ Out << " [";
+ for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
+ i != e; ++i) {
+ Out << "\n ";
+ writeOperand(i.getCaseValue(), true);
+ Out << ", ";
+ writeOperand(i.getCaseSuccessor(), true);
+ }
+ Out << "\n ]";
+ } else if (isa<IndirectBrInst>(I)) {
+ // Special case indirectbr instruction to get formatting nice and correct.
+ Out << ' ';
+ writeOperand(Operand, true);
+ Out << ", [";
+
+ for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
+ if (i != 1)
+ Out << ", ";
+ writeOperand(I.getOperand(i), true);
+ }
+ Out << ']';
+ } else if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ Out << ' ';
+ TypePrinter.print(I.getType(), Out);
+ Out << ' ';
+
+ for (unsigned op = 0, Eop = PN->getNumIncomingValues(); op < Eop; ++op) {
+ if (op) Out << ", ";
+ Out << "[ ";
+ writeOperand(PN->getIncomingValue(op), false); Out << ", ";
+ writeOperand(PN->getIncomingBlock(op), false); Out << " ]";
+ }
+ } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&I)) {
+ Out << ' ';
+ writeOperand(I.getOperand(0), true);
+ for (const unsigned *i = EVI->idx_begin(), *e = EVI->idx_end(); i != e; ++i)
+ Out << ", " << *i;
+ } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&I)) {
+ Out << ' ';
+ writeOperand(I.getOperand(0), true); Out << ", ";
+ writeOperand(I.getOperand(1), true);
+ for (const unsigned *i = IVI->idx_begin(), *e = IVI->idx_end(); i != e; ++i)
+ Out << ", " << *i;
+ } else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(I.getType(), Out);
+ if (LPI->isCleanup() || LPI->getNumClauses() != 0)
+ Out << '\n';
+
+ if (LPI->isCleanup())
+ Out << " cleanup";
+
+ for (unsigned i = 0, e = LPI->getNumClauses(); i != e; ++i) {
+ if (i != 0 || LPI->isCleanup()) Out << "\n";
+ if (LPI->isCatch(i))
+ Out << " catch ";
+ else
+ Out << " filter ";
+
+ writeOperand(LPI->getClause(i), true);
+ }
+ } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(&I)) {
+ Out << " within ";
+ writeOperand(CatchSwitch->getParentPad(), /*PrintType=*/false);
+ Out << " [";
+ unsigned Op = 0;
+ for (const BasicBlock *PadBB : CatchSwitch->handlers()) {
+ if (Op > 0)
+ Out << ", ";
+ writeOperand(PadBB, /*PrintType=*/true);
+ ++Op;
+ }
+ Out << "] unwind ";
+ if (const BasicBlock *UnwindDest = CatchSwitch->getUnwindDest())
+ writeOperand(UnwindDest, /*PrintType=*/true);
+ else
+ Out << "to caller";
+ } else if (const auto *FPI = dyn_cast<FuncletPadInst>(&I)) {
+ Out << " within ";
+ writeOperand(FPI->getParentPad(), /*PrintType=*/false);
+ Out << " [";
+ for (unsigned Op = 0, NumOps = FPI->getNumArgOperands(); Op < NumOps;
+ ++Op) {
+ if (Op > 0)
+ Out << ", ";
+ writeOperand(FPI->getArgOperand(Op), /*PrintType=*/true);
+ }
+ Out << ']';
+ } else if (isa<ReturnInst>(I) && !Operand) {
+ Out << " void";
+ } else if (const auto *CRI = dyn_cast<CatchReturnInst>(&I)) {
+ Out << " from ";
+ writeOperand(CRI->getOperand(0), /*PrintType=*/false);
+
+ Out << " to ";
+ writeOperand(CRI->getOperand(1), /*PrintType=*/true);
+ } else if (const auto *CRI = dyn_cast<CleanupReturnInst>(&I)) {
+ Out << " from ";
+ writeOperand(CRI->getOperand(0), /*PrintType=*/false);
+
+ Out << " unwind ";
+ if (CRI->hasUnwindDest())
+ writeOperand(CRI->getOperand(1), /*PrintType=*/true);
+ else
+ Out << "to caller";
+ } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ // Print the calling convention being used.
+ if (CI->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(CI->getCallingConv(), Out);
+ }
+
+ Operand = CI->getCalledValue();
+ FunctionType *FTy = cast<FunctionType>(CI->getFunctionType());
+ Type *RetTy = FTy->getReturnType();
+ const AttributeSet &PAL = CI->getAttributes();
+
+ if (PAL.hasAttributes(AttributeSet::ReturnIndex))
+ Out << ' ' << PAL.getAsString(AttributeSet::ReturnIndex);
+
+ // If possible, print out the short form of the call instruction. We can
+ // only do this if the first argument is a pointer to a nonvararg function,
+ // and if the return type is not a pointer to a function.
+ //
+ Out << ' ';
+ TypePrinter.print(FTy->isVarArg() ? FTy : RetTy, Out);
+ Out << ' ';
+ writeOperand(Operand, false);
+ Out << '(';
+ for (unsigned op = 0, Eop = CI->getNumArgOperands(); op < Eop; ++op) {
+ if (op > 0)
+ Out << ", ";
+ writeParamOperand(CI->getArgOperand(op), PAL, op + 1);
+ }
+
+    // Emit an ellipsis if this is a musttail call in a vararg function. This
+    // is only to aid readability; musttail calls forward varargs by default.
+ if (CI->isMustTailCall() && CI->getParent() &&
+ CI->getParent()->getParent() &&
+ CI->getParent()->getParent()->isVarArg())
+ Out << ", ...";
+
+ Out << ')';
+ if (PAL.hasAttributes(AttributeSet::FunctionIndex))
+ Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes());
+
+ writeOperandBundles(CI);
+
+ } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+ Operand = II->getCalledValue();
+ FunctionType *FTy = cast<FunctionType>(II->getFunctionType());
+ Type *RetTy = FTy->getReturnType();
+ const AttributeSet &PAL = II->getAttributes();
+
+ // Print the calling convention being used.
+ if (II->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(II->getCallingConv(), Out);
+ }
+
+ if (PAL.hasAttributes(AttributeSet::ReturnIndex))
+ Out << ' ' << PAL.getAsString(AttributeSet::ReturnIndex);
+
+ // If possible, print out the short form of the invoke instruction. We can
+ // only do this if the first argument is a pointer to a nonvararg function,
+ // and if the return type is not a pointer to a function.
+ //
+ Out << ' ';
+ TypePrinter.print(FTy->isVarArg() ? FTy : RetTy, Out);
+ Out << ' ';
+ writeOperand(Operand, false);
+ Out << '(';
+ for (unsigned op = 0, Eop = II->getNumArgOperands(); op < Eop; ++op) {
+ if (op)
+ Out << ", ";
+ writeParamOperand(II->getArgOperand(op), PAL, op + 1);
+ }
+
+ Out << ')';
+ if (PAL.hasAttributes(AttributeSet::FunctionIndex))
+ Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes());
+
+ writeOperandBundles(II);
+
+ Out << "\n to ";
+ writeOperand(II->getNormalDest(), true);
+ Out << " unwind ";
+ writeOperand(II->getUnwindDest(), true);
+
+ } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+ Out << ' ';
+ if (AI->isUsedWithInAlloca())
+ Out << "inalloca ";
+ TypePrinter.print(AI->getAllocatedType(), Out);
+
+ // Explicitly write the array size if the code is broken, if it's an array
+ // allocation, or if the type is not canonical for scalar allocations. The
+ // latter case prevents the type from mutating when round-tripping through
+ // assembly.
+ if (!AI->getArraySize() || AI->isArrayAllocation() ||
+ !AI->getArraySize()->getType()->isIntegerTy(32)) {
+ Out << ", ";
+ writeOperand(AI->getArraySize(), true);
+ }
+ if (AI->getAlignment()) {
+ Out << ", align " << AI->getAlignment();
+ }
+ } else if (isa<CastInst>(I)) {
+ if (Operand) {
+ Out << ' ';
+ writeOperand(Operand, true); // Work with broken code
+ }
+ Out << " to ";
+ TypePrinter.print(I.getType(), Out);
+ } else if (isa<VAArgInst>(I)) {
+ if (Operand) {
+ Out << ' ';
+ writeOperand(Operand, true); // Work with broken code
+ }
+ Out << ", ";
+ TypePrinter.print(I.getType(), Out);
+ } else if (Operand) { // Print the normal way.
+ if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(GEP->getSourceElementType(), Out);
+ Out << ',';
+ } else if (const auto *LI = dyn_cast<LoadInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(LI->getType(), Out);
+ Out << ',';
+ }
+
+    // PrintAllTypes - Instructions whose operands all have the same type
+    // omit the type from all but the first operand. If the instruction has
+    // operands of differing types (for example br), all types are printed.
+ bool PrintAllTypes = false;
+ Type *TheType = Operand->getType();
+
+    // Select, Store, ShuffleVector and Return always print all types.
+ if (isa<SelectInst>(I) || isa<StoreInst>(I) || isa<ShuffleVectorInst>(I)
+ || isa<ReturnInst>(I)) {
+ PrintAllTypes = true;
+ } else {
+ for (unsigned i = 1, E = I.getNumOperands(); i != E; ++i) {
+ Operand = I.getOperand(i);
+        // Note that Operand shouldn't be null, but the test helps make
+        // dump() more tolerant of malformed IR.
+ if (Operand && Operand->getType() != TheType) {
+ PrintAllTypes = true; // We have differing types! Print them all!
+ break;
+ }
+ }
+ }
+
+ if (!PrintAllTypes) {
+ Out << ' ';
+ TypePrinter.print(TheType, Out);
+ }
+
+ Out << ' ';
+ for (unsigned i = 0, E = I.getNumOperands(); i != E; ++i) {
+ if (i) Out << ", ";
+ writeOperand(I.getOperand(i), PrintAllTypes);
+ }
+ }
+
+ // Print atomic ordering/alignment for memory operations
+ if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ if (LI->isAtomic())
+ writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ if (LI->getAlignment())
+ Out << ", align " << LI->getAlignment();
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+ if (SI->isAtomic())
+ writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ if (SI->getAlignment())
+ Out << ", align " << SI->getAlignment();
+ } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
+ CXI->getSynchScope());
+ } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+ writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
+ } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
+ writeAtomic(FI->getOrdering(), FI->getSynchScope());
+ }
+
+ // Print Metadata info.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> InstMD;
+ I.getAllMetadata(InstMD);
+ printMetadataAttachments(InstMD, ", ");
+
+ // Print a nice comment.
+ printInfoComment(I);
+}
+
+void AssemblyWriter::printMetadataAttachments(
+ const SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs,
+ StringRef Separator) {
+ if (MDs.empty())
+ return;
+
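+  // Lazily cache the metadata kind names from the LLVMContext on first use.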
+ if (MDNames.empty())
+ MDs[0].second->getContext().getMDKindNames(MDNames);
+
+ for (const auto &I : MDs) {
+ unsigned Kind = I.first;
+ Out << Separator;
+ if (Kind < MDNames.size()) {
+ Out << "!";
+ printMetadataIdentifier(MDNames[Kind], Out);
+ } else
+ Out << "!<unknown kind #" << Kind << ">";
+ Out << ' ';
+ WriteAsOperandInternal(Out, I.second, &TypePrinter, &Machine, TheModule);
+ }
+}
+
+void AssemblyWriter::writeMDNode(unsigned Slot, const MDNode *Node) {
+ Out << '!' << Slot << " = ";
+ printMDNodeBody(Node);
+ Out << "\n";
+}
+
+void AssemblyWriter::writeAllMDNodes() {
+ SmallVector<const MDNode *, 16> Nodes;
+ Nodes.resize(Machine.mdn_size());
+ for (SlotTracker::mdn_iterator I = Machine.mdn_begin(), E = Machine.mdn_end();
+ I != E; ++I)
+ Nodes[I->second] = cast<MDNode>(I->first);
+
+ for (unsigned i = 0, e = Nodes.size(); i != e; ++i) {
+ writeMDNode(i, Nodes[i]);
+ }
+}
+
+void AssemblyWriter::printMDNodeBody(const MDNode *Node) {
+ WriteMDNodeBodyInternal(Out, Node, &TypePrinter, &Machine, TheModule);
+}
+
+void AssemblyWriter::writeAllAttributeGroups() {
+ std::vector<std::pair<AttributeSet, unsigned> > asVec;
+ asVec.resize(Machine.as_size());
+
+ for (SlotTracker::as_iterator I = Machine.as_begin(), E = Machine.as_end();
+ I != E; ++I)
+ asVec[I->second] = *I;
+
+ for (std::vector<std::pair<AttributeSet, unsigned> >::iterator
+ I = asVec.begin(), E = asVec.end(); I != E; ++I)
+ Out << "attributes #" << I->second << " = { "
+ << I->first.getAsString(AttributeSet::FunctionIndex, true) << " }\n";
+}
+
+void AssemblyWriter::printUseListOrder(const UseListOrder &Order) {
+ bool IsInFunction = Machine.getFunction();
+ if (IsInFunction)
+ Out << " ";
+
+ Out << "uselistorder";
+ if (const BasicBlock *BB =
+ IsInFunction ? nullptr : dyn_cast<BasicBlock>(Order.V)) {
+ Out << "_bb ";
+ writeOperand(BB->getParent(), false);
+ Out << ", ";
+ writeOperand(BB, false);
+ } else {
+ Out << " ";
+ writeOperand(Order.V, true);
+ }
+ Out << ", { ";
+
+ assert(Order.Shuffle.size() >= 2 && "Shuffle too small");
+ Out << Order.Shuffle[0];
+ for (unsigned I = 1, E = Order.Shuffle.size(); I != E; ++I)
+ Out << ", " << Order.Shuffle[I];
+ Out << " }\n";
+}
+
+void AssemblyWriter::printUseLists(const Function *F) {
+ auto hasMore =
+ [&]() { return !UseListOrders.empty() && UseListOrders.back().F == F; };
+ if (!hasMore())
+ // Nothing to do.
+ return;
+
+ Out << "\n; uselistorder directives\n";
+ while (hasMore()) {
+ printUseListOrder(UseListOrders.back());
+ UseListOrders.pop_back();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// External Interface declarations
+//===----------------------------------------------------------------------===//
+
+void Module::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW,
+ bool ShouldPreserveUseListOrder, bool IsForDebug) const {
+ SlotTracker SlotTable(this);
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, this, AAW, IsForDebug,
+ ShouldPreserveUseListOrder);
+ W.printModule(this);
+}
+
+void NamedMDNode::print(raw_ostream &ROS, bool IsForDebug) const {
+ SlotTracker SlotTable(getParent());
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, getParent(), nullptr, IsForDebug);
+ W.printNamedMDNode(this);
+}
+
+void Comdat::print(raw_ostream &ROS, bool /*IsForDebug*/) const {
+ PrintLLVMName(ROS, getName(), ComdatPrefix);
+ ROS << " = comdat ";
+
+ switch (getSelectionKind()) {
+ case Comdat::Any:
+ ROS << "any";
+ break;
+ case Comdat::ExactMatch:
+ ROS << "exactmatch";
+ break;
+ case Comdat::Largest:
+ ROS << "largest";
+ break;
+ case Comdat::NoDuplicates:
+ ROS << "noduplicates";
+ break;
+ case Comdat::SameSize:
+ ROS << "samesize";
+ break;
+ }
+
+ ROS << '\n';
+}
+
+void Type::print(raw_ostream &OS, bool /*IsForDebug*/) const {
+ TypePrinting TP;
+ TP.print(const_cast<Type*>(this), OS);
+
+ // If the type is a named struct type, print the body as well.
+ if (StructType *STy = dyn_cast<StructType>(const_cast<Type*>(this)))
+ if (!STy->isLiteral()) {
+ OS << " = type ";
+ TP.printStructBody(STy, OS);
+ }
+}
+
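+// Return true if this is an intrinsic call with an MDNode operand (e.g. the
+// llvm.dbg.* intrinsics); printing such a value requires the metadata slot
+// numbering to be fully initialized.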
+static bool isReferencingMDNode(const Instruction &I) {
+ if (const auto *CI = dyn_cast<CallInst>(&I))
+ if (Function *F = CI->getCalledFunction())
+ if (F->isIntrinsic())
+ for (auto &Op : I.operands())
+ if (auto *V = dyn_cast_or_null<MetadataAsValue>(Op))
+ if (isa<MDNode>(V->getMetadata()))
+ return true;
+ return false;
+}
+
+void Value::print(raw_ostream &ROS, bool IsForDebug) const {
+ bool ShouldInitializeAllMetadata = false;
+ if (auto *I = dyn_cast<Instruction>(this))
+ ShouldInitializeAllMetadata = isReferencingMDNode(*I);
+ else if (isa<Function>(this) || isa<MetadataAsValue>(this))
+ ShouldInitializeAllMetadata = true;
+
+ ModuleSlotTracker MST(getModuleFromVal(this), ShouldInitializeAllMetadata);
+ print(ROS, MST, IsForDebug);
+}
+
+void Value::print(raw_ostream &ROS, ModuleSlotTracker &MST,
+ bool IsForDebug) const {
+ formatted_raw_ostream OS(ROS);
+ SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr));
+ SlotTracker &SlotTable =
+ MST.getMachine() ? *MST.getMachine() : EmptySlotTable;
+ auto incorporateFunction = [&](const Function *F) {
+ if (F)
+ MST.incorporateFunction(*F);
+ };
+
+ if (const Instruction *I = dyn_cast<Instruction>(this)) {
+ incorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr);
+ AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), nullptr, IsForDebug);
+ W.printInstruction(*I);
+ } else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) {
+ incorporateFunction(BB->getParent());
+ AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), nullptr, IsForDebug);
+ W.printBasicBlock(BB);
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) {
+ AssemblyWriter W(OS, SlotTable, GV->getParent(), nullptr, IsForDebug);
+ if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV))
+ W.printGlobal(V);
+ else if (const Function *F = dyn_cast<Function>(GV))
+ W.printFunction(F);
+ else
+ W.printAlias(cast<GlobalAlias>(GV));
+ } else if (const MetadataAsValue *V = dyn_cast<MetadataAsValue>(this)) {
+ V->getMetadata()->print(ROS, MST, getModuleFromVal(V));
+ } else if (const Constant *C = dyn_cast<Constant>(this)) {
+ TypePrinting TypePrinter;
+ TypePrinter.print(C->getType(), OS);
+ OS << ' ';
+ WriteConstantInternal(OS, C, TypePrinter, MST.getMachine(), nullptr);
+ } else if (isa<InlineAsm>(this) || isa<Argument>(this)) {
+ this->printAsOperand(OS, /* PrintType */ true, MST);
+ } else {
+ llvm_unreachable("Unknown value to print out!");
+ }
+}
+
+/// Print without a type, skipping the TypePrinting object.
+///
+/// \return \c true iff printing was successful.
+static bool printWithoutType(const Value &V, raw_ostream &O,
+ SlotTracker *Machine, const Module *M) {
+ if (V.hasName() || isa<GlobalValue>(V) ||
+ (!isa<Constant>(V) && !isa<MetadataAsValue>(V))) {
+ WriteAsOperandInternal(O, &V, nullptr, Machine, M);
+ return true;
+ }
+ return false;
+}
+
+static void printAsOperandImpl(const Value &V, raw_ostream &O, bool PrintType,
+ ModuleSlotTracker &MST) {
+ TypePrinting TypePrinter;
+ if (const Module *M = MST.getModule())
+ TypePrinter.incorporateTypes(*M);
+ if (PrintType) {
+ TypePrinter.print(V.getType(), O);
+ O << ' ';
+ }
+
+ WriteAsOperandInternal(O, &V, &TypePrinter, MST.getMachine(),
+ MST.getModule());
+}
+
+void Value::printAsOperand(raw_ostream &O, bool PrintType,
+ const Module *M) const {
+ if (!M)
+ M = getModuleFromVal(this);
+
+ if (!PrintType)
+ if (printWithoutType(*this, O, nullptr, M))
+ return;
+
+ SlotTracker Machine(
+ M, /* ShouldInitializeAllMetadata */ isa<MetadataAsValue>(this));
+ ModuleSlotTracker MST(Machine, M);
+ printAsOperandImpl(*this, O, PrintType, MST);
+}
+
+void Value::printAsOperand(raw_ostream &O, bool PrintType,
+ ModuleSlotTracker &MST) const {
+ if (!PrintType)
+ if (printWithoutType(*this, O, MST.getMachine(), MST.getModule()))
+ return;
+
+ printAsOperandImpl(*this, O, PrintType, MST);
+}
+
+static void printMetadataImpl(raw_ostream &ROS, const Metadata &MD,
+ ModuleSlotTracker &MST, const Module *M,
+ bool OnlyAsOperand) {
+ formatted_raw_ostream OS(ROS);
+
+ TypePrinting TypePrinter;
+ if (M)
+ TypePrinter.incorporateTypes(*M);
+
+ WriteAsOperandInternal(OS, &MD, &TypePrinter, MST.getMachine(), M,
+ /* FromValue */ true);
+
+ auto *N = dyn_cast<MDNode>(&MD);
+ if (OnlyAsOperand || !N)
+ return;
+
+ OS << " = ";
+ WriteMDNodeBodyInternal(OS, N, &TypePrinter, MST.getMachine(), M);
+}
+
+void Metadata::printAsOperand(raw_ostream &OS, const Module *M) const {
+ ModuleSlotTracker MST(M, isa<MDNode>(this));
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ true);
+}
+
+void Metadata::printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
+ const Module *M) const {
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ true);
+}
+
+void Metadata::print(raw_ostream &OS, const Module *M,
+ bool /*IsForDebug*/) const {
+ ModuleSlotTracker MST(M, isa<MDNode>(this));
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ false);
+}
+
+void Metadata::print(raw_ostream &OS, ModuleSlotTracker &MST,
+ const Module *M, bool /*IsForDebug*/) const {
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ false);
+}
+
+// Value::dump - allow easy printing of Values from the debugger.
+LLVM_DUMP_METHOD
+void Value::dump() const { print(dbgs(), /*IsForDebug=*/true); dbgs() << '\n'; }
+
+// Type::dump - allow easy printing of Types from the debugger.
+LLVM_DUMP_METHOD
+void Type::dump() const { print(dbgs(), /*IsForDebug=*/true); dbgs() << '\n'; }
+
+// Module::dump() - Allow printing of Modules from the debugger.
+LLVM_DUMP_METHOD
+void Module::dump() const {
+ print(dbgs(), nullptr,
+ /*ShouldPreserveUseListOrder=*/false, /*IsForDebug=*/true);
+}
+
+// Comdat::dump() - Allow printing of Comdats from the debugger.
+LLVM_DUMP_METHOD
+void Comdat::dump() const { print(dbgs(), /*IsForDebug=*/true); }
+
+// NamedMDNode::dump() - Allow printing of NamedMDNodes from the debugger.
+LLVM_DUMP_METHOD
+void NamedMDNode::dump() const { print(dbgs(), /*IsForDebug=*/true); }
+
+LLVM_DUMP_METHOD
+void Metadata::dump() const { dump(nullptr); }
+
+LLVM_DUMP_METHOD
+void Metadata::dump(const Module *M) const {
+ print(dbgs(), M, /*IsForDebug=*/true);
+ dbgs() << '\n';
+}
diff --git a/contrib/llvm/lib/IR/AttributeImpl.h b/contrib/llvm/lib/IR/AttributeImpl.h
new file mode 100644
index 0000000..659f956
--- /dev/null
+++ b/contrib/llvm/lib/IR/AttributeImpl.h
@@ -0,0 +1,284 @@
+//===-- AttributeImpl.h - Attribute Internals -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines various helper methods and classes used by
+/// LLVMContextImpl for creating and managing attributes.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_ATTRIBUTEIMPL_H
+#define LLVM_LIB_IR_ATTRIBUTEIMPL_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/Support/TrailingObjects.h"
+#include <string>
+
+namespace llvm {
+
+class Constant;
+class LLVMContext;
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class represents a single, uniqued attribute. That attribute
+/// could be a single enum, a tuple, or a string.
+class AttributeImpl : public FoldingSetNode {
+ unsigned char KindID; ///< Holds the AttrEntryKind of the attribute
+
+ // AttributeImpl is uniqued; these should not be publicly available.
+ void operator=(const AttributeImpl &) = delete;
+ AttributeImpl(const AttributeImpl &) = delete;
+
+protected:
+ enum AttrEntryKind {
+ EnumAttrEntry,
+ IntAttrEntry,
+ StringAttrEntry
+ };
+
+ AttributeImpl(AttrEntryKind KindID) : KindID(KindID) {}
+
+public:
+ virtual ~AttributeImpl();
+
+ bool isEnumAttribute() const { return KindID == EnumAttrEntry; }
+ bool isIntAttribute() const { return KindID == IntAttrEntry; }
+ bool isStringAttribute() const { return KindID == StringAttrEntry; }
+
+ bool hasAttribute(Attribute::AttrKind A) const;
+ bool hasAttribute(StringRef Kind) const;
+
+ Attribute::AttrKind getKindAsEnum() const;
+ uint64_t getValueAsInt() const;
+
+ StringRef getKindAsString() const;
+ StringRef getValueAsString() const;
+
+ /// \brief Used when sorting the attributes.
+ bool operator<(const AttributeImpl &AI) const;
+
+ void Profile(FoldingSetNodeID &ID) const {
+ if (isEnumAttribute())
+ Profile(ID, getKindAsEnum(), 0);
+ else if (isIntAttribute())
+ Profile(ID, getKindAsEnum(), getValueAsInt());
+ else
+ Profile(ID, getKindAsString(), getValueAsString());
+ }
+ static void Profile(FoldingSetNodeID &ID, Attribute::AttrKind Kind,
+ uint64_t Val) {
+ ID.AddInteger(Kind);
+ if (Val) ID.AddInteger(Val);
+ }
+ static void Profile(FoldingSetNodeID &ID, StringRef Kind, StringRef Values) {
+ ID.AddString(Kind);
+ if (!Values.empty()) ID.AddString(Values);
+ }
+
+ // FIXME: Remove this!
+ static uint64_t getAttrMask(Attribute::AttrKind Val);
+};
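+
+// For illustration, the three entry kinds back attributes built through the
+// public API roughly like this (a sketch; the string attribute shown is just
+// one example of a target-dependent attribute):
+//
+//   Attribute E = Attribute::get(Ctx, Attribute::NoUnwind);      // enum
+//   Attribute I = Attribute::getWithAlignment(Ctx, 16);          // int
+//   Attribute S = Attribute::get(Ctx, "target-cpu", "x86-64");   // string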
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief A set of classes that contain the value of the
+/// attribute object. There are three main categories: enum attribute entries,
+/// represented by Attribute::AttrKind; integer attribute entries, such as
+/// alignment; and string attribute entries, which are for target-dependent
+/// attributes.
+
+class EnumAttributeImpl : public AttributeImpl {
+ virtual void anchor();
+ Attribute::AttrKind Kind;
+
+protected:
+ EnumAttributeImpl(AttrEntryKind ID, Attribute::AttrKind Kind)
+ : AttributeImpl(ID), Kind(Kind) {}
+
+public:
+ EnumAttributeImpl(Attribute::AttrKind Kind)
+ : AttributeImpl(EnumAttrEntry), Kind(Kind) {}
+
+ Attribute::AttrKind getEnumKind() const { return Kind; }
+};
+
+class IntAttributeImpl : public EnumAttributeImpl {
+ void anchor() override;
+ uint64_t Val;
+
+public:
+ IntAttributeImpl(Attribute::AttrKind Kind, uint64_t Val)
+ : EnumAttributeImpl(IntAttrEntry, Kind), Val(Val) {
+ assert((Kind == Attribute::Alignment || Kind == Attribute::StackAlignment ||
+ Kind == Attribute::Dereferenceable ||
+ Kind == Attribute::DereferenceableOrNull) &&
+ "Wrong kind for int attribute!");
+ }
+
+ uint64_t getValue() const { return Val; }
+};
+
+class StringAttributeImpl : public AttributeImpl {
+ virtual void anchor();
+ std::string Kind;
+ std::string Val;
+
+public:
+ StringAttributeImpl(StringRef Kind, StringRef Val = StringRef())
+ : AttributeImpl(StringAttrEntry), Kind(Kind), Val(Val) {}
+
+ StringRef getStringKind() const { return Kind; }
+ StringRef getStringValue() const { return Val; }
+};
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class represents a group of attributes that apply to one
+/// element: function, return type, or parameter.
+class AttributeSetNode final
+ : public FoldingSetNode,
+ private TrailingObjects<AttributeSetNode, Attribute> {
+ friend TrailingObjects;
+
+ unsigned NumAttrs; ///< Number of attributes in this node.
+
+ AttributeSetNode(ArrayRef<Attribute> Attrs) : NumAttrs(Attrs.size()) {
+ // There's memory after the node where we can store the entries.
+ std::copy(Attrs.begin(), Attrs.end(), getTrailingObjects<Attribute>());
+ }
+
+ // AttributeSetNode is uniqued; these should not be publicly available.
+ void operator=(const AttributeSetNode &) = delete;
+ AttributeSetNode(const AttributeSetNode &) = delete;
+public:
+ static AttributeSetNode *get(LLVMContext &C, ArrayRef<Attribute> Attrs);
+
+ bool hasAttribute(Attribute::AttrKind Kind) const;
+ bool hasAttribute(StringRef Kind) const;
+ bool hasAttributes() const { return NumAttrs != 0; }
+
+ Attribute getAttribute(Attribute::AttrKind Kind) const;
+ Attribute getAttribute(StringRef Kind) const;
+
+ unsigned getAlignment() const;
+ unsigned getStackAlignment() const;
+ uint64_t getDereferenceableBytes() const;
+ uint64_t getDereferenceableOrNullBytes() const;
+ std::string getAsString(bool InAttrGrp) const;
+
+ typedef const Attribute *iterator;
+ iterator begin() const { return getTrailingObjects<Attribute>(); }
+ iterator end() const { return begin() + NumAttrs; }
+
+ void Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, makeArrayRef(begin(), end()));
+ }
+ static void Profile(FoldingSetNodeID &ID, ArrayRef<Attribute> AttrList) {
+ for (unsigned I = 0, E = AttrList.size(); I != E; ++I)
+ AttrList[I].Profile(ID);
+ }
+};
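+
+// A minimal usage sketch (ASN is an assumed non-null AttributeSetNode*):
+//
+//   if (ASN->hasAttribute(Attribute::ByVal))
+//     ...;
+//   for (AttributeSetNode::iterator I = ASN->begin(), E = ASN->end();
+//        I != E; ++I)
+//     dbgs() << I->getAsString(/*InAttrGrp=*/false) << '\n';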
+
+typedef std::pair<unsigned, AttributeSetNode *> IndexAttrPair;
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class represents a set of attributes that apply to the function,
+/// return type, and parameters.
+class AttributeSetImpl final
+ : public FoldingSetNode,
+ private TrailingObjects<AttributeSetImpl, IndexAttrPair> {
+ friend class AttributeSet;
+ friend TrailingObjects;
+
+private:
+ LLVMContext &Context;
+ unsigned NumAttrs; ///< Number of entries in this set.
+
+ // Helper fn for TrailingObjects class.
+ size_t numTrailingObjects(OverloadToken<IndexAttrPair>) { return NumAttrs; }
+
+ /// \brief Return a pointer to the IndexAttrPair for the specified slot.
+ const IndexAttrPair *getNode(unsigned Slot) const {
+ return getTrailingObjects<IndexAttrPair>() + Slot;
+ }
+
+ // AttributeSetImpl is uniqued; these should not be publicly available.
+ void operator=(const AttributeSetImpl &) = delete;
+ AttributeSetImpl(const AttributeSetImpl &) = delete;
+public:
+ AttributeSetImpl(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, AttributeSetNode *> > Attrs)
+ : Context(C), NumAttrs(Attrs.size()) {
+
+#ifndef NDEBUG
+ if (Attrs.size() >= 2) {
+ for (const std::pair<unsigned, AttributeSetNode *> *i = Attrs.begin() + 1,
+ *e = Attrs.end();
+ i != e; ++i) {
+ assert((i-1)->first <= i->first && "Attribute set not ordered!");
+ }
+ }
+#endif
+ // There's memory after the node where we can store the entries.
+ std::copy(Attrs.begin(), Attrs.end(), getTrailingObjects<IndexAttrPair>());
+ }
+
+ /// \brief Get the context that created this AttributeSetImpl.
+ LLVMContext &getContext() { return Context; }
+
+ /// \brief Return the number of attributes this AttributeSet contains.
+ unsigned getNumAttributes() const { return NumAttrs; }
+
+ /// \brief Get the index of the given "slot" in the AttrNodes list. This index
+ /// is the index of the return, parameter, or function object that the
+ /// attributes are applied to, not the index into the AttrNodes list where the
+ /// attributes reside.
+ unsigned getSlotIndex(unsigned Slot) const {
+ return getNode(Slot)->first;
+ }
+
+ /// \brief Retrieve the attributes for the given "slot" in the AttrNode list.
+ /// \p Slot is an index into the AttrNodes list, not the index of the return /
+ /// parameter/ function which the attributes apply to.
+ AttributeSet getSlotAttributes(unsigned Slot) const {
+ return AttributeSet::get(Context, *getNode(Slot));
+ }
+
+ /// \brief Retrieve the attribute set node for the given "slot" in the
+ /// AttrNode list.
+ AttributeSetNode *getSlotNode(unsigned Slot) const {
+ return getNode(Slot)->second;
+ }
+
+ typedef AttributeSetNode::iterator iterator;
+ iterator begin(unsigned Slot) const { return getSlotNode(Slot)->begin(); }
+ iterator end(unsigned Slot) const { return getSlotNode(Slot)->end(); }
+
+ void Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, makeArrayRef(getNode(0), getNumAttributes()));
+ }
+ static void Profile(FoldingSetNodeID &ID,
+ ArrayRef<std::pair<unsigned, AttributeSetNode*> > Nodes) {
+ for (unsigned i = 0, e = Nodes.size(); i != e; ++i) {
+ ID.AddInteger(Nodes[i].first);
+ ID.AddPointer(Nodes[i].second);
+ }
+ }
+
+ // FIXME: This atrocity is temporary.
+ uint64_t Raw(unsigned Index) const;
+
+ void dump() const;
+};
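+
+// To illustrate the slot/index distinction: for a function with attributes
+// on the return value, the first parameter, and the function itself, the
+// slots are dense while the indices are the well-known positions
+// (ReturnIndex == 0, parameters from 1, FunctionIndex == ~0U):
+//
+//   Slot 0 -> Index 0    (return value)
+//   Slot 1 -> Index 1    (first parameter)
+//   Slot 2 -> Index ~0U  (the function)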
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/IR/Attributes.cpp b/contrib/llvm/lib/IR/Attributes.cpp
new file mode 100644
index 0000000..6c01bb6
--- /dev/null
+++ b/contrib/llvm/lib/IR/Attributes.cpp
@@ -0,0 +1,1512 @@
+//===-- Attributes.cpp - Implement AttributesList -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// \brief This file implements the Attribute, AttributeImpl, AttrBuilder,
+// AttributeSetImpl, and AttributeSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Function.h"
+#include "AttributeImpl.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Attribute Construction Methods
+//===----------------------------------------------------------------------===//
+
+Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind,
+ uint64_t Val) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ FoldingSetNodeID ID;
+ ID.AddInteger(Kind);
+ if (Val) ID.AddInteger(Val);
+
+ void *InsertPoint;
+ AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+ if (!PA) {
+ // If we didn't find an existing attribute of the same shape, create a new
+ // one and insert it.
+ if (!Val)
+ PA = new EnumAttributeImpl(Kind);
+ else
+ PA = new IntAttributeImpl(Kind, Val);
+ pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the Attribute that we found or created.
+ return Attribute(PA);
+}
+
+Attribute Attribute::get(LLVMContext &Context, StringRef Kind, StringRef Val) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ FoldingSetNodeID ID;
+ ID.AddString(Kind);
+ if (!Val.empty()) ID.AddString(Val);
+
+ void *InsertPoint;
+ AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+ if (!PA) {
+ // If we didn't find an existing attribute of the same shape, create a new
+ // one and insert it.
+ PA = new StringAttributeImpl(Kind, Val);
+ pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the Attribute that we found or created.
+ return Attribute(PA);
+}
+
+Attribute Attribute::getWithAlignment(LLVMContext &Context, uint64_t Align) {
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+ assert(Align <= 0x40000000 && "Alignment too large.");
+ return get(Context, Alignment, Align);
+}
+
+Attribute Attribute::getWithStackAlignment(LLVMContext &Context,
+ uint64_t Align) {
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+ assert(Align <= 0x100 && "Alignment too large.");
+ return get(Context, StackAlignment, Align);
+}
+
+Attribute Attribute::getWithDereferenceableBytes(LLVMContext &Context,
+ uint64_t Bytes) {
+ assert(Bytes && "Bytes must be non-zero.");
+ return get(Context, Dereferenceable, Bytes);
+}
+
+Attribute Attribute::getWithDereferenceableOrNullBytes(LLVMContext &Context,
+ uint64_t Bytes) {
+ assert(Bytes && "Bytes must be non-zero.");
+ return get(Context, DereferenceableOrNull, Bytes);
+}
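+
+// Because attributes are uniqued in the LLVMContext, constructing the same
+// attribute twice yields the same impl pointer, e.g. (illustrative):
+//
+//   Attribute A = Attribute::get(Ctx, Attribute::NoUnwind);
+//   Attribute B = Attribute::get(Ctx, Attribute::NoUnwind);
+//   assert(A == B && "uniqued attributes compare equal");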
+
+//===----------------------------------------------------------------------===//
+// Attribute Accessor Methods
+//===----------------------------------------------------------------------===//
+
+bool Attribute::isEnumAttribute() const {
+ return pImpl && pImpl->isEnumAttribute();
+}
+
+bool Attribute::isIntAttribute() const {
+ return pImpl && pImpl->isIntAttribute();
+}
+
+bool Attribute::isStringAttribute() const {
+ return pImpl && pImpl->isStringAttribute();
+}
+
+Attribute::AttrKind Attribute::getKindAsEnum() const {
+ if (!pImpl) return None;
+ assert((isEnumAttribute() || isIntAttribute()) &&
+ "Invalid attribute type to get the kind as an enum!");
+ return pImpl->getKindAsEnum();
+}
+
+uint64_t Attribute::getValueAsInt() const {
+ if (!pImpl) return 0;
+ assert(isIntAttribute() &&
+ "Expected the attribute to be an integer attribute!");
+ return pImpl->getValueAsInt();
+}
+
+StringRef Attribute::getKindAsString() const {
+ if (!pImpl) return StringRef();
+ assert(isStringAttribute() &&
+ "Invalid attribute type to get the kind as a string!");
+ return pImpl->getKindAsString();
+}
+
+StringRef Attribute::getValueAsString() const {
+ if (!pImpl) return StringRef();
+ assert(isStringAttribute() &&
+ "Invalid attribute type to get the value as a string!");
+ return pImpl->getValueAsString();
+}
+
+bool Attribute::hasAttribute(AttrKind Kind) const {
+ return (pImpl && pImpl->hasAttribute(Kind)) || (!pImpl && Kind == None);
+}
+
+bool Attribute::hasAttribute(StringRef Kind) const {
+ if (!isStringAttribute()) return false;
+ return pImpl && pImpl->hasAttribute(Kind);
+}
+
+/// This returns the alignment field of an attribute as a byte alignment value.
+unsigned Attribute::getAlignment() const {
+ assert(hasAttribute(Attribute::Alignment) &&
+ "Trying to get alignment from non-alignment attribute!");
+ return pImpl->getValueAsInt();
+}
+
+/// This returns the stack alignment field of an attribute as a byte alignment
+/// value.
+unsigned Attribute::getStackAlignment() const {
+ assert(hasAttribute(Attribute::StackAlignment) &&
+ "Trying to get alignment from non-alignment attribute!");
+ return pImpl->getValueAsInt();
+}
+
+/// This returns the number of dereferenceable bytes.
+uint64_t Attribute::getDereferenceableBytes() const {
+ assert(hasAttribute(Attribute::Dereferenceable) &&
+ "Trying to get dereferenceable bytes from "
+ "non-dereferenceable attribute!");
+ return pImpl->getValueAsInt();
+}
+
+uint64_t Attribute::getDereferenceableOrNullBytes() const {
+ assert(hasAttribute(Attribute::DereferenceableOrNull) &&
+ "Trying to get dereferenceable bytes from "
+ "non-dereferenceable attribute!");
+ return pImpl->getValueAsInt();
+}
+
+std::string Attribute::getAsString(bool InAttrGrp) const {
+ if (!pImpl) return "";
+
+ if (hasAttribute(Attribute::SanitizeAddress))
+ return "sanitize_address";
+ if (hasAttribute(Attribute::AlwaysInline))
+ return "alwaysinline";
+ if (hasAttribute(Attribute::ArgMemOnly))
+ return "argmemonly";
+ if (hasAttribute(Attribute::Builtin))
+ return "builtin";
+ if (hasAttribute(Attribute::ByVal))
+ return "byval";
+ if (hasAttribute(Attribute::Convergent))
+ return "convergent";
+ if (hasAttribute(Attribute::InaccessibleMemOnly))
+ return "inaccessiblememonly";
+ if (hasAttribute(Attribute::InaccessibleMemOrArgMemOnly))
+ return "inaccessiblemem_or_argmemonly";
+ if (hasAttribute(Attribute::InAlloca))
+ return "inalloca";
+ if (hasAttribute(Attribute::InlineHint))
+ return "inlinehint";
+ if (hasAttribute(Attribute::InReg))
+ return "inreg";
+ if (hasAttribute(Attribute::JumpTable))
+ return "jumptable";
+ if (hasAttribute(Attribute::MinSize))
+ return "minsize";
+ if (hasAttribute(Attribute::Naked))
+ return "naked";
+ if (hasAttribute(Attribute::Nest))
+ return "nest";
+ if (hasAttribute(Attribute::NoAlias))
+ return "noalias";
+ if (hasAttribute(Attribute::NoBuiltin))
+ return "nobuiltin";
+ if (hasAttribute(Attribute::NoCapture))
+ return "nocapture";
+ if (hasAttribute(Attribute::NoDuplicate))
+ return "noduplicate";
+ if (hasAttribute(Attribute::NoImplicitFloat))
+ return "noimplicitfloat";
+ if (hasAttribute(Attribute::NoInline))
+ return "noinline";
+ if (hasAttribute(Attribute::NonLazyBind))
+ return "nonlazybind";
+ if (hasAttribute(Attribute::NonNull))
+ return "nonnull";
+ if (hasAttribute(Attribute::NoRedZone))
+ return "noredzone";
+ if (hasAttribute(Attribute::NoReturn))
+ return "noreturn";
+ if (hasAttribute(Attribute::NoRecurse))
+ return "norecurse";
+ if (hasAttribute(Attribute::NoUnwind))
+ return "nounwind";
+ if (hasAttribute(Attribute::OptimizeNone))
+ return "optnone";
+ if (hasAttribute(Attribute::OptimizeForSize))
+ return "optsize";
+ if (hasAttribute(Attribute::ReadNone))
+ return "readnone";
+ if (hasAttribute(Attribute::ReadOnly))
+ return "readonly";
+ if (hasAttribute(Attribute::Returned))
+ return "returned";
+ if (hasAttribute(Attribute::ReturnsTwice))
+ return "returns_twice";
+ if (hasAttribute(Attribute::SExt))
+ return "signext";
+ if (hasAttribute(Attribute::StackProtect))
+ return "ssp";
+ if (hasAttribute(Attribute::StackProtectReq))
+ return "sspreq";
+ if (hasAttribute(Attribute::StackProtectStrong))
+ return "sspstrong";
+ if (hasAttribute(Attribute::SafeStack))
+ return "safestack";
+ if (hasAttribute(Attribute::StructRet))
+ return "sret";
+ if (hasAttribute(Attribute::SanitizeThread))
+ return "sanitize_thread";
+ if (hasAttribute(Attribute::SanitizeMemory))
+ return "sanitize_memory";
+ if (hasAttribute(Attribute::UWTable))
+ return "uwtable";
+ if (hasAttribute(Attribute::ZExt))
+ return "zeroext";
+ if (hasAttribute(Attribute::Cold))
+ return "cold";
+
+ // FIXME: These should be output like this:
+ //
+ // align=4
+ // alignstack=8
+ //
+ if (hasAttribute(Attribute::Alignment)) {
+ std::string Result;
+ Result += "align";
+ Result += (InAttrGrp) ? "=" : " ";
+ Result += utostr(getValueAsInt());
+ return Result;
+ }
+
+ auto AttrWithBytesToString = [&](const char *Name) {
+ std::string Result;
+ Result += Name;
+ if (InAttrGrp) {
+ Result += "=";
+ Result += utostr(getValueAsInt());
+ } else {
+ Result += "(";
+ Result += utostr(getValueAsInt());
+ Result += ")";
+ }
+ return Result;
+ };
+
+ if (hasAttribute(Attribute::StackAlignment))
+ return AttrWithBytesToString("alignstack");
+
+ if (hasAttribute(Attribute::Dereferenceable))
+ return AttrWithBytesToString("dereferenceable");
+
+ if (hasAttribute(Attribute::DereferenceableOrNull))
+ return AttrWithBytesToString("dereferenceable_or_null");
+
+ // Convert target-dependent attributes to strings of the form:
+ //
+ // "kind"
+ // "kind" = "value"
+ //
+ if (isStringAttribute()) {
+ std::string Result;
+ Result += (Twine('"') + getKindAsString() + Twine('"')).str();
+
+ StringRef Val = pImpl->getValueAsString();
+ if (Val.empty()) return Result;
+
+ Result += ("=\"" + Val + Twine('"')).str();
+ return Result;
+ }
+
+ llvm_unreachable("Unknown attribute");
+}
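+
+// For reference, the strings produced above take these shapes (values are
+// illustrative):
+//
+//   enum:         "nounwind"
+//   alignment:    "align 4"            or "align=4"           (InAttrGrp)
+//   bytes-valued: "dereferenceable(8)" or "dereferenceable=8" (InAttrGrp)
+//   string:       "target-cpu"="x86-64"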
+
+bool Attribute::operator<(Attribute A) const {
+ if (!pImpl && !A.pImpl) return false;
+ if (!pImpl) return true;
+ if (!A.pImpl) return false;
+ return *pImpl < *A.pImpl;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeImpl Definition
+//===----------------------------------------------------------------------===//
+
+// Pin the vtables to this file.
+AttributeImpl::~AttributeImpl() {}
+void EnumAttributeImpl::anchor() {}
+void IntAttributeImpl::anchor() {}
+void StringAttributeImpl::anchor() {}
+
+bool AttributeImpl::hasAttribute(Attribute::AttrKind A) const {
+ if (isStringAttribute()) return false;
+ return getKindAsEnum() == A;
+}
+
+bool AttributeImpl::hasAttribute(StringRef Kind) const {
+ if (!isStringAttribute()) return false;
+ return getKindAsString() == Kind;
+}
+
+Attribute::AttrKind AttributeImpl::getKindAsEnum() const {
+ assert(isEnumAttribute() || isIntAttribute());
+ return static_cast<const EnumAttributeImpl *>(this)->getEnumKind();
+}
+
+uint64_t AttributeImpl::getValueAsInt() const {
+ assert(isIntAttribute());
+ return static_cast<const IntAttributeImpl *>(this)->getValue();
+}
+
+StringRef AttributeImpl::getKindAsString() const {
+ assert(isStringAttribute());
+ return static_cast<const StringAttributeImpl *>(this)->getStringKind();
+}
+
+StringRef AttributeImpl::getValueAsString() const {
+ assert(isStringAttribute());
+ return static_cast<const StringAttributeImpl *>(this)->getStringValue();
+}
+
+bool AttributeImpl::operator<(const AttributeImpl &AI) const {
+ // This sorts the attributes with Attribute::AttrKinds coming first (sorted
+ // relative to their enum value) and then strings.
+ if (isEnumAttribute()) {
+ if (AI.isEnumAttribute()) return getKindAsEnum() < AI.getKindAsEnum();
+ if (AI.isIntAttribute()) return true;
+ if (AI.isStringAttribute()) return true;
+ }
+
+ if (isIntAttribute()) {
+ if (AI.isEnumAttribute()) return false;
+ if (AI.isIntAttribute()) return getValueAsInt() < AI.getValueAsInt();
+ if (AI.isStringAttribute()) return true;
+ }
+
+ if (AI.isEnumAttribute()) return false;
+ if (AI.isIntAttribute()) return false;
+ if (getKindAsString() == AI.getKindAsString())
+ return getValueAsString() < AI.getValueAsString();
+ return getKindAsString() < AI.getKindAsString();
+}
+
+uint64_t AttributeImpl::getAttrMask(Attribute::AttrKind Val) {
+ // FIXME: Remove this.
+ switch (Val) {
+ case Attribute::EndAttrKinds:
+ llvm_unreachable("Synthetic enumerators which should never get here");
+
+ case Attribute::None: return 0;
+ case Attribute::ZExt: return 1 << 0;
+ case Attribute::SExt: return 1 << 1;
+ case Attribute::NoReturn: return 1 << 2;
+ case Attribute::InReg: return 1 << 3;
+ case Attribute::StructRet: return 1 << 4;
+ case Attribute::NoUnwind: return 1 << 5;
+ case Attribute::NoAlias: return 1 << 6;
+ case Attribute::ByVal: return 1 << 7;
+ case Attribute::Nest: return 1 << 8;
+ case Attribute::ReadNone: return 1 << 9;
+ case Attribute::ReadOnly: return 1 << 10;
+ case Attribute::NoInline: return 1 << 11;
+ case Attribute::AlwaysInline: return 1 << 12;
+ case Attribute::OptimizeForSize: return 1 << 13;
+ case Attribute::StackProtect: return 1 << 14;
+ case Attribute::StackProtectReq: return 1 << 15;
+ case Attribute::Alignment: return 31 << 16;
+ case Attribute::NoCapture: return 1 << 21;
+ case Attribute::NoRedZone: return 1 << 22;
+ case Attribute::NoImplicitFloat: return 1 << 23;
+ case Attribute::Naked: return 1 << 24;
+ case Attribute::InlineHint: return 1 << 25;
+ case Attribute::StackAlignment: return 7 << 26;
+ case Attribute::ReturnsTwice: return 1 << 29;
+ case Attribute::UWTable: return 1 << 30;
+ case Attribute::NonLazyBind: return 1U << 31;
+ case Attribute::SanitizeAddress: return 1ULL << 32;
+ case Attribute::MinSize: return 1ULL << 33;
+ case Attribute::NoDuplicate: return 1ULL << 34;
+ case Attribute::StackProtectStrong: return 1ULL << 35;
+ case Attribute::SanitizeThread: return 1ULL << 36;
+ case Attribute::SanitizeMemory: return 1ULL << 37;
+ case Attribute::NoBuiltin: return 1ULL << 38;
+ case Attribute::Returned: return 1ULL << 39;
+ case Attribute::Cold: return 1ULL << 40;
+ case Attribute::Builtin: return 1ULL << 41;
+ case Attribute::OptimizeNone: return 1ULL << 42;
+ case Attribute::InAlloca: return 1ULL << 43;
+ case Attribute::NonNull: return 1ULL << 44;
+ case Attribute::JumpTable: return 1ULL << 45;
+ case Attribute::Convergent: return 1ULL << 46;
+ case Attribute::SafeStack: return 1ULL << 47;
+ case Attribute::NoRecurse: return 1ULL << 48;
+ case Attribute::InaccessibleMemOnly: return 1ULL << 49;
+ case Attribute::InaccessibleMemOrArgMemOnly: return 1ULL << 50;
+ case Attribute::Dereferenceable:
+ llvm_unreachable("dereferenceable attribute not supported in raw format");
+ break;
+ case Attribute::DereferenceableOrNull:
+ llvm_unreachable("dereferenceable_or_null attribute not supported in raw "
+ "format");
+ break;
+ case Attribute::ArgMemOnly:
+ llvm_unreachable("argmemonly attribute not supported in raw format");
+ break;
+ }
+ llvm_unreachable("Unsupported attribute type");
+}
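+
+// A worked example of the packed layout above: alignment is stored as
+// Log2(Align) + 1 in the five bits starting at bit 16, so a function with
+// nounwind and align 16 packs as
+//
+//   (1 << 5) | ((Log2_32(16) + 1) << 16)  ==  0x20 | (5 << 16)
+//
+// Raw() and addRawValue() below round-trip this encoding.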
+
+//===----------------------------------------------------------------------===//
+// AttributeSetNode Definition
+//===----------------------------------------------------------------------===//
+
+AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
+ ArrayRef<Attribute> Attrs) {
+ if (Attrs.empty())
+ return nullptr;
+
+ // Otherwise, build a key to look up the existing attributes.
+ LLVMContextImpl *pImpl = C.pImpl;
+ FoldingSetNodeID ID;
+
+ SmallVector<Attribute, 8> SortedAttrs(Attrs.begin(), Attrs.end());
+ array_pod_sort(SortedAttrs.begin(), SortedAttrs.end());
+
+ for (Attribute Attr : SortedAttrs)
+ Attr.Profile(ID);
+
+ void *InsertPoint;
+ AttributeSetNode *PA =
+ pImpl->AttrsSetNodes.FindNodeOrInsertPos(ID, InsertPoint);
+
+ // If we didn't find an existing attribute of the same shape, create a new
+ // one and insert it.
+ if (!PA) {
+ // Co-allocate entries after the AttributeSetNode itself.
+ void *Mem = ::operator new(totalSizeToAlloc<Attribute>(SortedAttrs.size()));
+ PA = new (Mem) AttributeSetNode(SortedAttrs);
+ pImpl->AttrsSetNodes.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the AttributeSetNode that we found or created.
+ return PA;
+}
+
+bool AttributeSetNode::hasAttribute(Attribute::AttrKind Kind) const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Kind))
+ return true;
+ return false;
+}
+
+bool AttributeSetNode::hasAttribute(StringRef Kind) const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Kind))
+ return true;
+ return false;
+}
+
+Attribute AttributeSetNode::getAttribute(Attribute::AttrKind Kind) const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Kind))
+ return *I;
+ return Attribute();
+}
+
+Attribute AttributeSetNode::getAttribute(StringRef Kind) const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Kind))
+ return *I;
+ return Attribute();
+}
+
+unsigned AttributeSetNode::getAlignment() const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Attribute::Alignment))
+ return I->getAlignment();
+ return 0;
+}
+
+unsigned AttributeSetNode::getStackAlignment() const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Attribute::StackAlignment))
+ return I->getStackAlignment();
+ return 0;
+}
+
+uint64_t AttributeSetNode::getDereferenceableBytes() const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Attribute::Dereferenceable))
+ return I->getDereferenceableBytes();
+ return 0;
+}
+
+uint64_t AttributeSetNode::getDereferenceableOrNullBytes() const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->hasAttribute(Attribute::DereferenceableOrNull))
+ return I->getDereferenceableOrNullBytes();
+ return 0;
+}
+
+std::string AttributeSetNode::getAsString(bool InAttrGrp) const {
+ std::string Str;
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I != begin())
+ Str += ' ';
+ Str += I->getAsString(InAttrGrp);
+ }
+ return Str;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSetImpl Definition
+//===----------------------------------------------------------------------===//
+
+uint64_t AttributeSetImpl::Raw(unsigned Index) const {
+ for (unsigned I = 0, E = getNumAttributes(); I != E; ++I) {
+ if (getSlotIndex(I) != Index) continue;
+ const AttributeSetNode *ASN = getSlotNode(I);
+ uint64_t Mask = 0;
+
+ for (AttributeSetNode::iterator II = ASN->begin(),
+ IE = ASN->end(); II != IE; ++II) {
+ Attribute Attr = *II;
+
+ // This cannot handle string attributes.
+ if (Attr.isStringAttribute()) continue;
+
+ Attribute::AttrKind Kind = Attr.getKindAsEnum();
+
+ if (Kind == Attribute::Alignment)
+ Mask |= (Log2_32(ASN->getAlignment()) + 1) << 16;
+ else if (Kind == Attribute::StackAlignment)
+ Mask |= (Log2_32(ASN->getStackAlignment()) + 1) << 26;
+ else if (Kind == Attribute::Dereferenceable)
+ llvm_unreachable("dereferenceable not supported in bit mask");
+ else
+ Mask |= AttributeImpl::getAttrMask(Kind);
+ }
+
+ return Mask;
+ }
+
+ return 0;
+}
+
+void AttributeSetImpl::dump() const {
+ AttributeSet(const_cast<AttributeSetImpl *>(this)).dump();
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSet Construction and Mutation Methods
+//===----------------------------------------------------------------------===//
+
+AttributeSet
+AttributeSet::getImpl(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, AttributeSetNode*> > Attrs) {
+ LLVMContextImpl *pImpl = C.pImpl;
+ FoldingSetNodeID ID;
+ AttributeSetImpl::Profile(ID, Attrs);
+
+ void *InsertPoint;
+ AttributeSetImpl *PA = pImpl->AttrsLists.FindNodeOrInsertPos(ID, InsertPoint);
+
+ // If we didn't find an existing attribute of the same shape, create a new
+ // one and insert it.
+ if (!PA) {
+ // Co-allocate entries after the AttributeSetImpl itself.
+ void *Mem = ::operator new(
+ AttributeSetImpl::totalSizeToAlloc<IndexAttrPair>(Attrs.size()));
+ PA = new (Mem) AttributeSetImpl(C, Attrs);
+ pImpl->AttrsLists.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the AttributeSet that we found or created.
+ return AttributeSet(PA);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, Attribute> > Attrs){
+ // If there are no attributes then return an empty AttributeSet.
+ if (Attrs.empty())
+ return AttributeSet();
+
+ assert(std::is_sorted(Attrs.begin(), Attrs.end(),
+ [](const std::pair<unsigned, Attribute> &LHS,
+ const std::pair<unsigned, Attribute> &RHS) {
+ return LHS.first < RHS.first;
+ }) && "Misordered Attributes list!");
+ assert(std::none_of(Attrs.begin(), Attrs.end(),
+ [](const std::pair<unsigned, Attribute> &Pair) {
+ return Pair.second.hasAttribute(Attribute::None);
+ }) && "Pointless attribute!");
+
+ // Create a vector of (unsigned, AttributeSetNode*) pairs from the
+ // attributes list.
+ SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrPairVec;
+ for (ArrayRef<std::pair<unsigned, Attribute> >::iterator I = Attrs.begin(),
+ E = Attrs.end(); I != E; ) {
+ unsigned Index = I->first;
+ SmallVector<Attribute, 4> AttrVec;
+ while (I != E && I->first == Index) {
+ AttrVec.push_back(I->second);
+ ++I;
+ }
+
+ AttrPairVec.push_back(std::make_pair(Index,
+ AttributeSetNode::get(C, AttrVec)));
+ }
+
+ return getImpl(C, AttrPairVec);
+}
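+
+// For example (a sketch), the pair list
+//
+//   { (1, nonnull), (1, align 8), (~0U, nounwind) }
+//
+// collapses into two (index, node) pairs: one node for parameter 1 holding
+// {nonnull, align 8} and one for the function index holding {nounwind}.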
+
+AttributeSet AttributeSet::get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned,
+ AttributeSetNode*> > Attrs) {
+ // If there are no attributes then return an empty AttributeSet.
+ if (Attrs.empty())
+ return AttributeSet();
+
+ return getImpl(C, Attrs);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
+ const AttrBuilder &B) {
+ if (!B.hasAttributes())
+ return AttributeSet();
+
+ // Add target-independent attributes.
+ SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+ for (Attribute::AttrKind Kind = Attribute::None;
+ Kind != Attribute::EndAttrKinds; Kind = Attribute::AttrKind(Kind + 1)) {
+ if (!B.contains(Kind))
+ continue;
+
+ Attribute Attr;
+ switch (Kind) {
+ case Attribute::Alignment:
+ Attr = Attribute::getWithAlignment(C, B.getAlignment());
+ break;
+ case Attribute::StackAlignment:
+ Attr = Attribute::getWithStackAlignment(C, B.getStackAlignment());
+ break;
+ case Attribute::Dereferenceable:
+ Attr = Attribute::getWithDereferenceableBytes(
+ C, B.getDereferenceableBytes());
+ break;
+ case Attribute::DereferenceableOrNull:
+ Attr = Attribute::getWithDereferenceableOrNullBytes(
+ C, B.getDereferenceableOrNullBytes());
+ break;
+ default:
+ Attr = Attribute::get(C, Kind);
+ }
+ Attrs.push_back(std::make_pair(Index, Attr));
+ }
+
+ // Add target-dependent (string) attributes.
+ for (const AttrBuilder::td_type &TDA : B.td_attrs())
+ Attrs.push_back(
+ std::make_pair(Index, Attribute::get(C, TDA.first, TDA.second)));
+
+ return get(C, Attrs);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
+ ArrayRef<Attribute::AttrKind> Kind) {
+ SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+ for (Attribute::AttrKind K : Kind)
+ Attrs.push_back(std::make_pair(Index, Attribute::get(C, K)));
+ return get(C, Attrs);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<AttributeSet> Attrs) {
+ if (Attrs.empty()) return AttributeSet();
+ if (Attrs.size() == 1) return Attrs[0];
+
+ SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrNodeVec;
+ AttributeSetImpl *A0 = Attrs[0].pImpl;
+ if (A0)
+ AttrNodeVec.append(A0->getNode(0), A0->getNode(A0->getNumAttributes()));
+ // Copy all attributes from Attrs into AttrNodeVec while keeping AttrNodeVec
+ // ordered by index. Because we know that each list in Attrs is ordered by
+ // index, we only need to merge in each successive list rather than doing a
+ // full sort.
+ for (unsigned I = 1, E = Attrs.size(); I != E; ++I) {
+ AttributeSetImpl *AS = Attrs[I].pImpl;
+ if (!AS) continue;
+ SmallVector<std::pair<unsigned, AttributeSetNode *>, 8>::iterator
+ ANVI = AttrNodeVec.begin(), ANVE;
+ for (const IndexAttrPair *AI = AS->getNode(0),
+ *AE = AS->getNode(AS->getNumAttributes());
+ AI != AE; ++AI) {
+ ANVE = AttrNodeVec.end();
+ while (ANVI != ANVE && ANVI->first <= AI->first)
+ ++ANVI;
+ ANVI = AttrNodeVec.insert(ANVI, *AI) + 1;
+ }
+ }
+
+ return getImpl(C, AttrNodeVec);
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Attr) const {
+ if (hasAttribute(Index, Attr)) return *this;
+ return addAttributes(C, Index, AttributeSet::get(C, Index, Attr));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index,
+ StringRef Kind) const {
+ llvm::AttrBuilder B;
+ B.addAttribute(Kind);
+ return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index,
+ StringRef Kind, StringRef Value) const {
+ llvm::AttrBuilder B;
+ B.addAttribute(Kind, Value);
+ return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C,
+ ArrayRef<unsigned> Indices,
+ Attribute A) const {
+ unsigned I = 0, E = pImpl ? pImpl->getNumAttributes() : 0;
+ auto IdxI = Indices.begin(), IdxE = Indices.end();
+ SmallVector<AttributeSet, 4> AttrSet;
+
+ while (I != E && IdxI != IdxE) {
+ if (getSlotIndex(I) < *IdxI)
+ AttrSet.emplace_back(getSlotAttributes(I++));
+ else if (getSlotIndex(I) > *IdxI)
+ AttrSet.emplace_back(AttributeSet::get(C, std::make_pair(*IdxI++, A)));
+ else {
+ AttrBuilder B(getSlotAttributes(I), *IdxI);
+ B.addAttribute(A);
+ AttrSet.emplace_back(AttributeSet::get(C, *IdxI, B));
+ ++I;
+ ++IdxI;
+ }
+ }
+
+ while (I != E)
+ AttrSet.emplace_back(getSlotAttributes(I++));
+
+ while (IdxI != IdxE)
+ AttrSet.emplace_back(AttributeSet::get(C, std::make_pair(*IdxI++, A)));
+
+ return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Index,
+ AttributeSet Attrs) const {
+ if (!pImpl) return Attrs;
+ if (!Attrs.pImpl) return *this;
+
+#ifndef NDEBUG
+ // FIXME it is not obvious how this should work for alignment. For now, say
+ // we can't change a known alignment.
+ unsigned OldAlign = getParamAlignment(Index);
+ unsigned NewAlign = Attrs.getParamAlignment(Index);
+ assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
+ "Attempt to change alignment!");
+#endif
+
+ // Add the attribute slots before the one we're trying to add.
+ SmallVector<AttributeSet, 4> AttrSet;
+ uint64_t NumAttrs = pImpl->getNumAttributes();
+ AttributeSet AS;
+ uint64_t LastIndex = 0;
+ for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
+ if (getSlotIndex(I) >= Index) {
+ if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
+ break;
+ }
+ LastIndex = I + 1;
+ AttrSet.push_back(getSlotAttributes(I));
+ }
+
+ // Now add the attribute into the correct slot. There may already be an
+ // AttributeSet there.
+ AttrBuilder B(AS, Index);
+
+ for (unsigned I = 0, E = Attrs.pImpl->getNumAttributes(); I != E; ++I)
+ if (Attrs.getSlotIndex(I) == Index) {
+ for (AttributeSetImpl::iterator II = Attrs.pImpl->begin(I),
+ IE = Attrs.pImpl->end(I); II != IE; ++II)
+ B.addAttribute(*II);
+ break;
+ }
+
+ AttrSet.push_back(AttributeSet::get(C, Index, B));
+
+ // Add the remaining attribute slots.
+ for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
+ AttrSet.push_back(getSlotAttributes(I));
+
+ return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Attr) const {
+ if (!hasAttribute(Index, Attr)) return *this;
+ return removeAttributes(C, Index, AttributeSet::get(C, Index, Attr));
+}
+
+AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index,
+ AttributeSet Attrs) const {
+ if (!pImpl) return AttributeSet();
+ if (!Attrs.pImpl) return *this;
+
+ // FIXME it is not obvious how this should work for alignment.
+ // For now, say we can't pass in alignment, which no current use does.
+ assert(!Attrs.hasAttribute(Index, Attribute::Alignment) &&
+ "Attempt to change alignment!");
+
+ // Add the attribute slots before the one we're trying to add.
+ SmallVector<AttributeSet, 4> AttrSet;
+ uint64_t NumAttrs = pImpl->getNumAttributes();
+ AttributeSet AS;
+ uint64_t LastIndex = 0;
+ for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
+ if (getSlotIndex(I) >= Index) {
+ if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
+ break;
+ }
+ LastIndex = I + 1;
+ AttrSet.push_back(getSlotAttributes(I));
+ }
+
+ // Now remove the attribute from the correct slot. There may already be an
+ // AttributeSet there.
+ AttrBuilder B(AS, Index);
+
+ for (unsigned I = 0, E = Attrs.pImpl->getNumAttributes(); I != E; ++I)
+ if (Attrs.getSlotIndex(I) == Index) {
+ B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Index);
+ break;
+ }
+
+ AttrSet.push_back(AttributeSet::get(C, Index, B));
+
+ // Add the remaining attribute slots.
+ for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
+ AttrSet.push_back(getSlotAttributes(I));
+
+ return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index,
+ const AttrBuilder &Attrs) const {
+ if (!pImpl) return AttributeSet();
+
+ // FIXME it is not obvious how this should work for alignment.
+ // For now, say we can't pass in alignment, which no current use does.
+ assert(!Attrs.hasAlignmentAttr() && "Attempt to change alignment!");
+
+ // Add the attribute slots before the one we're trying to add.
+ SmallVector<AttributeSet, 4> AttrSet;
+ uint64_t NumAttrs = pImpl->getNumAttributes();
+ AttributeSet AS;
+ uint64_t LastIndex = 0;
+ for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
+ if (getSlotIndex(I) >= Index) {
+ if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
+ break;
+ }
+ LastIndex = I + 1;
+ AttrSet.push_back(getSlotAttributes(I));
+ }
+
+ // Now remove the attribute from the correct slot. There may already be an
+ // AttributeSet there.
+ AttrBuilder B(AS, Index);
+ B.remove(Attrs);
+
+ AttrSet.push_back(AttributeSet::get(C, Index, B));
+
+ // Add the remaining attribute slots.
+ for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
+ AttrSet.push_back(getSlotAttributes(I));
+
+ return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::addDereferenceableAttr(LLVMContext &C, unsigned Index,
+ uint64_t Bytes) const {
+ llvm::AttrBuilder B;
+ B.addDereferenceableAttr(Bytes);
+ return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
+
+AttributeSet AttributeSet::addDereferenceableOrNullAttr(LLVMContext &C,
+ unsigned Index,
+ uint64_t Bytes) const {
+ llvm::AttrBuilder B;
+ B.addDereferenceableOrNullAttr(Bytes);
+ return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSet Accessor Methods
+//===----------------------------------------------------------------------===//
+
+LLVMContext &AttributeSet::getContext() const {
+ return pImpl->getContext();
+}
+
+AttributeSet AttributeSet::getParamAttributes(unsigned Index) const {
+ return pImpl && hasAttributes(Index) ?
+ AttributeSet::get(pImpl->getContext(),
+ ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
+ std::make_pair(Index, getAttributes(Index)))) :
+ AttributeSet();
+}
+
+AttributeSet AttributeSet::getRetAttributes() const {
+ return pImpl && hasAttributes(ReturnIndex) ?
+ AttributeSet::get(pImpl->getContext(),
+ ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
+ std::make_pair(ReturnIndex,
+ getAttributes(ReturnIndex)))) :
+ AttributeSet();
+}
+
+AttributeSet AttributeSet::getFnAttributes() const {
+ return pImpl && hasAttributes(FunctionIndex) ?
+ AttributeSet::get(pImpl->getContext(),
+ ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
+ std::make_pair(FunctionIndex,
+ getAttributes(FunctionIndex)))) :
+ AttributeSet();
+}
+
+bool AttributeSet::hasAttribute(unsigned Index, Attribute::AttrKind Kind) const{
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN && ASN->hasAttribute(Kind);
+}
+
+bool AttributeSet::hasAttribute(unsigned Index, StringRef Kind) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN && ASN->hasAttribute(Kind);
+}
+
+bool AttributeSet::hasAttributes(unsigned Index) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN && ASN->hasAttributes();
+}
+
+/// \brief Return true if the specified attribute is set for at least one
+/// parameter or for the return value.
+bool AttributeSet::hasAttrSomewhere(Attribute::AttrKind Attr) const {
+ if (!pImpl) return false;
+
+ for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I)
+ for (AttributeSetImpl::iterator II = pImpl->begin(I),
+ IE = pImpl->end(I); II != IE; ++II)
+ if (II->hasAttribute(Attr))
+ return true;
+
+ return false;
+}
+
+Attribute AttributeSet::getAttribute(unsigned Index,
+ Attribute::AttrKind Kind) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getAttribute(Kind) : Attribute();
+}
+
+Attribute AttributeSet::getAttribute(unsigned Index,
+ StringRef Kind) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getAttribute(Kind) : Attribute();
+}
+
+unsigned AttributeSet::getParamAlignment(unsigned Index) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getAlignment() : 0;
+}
+
+unsigned AttributeSet::getStackAlignment(unsigned Index) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getStackAlignment() : 0;
+}
+
+uint64_t AttributeSet::getDereferenceableBytes(unsigned Index) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getDereferenceableBytes() : 0;
+}
+
+uint64_t AttributeSet::getDereferenceableOrNullBytes(unsigned Index) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getDereferenceableOrNullBytes() : 0;
+}
+
+std::string AttributeSet::getAsString(unsigned Index,
+ bool InAttrGrp) const {
+ AttributeSetNode *ASN = getAttributes(Index);
+ return ASN ? ASN->getAsString(InAttrGrp) : std::string("");
+}
+
+/// \brief The attributes for the specified index are returned.
+AttributeSetNode *AttributeSet::getAttributes(unsigned Index) const {
+ if (!pImpl) return nullptr;
+
+ // Loop through to find the attribute node we want.
+ for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I)
+ if (pImpl->getSlotIndex(I) == Index)
+ return pImpl->getSlotNode(I);
+
+ return nullptr;
+}
+
+AttributeSet::iterator AttributeSet::begin(unsigned Slot) const {
+ if (!pImpl)
+ return ArrayRef<Attribute>().begin();
+ return pImpl->begin(Slot);
+}
+
+AttributeSet::iterator AttributeSet::end(unsigned Slot) const {
+ if (!pImpl)
+ return ArrayRef<Attribute>().end();
+ return pImpl->end(Slot);
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSet Introspection Methods
+//===----------------------------------------------------------------------===//
+
+/// \brief Return the number of slots used in this attribute list. This is the
+/// number of arguments that have an attribute set on them (including the
+/// function itself).
+unsigned AttributeSet::getNumSlots() const {
+ return pImpl ? pImpl->getNumAttributes() : 0;
+}
+
+unsigned AttributeSet::getSlotIndex(unsigned Slot) const {
+ assert(pImpl && Slot < pImpl->getNumAttributes() &&
+ "Slot # out of range!");
+ return pImpl->getSlotIndex(Slot);
+}
+
+AttributeSet AttributeSet::getSlotAttributes(unsigned Slot) const {
+ assert(pImpl && Slot < pImpl->getNumAttributes() &&
+ "Slot # out of range!");
+ return pImpl->getSlotAttributes(Slot);
+}
+
+uint64_t AttributeSet::Raw(unsigned Index) const {
+ // FIXME: Remove this.
+ return pImpl ? pImpl->Raw(Index) : 0;
+}
+
+void AttributeSet::dump() const {
+ dbgs() << "PAL[\n";
+
+ for (unsigned i = 0, e = getNumSlots(); i < e; ++i) {
+ uint64_t Index = getSlotIndex(i);
+ dbgs() << " { ";
+ if (Index == ~0U)
+ dbgs() << "~0U";
+ else
+ dbgs() << Index;
+ dbgs() << " => " << getAsString(Index) << " }\n";
+ }
+
+ dbgs() << "]\n";
+}
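+
+// Sample output of the dump above (contents illustrative):
+//
+//   PAL[
+//    { 1 => nonnull align 8 }
+//    { ~0U => nounwind }
+//   ]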
+
+//===----------------------------------------------------------------------===//
+// AttrBuilder Method Implementations
+//===----------------------------------------------------------------------===//
+
+AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Index)
+ : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
+ DerefOrNullBytes(0) {
+ AttributeSetImpl *pImpl = AS.pImpl;
+ if (!pImpl) return;
+
+ for (unsigned I = 0, E = pImpl->getNumAttributes(); I != E; ++I) {
+ if (pImpl->getSlotIndex(I) != Index) continue;
+
+ for (AttributeSetImpl::iterator II = pImpl->begin(I),
+ IE = pImpl->end(I); II != IE; ++II)
+ addAttribute(*II);
+
+ break;
+ }
+}
+
+void AttrBuilder::clear() {
+ Attrs.reset();
+ TargetDepAttrs.clear();
+ Alignment = StackAlignment = DerefBytes = DerefOrNullBytes = 0;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Val) {
+ assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!");
+ assert(Val != Attribute::Alignment && Val != Attribute::StackAlignment &&
+ Val != Attribute::Dereferenceable &&
+ Val != Attribute::DereferenceableOrNull &&
+ "Adding integer attribute without adding a value!");
+ Attrs[Val] = true;
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(Attribute Attr) {
+ if (Attr.isStringAttribute()) {
+ addAttribute(Attr.getKindAsString(), Attr.getValueAsString());
+ return *this;
+ }
+
+ Attribute::AttrKind Kind = Attr.getKindAsEnum();
+ Attrs[Kind] = true;
+
+ if (Kind == Attribute::Alignment)
+ Alignment = Attr.getAlignment();
+ else if (Kind == Attribute::StackAlignment)
+ StackAlignment = Attr.getStackAlignment();
+ else if (Kind == Attribute::Dereferenceable)
+ DerefBytes = Attr.getDereferenceableBytes();
+ else if (Kind == Attribute::DereferenceableOrNull)
+ DerefOrNullBytes = Attr.getDereferenceableOrNullBytes();
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(StringRef A, StringRef V) {
+ TargetDepAttrs[A] = V;
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
+ assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!");
+ Attrs[Val] = false;
+
+ if (Val == Attribute::Alignment)
+ Alignment = 0;
+ else if (Val == Attribute::StackAlignment)
+ StackAlignment = 0;
+ else if (Val == Attribute::Dereferenceable)
+ DerefBytes = 0;
+ else if (Val == Attribute::DereferenceableOrNull)
+ DerefOrNullBytes = 0;
+
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttributes(AttributeSet A, uint64_t Index) {
+ unsigned Slot = ~0U;
+ for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I)
+ if (A.getSlotIndex(I) == Index) {
+ Slot = I;
+ break;
+ }
+
+ assert(Slot != ~0U && "Couldn't find index in AttributeSet!");
+
+ for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) {
+ Attribute Attr = *I;
+ if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
+ removeAttribute(Attr.getKindAsEnum());
+ } else {
+ assert(Attr.isStringAttribute() && "Invalid attribute type!");
+ removeAttribute(Attr.getKindAsString());
+ }
+ }
+
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttribute(StringRef A) {
+ std::map<std::string, std::string>::iterator I = TargetDepAttrs.find(A);
+ if (I != TargetDepAttrs.end())
+ TargetDepAttrs.erase(I);
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addAlignmentAttr(unsigned Align) {
+ if (Align == 0) return *this;
+
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+ assert(Align <= 0x40000000 && "Alignment too large.");
+
+ Attrs[Attribute::Alignment] = true;
+ Alignment = Align;
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align) {
+ // Default alignment, allow the target to define how to align it.
+ if (Align == 0) return *this;
+
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+ assert(Align <= 0x100 && "Alignment too large.");
+
+ Attrs[Attribute::StackAlignment] = true;
+ StackAlignment = Align;
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addDereferenceableAttr(uint64_t Bytes) {
+ if (Bytes == 0) return *this;
+
+ Attrs[Attribute::Dereferenceable] = true;
+ DerefBytes = Bytes;
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addDereferenceableOrNullAttr(uint64_t Bytes) {
+ if (Bytes == 0)
+ return *this;
+
+ Attrs[Attribute::DereferenceableOrNull] = true;
+ DerefOrNullBytes = Bytes;
+ return *this;
+}
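+
+// Typical builder usage (a sketch; Ctx and Index are assumed to be in
+// scope):
+//
+//   AttrBuilder B;
+//   B.addAttribute(Attribute::NoUnwind)
+//    .addAlignmentAttr(16)
+//    .addDereferenceableAttr(8);
+//   AttributeSet AS = AttributeSet::get(Ctx, Index, B);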
+
+AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) {
+ // FIXME: What if both have alignments, but they don't match?!
+ if (!Alignment)
+ Alignment = B.Alignment;
+
+ if (!StackAlignment)
+ StackAlignment = B.StackAlignment;
+
+ if (!DerefBytes)
+ DerefBytes = B.DerefBytes;
+
+ if (!DerefOrNullBytes)
+ DerefOrNullBytes = B.DerefOrNullBytes;
+
+ Attrs |= B.Attrs;
+
+ for (auto I : B.td_attrs())
+ TargetDepAttrs[I.first] = I.second;
+
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::remove(const AttrBuilder &B) {
+ // FIXME: What if both have alignments, but they don't match?!
+ if (B.Alignment)
+ Alignment = 0;
+
+ if (B.StackAlignment)
+ StackAlignment = 0;
+
+ if (B.DerefBytes)
+ DerefBytes = 0;
+
+ if (B.DerefOrNullBytes)
+ DerefOrNullBytes = 0;
+
+ Attrs &= ~B.Attrs;
+
+ for (auto I : B.td_attrs())
+ TargetDepAttrs.erase(I.first);
+
+ return *this;
+}
+
+bool AttrBuilder::overlaps(const AttrBuilder &B) const {
+ // First check if any of the target independent attributes overlap.
+ if ((Attrs & B.Attrs).any())
+ return true;
+
+ // Then check if any target dependent ones do.
+ for (auto I : td_attrs())
+ if (B.contains(I.first))
+ return true;
+
+ return false;
+}
+
+bool AttrBuilder::contains(StringRef A) const {
+ return TargetDepAttrs.find(A) != TargetDepAttrs.end();
+}
+
+bool AttrBuilder::hasAttributes() const {
+ return !Attrs.none() || !TargetDepAttrs.empty();
+}
+
+bool AttrBuilder::hasAttributes(AttributeSet A, uint64_t Index) const {
+ unsigned Slot = ~0U;
+ for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I)
+ if (A.getSlotIndex(I) == Index) {
+ Slot = I;
+ break;
+ }
+
+ assert(Slot != ~0U && "Couldn't find the index!");
+
+ for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) {
+ Attribute Attr = *I;
+ if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
+ if (Attrs[Attr.getKindAsEnum()])
+ return true;
+ } else {
+ assert(Attr.isStringAttribute() && "Invalid attribute kind!");
+ return TargetDepAttrs.find(Attr.getKindAsString())!=TargetDepAttrs.end();
+ }
+ }
+
+ return false;
+}
+
+bool AttrBuilder::hasAlignmentAttr() const {
+ return Alignment != 0;
+}
+
+bool AttrBuilder::operator==(const AttrBuilder &B) {
+ if (Attrs != B.Attrs)
+ return false;
+
+ for (td_const_iterator I = TargetDepAttrs.begin(),
+ E = TargetDepAttrs.end(); I != E; ++I)
+ if (B.TargetDepAttrs.find(I->first) == B.TargetDepAttrs.end())
+ return false;
+
+ return Alignment == B.Alignment && StackAlignment == B.StackAlignment &&
+ DerefBytes == B.DerefBytes && DerefOrNullBytes == B.DerefOrNullBytes;
+}
+
+AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) {
+ // FIXME: Remove this in 4.0.
+ if (!Val) return *this;
+
+ for (Attribute::AttrKind I = Attribute::None; I != Attribute::EndAttrKinds;
+ I = Attribute::AttrKind(I + 1)) {
+ if (I == Attribute::Dereferenceable ||
+ I == Attribute::DereferenceableOrNull ||
+ I == Attribute::ArgMemOnly)
+ continue;
+ if (uint64_t A = (Val & AttributeImpl::getAttrMask(I))) {
+ Attrs[I] = true;
+
+ if (I == Attribute::Alignment)
+ Alignment = 1ULL << ((A >> 16) - 1);
+ else if (I == Attribute::StackAlignment)
+ StackAlignment = 1ULL << ((A >> 26)-1);
+ }
+ }
+
+ return *this;
+}
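+
+// Decoding example for the loop above: a raw value of (1 << 5) | (5 << 16)
+// sets the NoUnwind bit and, because the alignment field stores
+// Log2(Align) + 1, restores Alignment = 1 << (5 - 1) = 16.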
+
+//===----------------------------------------------------------------------===//
+// AttributeFuncs Function Definitions
+//===----------------------------------------------------------------------===//
+
+/// \brief Which attributes cannot be applied to a type.
+AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
+ AttrBuilder Incompatible;
+
+ if (!Ty->isIntegerTy())
+ // Attributes that only apply to integers.
+ Incompatible.addAttribute(Attribute::SExt)
+ .addAttribute(Attribute::ZExt);
+
+ if (!Ty->isPointerTy())
+ // Attributes that only apply to pointers.
+ Incompatible.addAttribute(Attribute::ByVal)
+ .addAttribute(Attribute::Nest)
+ .addAttribute(Attribute::NoAlias)
+ .addAttribute(Attribute::NoCapture)
+ .addAttribute(Attribute::NonNull)
+ .addDereferenceableAttr(1) // the int here is ignored
+ .addDereferenceableOrNullAttr(1) // the int here is ignored
+ .addAttribute(Attribute::ReadNone)
+ .addAttribute(Attribute::ReadOnly)
+ .addAttribute(Attribute::StructRet)
+ .addAttribute(Attribute::InAlloca);
+
+ return Incompatible;
+}
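+
+// A typical query against the builder above (a sketch of a verifier-style
+// check):
+//
+//   if (AttributeFuncs::typeIncompatible(ParamTy).contains(Attribute::ZExt))
+//     report_fatal_error("zeroext only applies to integer parameters");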
+
+template<typename AttrClass>
+static bool isEqual(const Function &Caller, const Function &Callee) {
+ return Caller.getFnAttribute(AttrClass::getKind()) ==
+ Callee.getFnAttribute(AttrClass::getKind());
+}
+
+/// \brief Compute the logical AND of the attributes of the caller and the
+/// callee.
+///
+/// This function sets the caller's attribute to false if the callee's attribute
+/// is false.
+template<typename AttrClass>
+static void setAND(Function &Caller, const Function &Callee) {
+ if (AttrClass::isSet(Caller, AttrClass::getKind()) &&
+ !AttrClass::isSet(Callee, AttrClass::getKind()))
+ AttrClass::set(Caller, AttrClass::getKind(), false);
+}
+
+/// \brief Compute the logical OR of the attributes of the caller and the
+/// callee.
+///
+/// This function sets the caller's attribute to true if the callee's attribute
+/// is true.
+template<typename AttrClass>
+static void setOR(Function &Caller, const Function &Callee) {
+ if (!AttrClass::isSet(Caller, AttrClass::getKind()) &&
+ AttrClass::isSet(Callee, AttrClass::getKind()))
+ AttrClass::set(Caller, AttrClass::getKind(), true);
+}
+
+/// \brief If the inlined function had a higher stack protection level than the
+/// calling function, then bump up the caller's stack protection level.
+static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
+ // If upgrading the SSP attribute, clear out the old SSP Attributes first.
+ // Having multiple SSP attributes doesn't actually hurt, but it adds useless
+ // clutter to the IR.
+ AttrBuilder B;
+ B.addAttribute(Attribute::StackProtect)
+ .addAttribute(Attribute::StackProtectStrong)
+ .addAttribute(Attribute::StackProtectReq);
+ AttributeSet OldSSPAttr = AttributeSet::get(Caller.getContext(),
+ AttributeSet::FunctionIndex,
+ B);
+
+ if (Callee.hasFnAttribute(Attribute::SafeStack)) {
+ Caller.removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+ Caller.addFnAttr(Attribute::SafeStack);
+ } else if (Callee.hasFnAttribute(Attribute::StackProtectReq) &&
+ !Caller.hasFnAttribute(Attribute::SafeStack)) {
+ Caller.removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+ Caller.addFnAttr(Attribute::StackProtectReq);
+ } else if (Callee.hasFnAttribute(Attribute::StackProtectStrong) &&
+ !Caller.hasFnAttribute(Attribute::SafeStack) &&
+ !Caller.hasFnAttribute(Attribute::StackProtectReq)) {
+ Caller.removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+ Caller.addFnAttr(Attribute::StackProtectStrong);
+ } else if (Callee.hasFnAttribute(Attribute::StackProtect) &&
+ !Caller.hasFnAttribute(Attribute::SafeStack) &&
+ !Caller.hasFnAttribute(Attribute::StackProtectReq) &&
+ !Caller.hasFnAttribute(Attribute::StackProtectStrong))
+ Caller.addFnAttr(Attribute::StackProtect);
+}
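+// For example, inlining a callee marked sspreq into a caller marked ssp
+// removes the caller's ssp attribute and upgrades it to sspreq; a caller
+// using safestack is never downgraded, since every branch above checks that
+// the caller does not already have the safestack attribute.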
+
+#define GET_ATTR_COMPAT_FUNC
+#include "AttributesCompatFunc.inc"
+
+bool AttributeFuncs::areInlineCompatible(const Function &Caller,
+ const Function &Callee) {
+ return hasCompatibleFnAttrs(Caller, Callee);
+}
+
+void AttributeFuncs::mergeAttributesForInlining(Function &Caller,
+ const Function &Callee) {
+ mergeFnAttrs(Caller, Callee);
+}
diff --git a/contrib/llvm/lib/IR/AttributesCompatFunc.td b/contrib/llvm/lib/IR/AttributesCompatFunc.td
new file mode 100644
index 0000000..7c85b3d
--- /dev/null
+++ b/contrib/llvm/lib/IR/AttributesCompatFunc.td
@@ -0,0 +1 @@
+include "llvm/IR/Attributes.td"
diff --git a/contrib/llvm/lib/IR/AutoUpgrade.cpp b/contrib/llvm/lib/IR/AutoUpgrade.cpp
new file mode 100644
index 0000000..12c354c
--- /dev/null
+++ b/contrib/llvm/lib/IR/AutoUpgrade.cpp
@@ -0,0 +1,904 @@
+//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the auto-upgrade helper functions.
+// This is where deprecated IR intrinsics and other IR features are updated to
+// current specifications.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Regex.h"
+#include <cstring>
+using namespace llvm;
+
+// Upgrade the declarations of the SSE4.1 functions whose arguments have
+// changed their type from v4f32 to v2i64.
+static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID,
+ Function *&NewFn) {
+ // Check whether this is an old version of the function, which received
+ // v4f32 arguments.
+ Type *Arg0Type = F->getFunctionType()->getParamType(0);
+ if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
+ return false;
+
+ // Yes, it's old, replace it with new version.
+ F->setName(F->getName() + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ return true;
+}
+
+// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
+// arguments have changed their type from i32 to i8.
+static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
+ Function *&NewFn) {
+ // Check that the last argument is an i32.
+ Type *LastArgType = F->getFunctionType()->getParamType(
+ F->getFunctionType()->getNumParams() - 1);
+ if (!LastArgType->isIntegerTy(32))
+ return false;
+
+ // Move this function aside and map down.
+ F->setName(F->getName() + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ return true;
+}
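+// Illustrative example: an old declaration of llvm.x86.sse41.insertps taking
+// an i32 mask is renamed to "llvm.x86.sse41.insertps.old" and NewFn is set to
+// the current i8-mask declaration; UpgradeIntrinsicCall later rewrites each
+// call site, truncating the mask operand to i8.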
+
+static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
+ assert(F && "Illegal to upgrade a non-existent Function.");
+
+ // Quickly eliminate it if it's not a candidate.
+ StringRef Name = F->getName();
+ if (Name.size() <= 8 || !Name.startswith("llvm."))
+ return false;
+ Name = Name.substr(5); // Strip off "llvm."
+
+ switch (Name[0]) {
+ default: break;
+ case 'a': {
+ if (Name.startswith("arm.neon.vclz")) {
+ Type* args[2] = {
+ F->arg_begin()->getType(),
+ Type::getInt1Ty(F->getContext())
+ };
+ // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
+ // the end of the name. Change name from llvm.arm.neon.vclz.* to
+ // llvm.ctlz.*
+ FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
+ NewFn = Function::Create(fType, F->getLinkage(),
+ "llvm.ctlz." + Name.substr(14), F->getParent());
+ return true;
+ }
+ if (Name.startswith("arm.neon.vcnt")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
+ F->arg_begin()->getType());
+ return true;
+ }
+ Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
+ if (vldRegex.match(Name)) {
+ auto fArgs = F->getFunctionType()->params();
+ SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
+ // Can't use Intrinsic::getDeclaration here as the return types might
+ // then only be structurally equal.
+ FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
+ NewFn = Function::Create(fType, F->getLinkage(),
+ "llvm." + Name + ".p0i8", F->getParent());
+ return true;
+ }
+ Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
+ if (vstRegex.match(Name)) {
+ static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
+ Intrinsic::arm_neon_vst2,
+ Intrinsic::arm_neon_vst3,
+ Intrinsic::arm_neon_vst4};
+
+ static const Intrinsic::ID StoreLaneInts[] = {
+ Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
+ Intrinsic::arm_neon_vst4lane
+ };
+
+ auto fArgs = F->getFunctionType()->params();
+ Type *Tys[] = {fArgs[0], fArgs[1]};
+ if (Name.find("lane") == StringRef::npos)
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ StoreInts[fArgs.size() - 3], Tys);
+ else
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ StoreLaneInts[fArgs.size() - 5], Tys);
+ return true;
+ }
+ break;
+ }
+
+ case 'c': {
+ if (Name.startswith("ctlz.") && F->arg_size() == 1) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("cttz.") && F->arg_size() == 1) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
+ F->arg_begin()->getType());
+ return true;
+ }
+ break;
+ }
+
+ case 'o':
+ // We only need to change the name to match the mangling including the
+ // address space.
+ if (F->arg_size() == 2 && Name.startswith("objectsize.")) {
+ Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
+ if (F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::objectsize, Tys);
+ return true;
+ }
+ }
+ break;
+
+ case 'x': {
+ if (Name.startswith("x86.sse2.pcmpeq.") ||
+ Name.startswith("x86.sse2.pcmpgt.") ||
+ Name.startswith("x86.avx2.pcmpeq.") ||
+ Name.startswith("x86.avx2.pcmpgt.") ||
+ Name.startswith("x86.avx2.vbroadcast") ||
+ Name.startswith("x86.avx2.pbroadcast") ||
+ Name.startswith("x86.avx.vpermil.") ||
+ Name.startswith("x86.sse41.pmovsx") ||
+ Name == "x86.avx.vinsertf128.pd.256" ||
+ Name == "x86.avx.vinsertf128.ps.256" ||
+ Name == "x86.avx.vinsertf128.si.256" ||
+ Name == "x86.avx2.vinserti128" ||
+ Name == "x86.avx.vextractf128.pd.256" ||
+ Name == "x86.avx.vextractf128.ps.256" ||
+ Name == "x86.avx.vextractf128.si.256" ||
+ Name == "x86.avx2.vextracti128" ||
+ Name == "x86.avx.movnt.dq.256" ||
+ Name == "x86.avx.movnt.pd.256" ||
+ Name == "x86.avx.movnt.ps.256" ||
+ Name == "x86.sse42.crc32.64.8" ||
+ Name == "x86.avx.vbroadcast.ss" ||
+ Name == "x86.avx.vbroadcast.ss.256" ||
+ Name == "x86.avx.vbroadcast.sd.256" ||
+ Name == "x86.sse2.psll.dq" ||
+ Name == "x86.sse2.psrl.dq" ||
+ Name == "x86.avx2.psll.dq" ||
+ Name == "x86.avx2.psrl.dq" ||
+ Name == "x86.sse2.psll.dq.bs" ||
+ Name == "x86.sse2.psrl.dq.bs" ||
+ Name == "x86.avx2.psll.dq.bs" ||
+ Name == "x86.avx2.psrl.dq.bs" ||
+ Name == "x86.sse41.pblendw" ||
+ Name == "x86.sse41.blendpd" ||
+ Name == "x86.sse41.blendps" ||
+ Name == "x86.avx.blend.pd.256" ||
+ Name == "x86.avx.blend.ps.256" ||
+ Name == "x86.avx2.pblendw" ||
+ Name == "x86.avx2.pblendd.128" ||
+ Name == "x86.avx2.pblendd.256" ||
+ Name == "x86.avx2.vbroadcasti128" ||
+ Name == "x86.xop.vpcmov" ||
+ (Name.startswith("x86.xop.vpcom") && F->arg_size() == 2)) {
+ NewFn = nullptr;
+ return true;
+ }
+ // SSE4.1 ptest functions may have an old signature.
+ if (Name.startswith("x86.sse41.ptest")) {
+ if (Name == "x86.sse41.ptestc")
+ return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn);
+ if (Name == "x86.sse41.ptestz")
+ return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn);
+ if (Name == "x86.sse41.ptestnzc")
+ return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
+ }
+ // Several blend and other instructions with masks used the wrong number of
+ // bits.
+ if (Name == "x86.sse41.insertps")
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
+ NewFn);
+ if (Name == "x86.sse41.dppd")
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
+ NewFn);
+ if (Name == "x86.sse41.dpps")
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
+ NewFn);
+ if (Name == "x86.sse41.mpsadbw")
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
+ NewFn);
+ if (Name == "x86.avx.dp.ps.256")
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
+ NewFn);
+ if (Name == "x86.avx2.mpsadbw")
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
+ NewFn);
+
+ // frcz.ss/sd may need to have an argument dropped
+ if (Name.startswith("x86.xop.vfrcz.ss") && F->arg_size() == 2) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_ss);
+ return true;
+ }
+ if (Name.startswith("x86.xop.vfrcz.sd") && F->arg_size() == 2) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_sd);
+ return true;
+ }
+ // Fix the FMA4 intrinsics to remove the 4
+ if (Name.startswith("x86.fma4.")) {
+ F->setName("llvm.x86.fma" + Name.substr(8));
+ NewFn = F;
+ return true;
+ }
+ break;
+ }
+ }
+
+ // This may not belong here. This function is effectively being overloaded
+ // to both detect an intrinsic which needs upgrading, and to provide the
+ // upgraded form of the intrinsic. We should perhaps have two separate
+ // functions for this.
+ return false;
+}
+
+bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
+ NewFn = nullptr;
+ bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
+ assert(F != NewFn && "Intrinsic function upgraded to the same function");
+
+ // Upgrade intrinsic attributes. This does not change the function.
+ if (NewFn)
+ F = NewFn;
+ if (Intrinsic::ID id = F->getIntrinsicID())
+ F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
+ return Upgraded;
+}
+
+bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
+ // Nothing to do yet.
+ return false;
+}
+
+// Handles upgrading SSE2 and AVX2 PSLLDQ intrinsics by converting them
+// to byte shuffles.
+static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
+ Value *Op, unsigned NumLanes,
+ unsigned Shift) {
+ // Each lane is 16 bytes.
+ unsigned NumElts = NumLanes * 16;
+
+ // Bitcast from a 64-bit element type to a byte element type.
+ Op = Builder.CreateBitCast(Op,
+ VectorType::get(Type::getInt8Ty(C), NumElts),
+ "cast");
+ // We'll be shuffling in zeroes.
+ Value *Res = ConstantVector::getSplat(NumElts, Builder.getInt8(0));
+
+ // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
+ // we'll just return the zero vector.
+ if (Shift < 16) {
+ SmallVector<Constant*, 32> Idxs;
+ // 256-bit version is split into two 16-byte lanes.
+ for (unsigned l = 0; l != NumElts; l += 16)
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = NumElts + i - Shift;
+ if (Idx < NumElts)
+ Idx -= NumElts - 16; // end of lane, switch operand.
+ Idxs.push_back(Builder.getInt32(Idx + l));
+ }
+
+ Res = Builder.CreateShuffleVector(Res, Op, ConstantVector::get(Idxs));
+ }
+
+ // Bitcast back to a 64-bit element type.
+ return Builder.CreateBitCast(Res,
+ VectorType::get(Type::getInt64Ty(C), 2*NumLanes),
+ "cast");
+}
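+// Example: NumLanes = 1 and Shift = 4 produce the shuffle mask
+// <12,13,...,27>, i.e. four zeros from Res followed by bytes 0-11 of Op,
+// which is exactly what pslldq by four bytes computes.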
+
+// Handles upgrading SSE2 and AVX2 PSRLDQ intrinsics by converting them
+// to byte shuffles.
+static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
+ Value *Op, unsigned NumLanes,
+ unsigned Shift) {
+ // Each lane is 16 bytes.
+ unsigned NumElts = NumLanes * 16;
+
+ // Bitcast from a 64-bit element type to a byte element type.
+ Op = Builder.CreateBitCast(Op,
+ VectorType::get(Type::getInt8Ty(C), NumElts),
+ "cast");
+ // We'll be shuffling in zeroes.
+ Value *Res = ConstantVector::getSplat(NumElts, Builder.getInt8(0));
+
+ // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
+ // we'll just return the zero vector.
+ if (Shift < 16) {
+ SmallVector<Constant*, 32> Idxs;
+ // 256-bit version is split into two 16-byte lanes.
+ for (unsigned l = 0; l != NumElts; l += 16)
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = i + Shift;
+ if (Idx >= 16)
+ Idx += NumElts - 16; // end of lane, switch operand.
+ Idxs.push_back(Builder.getInt32(Idx + l));
+ }
+
+ Res = Builder.CreateShuffleVector(Op, Res, ConstantVector::get(Idxs));
+ }
+
+ // Bitcast back to a 64-bit element type.
+ return Builder.CreateBitCast(Res,
+ VectorType::get(Type::getInt64Ty(C), 2*NumLanes),
+ "cast");
+}
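+// Example: NumLanes = 1 and Shift = 4 produce the shuffle mask <4,5,...,19>,
+// i.e. bytes 4-15 of Op followed by four zeros from Res, matching psrldq by
+// four bytes.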
+
+// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to
+// the upgraded intrinsic. All argument and return casting must be provided in
+// order to seamlessly integrate with existing context.
+void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
+ Function *F = CI->getCalledFunction();
+ LLVMContext &C = CI->getContext();
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
+
+ assert(F && "Intrinsic call is not direct?");
+
+ if (!NewFn) {
+ // Get the Function's name.
+ StringRef Name = F->getName();
+
+ Value *Rep;
+ // Upgrade packed integer vector compare intrinsics to compare instructions.
+ if (Name.startswith("llvm.x86.sse2.pcmpeq.") ||
+ Name.startswith("llvm.x86.avx2.pcmpeq.")) {
+ Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1),
+ "pcmpeq");
+ // need to sign extend since icmp returns vector of i1
+ Rep = Builder.CreateSExt(Rep, CI->getType(), "");
+ } else if (Name.startswith("llvm.x86.sse2.pcmpgt.") ||
+ Name.startswith("llvm.x86.avx2.pcmpgt.")) {
+ Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1),
+ "pcmpgt");
+ // need to sign extend since icmp returns vector of i1
+ Rep = Builder.CreateSExt(Rep, CI->getType(), "");
+ } else if (Name == "llvm.x86.avx.movnt.dq.256" ||
+ Name == "llvm.x86.avx.movnt.ps.256" ||
+ Name == "llvm.x86.avx.movnt.pd.256") {
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
+
+ Module *M = F->getParent();
+ SmallVector<Metadata *, 1> Elts;
+ Elts.push_back(
+ ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
+ MDNode *Node = MDNode::get(C, Elts);
+
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC = Builder.CreateBitCast(Arg0,
+ PointerType::getUnqual(Arg1->getType()),
+ "cast");
+ StoreInst *SI = Builder.CreateStore(Arg1, BC);
+ SI->setMetadata(M->getMDKindID("nontemporal"), Node);
+ SI->setAlignment(32);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ } else if (Name.startswith("llvm.x86.xop.vpcom")) {
+ Intrinsic::ID intID;
+ if (Name.endswith("ub"))
+ intID = Intrinsic::x86_xop_vpcomub;
+ else if (Name.endswith("uw"))
+ intID = Intrinsic::x86_xop_vpcomuw;
+ else if (Name.endswith("ud"))
+ intID = Intrinsic::x86_xop_vpcomud;
+ else if (Name.endswith("uq"))
+ intID = Intrinsic::x86_xop_vpcomuq;
+ else if (Name.endswith("b"))
+ intID = Intrinsic::x86_xop_vpcomb;
+ else if (Name.endswith("w"))
+ intID = Intrinsic::x86_xop_vpcomw;
+ else if (Name.endswith("d"))
+ intID = Intrinsic::x86_xop_vpcomd;
+ else if (Name.endswith("q"))
+ intID = Intrinsic::x86_xop_vpcomq;
+ else
+ llvm_unreachable("Unknown suffix");
+
+ Name = Name.substr(18); // strip off "llvm.x86.xop.vpcom"
+ unsigned Imm;
+ if (Name.startswith("lt"))
+ Imm = 0;
+ else if (Name.startswith("le"))
+ Imm = 1;
+ else if (Name.startswith("gt"))
+ Imm = 2;
+ else if (Name.startswith("ge"))
+ Imm = 3;
+ else if (Name.startswith("eq"))
+ Imm = 4;
+ else if (Name.startswith("ne"))
+ Imm = 5;
+ else if (Name.startswith("false"))
+ Imm = 6;
+ else if (Name.startswith("true"))
+ Imm = 7;
+ else
+ llvm_unreachable("Unknown condition");
+
+ Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
+ Rep =
+ Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
+ Builder.getInt8(Imm)});
+ } else if (Name == "llvm.x86.xop.vpcmov") {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+ Value *Sel = CI->getArgOperand(2);
+ unsigned NumElts = CI->getType()->getVectorNumElements();
+ Constant *MinusOne = ConstantVector::getSplat(NumElts, Builder.getInt64(-1));
+ Value *NotSel = Builder.CreateXor(Sel, MinusOne);
+ Value *Sel0 = Builder.CreateAnd(Arg0, Sel);
+ Value *Sel1 = Builder.CreateAnd(Arg1, NotSel);
+ Rep = Builder.CreateOr(Sel0, Sel1);
+ } else if (Name == "llvm.x86.sse42.crc32.64.8") {
+ Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_sse42_crc32_32_8);
+ Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
+ Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
+ Rep = Builder.CreateZExt(Rep, CI->getType(), "");
+ } else if (Name.startswith("llvm.x86.avx.vbroadcast")) {
+ // Replace broadcasts with a series of insertelements.
+ Type *VecTy = CI->getType();
+ Type *EltTy = VecTy->getVectorElementType();
+ unsigned EltNum = VecTy->getVectorNumElements();
+ Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
+ EltTy->getPointerTo());
+ Value *Load = Builder.CreateLoad(EltTy, Cast);
+ Type *I32Ty = Type::getInt32Ty(C);
+ Rep = UndefValue::get(VecTy);
+ for (unsigned I = 0; I < EltNum; ++I)
+ Rep = Builder.CreateInsertElement(Rep, Load,
+ ConstantInt::get(I32Ty, I));
+ } else if (Name.startswith("llvm.x86.sse41.pmovsx")) {
+ VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
+ VectorType *DstTy = cast<VectorType>(CI->getType());
+ unsigned NumDstElts = DstTy->getNumElements();
+
+ // Extract a subvector of the first NumDstElts lanes and sign extend.
+ SmallVector<int, 8> ShuffleMask;
+ for (int i = 0; i != (int)NumDstElts; ++i)
+ ShuffleMask.push_back(i);
+
+ Value *SV = Builder.CreateShuffleVector(
+ CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);
+ Rep = Builder.CreateSExt(SV, DstTy);
+ } else if (Name == "llvm.x86.avx2.vbroadcasti128") {
+ // Replace vbroadcasts with a vector shuffle.
+ Type *VT = VectorType::get(Type::getInt64Ty(C), 2);
+ Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
+ PointerType::getUnqual(VT));
+ Value *Load = Builder.CreateLoad(VT, Op);
+ const int Idxs[4] = { 0, 1, 0, 1 };
+ Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
+ Idxs);
+ } else if (Name.startswith("llvm.x86.avx2.pbroadcast") ||
+ Name.startswith("llvm.x86.avx2.vbroadcast")) {
+ // Replace vp?broadcasts with a vector shuffle.
+ Value *Op = CI->getArgOperand(0);
+ unsigned NumElts = CI->getType()->getVectorNumElements();
+ Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
+ Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
+ Constant::getNullValue(MaskTy));
+ } else if (Name == "llvm.x86.sse2.psll.dq") {
+ // 128-bit shift left specified in bits.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0), 1,
+ Shift / 8); // Shift is in bits.
+ } else if (Name == "llvm.x86.sse2.psrl.dq") {
+ // 128-bit shift right specified in bits.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0), 1,
+ Shift / 8); // Shift is in bits.
+ } else if (Name == "llvm.x86.avx2.psll.dq") {
+ // 256-bit shift left specified in bits.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0), 2,
+ Shift / 8); // Shift is in bits.
+ } else if (Name == "llvm.x86.avx2.psrl.dq") {
+ // 256-bit shift right specified in bits.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0), 2,
+ Shift / 8); // Shift is in bits.
+ } else if (Name == "llvm.x86.sse2.psll.dq.bs") {
+ // 128-bit shift left specified in bytes.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0), 1,
+ Shift);
+ } else if (Name == "llvm.x86.sse2.psrl.dq.bs") {
+ // 128-bit shift right specified in bytes.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0), 1,
+ Shift);
+ } else if (Name == "llvm.x86.avx2.psll.dq.bs") {
+ // 256-bit shift left specified in bytes.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0), 2,
+ Shift);
+ } else if (Name == "llvm.x86.avx2.psrl.dq.bs") {
+ // 256-bit shift right specified in bytes.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0), 2,
+ Shift);
+ } else if (Name == "llvm.x86.sse41.pblendw" ||
+ Name == "llvm.x86.sse41.blendpd" ||
+ Name == "llvm.x86.sse41.blendps" ||
+ Name == "llvm.x86.avx.blend.pd.256" ||
+ Name == "llvm.x86.avx.blend.ps.256" ||
+ Name == "llvm.x86.avx2.pblendw" ||
+ Name == "llvm.x86.avx2.pblendd.128" ||
+ Name == "llvm.x86.avx2.pblendd.256") {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ VectorType *VecTy = cast<VectorType>(CI->getType());
+ unsigned NumElts = VecTy->getNumElements();
+
+ SmallVector<Constant*, 16> Idxs;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ unsigned Idx = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
+ Idxs.push_back(Builder.getInt32(Idx));
+ }
+
+ Rep = Builder.CreateShuffleVector(Op0, Op1, ConstantVector::get(Idxs));
+ } else if (Name == "llvm.x86.avx.vinsertf128.pd.256" ||
+ Name == "llvm.x86.avx.vinsertf128.ps.256" ||
+ Name == "llvm.x86.avx.vinsertf128.si.256" ||
+ Name == "llvm.x86.avx2.vinserti128") {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ VectorType *VecTy = cast<VectorType>(CI->getType());
+ unsigned NumElts = VecTy->getNumElements();
+
+ // Mask off the high bits of the immediate value; hardware ignores those.
+ Imm = Imm & 1;
+
+ // Extend the second operand into a vector that is twice as big.
+ Value *UndefV = UndefValue::get(Op1->getType());
+ SmallVector<Constant*, 8> Idxs;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Idxs.push_back(Builder.getInt32(i));
+ }
+ Rep = Builder.CreateShuffleVector(Op1, UndefV, ConstantVector::get(Idxs));
+
+ // Insert the second operand into the first operand.
+
+ // Note that there is no guarantee that instruction lowering will actually
+ // produce a vinsertf128 instruction for the created shuffles. In
+ // particular, the 0 immediate case involves no lane changes, so it can
+ // be handled as a blend.
+
+ // Example of shuffle mask for 32-bit elements:
+ // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+ // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
+
+ SmallVector<Constant*, 8> Idxs2;
+ // The low half of the result is either the low half of the 1st operand
+ // or the low half of the 2nd operand (the inserted vector).
+ for (unsigned i = 0; i != NumElts / 2; ++i) {
+ unsigned Idx = Imm ? i : (i + NumElts);
+ Idxs2.push_back(Builder.getInt32(Idx));
+ }
+ // The high half of the result is either the low half of the 2nd operand
+ // (the inserted vector) or the high half of the 1st operand.
+ for (unsigned i = NumElts / 2; i != NumElts; ++i) {
+ unsigned Idx = Imm ? (i + NumElts / 2) : i;
+ Idxs2.push_back(Builder.getInt32(Idx));
+ }
+ Rep = Builder.CreateShuffleVector(Op0, Rep, ConstantVector::get(Idxs2));
+ } else if (Name == "llvm.x86.avx.vextractf128.pd.256" ||
+ Name == "llvm.x86.avx.vextractf128.ps.256" ||
+ Name == "llvm.x86.avx.vextractf128.si.256" ||
+ Name == "llvm.x86.avx2.vextracti128") {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ VectorType *VecTy = cast<VectorType>(CI->getType());
+ unsigned NumElts = VecTy->getNumElements();
+
+ // Mask off the high bits of the immediate value; hardware ignores those.
+ Imm = Imm & 1;
+
+ // Get indexes for either the high half or low half of the input vector.
+ SmallVector<Constant*, 4> Idxs(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i) {
+ unsigned Idx = Imm ? (i + NumElts) : i;
+ Idxs[i] = Builder.getInt32(Idx);
+ }
+
+ Value *UndefV = UndefValue::get(Op0->getType());
+ Rep = Builder.CreateShuffleVector(Op0, UndefV, ConstantVector::get(Idxs));
+ } else {
+ bool PD128 = false, PD256 = false, PS128 = false, PS256 = false;
+ if (Name == "llvm.x86.avx.vpermil.pd.256")
+ PD256 = true;
+ else if (Name == "llvm.x86.avx.vpermil.pd")
+ PD128 = true;
+ else if (Name == "llvm.x86.avx.vpermil.ps.256")
+ PS256 = true;
+ else if (Name == "llvm.x86.avx.vpermil.ps")
+ PS128 = true;
+
+ if (PD256 || PD128 || PS256 || PS128) {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ SmallVector<Constant*, 8> Idxs;
+
+ if (PD128)
+ for (unsigned i = 0; i != 2; ++i)
+ Idxs.push_back(Builder.getInt32((Imm >> i) & 0x1));
+ else if (PD256)
+ for (unsigned l = 0; l != 4; l+=2)
+ for (unsigned i = 0; i != 2; ++i)
+ Idxs.push_back(Builder.getInt32(((Imm >> (l+i)) & 0x1) + l));
+ else if (PS128)
+ for (unsigned i = 0; i != 4; ++i)
+ Idxs.push_back(Builder.getInt32((Imm >> (2 * i)) & 0x3));
+ else if (PS256)
+ for (unsigned l = 0; l != 8; l+=4)
+ for (unsigned i = 0; i != 4; ++i)
+ Idxs.push_back(Builder.getInt32(((Imm >> (2 * i)) & 0x3) + l));
+ else
+ llvm_unreachable("Unexpected function");
+
+ Rep = Builder.CreateShuffleVector(Op0, Op0, ConstantVector::get(Idxs));
+ } else {
+ llvm_unreachable("Unknown function for CallInst upgrade.");
+ }
+ }
+
+ CI->replaceAllUsesWith(Rep);
+ CI->eraseFromParent();
+ return;
+ }
+
+ std::string Name = CI->getName();
+ if (!Name.empty())
+ CI->setName(Name + ".old");
+
+ switch (NewFn->getIntrinsicID()) {
+ default:
+ llvm_unreachable("Unknown function for CallInst upgrade.");
+
+ case Intrinsic::arm_neon_vld1:
+ case Intrinsic::arm_neon_vld2:
+ case Intrinsic::arm_neon_vld3:
+ case Intrinsic::arm_neon_vld4:
+ case Intrinsic::arm_neon_vld2lane:
+ case Intrinsic::arm_neon_vld3lane:
+ case Intrinsic::arm_neon_vld4lane:
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane: {
+ SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
+ CI->arg_operands().end());
+ CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
+ CI->eraseFromParent();
+ return;
+ }
+
+ case Intrinsic::ctlz:
+ case Intrinsic::cttz:
+ assert(CI->getNumArgOperands() == 1 &&
+ "Mismatch between function args and call args");
+ CI->replaceAllUsesWith(Builder.CreateCall(
+ NewFn, {CI->getArgOperand(0), Builder.getFalse()}, Name));
+ CI->eraseFromParent();
+ return;
+
+ case Intrinsic::objectsize:
+ CI->replaceAllUsesWith(Builder.CreateCall(
+ NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)}, Name));
+ CI->eraseFromParent();
+ return;
+
+ case Intrinsic::ctpop: {
+ CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {CI->getArgOperand(0)}));
+ CI->eraseFromParent();
+ return;
+ }
+
+ case Intrinsic::x86_xop_vfrcz_ss:
+ case Intrinsic::x86_xop_vfrcz_sd:
+ CI->replaceAllUsesWith(
+ Builder.CreateCall(NewFn, {CI->getArgOperand(1)}, Name));
+ CI->eraseFromParent();
+ return;
+
+ case Intrinsic::x86_sse41_ptestc:
+ case Intrinsic::x86_sse41_ptestz:
+ case Intrinsic::x86_sse41_ptestnzc: {
+ // The arguments for these intrinsics used to be v4f32, and changed
+ // to v2i64. This is purely a nop, since those are bitwise intrinsics.
+ // So, the only thing required is a bitcast for both arguments.
+ // First, check the arguments have the old type.
+ Value *Arg0 = CI->getArgOperand(0);
+ if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
+ return;
+
+ // Old intrinsic, add bitcasts
+ Value *Arg1 = CI->getArgOperand(1);
+
+ Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
+
+ Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
+ Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
+
+ CallInst *NewCall = Builder.CreateCall(NewFn, {BC0, BC1}, Name);
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ return;
+ }
+
+ case Intrinsic::x86_sse41_insertps:
+ case Intrinsic::x86_sse41_dppd:
+ case Intrinsic::x86_sse41_dpps:
+ case Intrinsic::x86_sse41_mpsadbw:
+ case Intrinsic::x86_avx_dp_ps_256:
+ case Intrinsic::x86_avx2_mpsadbw: {
+ // Need to truncate the last argument from i32 to i8 -- this argument models
+ // an inherently 8-bit immediate operand to these x86 instructions.
+ SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
+ CI->arg_operands().end());
+
+ // Replace the last argument with a trunc.
+ Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
+
+ CallInst *NewCall = Builder.CreateCall(NewFn, Args);
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ return;
+ }
+ }
+}
+
+// This tests each Function to determine if it needs upgrading. When we find
+// one we are interested in, we then upgrade all calls to reflect the new
+// function.
+void llvm::UpgradeCallsToIntrinsic(Function* F) {
+ assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
+
+ // Upgrade the function and check if it is a totally new function.
+ Function *NewFn;
+ if (UpgradeIntrinsicFunction(F, NewFn)) {
+ // Replace all uses of the old function with the new one if necessary.
+ for (Value::user_iterator UI = F->user_begin(), UE = F->user_end();
+ UI != UE;) {
+ if (CallInst *CI = dyn_cast<CallInst>(*UI++))
+ UpgradeIntrinsicCall(CI, NewFn);
+ }
+ // Remove old function, no longer used, from the module.
+ F->eraseFromParent();
+ }
+}
+
+void llvm::UpgradeInstWithTBAATag(Instruction *I) {
+ MDNode *MD = I->getMetadata(LLVMContext::MD_tbaa);
+ assert(MD && "UpgradeInstWithTBAATag should have a TBAA tag");
+ // Check if the tag uses struct-path aware TBAA format.
+ if (isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3)
+ return;
+
+ if (MD->getNumOperands() == 3) {
+ Metadata *Elts[] = {MD->getOperand(0), MD->getOperand(1)};
+ MDNode *ScalarType = MDNode::get(I->getContext(), Elts);
+ // Create an MDNode <ScalarType, ScalarType, offset 0, const>
+ Metadata *Elts2[] = {ScalarType, ScalarType,
+ ConstantAsMetadata::get(Constant::getNullValue(
+ Type::getInt64Ty(I->getContext()))),
+ MD->getOperand(2)};
+ I->setMetadata(LLVMContext::MD_tbaa, MDNode::get(I->getContext(), Elts2));
+ } else {
+ // Create an MDNode <MD, MD, offset 0>
+ Metadata *Elts[] = {MD, MD, ConstantAsMetadata::get(Constant::getNullValue(
+ Type::getInt64Ty(I->getContext())))};
+ I->setMetadata(LLVMContext::MD_tbaa, MDNode::get(I->getContext(), Elts));
+ }
+}
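+// For example, an old two-operand scalar tag !t = !{!"int", !parent} is
+// rewritten to the struct-path form !{!t, !t, i64 0} (base type, access type,
+// offset); for a three-operand tag the first two operands are wrapped into a
+// scalar type node and the constness flag becomes the fourth operand.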
+
+Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
+ Instruction *&Temp) {
+ if (Opc != Instruction::BitCast)
+ return nullptr;
+
+ Temp = nullptr;
+ Type *SrcTy = V->getType();
+ if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
+ SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
+ LLVMContext &Context = V->getContext();
+
+ // We have no information about target data layout, so we assume that
+ // the maximum pointer size is 64 bits.
+ Type *MidTy = Type::getInt64Ty(Context);
+ Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
+
+ return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
+ }
+
+ return nullptr;
+}
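+// Illustrative example: a bitcast between pointers in different address
+// spaces, e.g. i8 addrspace(1)* to i8*, is rewritten as the pair
+//   %tmp = ptrtoint i8 addrspace(1)* %v to i64
+//   %res = inttoptr i64 %tmp to i8*
+// with Temp receiving the intermediate ptrtoint instruction.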
+
+Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
+ if (Opc != Instruction::BitCast)
+ return nullptr;
+
+ Type *SrcTy = C->getType();
+ if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
+ SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
+ LLVMContext &Context = C->getContext();
+
+ // We have no information about target data layout, so we assume that
+ // the maximum pointer size is 64 bits.
+ Type *MidTy = Type::getInt64Ty(Context);
+
+ return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
+ DestTy);
+ }
+
+ return nullptr;
+}
+
+/// Check the debug info version number, if it is out-dated, drop the debug
+/// info. Return true if module is modified.
+bool llvm::UpgradeDebugInfo(Module &M) {
+ unsigned Version = getDebugMetadataVersionFromModule(M);
+ if (Version == DEBUG_METADATA_VERSION)
+ return false;
+
+ bool RetCode = StripDebugInfo(M);
+ if (RetCode) {
+ DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
+ M.getContext().diagnose(DiagVersion);
+ }
+ return RetCode;
+}
+
+void llvm::UpgradeMDStringConstant(std::string &String) {
+ const std::string OldPrefix = "llvm.vectorizer.";
+ if (String == "llvm.vectorizer.unroll") {
+ String = "llvm.loop.interleave.count";
+ } else if (String.find(OldPrefix) == 0) {
+ String.replace(0, OldPrefix.size(), "llvm.loop.vectorize.");
+ }
+}
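+// For example, "llvm.vectorizer.width" becomes "llvm.loop.vectorize.width",
+// while the special case "llvm.vectorizer.unroll" becomes
+// "llvm.loop.interleave.count".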
diff --git a/contrib/llvm/lib/IR/BasicBlock.cpp b/contrib/llvm/lib/IR/BasicBlock.cpp
new file mode 100644
index 0000000..f61276f
--- /dev/null
+++ b/contrib/llvm/lib/IR/BasicBlock.cpp
@@ -0,0 +1,433 @@
+//===-- BasicBlock.cpp - Implement BasicBlock related methods -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BasicBlock class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/BasicBlock.h"
+#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Type.h"
+#include <algorithm>
+
+using namespace llvm;
+
+ValueSymbolTable *BasicBlock::getValueSymbolTable() {
+ if (Function *F = getParent())
+ return &F->getValueSymbolTable();
+ return nullptr;
+}
+
+LLVMContext &BasicBlock::getContext() const {
+ return getType()->getContext();
+}
+
+// Explicit instantiation of SymbolTableListTraits since some of the methods
+// are not in the public header file...
+template class llvm::SymbolTableListTraits<Instruction>;
+
+BasicBlock::BasicBlock(LLVMContext &C, const Twine &Name, Function *NewParent,
+ BasicBlock *InsertBefore)
+ : Value(Type::getLabelTy(C), Value::BasicBlockVal), Parent(nullptr) {
+
+ if (NewParent)
+ insertInto(NewParent, InsertBefore);
+ else
+ assert(!InsertBefore &&
+ "Cannot insert block before another block with no function!");
+
+ setName(Name);
+}
+
+void BasicBlock::insertInto(Function *NewParent, BasicBlock *InsertBefore) {
+ assert(NewParent && "Expected a parent");
+ assert(!Parent && "Already has a parent");
+
+ if (InsertBefore)
+ NewParent->getBasicBlockList().insert(InsertBefore->getIterator(), this);
+ else
+ NewParent->getBasicBlockList().push_back(this);
+}
+
+BasicBlock::~BasicBlock() {
+ // If the address of the block is taken and it is being deleted (e.g. because
+ // it is dead), this means that there is either a dangling constant expr
+ // hanging off the block, or an undefined use of the block (source code
+ // expecting the address of a label to keep the block alive even though there
+ // is no indirect branch). Handle these cases by zapping the BlockAddress
+ // nodes. There are no other possible uses at this point.
+ if (hasAddressTaken()) {
+ assert(!use_empty() && "There should be at least one blockaddress!");
+ Constant *Replacement =
+ ConstantInt::get(llvm::Type::getInt32Ty(getContext()), 1);
+ while (!use_empty()) {
+ BlockAddress *BA = cast<BlockAddress>(user_back());
+ BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
+ BA->getType()));
+ BA->destroyConstant();
+ }
+ }
+
+ assert(getParent() == nullptr && "BasicBlock still linked into the program!");
+ dropAllReferences();
+ InstList.clear();
+}
+
+void BasicBlock::setParent(Function *parent) {
+ // Set Parent=parent, updating instruction symtab entries as appropriate.
+ InstList.setSymTabObject(&Parent, parent);
+}
+
+void BasicBlock::removeFromParent() {
+ getParent()->getBasicBlockList().remove(getIterator());
+}
+
+iplist<BasicBlock>::iterator BasicBlock::eraseFromParent() {
+ return getParent()->getBasicBlockList().erase(getIterator());
+}
+
+/// Unlink this basic block from its current function and
+/// insert it into the function that MovePos lives in, right before MovePos.
+void BasicBlock::moveBefore(BasicBlock *MovePos) {
+ MovePos->getParent()->getBasicBlockList().splice(
+ MovePos->getIterator(), getParent()->getBasicBlockList(), getIterator());
+}
+
+/// Unlink this basic block from its current function and
+/// insert it into the function that MovePos lives in, right after MovePos.
+void BasicBlock::moveAfter(BasicBlock *MovePos) {
+ MovePos->getParent()->getBasicBlockList().splice(
+ ++MovePos->getIterator(), getParent()->getBasicBlockList(),
+ getIterator());
+}
+
+const Module *BasicBlock::getModule() const {
+ return getParent()->getParent();
+}
+
+Module *BasicBlock::getModule() {
+ return getParent()->getParent();
+}
+
+TerminatorInst *BasicBlock::getTerminator() {
+ if (InstList.empty()) return nullptr;
+ return dyn_cast<TerminatorInst>(&InstList.back());
+}
+
+const TerminatorInst *BasicBlock::getTerminator() const {
+ if (InstList.empty()) return nullptr;
+ return dyn_cast<TerminatorInst>(&InstList.back());
+}
+
+CallInst *BasicBlock::getTerminatingMustTailCall() {
+ if (InstList.empty())
+ return nullptr;
+ ReturnInst *RI = dyn_cast<ReturnInst>(&InstList.back());
+ if (!RI || RI == &InstList.front())
+ return nullptr;
+
+ Instruction *Prev = RI->getPrevNode();
+ if (!Prev)
+ return nullptr;
+
+ if (Value *RV = RI->getReturnValue()) {
+ if (RV != Prev)
+ return nullptr;
+
+ // Look through the optional bitcast.
+ if (auto *BI = dyn_cast<BitCastInst>(Prev)) {
+ RV = BI->getOperand(0);
+ Prev = BI->getPrevNode();
+ if (!Prev || RV != Prev)
+ return nullptr;
+ }
+ }
+
+ if (auto *CI = dyn_cast<CallInst>(Prev)) {
+ if (CI->isMustTailCall())
+ return CI;
+ }
+ return nullptr;
+}
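+// Illustrative example: this returns the call for
+//   %v = musttail call i8* @f()
+//   %w = bitcast i8* %v to i32*
+//   ret i32* %w
+// and returns null if anything other than an optional bitcast sits between
+// the musttail call and the return of its value.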
+
+Instruction* BasicBlock::getFirstNonPHI() {
+ for (Instruction &I : *this)
+ if (!isa<PHINode>(I))
+ return &I;
+ return nullptr;
+}
+
+Instruction* BasicBlock::getFirstNonPHIOrDbg() {
+ for (Instruction &I : *this)
+ if (!isa<PHINode>(I) && !isa<DbgInfoIntrinsic>(I))
+ return &I;
+ return nullptr;
+}
+
+Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() {
+ for (Instruction &I : *this) {
+ if (isa<PHINode>(I) || isa<DbgInfoIntrinsic>(I))
+ continue;
+
+ if (auto *II = dyn_cast<IntrinsicInst>(&I))
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end)
+ continue;
+
+ return &I;
+ }
+ return nullptr;
+}
+
+BasicBlock::iterator BasicBlock::getFirstInsertionPt() {
+ Instruction *FirstNonPHI = getFirstNonPHI();
+ if (!FirstNonPHI)
+ return end();
+
+ iterator InsertPt = FirstNonPHI->getIterator();
+ if (InsertPt->isEHPad()) ++InsertPt;
+ return InsertPt;
+}
+
+void BasicBlock::dropAllReferences() {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ I->dropAllReferences();
+}
+
+/// If this basic block has a single predecessor block,
+/// return the block, otherwise return a null pointer.
+BasicBlock *BasicBlock::getSinglePredecessor() {
+ pred_iterator PI = pred_begin(this), E = pred_end(this);
+ if (PI == E) return nullptr; // No preds.
+ BasicBlock *ThePred = *PI;
+ ++PI;
+ return (PI == E) ? ThePred : nullptr /*multiple preds*/;
+}
+
+/// If this basic block has a unique predecessor block,
+/// return the block, otherwise return a null pointer.
+/// Note that unique predecessor doesn't mean single edge, there can be
+/// multiple edges from the unique predecessor to this block (for example
+/// a switch statement with multiple cases having the same destination).
+BasicBlock *BasicBlock::getUniquePredecessor() {
+ pred_iterator PI = pred_begin(this), E = pred_end(this);
+ if (PI == E) return nullptr; // No preds.
+ BasicBlock *PredBB = *PI;
+ ++PI;
+ for (;PI != E; ++PI) {
+ if (*PI != PredBB)
+ return nullptr;
+ // The same predecessor appears multiple times in the predecessor list.
+ // This is OK.
+ }
+ return PredBB;
+}
+
+BasicBlock *BasicBlock::getSingleSuccessor() {
+ succ_iterator SI = succ_begin(this), E = succ_end(this);
+ if (SI == E) return nullptr; // no successors
+ BasicBlock *TheSucc = *SI;
+ ++SI;
+ return (SI == E) ? TheSucc : nullptr /* multiple successors */;
+}
+
+BasicBlock *BasicBlock::getUniqueSuccessor() {
+ succ_iterator SI = succ_begin(this), E = succ_end(this);
+ if (SI == E) return nullptr; // No successors
+ BasicBlock *SuccBB = *SI;
+ ++SI;
+ for (;SI != E; ++SI) {
+ if (*SI != SuccBB)
+ return nullptr;
+ // The same successor appears multiple times in the successor list.
+ // This is OK.
+ }
+ return SuccBB;
+}
+
+/// This method is used to notify a BasicBlock that the
+/// specified Predecessor of the block is no longer able to reach it. This is
+/// not used to update the predecessor list itself, but to update the
+/// PHI nodes that reside in the block. Note that this should be
+/// called while the predecessor still refers to this block.
+///
+void BasicBlock::removePredecessor(BasicBlock *Pred,
+ bool DontDeleteUselessPHIs) {
+ assert((hasNUsesOrMore(16)||// Reduce cost of this assertion for complex CFGs.
+ find(pred_begin(this), pred_end(this), Pred) != pred_end(this)) &&
+ "removePredecessor: BB is not a predecessor!");
+
+ if (InstList.empty()) return;
+ PHINode *APN = dyn_cast<PHINode>(&front());
+ if (!APN) return; // Quick exit.
+
+ // If there are exactly two predecessors, then we want to nuke the PHI nodes
+ // altogether. However, we cannot do this in the following case:
+ //
+ // Loop:
+ // %x = phi [X, Loop]
+ // %x2 = add %x, 1 ;; This would become %x2 = add %x2, 1
+ // br Loop ;; %x2 does not dominate all uses
+ //
+ // This is because the PHI node input is actually taken from the predecessor
+ // basic block. The only case this can happen is with a self loop, so we
+ // check for this case explicitly now.
+ //
+ unsigned max_idx = APN->getNumIncomingValues();
+ assert(max_idx != 0 && "PHI Node in block with 0 predecessors!?!?!");
+ if (max_idx == 2) {
+ BasicBlock *Other = APN->getIncomingBlock(APN->getIncomingBlock(0) == Pred);
+
+ // Disable PHI elimination!
+ if (this == Other) max_idx = 3;
+ }
+
+ // <= Two predecessors BEFORE I remove one?
+ if (max_idx <= 2 && !DontDeleteUselessPHIs) {
+ // Yup, loop through and nuke the PHI nodes
+ while (PHINode *PN = dyn_cast<PHINode>(&front())) {
+ // Remove the predecessor first.
+ PN->removeIncomingValue(Pred, !DontDeleteUselessPHIs);
+
+ // If the PHI _HAD_ two uses, replace PHI node with its now *single* value
+ if (max_idx == 2) {
+ if (PN->getIncomingValue(0) != PN)
+ PN->replaceAllUsesWith(PN->getIncomingValue(0));
+ else
+ // We are left with an infinite loop with no entries: kill the PHI.
+ PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
+ getInstList().pop_front(); // Remove the PHI node
+ }
+
+ // If the PHI node already only had one entry, it got deleted by
+ // removeIncomingValue.
+ }
+ } else {
+ // Okay, now we know that we need to remove predecessor #pred_idx from all
+ // PHI nodes. Iterate over each PHI node fixing them up
+ PHINode *PN;
+ for (iterator II = begin(); (PN = dyn_cast<PHINode>(II)); ) {
+ ++II;
+ PN->removeIncomingValue(Pred, false);
+ // If all incoming values to the Phi are the same, we can replace the Phi
+ // with that value.
+ Value* PNV = nullptr;
+ if (!DontDeleteUselessPHIs && (PNV = PN->hasConstantValue()))
+ if (PNV != PN) {
+ PN->replaceAllUsesWith(PNV);
+ PN->eraseFromParent();
+ }
+ }
+ }
+}
+
+bool BasicBlock::canSplitPredecessors() const {
+ const Instruction *FirstNonPHI = getFirstNonPHI();
+ if (isa<LandingPadInst>(FirstNonPHI))
+ return true;
+ // This is perhaps a little conservative because constructs like
+ // CleanupBlockInst are pretty easy to split. However, SplitBlockPredecessors
+ // cannot handle such things just yet.
+ if (FirstNonPHI->isEHPad())
+ return false;
+ return true;
+}
+
+/// This splits a basic block into two at the specified
+/// instruction. Note that all instructions BEFORE the specified iterator stay
+/// as part of the original basic block, an unconditional branch is added to
+/// the new BB, and the rest of the instructions in the BB are moved to the new
+/// BB, including the old terminator. This invalidates the iterator.
+///
+/// Note that this only works on well formed basic blocks (must have a
+/// terminator), and 'I' must not be the end of instruction list (which would
+/// cause a degenerate basic block to be formed, having a terminator inside of
+/// the basic block).
+///
+BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
+ assert(getTerminator() && "Can't use splitBasicBlock on degenerate BB!");
+ assert(I != InstList.end() &&
+ "Trying to get me to create degenerate basic block!");
+
+ BasicBlock *InsertBefore = std::next(Function::iterator(this))
+ .getNodePtrUnchecked();
+ BasicBlock *New = BasicBlock::Create(getContext(), BBName,
+ getParent(), InsertBefore);
+
+ // Save DebugLoc of split point before invalidating iterator.
+ DebugLoc Loc = I->getDebugLoc();
+ // Move all of the specified instructions from the original basic block into
+ // the new basic block.
+ New->getInstList().splice(New->end(), this->getInstList(), I, end());
+
+ // Add a branch instruction to the newly formed basic block.
+ BranchInst *BI = BranchInst::Create(New, this);
+ BI->setDebugLoc(Loc);
+
+ // Now we must loop through all of the successors of the New block (which
+ // _were_ the successors of the 'this' block), and update any PHI nodes in
+ // successors. If there were PHI nodes in the successors, then they need to
+ // know that incoming branches will be from New, not from Old.
+ //
+ for (succ_iterator I = succ_begin(New), E = succ_end(New); I != E; ++I) {
+ // Loop over any phi nodes in the basic block, updating the BB field of
+ // incoming values...
+ BasicBlock *Successor = *I;
+ PHINode *PN;
+ for (BasicBlock::iterator II = Successor->begin();
+ (PN = dyn_cast<PHINode>(II)); ++II) {
+ int IDX = PN->getBasicBlockIndex(this);
+ while (IDX != -1) {
+ PN->setIncomingBlock((unsigned)IDX, New);
+ IDX = PN->getBasicBlockIndex(this);
+ }
+ }
+ }
+ return New;
+}
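+// Illustrative example: splitting
+//   bb:  %a = add i32 %x, 1
+//        %b = mul i32 %a, 2
+//        br label %exit
+// at %b yields a block ending in an unconditional branch to the new block
+// (named by BBName, e.g. %bb.split) containing %b and the old terminator,
+// with PHI nodes in %exit retargeted to take their values from the new block.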
+
+void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *New) {
+ TerminatorInst *TI = getTerminator();
+ if (!TI)
+ // Cope with being called on a BasicBlock that doesn't have a terminator
+ // yet. Clang's CodeGenFunction::EmitReturnBlock() likes to do this.
+ return;
+ for (BasicBlock *Succ : TI->successors()) {
+ // N.B. Succ might not be a complete BasicBlock, so don't assume
+ // that it ends with a non-phi instruction.
+ for (iterator II = Succ->begin(), IE = Succ->end(); II != IE; ++II) {
+ PHINode *PN = dyn_cast<PHINode>(II);
+ if (!PN)
+ break;
+ int i;
+ while ((i = PN->getBasicBlockIndex(this)) >= 0)
+ PN->setIncomingBlock(i, New);
+ }
+ }
+}
+
+/// Return true if this basic block is a landing pad. I.e., it's
+/// the destination of the 'unwind' edge of an invoke instruction.
+bool BasicBlock::isLandingPad() const {
+ return isa<LandingPadInst>(getFirstNonPHI());
+}
+
+/// Return the landingpad instruction associated with the landing pad.
+LandingPadInst *BasicBlock::getLandingPadInst() {
+ return dyn_cast<LandingPadInst>(getFirstNonPHI());
+}
+const LandingPadInst *BasicBlock::getLandingPadInst() const {
+ return dyn_cast<LandingPadInst>(getFirstNonPHI());
+}
diff --git a/contrib/llvm/lib/IR/Comdat.cpp b/contrib/llvm/lib/IR/Comdat.cpp
new file mode 100644
index 0000000..80715ff
--- /dev/null
+++ b/contrib/llvm/lib/IR/Comdat.cpp
@@ -0,0 +1,25 @@
+//===-- Comdat.cpp - Implement Metadata classes --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Comdat class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Comdat.h"
+#include "llvm/ADT/StringMap.h"
+using namespace llvm;
+
+Comdat::Comdat(SelectionKind SK, StringMapEntry<Comdat> *Name)
+ : Name(Name), SK(SK) {}
+
+Comdat::Comdat(Comdat &&C) : Name(C.Name), SK(C.SK) {}
+
+Comdat::Comdat() : Name(nullptr), SK(Comdat::Any) {}
+
+StringRef Comdat::getName() const { return Name->first(); }
diff --git a/contrib/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm/lib/IR/ConstantFold.cpp
new file mode 100644
index 0000000..ce3fe03
--- /dev/null
+++ b/contrib/llvm/lib/IR/ConstantFold.cpp
@@ -0,0 +1,2266 @@
+//===- ConstantFold.cpp - LLVM constant folder ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements folding of constants for LLVM. This implements the
+// (internal) ConstantFold.h interface, which is used by the
+// ConstantExpr::get* methods to automatically fold constants when possible.
+//
+// The current constant folding implementation is implemented in two pieces: the
+// pieces that don't need DataLayout, and the pieces that do. This is to avoid
+// a dependence in IR on Target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ConstantFold.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+#include <limits>
+using namespace llvm;
+using namespace llvm::PatternMatch;
+
+//===----------------------------------------------------------------------===//
+// ConstantFold*Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+/// BitCastConstantVector - Convert the specified vector Constant node to the
+/// specified vector type. At this point, we know that the elements of the
+/// input vector constant are all simple integer or FP values.
+static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
+
+ if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy);
+ if (CV->isNullValue()) return Constant::getNullValue(DstTy);
+
+ // If this cast changes element count then we can't handle it here:
+ // doing so requires endianness information. This should be handled by
+ // Analysis/ConstantFolding.cpp
+ unsigned NumElts = DstTy->getNumElements();
+ if (NumElts != CV->getType()->getVectorNumElements())
+ return nullptr;
+
+ Type *DstEltTy = DstTy->getElementType();
+
+ SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(CV->getContext(), 32);
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C =
+ ConstantExpr::getExtractElement(CV, ConstantInt::get(Ty, i));
+ C = ConstantExpr::getBitCast(C, DstEltTy);
+ Result.push_back(C);
+ }
+
+ return ConstantVector::get(Result);
+}
+
+/// This function determines which opcode to use to fold two constant cast
+/// expressions together. It uses CastInst::isEliminableCastPair to determine
+/// the opcode. Consequently it's just a wrapper around that function.
+/// @brief Determine if it is valid to fold a cast of a cast
+static unsigned
+foldConstantCastPair(
+ unsigned opc, ///< opcode of the second cast constant expression
+ ConstantExpr *Op, ///< the first cast constant expression
+ Type *DstTy ///< destination type of the first cast
+) {
+ assert(Op && Op->isCast() && "Can't fold cast of cast without a cast!");
+ assert(DstTy && DstTy->isFirstClassType() && "Invalid cast destination type");
+ assert(CastInst::isCast(opc) && "Invalid cast opcode");
+
+ // The types and opcodes for the two Cast constant expressions
+ Type *SrcTy = Op->getOperand(0)->getType();
+ Type *MidTy = Op->getType();
+ Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
+ Instruction::CastOps secondOp = Instruction::CastOps(opc);
+
+ // Assume that pointers are never more than 64 bits wide, and only use this
+ // for the middle type. Otherwise we could end up folding away illegal
+ // bitcasts between address spaces with different sizes.
+ IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext());
+
+ // Let CastInst::isEliminableCastPair do the heavy lifting.
+ return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy,
+ nullptr, FakeIntPtrTy, nullptr);
+}
+
+static Constant *FoldBitCast(Constant *V, Type *DestTy) {
+ Type *SrcTy = V->getType();
+ if (SrcTy == DestTy)
+ return V; // no-op cast
+
+ // Check to see if we are casting a pointer to an aggregate to a pointer to
+ // the first element. If so, return the appropriate GEP instruction.
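+ // For example, a bitcast of [4 x i32]* to i32* folds to
+ // getelementptr inbounds ([4 x i32], [4 x i32]* V, i32 0, i32 0).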
+ if (PointerType *PTy = dyn_cast<PointerType>(V->getType()))
+ if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
+ if (PTy->getAddressSpace() == DPTy->getAddressSpace()
+ && PTy->getElementType()->isSized()) {
+ SmallVector<Value*, 8> IdxList;
+ Value *Zero =
+ Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
+ IdxList.push_back(Zero);
+ Type *ElTy = PTy->getElementType();
+ while (ElTy != DPTy->getElementType()) {
+ if (StructType *STy = dyn_cast<StructType>(ElTy)) {
+ if (STy->getNumElements() == 0) break;
+ ElTy = STy->getElementType(0);
+ IdxList.push_back(Zero);
+ } else if (SequentialType *STy =
+ dyn_cast<SequentialType>(ElTy)) {
+ if (ElTy->isPointerTy()) break; // Can't index into pointers!
+ ElTy = STy->getElementType();
+ IdxList.push_back(Zero);
+ } else {
+ break;
+ }
+ }
+
+ if (ElTy == DPTy->getElementType())
+ // This GEP is inbounds because all indices are zero.
+ return ConstantExpr::getInBoundsGetElementPtr(PTy->getElementType(),
+ V, IdxList);
+ }
+
+ // Handle casts from one vector constant to another. We know that the src
+ // and dest types have the same size (otherwise it's an illegal cast).
+ if (VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) {
+ if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
+ assert(DestPTy->getBitWidth() == SrcTy->getBitWidth() &&
+ "Not cast between same sized vectors!");
+ SrcTy = nullptr;
+ // First, check for null. Undef is already handled.
+ if (isa<ConstantAggregateZero>(V))
+ return Constant::getNullValue(DestTy);
+
+ // Handle ConstantVector and ConstantAggregateVector.
+ return BitCastConstantVector(V, DestPTy);
+ }
+
+ // Canonicalize scalar-to-vector bitcasts into vector-to-vector bitcasts
+ // This allows for other simplifications (although some of them
+ // can only be handled by Analysis/ConstantFolding.cpp).
+ if (isa<ConstantInt>(V) || isa<ConstantFP>(V))
+ return ConstantExpr::getBitCast(ConstantVector::get(V), DestPTy);
+ }
+
+ // Finally, implement bitcast folding now. The code below doesn't handle
+ // every bitcast correctly.
+ if (isa<ConstantPointerNull>(V)) // ptr->ptr cast.
+ return ConstantPointerNull::get(cast<PointerType>(DestTy));
+
+ // Handle integral constant input.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ if (DestTy->isIntegerTy())
+ // Integral -> Integral. This is a no-op because the bit widths must
+ // be the same. Consequently, we just fold to V.
+ return V;
+
+ // See note below regarding the PPC_FP128 restriction.
+ if (DestTy->isFloatingPointTy() && !DestTy->isPPC_FP128Ty())
+ return ConstantFP::get(DestTy->getContext(),
+ APFloat(DestTy->getFltSemantics(),
+ CI->getValue()));
+
+    // Otherwise, we can't fold this (e.g. a bitcast to x86_mmx or ppc_fp128).
+ return nullptr;
+ }
+
+ // Handle ConstantFP input: FP -> Integral.
+ if (ConstantFP *FP = dyn_cast<ConstantFP>(V)) {
+ // PPC_FP128 is really the sum of two consecutive doubles, where the first
+ // double is always stored first in memory, regardless of the target
+ // endianness. The memory layout of i128, however, depends on the target
+ // endianness, and so we can't fold this without target endianness
+ // information. This should instead be handled by
+ // Analysis/ConstantFolding.cpp
+ if (FP->getType()->isPPC_FP128Ty())
+ return nullptr;
+
+ return ConstantInt::get(FP->getContext(),
+ FP->getValueAPF().bitcastToAPInt());
+ }
+
+ return nullptr;
+}
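+
+// For illustration, FoldBitCast performs folds such as the following (the
+// global @g and the values are arbitrary examples):
+//   bitcast (i32 1065353216 to float) -> float 1.0 (bit pattern 0x3F800000)
+//   bitcast (float 1.0 to i32)        -> i32 1065353216
+//   bitcast ({ i32 }* @g to i32*)     -> getelementptr inbounds
+//                                        ({ i32 }, { i32 }* @g, i32 0, i32 0)
+// PPC_FP128 bitcasts are deliberately left to Analysis/ConstantFolding.cpp,
+// which has the target information needed to lay the bits out correctly.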
+
+
+/// ExtractConstantBytes - C is an integer constant which only has a subset of
+/// its bytes used. The bytes used are indicated by ByteStart (which is the
+/// first byte used, counting from the least significant byte) and ByteSize,
+/// which is the number of bytes used.
+///
+/// This function analyzes the specified constant to see if the specified byte
+/// range can be returned as a simplified constant. If so, the constant is
+/// returned, otherwise null is returned.
+///
+static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
+ unsigned ByteSize) {
+ assert(C->getType()->isIntegerTy() &&
+ (cast<IntegerType>(C->getType())->getBitWidth() & 7) == 0 &&
+ "Non-byte sized integer input");
+ unsigned CSize = cast<IntegerType>(C->getType())->getBitWidth()/8;
+ assert(ByteSize && "Must be accessing some piece");
+ assert(ByteStart+ByteSize <= CSize && "Extracting invalid piece from input");
+ assert(ByteSize != CSize && "Should not extract everything");
+
+ // Constant Integers are simple.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+ APInt V = CI->getValue();
+ if (ByteStart)
+ V = V.lshr(ByteStart*8);
+ V = V.trunc(ByteSize*8);
+ return ConstantInt::get(CI->getContext(), V);
+ }
+
+  // If the input is a constant expr, we might be able to recursively simplify.
+ // If not, we definitely can't do anything.
+ ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
+ if (!CE) return nullptr;
+
+ switch (CE->getOpcode()) {
+ default: return nullptr;
+ case Instruction::Or: {
+ Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
+ if (!RHS)
+ return nullptr;
+
+ // X | -1 -> -1.
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
+ if (RHSC->isAllOnesValue())
+ return RHSC;
+
+ Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
+ if (!LHS)
+ return nullptr;
+ return ConstantExpr::getOr(LHS, RHS);
+ }
+ case Instruction::And: {
+ Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
+ if (!RHS)
+ return nullptr;
+
+ // X & 0 -> 0.
+ if (RHS->isNullValue())
+ return RHS;
+
+ Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
+ if (!LHS)
+ return nullptr;
+ return ConstantExpr::getAnd(LHS, RHS);
+ }
+ case Instruction::LShr: {
+ ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
+ if (!Amt)
+ return nullptr;
+ unsigned ShAmt = Amt->getZExtValue();
+ // Cannot analyze non-byte shifts.
+ if ((ShAmt & 7) != 0)
+ return nullptr;
+ ShAmt >>= 3;
+
+ // If the extract is known to be all zeros, return zero.
+ if (ByteStart >= CSize-ShAmt)
+ return Constant::getNullValue(IntegerType::get(CE->getContext(),
+ ByteSize*8));
+ // If the extract is known to be fully in the input, extract it.
+ if (ByteStart+ByteSize+ShAmt <= CSize)
+ return ExtractConstantBytes(CE->getOperand(0), ByteStart+ShAmt, ByteSize);
+
+ // TODO: Handle the 'partially zero' case.
+ return nullptr;
+ }
+
+ case Instruction::Shl: {
+ ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
+ if (!Amt)
+ return nullptr;
+ unsigned ShAmt = Amt->getZExtValue();
+ // Cannot analyze non-byte shifts.
+ if ((ShAmt & 7) != 0)
+ return nullptr;
+ ShAmt >>= 3;
+
+ // If the extract is known to be all zeros, return zero.
+ if (ByteStart+ByteSize <= ShAmt)
+ return Constant::getNullValue(IntegerType::get(CE->getContext(),
+ ByteSize*8));
+ // If the extract is known to be fully in the input, extract it.
+ if (ByteStart >= ShAmt)
+ return ExtractConstantBytes(CE->getOperand(0), ByteStart-ShAmt, ByteSize);
+
+ // TODO: Handle the 'partially zero' case.
+ return nullptr;
+ }
+
+ case Instruction::ZExt: {
+ unsigned SrcBitSize =
+ cast<IntegerType>(CE->getOperand(0)->getType())->getBitWidth();
+
+ // If extracting something that is completely zero, return 0.
+ if (ByteStart*8 >= SrcBitSize)
+ return Constant::getNullValue(IntegerType::get(CE->getContext(),
+ ByteSize*8));
+
+ // If exactly extracting the input, return it.
+ if (ByteStart == 0 && ByteSize*8 == SrcBitSize)
+ return CE->getOperand(0);
+
+    // If extracting something completely contained in the input, and the
+    // input is a multiple of 8 bits, recurse.
+ if ((SrcBitSize&7) == 0 && (ByteStart+ByteSize)*8 <= SrcBitSize)
+ return ExtractConstantBytes(CE->getOperand(0), ByteStart, ByteSize);
+
+    // Otherwise, if extracting a subset of the input, which is not a multiple
+    // of 8 bits, do a shift and trunc to get the bits.
+ if ((ByteStart+ByteSize)*8 < SrcBitSize) {
+ assert((SrcBitSize&7) && "Shouldn't get byte sized case here");
+ Constant *Res = CE->getOperand(0);
+ if (ByteStart)
+ Res = ConstantExpr::getLShr(Res,
+ ConstantInt::get(Res->getType(), ByteStart*8));
+ return ConstantExpr::getTrunc(Res, IntegerType::get(C->getContext(),
+ ByteSize*8));
+ }
+
+ // TODO: Handle the 'partially zero' case.
+ return nullptr;
+ }
+ }
+}
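+
+// For illustration: extracting bytes [1, 3) of the arbitrary constant
+// i32 0x11223344 (ByteStart = 1, ByteSize = 2) shifts right by 8 bits and
+// truncates to 16 bits, yielding i16 0x2233.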
+
+/// getFoldedSizeOf - Return a ConstantExpr with type DestTy for sizeof
+/// on Ty, with any known factors factored out. If Folded is false,
+/// return null if no factoring was possible, to avoid endlessly
+/// bouncing an unfoldable expression back into the top-level folder.
+///
+static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy,
+ bool Folded) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
+ Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
+ return ConstantExpr::getNUWMul(E, N);
+ }
+
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ if (!STy->isPacked()) {
+ unsigned NumElems = STy->getNumElements();
+ // An empty struct has size zero.
+ if (NumElems == 0)
+ return ConstantExpr::getNullValue(DestTy);
+ // Check for a struct with all members having the same size.
+ Constant *MemberSize =
+ getFoldedSizeOf(STy->getElementType(0), DestTy, true);
+ bool AllSame = true;
+ for (unsigned i = 1; i != NumElems; ++i)
+ if (MemberSize !=
+ getFoldedSizeOf(STy->getElementType(i), DestTy, true)) {
+ AllSame = false;
+ break;
+ }
+ if (AllSame) {
+ Constant *N = ConstantInt::get(DestTy, NumElems);
+ return ConstantExpr::getNUWMul(MemberSize, N);
+ }
+ }
+
+ // Pointer size doesn't depend on the pointee type, so canonicalize them
+ // to an arbitrary pointee.
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+ if (!PTy->getElementType()->isIntegerTy(1))
+ return
+ getFoldedSizeOf(PointerType::get(IntegerType::get(PTy->getContext(), 1),
+ PTy->getAddressSpace()),
+ DestTy, true);
+
+ // If there's no interesting folding happening, bail so that we don't create
+ // a constant that looks like it needs folding but really doesn't.
+ if (!Folded)
+ return nullptr;
+
+ // Base case: Get a regular sizeof expression.
+ Constant *C = ConstantExpr::getSizeOf(Ty);
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ DestTy, false),
+ C, DestTy);
+ return C;
+}
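+
+// For illustration: getFoldedSizeOf([10 x i32], i64, false) yields
+// mul nuw (sizeof(i32), 10) rather than an opaque sizeof([10 x i32]),
+// exposing the element count as a factor to later folds.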
+
+/// getFoldedAlignOf - Return a ConstantExpr with type DestTy for alignof
+/// on Ty, with any known factors factored out. If Folded is false,
+/// return null if no factoring was possible, to avoid endlessly
+/// bouncing an unfoldable expression back into the top-level folder.
+///
+static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy,
+ bool Folded) {
+ // The alignment of an array is equal to the alignment of the
+ // array element. Note that this is not always true for vectors.
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Constant *C = ConstantExpr::getAlignOf(ATy->getElementType());
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ DestTy,
+ false),
+ C, DestTy);
+ return C;
+ }
+
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
+ // Packed structs always have an alignment of 1.
+ if (STy->isPacked())
+ return ConstantInt::get(DestTy, 1);
+
+ // Otherwise, struct alignment is the maximum alignment of any member.
+ // Without target data, we can't compare much, but we can check to see
+ // if all the members have the same alignment.
+ unsigned NumElems = STy->getNumElements();
+ // An empty struct has minimal alignment.
+ if (NumElems == 0)
+ return ConstantInt::get(DestTy, 1);
+ // Check for a struct with all members having the same alignment.
+ Constant *MemberAlign =
+ getFoldedAlignOf(STy->getElementType(0), DestTy, true);
+ bool AllSame = true;
+ for (unsigned i = 1; i != NumElems; ++i)
+      if (MemberAlign !=
+          getFoldedAlignOf(STy->getElementType(i), DestTy, true)) {
+ AllSame = false;
+ break;
+ }
+ if (AllSame)
+ return MemberAlign;
+ }
+
+ // Pointer alignment doesn't depend on the pointee type, so canonicalize them
+ // to an arbitrary pointee.
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+ if (!PTy->getElementType()->isIntegerTy(1))
+ return
+ getFoldedAlignOf(PointerType::get(IntegerType::get(PTy->getContext(),
+ 1),
+ PTy->getAddressSpace()),
+ DestTy, true);
+
+ // If there's no interesting folding happening, bail so that we don't create
+ // a constant that looks like it needs folding but really doesn't.
+ if (!Folded)
+ return nullptr;
+
+ // Base case: Get a regular alignof expression.
+ Constant *C = ConstantExpr::getAlignOf(Ty);
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ DestTy, false),
+ C, DestTy);
+ return C;
+}
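+
+// For illustration: getFoldedAlignOf([8 x double], i64, false) folds to
+// alignof(double) cast to i64, and any packed struct folds to 1.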
+
+/// getFoldedOffsetOf - Return a ConstantExpr with type DestTy for offsetof
+/// on Ty and FieldNo, with any known factors factored out. If Folded is false,
+/// return null if no factoring was possible, to avoid endlessly
+/// bouncing an unfoldable expression back into the top-level folder.
+///
+static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo,
+ Type *DestTy,
+ bool Folded) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
+ DestTy, false),
+ FieldNo, DestTy);
+ Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
+ return ConstantExpr::getNUWMul(E, N);
+ }
+
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ if (!STy->isPacked()) {
+ unsigned NumElems = STy->getNumElements();
+ // An empty struct has no members.
+ if (NumElems == 0)
+ return nullptr;
+ // Check for a struct with all members having the same size.
+ Constant *MemberSize =
+ getFoldedSizeOf(STy->getElementType(0), DestTy, true);
+ bool AllSame = true;
+ for (unsigned i = 1; i != NumElems; ++i)
+ if (MemberSize !=
+ getFoldedSizeOf(STy->getElementType(i), DestTy, true)) {
+ AllSame = false;
+ break;
+ }
+ if (AllSame) {
+ Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo,
+ false,
+ DestTy,
+ false),
+ FieldNo, DestTy);
+ return ConstantExpr::getNUWMul(MemberSize, N);
+ }
+ }
+
+ // If there's no interesting folding happening, bail so that we don't create
+ // a constant that looks like it needs folding but really doesn't.
+ if (!Folded)
+ return nullptr;
+
+ // Base case: Get a regular offsetof expression.
+ Constant *C = ConstantExpr::getOffsetOf(Ty, FieldNo);
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ DestTy, false),
+ C, DestTy);
+ return C;
+}
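+
+// For illustration: for an array type, getFoldedOffsetOf([4 x i32], i, i64,
+// false) folds to mul nuw (sizeof(i32), i), since element i lies i element
+// sizes from the base.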
+
+Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
+ Type *DestTy) {
+ if (isa<UndefValue>(V)) {
+ // zext(undef) = 0, because the top bits will be zero.
+ // sext(undef) = 0, because the top bits will all be the same.
+ // [us]itofp(undef) = 0, because the result value is bounded.
+ if (opc == Instruction::ZExt || opc == Instruction::SExt ||
+ opc == Instruction::UIToFP || opc == Instruction::SIToFP)
+ return Constant::getNullValue(DestTy);
+ return UndefValue::get(DestTy);
+ }
+
+ if (V->isNullValue() && !DestTy->isX86_MMXTy())
+ return Constant::getNullValue(DestTy);
+
+  // If the cast operand is a constant expression, there are a few things we
+  // can do to try to simplify it.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ if (CE->isCast()) {
+ // Try hard to fold cast of cast because they are often eliminable.
+ if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy))
+ return ConstantExpr::getCast(newOpc, CE->getOperand(0), DestTy);
+ } else if (CE->getOpcode() == Instruction::GetElementPtr &&
+               // Do not fold addrspacecast (gep 0, .., 0); doing so could
+               // leave the addrspacecast in a non-canonical form.
+ opc != Instruction::AddrSpaceCast) {
+ // If all of the indexes in the GEP are null values, there is no pointer
+ // adjustment going on. We might as well cast the source pointer.
+ bool isAllNull = true;
+ for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
+ if (!CE->getOperand(i)->isNullValue()) {
+ isAllNull = false;
+ break;
+ }
+ if (isAllNull)
+ // This is casting one pointer type to another, always BitCast
+ return ConstantExpr::getPointerCast(CE->getOperand(0), DestTy);
+ }
+ }
+
+ // If the cast operand is a constant vector, perform the cast by
+  // operating on each element. In the case of bitcasts, the element
+ // count may be mismatched; don't attempt to handle that here.
+ if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) &&
+ DestTy->isVectorTy() &&
+ DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) {
+ SmallVector<Constant*, 16> res;
+ VectorType *DestVecTy = cast<VectorType>(DestTy);
+ Type *DstEltTy = DestVecTy->getElementType();
+ Type *Ty = IntegerType::get(V->getContext(), 32);
+ for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i) {
+ Constant *C =
+ ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
+ res.push_back(ConstantExpr::getCast(opc, C, DstEltTy));
+ }
+ return ConstantVector::get(res);
+ }
+
+ // We actually have to do a cast now. Perform the cast according to the
+ // opcode specified.
+ switch (opc) {
+ default:
+ llvm_unreachable("Failed to cast constant expression");
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
+ bool ignored;
+ APFloat Val = FPC->getValueAPF();
+ Val.convert(DestTy->isHalfTy() ? APFloat::IEEEhalf :
+ DestTy->isFloatTy() ? APFloat::IEEEsingle :
+ DestTy->isDoubleTy() ? APFloat::IEEEdouble :
+ DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended :
+ DestTy->isFP128Ty() ? APFloat::IEEEquad :
+ DestTy->isPPC_FP128Ty() ? APFloat::PPCDoubleDouble :
+ APFloat::Bogus,
+ APFloat::rmNearestTiesToEven, &ignored);
+ return ConstantFP::get(V->getContext(), Val);
+ }
+ return nullptr; // Can't fold.
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
+ const APFloat &V = FPC->getValueAPF();
+ bool ignored;
+ uint64_t x[2];
+ uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ if (APFloat::opInvalidOp ==
+ V.convertToInteger(x, DestBitWidth, opc==Instruction::FPToSI,
+ APFloat::rmTowardZero, &ignored)) {
+ // Undefined behavior invoked - the destination type can't represent
+ // the input constant.
+ return UndefValue::get(DestTy);
+ }
+ APInt Val(DestBitWidth, x);
+ return ConstantInt::get(FPC->getContext(), Val);
+ }
+ return nullptr; // Can't fold.
+  case Instruction::IntToPtr: // always treated as unsigned
+    if (V->isNullValue()) // Is it an integral null value?
+      return ConstantPointerNull::get(cast<PointerType>(DestTy));
+    return nullptr; // Other int-to-pointer casts cannot be folded
+ case Instruction::PtrToInt: // always treated as unsigned
+ // Is it a null pointer value?
+ if (V->isNullValue())
+ return ConstantInt::get(DestTy, 0);
+ // If this is a sizeof-like expression, pull out multiplications by
+ // known factors to expose them to subsequent folding. If it's an
+ // alignof-like expression, factor out known factors.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == Instruction::GetElementPtr &&
+ CE->getOperand(0)->isNullValue()) {
+ GEPOperator *GEPO = cast<GEPOperator>(CE);
+ Type *Ty = GEPO->getSourceElementType();
+ if (CE->getNumOperands() == 2) {
+ // Handle a sizeof-like expression.
+ Constant *Idx = CE->getOperand(1);
+ bool isOne = isa<ConstantInt>(Idx) && cast<ConstantInt>(Idx)->isOne();
+ if (Constant *C = getFoldedSizeOf(Ty, DestTy, !isOne)) {
+ Idx = ConstantExpr::getCast(CastInst::getCastOpcode(Idx, true,
+ DestTy, false),
+ Idx, DestTy);
+ return ConstantExpr::getMul(C, Idx);
+ }
+ } else if (CE->getNumOperands() == 3 &&
+ CE->getOperand(1)->isNullValue()) {
+ // Handle an alignof-like expression.
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ if (!STy->isPacked()) {
+ ConstantInt *CI = cast<ConstantInt>(CE->getOperand(2));
+ if (CI->isOne() &&
+ STy->getNumElements() == 2 &&
+ STy->getElementType(0)->isIntegerTy(1)) {
+ return getFoldedAlignOf(STy->getElementType(1), DestTy, false);
+ }
+ }
+ // Handle an offsetof-like expression.
+ if (Ty->isStructTy() || Ty->isArrayTy()) {
+ if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2),
+ DestTy, false))
+ return C;
+ }
+ }
+ }
+  // Other pointer-to-integer casts cannot be folded
+ return nullptr;
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ APInt api = CI->getValue();
+ APFloat apf(DestTy->getFltSemantics(),
+ APInt::getNullValue(DestTy->getPrimitiveSizeInBits()));
+ if (APFloat::opOverflow &
+ apf.convertFromAPInt(api, opc==Instruction::SIToFP,
+ APFloat::rmNearestTiesToEven)) {
+ // Undefined behavior invoked - the destination type can't represent
+ // the input constant.
+ return UndefValue::get(DestTy);
+ }
+ return ConstantFP::get(V->getContext(), apf);
+ }
+ return nullptr;
+ case Instruction::ZExt:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().zext(BitWidth));
+ }
+ return nullptr;
+ case Instruction::SExt:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().sext(BitWidth));
+ }
+ return nullptr;
+ case Instruction::Trunc: {
+ if (V->getType()->isVectorTy())
+ return nullptr;
+
+ uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().trunc(DestBitWidth));
+ }
+
+ // The input must be a constantexpr. See if we can simplify this based on
+ // the bytes we are demanding. Only do this if the source and dest are an
+ // even multiple of a byte.
+ if ((DestBitWidth & 7) == 0 &&
+ (cast<IntegerType>(V->getType())->getBitWidth() & 7) == 0)
+ if (Constant *Res = ExtractConstantBytes(V, 0, DestBitWidth / 8))
+ return Res;
+
+ return nullptr;
+ }
+ case Instruction::BitCast:
+ return FoldBitCast(V, DestTy);
+ case Instruction::AddrSpaceCast:
+ return nullptr;
+ }
+}
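+
+// A few concrete folds performed above, for illustration (input values
+// chosen arbitrarily):
+//   trunc i32 300 to i8      -> i8 44       (300 & 0xFF)
+//   zext i1 undef to i32     -> i32 0       (the top bits would be zero)
+//   sitofp i32 -1 to double  -> double -1.0
+//   fptoui float -1.0 to i32 -> undef       (not representable)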
+
+Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
+ Constant *V1, Constant *V2) {
+ // Check for i1 and vector true/false conditions.
+ if (Cond->isNullValue()) return V2;
+ if (Cond->isAllOnesValue()) return V1;
+
+ // If the condition is a vector constant, fold the result elementwise.
+ if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
+ SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(CondV->getContext(), 32);
+ for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){
+ Constant *V;
+ Constant *V1Element = ConstantExpr::getExtractElement(V1,
+ ConstantInt::get(Ty, i));
+ Constant *V2Element = ConstantExpr::getExtractElement(V2,
+ ConstantInt::get(Ty, i));
+ Constant *Cond = dyn_cast<Constant>(CondV->getOperand(i));
+ if (V1Element == V2Element) {
+ V = V1Element;
+ } else if (isa<UndefValue>(Cond)) {
+ V = isa<UndefValue>(V1Element) ? V1Element : V2Element;
+ } else {
+ if (!isa<ConstantInt>(Cond)) break;
+ V = Cond->isNullValue() ? V2Element : V1Element;
+ }
+ Result.push_back(V);
+ }
+
+ // If we were able to build the vector, return it.
+ if (Result.size() == V1->getType()->getVectorNumElements())
+ return ConstantVector::get(Result);
+ }
+
+ if (isa<UndefValue>(Cond)) {
+ if (isa<UndefValue>(V1)) return V1;
+ return V2;
+ }
+ if (isa<UndefValue>(V1)) return V2;
+ if (isa<UndefValue>(V2)) return V1;
+ if (V1 == V2) return V1;
+
+ if (ConstantExpr *TrueVal = dyn_cast<ConstantExpr>(V1)) {
+ if (TrueVal->getOpcode() == Instruction::Select)
+ if (TrueVal->getOperand(0) == Cond)
+ return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2);
+ }
+ if (ConstantExpr *FalseVal = dyn_cast<ConstantExpr>(V2)) {
+ if (FalseVal->getOpcode() == Instruction::Select)
+ if (FalseVal->getOperand(0) == Cond)
+ return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
+ }
+
+ return nullptr;
+}
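+
+// For illustration (operands chosen arbitrarily):
+//   select i1 true, i32 1, i32 2 -> i32 1
+//   select <2 x i1> <i1 true, i1 false>,
+//          <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 3, i32 4>
+//     -> <2 x i32> <i32 1, i32 4>   (folded elementwise)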
+
+Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
+ Constant *Idx) {
+ if (isa<UndefValue>(Val)) // ee(undef, x) -> undef
+ return UndefValue::get(Val->getType()->getVectorElementType());
+ if (Val->isNullValue()) // ee(zero, x) -> zero
+ return Constant::getNullValue(Val->getType()->getVectorElementType());
+ // ee({w,x,y,z}, undef) -> undef
+ if (isa<UndefValue>(Idx))
+ return UndefValue::get(Val->getType()->getVectorElementType());
+
+ if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx)) {
+ // ee({w,x,y,z}, wrong_value) -> undef
+ if (CIdx->uge(Val->getType()->getVectorNumElements()))
+ return UndefValue::get(Val->getType()->getVectorElementType());
+ return Val->getAggregateElement(CIdx->getZExtValue());
+ }
+ return nullptr;
+}
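+
+// For illustration (operands chosen arbitrarily):
+//   extractelement <4 x i32> <i32 10, i32 20, i32 30, i32 40>, i32 2 -> i32 30
+//   extractelement <4 x i32> <i32 10, i32 20, i32 30, i32 40>, i32 7 -> undef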
+
+Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
+ Constant *Elt,
+ Constant *Idx) {
+ if (isa<UndefValue>(Idx))
+ return UndefValue::get(Val->getType());
+
+ ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx);
+ if (!CIdx) return nullptr;
+
+ unsigned NumElts = Val->getType()->getVectorNumElements();
+ if (CIdx->uge(NumElts))
+ return UndefValue::get(Val->getType());
+
+ SmallVector<Constant*, 16> Result;
+ Result.reserve(NumElts);
+ auto *Ty = Type::getInt32Ty(Val->getContext());
+ uint64_t IdxVal = CIdx->getZExtValue();
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (i == IdxVal) {
+ Result.push_back(Elt);
+ continue;
+ }
+
+ Constant *C = ConstantExpr::getExtractElement(Val, ConstantInt::get(Ty, i));
+ Result.push_back(C);
+ }
+
+ return ConstantVector::get(Result);
+}
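+
+// For illustration (operands chosen arbitrarily):
+//   insertelement <4 x i32> zeroinitializer, i32 7, i32 2
+//     -> <i32 0, i32 0, i32 7, i32 0>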
+
+Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1,
+ Constant *V2,
+ Constant *Mask) {
+ unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
+ Type *EltTy = V1->getType()->getVectorElementType();
+
+ // Undefined shuffle mask -> undefined value.
+ if (isa<UndefValue>(Mask))
+ return UndefValue::get(VectorType::get(EltTy, MaskNumElts));
+
+ // Don't break the bitcode reader hack.
+ if (isa<ConstantExpr>(Mask)) return nullptr;
+
+ unsigned SrcNumElts = V1->getType()->getVectorNumElements();
+
+ // Loop over the shuffle mask, evaluating each element.
+ SmallVector<Constant*, 32> Result;
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ int Elt = ShuffleVectorInst::getMaskValue(Mask, i);
+ if (Elt == -1) {
+ Result.push_back(UndefValue::get(EltTy));
+ continue;
+ }
+ Constant *InElt;
+ if (unsigned(Elt) >= SrcNumElts*2)
+ InElt = UndefValue::get(EltTy);
+ else if (unsigned(Elt) >= SrcNumElts) {
+ Type *Ty = IntegerType::get(V2->getContext(), 32);
+ InElt =
+ ConstantExpr::getExtractElement(V2,
+ ConstantInt::get(Ty, Elt - SrcNumElts));
+ } else {
+ Type *Ty = IntegerType::get(V1->getContext(), 32);
+ InElt = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, Elt));
+ }
+ Result.push_back(InElt);
+ }
+
+ return ConstantVector::get(Result);
+}
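+
+// For illustration: each mask value m selects element m of V1 when
+// m < SrcNumElts, element m - SrcNumElts of V2 otherwise, and -1 yields
+// undef. With arbitrary operands:
+//   shufflevector <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 3, i32 4>,
+//                 <2 x i32> <i32 1, i32 2>
+//     -> <2 x i32> <i32 2, i32 3>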
+
+Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg,
+ ArrayRef<unsigned> Idxs) {
+ // Base case: no indices, so return the entire value.
+ if (Idxs.empty())
+ return Agg;
+
+ if (Constant *C = Agg->getAggregateElement(Idxs[0]))
+ return ConstantFoldExtractValueInstruction(C, Idxs.slice(1));
+
+ return nullptr;
+}
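+
+// For illustration (operands chosen arbitrarily):
+//   extractvalue {i32, float} {i32 7, float 1.0}, 0 -> i32 7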
+
+Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
+ Constant *Val,
+ ArrayRef<unsigned> Idxs) {
+ // Base case: no indices, so replace the entire value.
+ if (Idxs.empty())
+ return Val;
+
+ unsigned NumElts;
+ if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
+ NumElts = ST->getNumElements();
+ else if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType()))
+ NumElts = AT->getNumElements();
+ else
+ NumElts = Agg->getType()->getVectorNumElements();
+
+ SmallVector<Constant*, 32> Result;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C = Agg->getAggregateElement(i);
+ if (!C) return nullptr;
+
+ if (Idxs[0] == i)
+ C = ConstantFoldInsertValueInstruction(C, Val, Idxs.slice(1));
+
+ Result.push_back(C);
+ }
+
+ if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
+ return ConstantStruct::get(ST, Result);
+ if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType()))
+ return ConstantArray::get(AT, Result);
+ return ConstantVector::get(Result);
+}
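+
+// For illustration (operands chosen arbitrarily):
+//   insertvalue {i32, i32} zeroinitializer, i32 5, 1 -> {i32 0, i32 5}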
+
+
+Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
+ Constant *C1, Constant *C2) {
+ // Handle UndefValue up front.
+ if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
+ switch (Opcode) {
+ case Instruction::Xor:
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
+ // Handle undef ^ undef -> 0 special case. This is a common
+ // idiom (misuse).
+ return Constant::getNullValue(C1->getType());
+ // Fallthrough
+ case Instruction::Add:
+ case Instruction::Sub:
+ return UndefValue::get(C1->getType());
+ case Instruction::And:
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef & undef -> undef
+ return C1;
+ return Constant::getNullValue(C1->getType()); // undef & X -> 0
+ case Instruction::Mul: {
+ // undef * undef -> undef
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
+ return C1;
+ const APInt *CV;
+ // X * undef -> undef if X is odd
+ if (match(C1, m_APInt(CV)) || match(C2, m_APInt(CV)))
+ if ((*CV)[0])
+ return UndefValue::get(C1->getType());
+
+ // X * undef -> 0 otherwise
+ return Constant::getNullValue(C1->getType());
+ }
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+      // 0 / undef -> undef
+ if (match(C1, m_Zero()))
+ return C2;
+ // undef / 0 -> undef
+ // undef / 1 -> undef
+ if (match(C2, m_Zero()) || match(C2, m_One()))
+ return C1;
+ // undef / X -> 0 otherwise
+ return Constant::getNullValue(C1->getType());
+ case Instruction::URem:
+ case Instruction::SRem:
+ // X % undef -> undef
+ if (match(C2, m_Undef()))
+ return C2;
+ // undef % 0 -> undef
+ if (match(C2, m_Zero()))
+ return C1;
+ // undef % X -> 0 otherwise
+ return Constant::getNullValue(C1->getType());
+ case Instruction::Or: // X | undef -> -1
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef | undef -> undef
+ return C1;
+ return Constant::getAllOnesValue(C1->getType()); // undef | X -> ~0
+ case Instruction::LShr:
+ // X >>l undef -> undef
+ if (isa<UndefValue>(C2))
+ return C2;
+ // undef >>l 0 -> undef
+ if (match(C2, m_Zero()))
+ return C1;
+ // undef >>l X -> 0
+ return Constant::getNullValue(C1->getType());
+ case Instruction::AShr:
+ // X >>a undef -> undef
+ if (isa<UndefValue>(C2))
+ return C2;
+ // undef >>a 0 -> undef
+ if (match(C2, m_Zero()))
+ return C1;
+ // TODO: undef >>a X -> undef if the shift is exact
+ // undef >>a X -> 0
+ return Constant::getNullValue(C1->getType());
+ case Instruction::Shl:
+ // X << undef -> undef
+ if (isa<UndefValue>(C2))
+ return C2;
+ // undef << 0 -> undef
+ if (match(C2, m_Zero()))
+ return C1;
+ // undef << X -> 0
+ return Constant::getNullValue(C1->getType());
+ }
+ }
+
+ // Handle simplifications when the RHS is a constant int.
+ if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
+ switch (Opcode) {
+ case Instruction::Add:
+ if (CI2->equalsInt(0)) return C1; // X + 0 == X
+ break;
+ case Instruction::Sub:
+ if (CI2->equalsInt(0)) return C1; // X - 0 == X
+ break;
+ case Instruction::Mul:
+ if (CI2->equalsInt(0)) return C2; // X * 0 == 0
+ if (CI2->equalsInt(1))
+ return C1; // X * 1 == X
+ break;
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ if (CI2->equalsInt(1))
+ return C1; // X / 1 == X
+ if (CI2->equalsInt(0))
+ return UndefValue::get(CI2->getType()); // X / 0 == undef
+ break;
+ case Instruction::URem:
+ case Instruction::SRem:
+ if (CI2->equalsInt(1))
+ return Constant::getNullValue(CI2->getType()); // X % 1 == 0
+ if (CI2->equalsInt(0))
+ return UndefValue::get(CI2->getType()); // X % 0 == undef
+ break;
+ case Instruction::And:
+ if (CI2->isZero()) return C2; // X & 0 == 0
+ if (CI2->isAllOnesValue())
+ return C1; // X & -1 == X
+
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ // (zext i32 to i64) & 4294967295 -> (zext i32 to i64)
+ if (CE1->getOpcode() == Instruction::ZExt) {
+ unsigned DstWidth = CI2->getType()->getBitWidth();
+ unsigned SrcWidth =
+ CE1->getOperand(0)->getType()->getPrimitiveSizeInBits();
+ APInt PossiblySetBits(APInt::getLowBitsSet(DstWidth, SrcWidth));
+ if ((PossiblySetBits & CI2->getValue()) == PossiblySetBits)
+ return C1;
+ }
+
+ // If and'ing the address of a global with a constant, fold it.
+ if (CE1->getOpcode() == Instruction::PtrToInt &&
+ isa<GlobalValue>(CE1->getOperand(0))) {
+ GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0));
+
+ // Functions are at least 4-byte aligned.
+ unsigned GVAlign = GV->getAlignment();
+ if (isa<Function>(GV))
+ GVAlign = std::max(GVAlign, 4U);
+
+ if (GVAlign > 1) {
+ unsigned DstWidth = CI2->getType()->getBitWidth();
+ unsigned SrcWidth = std::min(DstWidth, Log2_32(GVAlign));
+ APInt BitsNotSet(APInt::getLowBitsSet(DstWidth, SrcWidth));
+
+ // If checking bits we know are clear, return zero.
+ if ((CI2->getValue() & BitsNotSet) == CI2->getValue())
+ return Constant::getNullValue(CI2->getType());
+ }
+ }
+ }
+ break;
+ case Instruction::Or:
+ if (CI2->equalsInt(0)) return C1; // X | 0 == X
+ if (CI2->isAllOnesValue())
+ return C2; // X | -1 == -1
+ break;
+ case Instruction::Xor:
+ if (CI2->equalsInt(0)) return C1; // X ^ 0 == X
+
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ switch (CE1->getOpcode()) {
+ default: break;
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ // cmp pred ^ true -> cmp !pred
+ assert(CI2->equalsInt(1));
+ CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate();
+ pred = CmpInst::getInversePredicate(pred);
+ return ConstantExpr::getCompare(pred, CE1->getOperand(0),
+ CE1->getOperand(1));
+ }
+ }
+ break;
+ case Instruction::AShr:
+ // ashr (zext C to Ty), C2 -> lshr (zext C, CSA), C2
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1))
+ if (CE1->getOpcode() == Instruction::ZExt) // Top bits known zero.
+ return ConstantExpr::getLShr(C1, C2);
+ break;
+ }
+ } else if (isa<ConstantInt>(C1)) {
+ // If C1 is a ConstantInt and C2 is not, swap the operands.
+ if (Instruction::isCommutative(Opcode))
+ return ConstantExpr::get(Opcode, C2, C1);
+ }
+
+ // At this point we know neither constant is an UndefValue.
+ if (ConstantInt *CI1 = dyn_cast<ConstantInt>(C1)) {
+ if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
+ const APInt &C1V = CI1->getValue();
+ const APInt &C2V = CI2->getValue();
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::Add:
+ return ConstantInt::get(CI1->getContext(), C1V + C2V);
+ case Instruction::Sub:
+ return ConstantInt::get(CI1->getContext(), C1V - C2V);
+ case Instruction::Mul:
+ return ConstantInt::get(CI1->getContext(), C1V * C2V);
+ case Instruction::UDiv:
+ assert(!CI2->isNullValue() && "Div by zero handled above");
+ return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
+ case Instruction::SDiv:
+ assert(!CI2->isNullValue() && "Div by zero handled above");
+ if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
+ return UndefValue::get(CI1->getType()); // MIN_INT / -1 -> undef
+ return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
+ case Instruction::URem:
+ assert(!CI2->isNullValue() && "Div by zero handled above");
+ return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
+ case Instruction::SRem:
+ assert(!CI2->isNullValue() && "Div by zero handled above");
+ if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
+ return UndefValue::get(CI1->getType()); // MIN_INT % -1 -> undef
+ return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
+ case Instruction::And:
+ return ConstantInt::get(CI1->getContext(), C1V & C2V);
+ case Instruction::Or:
+ return ConstantInt::get(CI1->getContext(), C1V | C2V);
+ case Instruction::Xor:
+ return ConstantInt::get(CI1->getContext(), C1V ^ C2V);
+ case Instruction::Shl:
+ if (C2V.ult(C1V.getBitWidth()))
+ return ConstantInt::get(CI1->getContext(), C1V.shl(C2V));
+ return UndefValue::get(C1->getType()); // too big shift is undef
+ case Instruction::LShr:
+ if (C2V.ult(C1V.getBitWidth()))
+ return ConstantInt::get(CI1->getContext(), C1V.lshr(C2V));
+ return UndefValue::get(C1->getType()); // too big shift is undef
+ case Instruction::AShr:
+ if (C2V.ult(C1V.getBitWidth()))
+ return ConstantInt::get(CI1->getContext(), C1V.ashr(C2V));
+ return UndefValue::get(C1->getType()); // too big shift is undef
+ }
+ }
+
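+    // If the left operand is zero, these operators all fold to zero.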
+ switch (Opcode) {
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Shl:
+ if (CI1->equalsInt(0)) return C1;
+ break;
+ default:
+ break;
+ }
+ } else if (ConstantFP *CFP1 = dyn_cast<ConstantFP>(C1)) {
+ if (ConstantFP *CFP2 = dyn_cast<ConstantFP>(C2)) {
+ APFloat C1V = CFP1->getValueAPF();
+ APFloat C2V = CFP2->getValueAPF();
+ APFloat C3V = C1V; // copy for modification
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::FAdd:
+ (void)C3V.add(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FSub:
+ (void)C3V.subtract(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FMul:
+ (void)C3V.multiply(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FDiv:
+ (void)C3V.divide(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FRem:
+ (void)C3V.mod(C2V);
+ return ConstantFP::get(C1->getContext(), C3V);
+ }
+ }
+ } else if (VectorType *VTy = dyn_cast<VectorType>(C1->getType())) {
+ // Perform elementwise folding.
+ SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(VTy->getContext(), 32);
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
+ Constant *LHS =
+ ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
+ Constant *RHS =
+ ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i));
+
+ Result.push_back(ConstantExpr::get(Opcode, LHS, RHS));
+ }
+
+ return ConstantVector::get(Result);
+ }
+
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ // There are many possible foldings we could do here. We should probably
+ // at least fold add of a pointer with an integer into the appropriate
+ // getelementptr. This will improve alias analysis a bit.
+
+ // Given ((a + b) + c), if (b + c) folds to something interesting, return
+ // (a + (b + c)).
+ if (Instruction::isAssociative(Opcode) && CE1->getOpcode() == Opcode) {
+ Constant *T = ConstantExpr::get(Opcode, CE1->getOperand(1), C2);
+ if (!isa<ConstantExpr>(T) || cast<ConstantExpr>(T)->getOpcode() != Opcode)
+ return ConstantExpr::get(Opcode, CE1->getOperand(0), T);
+ }
+ } else if (isa<ConstantExpr>(C2)) {
+ // If C2 is a constant expr and C1 isn't, flop them around and fold the
+ // other way if possible.
+ if (Instruction::isCommutative(Opcode))
+ return ConstantFoldBinaryInstruction(Opcode, C2, C1);
+ }
+
+ // i1 can be simplified in many cases.
+ if (C1->getType()->isIntegerTy(1)) {
+ switch (Opcode) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ return ConstantExpr::getXor(C1, C2);
+ case Instruction::Mul:
+ return ConstantExpr::getAnd(C1, C2);
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ // We can assume that C2 == 0. If it were one the result would be
+ // undefined because the shift value is as large as the bitwidth.
+ return C1;
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ // We can assume that C2 == 1. If it were zero the result would be
+ // undefined through division by zero.
+ return C1;
+ case Instruction::URem:
+ case Instruction::SRem:
+ // We can assume that C2 == 1. If it were zero the result would be
+ // undefined through division by zero.
+ return ConstantInt::getFalse(C1->getContext());
+ default:
+ break;
+ }
+ }
+
+ // We don't know how to fold this.
+ return nullptr;
+}
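+
+// For illustration, a few of the folds performed above (arbitrary widths):
+//   mul i32 undef, 3 -> undef   (an odd multiplier can produce any value)
+//   mul i32 undef, 4 -> i32 0   (an even multiplier constrains the low bits)
+//   udiv i8 42, 0    -> undef   (division by zero)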
+
+/// isMaybeZeroSizedType - This type may be zero sized if it's an array or
+/// structure of zero sized types. The only leaf zero sized type is an empty
+/// structure; opaque struct types are conservatively treated as maybe zero
+/// sized.
+static bool isMaybeZeroSizedType(Type *Ty) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (STy->isOpaque()) return true; // Can't say.
+
+ // If all of elements have zero size, this does too.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ if (!isMaybeZeroSizedType(STy->getElementType(i))) return false;
+ return true;
+
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ return isMaybeZeroSizedType(ATy->getElementType());
+ }
+ return false;
+}
+
+/// IdxCompare - Compare the two constants as though they were getelementptr
+/// indices. This allows coercion of the types to be the same thing.
+///
+/// If the two constants are the "same" (after coercion), return 0. If the
+/// first is less than the second, return -1, if the second is less than the
+/// first, return 1. If the constants are not integral, return -2.
+///
+static int IdxCompare(Constant *C1, Constant *C2, Type *ElTy) {
+ if (C1 == C2) return 0;
+
+ // Ok, we found a different index. If they are not ConstantInt, we can't do
+ // anything with them.
+ if (!isa<ConstantInt>(C1) || !isa<ConstantInt>(C2))
+ return -2; // don't know!
+
+ // We cannot compare the indices if they don't fit in an int64_t.
+ if (cast<ConstantInt>(C1)->getValue().getActiveBits() > 64 ||
+ cast<ConstantInt>(C2)->getValue().getActiveBits() > 64)
+ return -2; // don't know!
+
+ // Ok, we have two differing integer indices. Sign extend them to be the same
+ // type.
+ int64_t C1Val = cast<ConstantInt>(C1)->getSExtValue();
+ int64_t C2Val = cast<ConstantInt>(C2)->getSExtValue();
+
+ if (C1Val == C2Val) return 0; // They are equal
+
+ // If the type being indexed over is really just a zero sized type, there is
+ // no pointer difference being made here.
+ if (isMaybeZeroSizedType(ElTy))
+    return -2; // don't know!
+
+ // If they are really different, now that they are the same type, then we
+ // found a difference!
+ if (C1Val < C2Val)
+ return -1;
+ else
+ return 1;
+}
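+
+// For illustration: IdxCompare(i32 2, i64 5, i32) returns -1, since both
+// indices are sign-extended to 64 bits and 2 < 5; over a possibly zero
+// sized element type the same call would return -2 ("don't know").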
+
+/// evaluateFCmpRelation - This function determines if there is anything we can
+/// decide about the two constants provided. This doesn't need to handle simple
+/// things like ConstantFP comparisons, but should instead handle ConstantExprs.
+/// If we can determine that the two constants have a particular relation to
+/// each other, we should return the corresponding FCmpInst predicate,
+/// otherwise return FCmpInst::BAD_FCMP_PREDICATE. This is used below in
+/// ConstantFoldCompareInstruction.
+///
+/// To simplify this code we canonicalize the relation so that the first
+/// operand is always the most "complex" of the two. We consider ConstantFP
+/// to be the simplest, and ConstantExprs to be the most complex.
+static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
+ assert(V1->getType() == V2->getType() &&
+ "Cannot compare values of different types!");
+
+ // Handle degenerate case quickly
+ if (V1 == V2) return FCmpInst::FCMP_OEQ;
+
+ if (!isa<ConstantExpr>(V1)) {
+ if (!isa<ConstantExpr>(V2)) {
+ // Simple case, use the standard constant folder.
+ ConstantInt *R = nullptr;
+ R = dyn_cast<ConstantInt>(
+ ConstantExpr::getFCmp(FCmpInst::FCMP_OEQ, V1, V2));
+ if (R && !R->isZero())
+ return FCmpInst::FCMP_OEQ;
+ R = dyn_cast<ConstantInt>(
+ ConstantExpr::getFCmp(FCmpInst::FCMP_OLT, V1, V2));
+ if (R && !R->isZero())
+ return FCmpInst::FCMP_OLT;
+ R = dyn_cast<ConstantInt>(
+ ConstantExpr::getFCmp(FCmpInst::FCMP_OGT, V1, V2));
+ if (R && !R->isZero())
+ return FCmpInst::FCMP_OGT;
+
+ // Nothing more we can do
+ return FCmpInst::BAD_FCMP_PREDICATE;
+ }
+
+ // If the first operand is simple and second is ConstantExpr, swap operands.
+ FCmpInst::Predicate SwappedRelation = evaluateFCmpRelation(V2, V1);
+ if (SwappedRelation != FCmpInst::BAD_FCMP_PREDICATE)
+ return FCmpInst::getSwappedPredicate(SwappedRelation);
+ } else {
+ // Ok, the LHS is known to be a constantexpr. The RHS can be any of a
+ // constantexpr or a simple constant.
+ ConstantExpr *CE1 = cast<ConstantExpr>(V1);
+ switch (CE1->getOpcode()) {
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ // We might be able to do something with these but we don't right now.
+ break;
+ default:
+ break;
+ }
+ }
+ // There are MANY other foldings that we could perform here. They will
+ // probably be added on demand, as they seem needed.
+ return FCmpInst::BAD_FCMP_PREDICATE;
+}
+
+static ICmpInst::Predicate areGlobalsPotentiallyEqual(const GlobalValue *GV1,
+ const GlobalValue *GV2) {
+ auto isGlobalUnsafeForEquality = [](const GlobalValue *GV) {
+ if (GV->hasExternalWeakLinkage() || GV->hasWeakAnyLinkage())
+ return true;
+ if (const auto *GVar = dyn_cast<GlobalVariable>(GV)) {
+ Type *Ty = GVar->getValueType();
+ // A global with opaque type might end up being zero sized.
+ if (!Ty->isSized())
+ return true;
+ // A global with an empty type might lie at the address of any other
+ // global.
+ if (Ty->isEmptyTy())
+ return true;
+ }
+ return false;
+ };
+ // Don't try to decide equality of aliases.
+ if (!isa<GlobalAlias>(GV1) && !isa<GlobalAlias>(GV2))
+ if (!isGlobalUnsafeForEquality(GV1) && !isGlobalUnsafeForEquality(GV2))
+ return ICmpInst::ICMP_NE;
+ return ICmpInst::BAD_ICMP_PREDICATE;
+}
+
+/// evaluateICmpRelation - This function determines if there is anything we can
+/// decide about the two constants provided. This doesn't need to handle simple
+/// things like integer comparisons, but should instead handle ConstantExprs
+/// and GlobalValues. If we can determine that the two constants have a
+/// particular relation to each other, we should return the corresponding ICmp
+/// predicate, otherwise return ICmpInst::BAD_ICMP_PREDICATE.
+///
+/// To simplify this code we canonicalize the relation so that the first
+/// operand is always the most "complex" of the two. We consider simple
+/// constants (like ConstantInt) to be the simplest, followed by
+/// GlobalValues, followed by ConstantExpr's (the most complex).
+///
+static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
+ bool isSigned) {
+ assert(V1->getType() == V2->getType() &&
+ "Cannot compare different types of values!");
+ if (V1 == V2) return ICmpInst::ICMP_EQ;
+
+ if (!isa<ConstantExpr>(V1) && !isa<GlobalValue>(V1) &&
+ !isa<BlockAddress>(V1)) {
+ if (!isa<GlobalValue>(V2) && !isa<ConstantExpr>(V2) &&
+ !isa<BlockAddress>(V2)) {
+ // We distilled this down to a simple case, use the standard constant
+ // folder.
+ ConstantInt *R = nullptr;
+ ICmpInst::Predicate pred = ICmpInst::ICMP_EQ;
+ R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
+ if (R && !R->isZero())
+ return pred;
+ pred = isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
+ if (R && !R->isZero())
+ return pred;
+ pred = isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
+ if (R && !R->isZero())
+ return pred;
+
+ // If we couldn't figure it out, bail.
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+
+ // If the first operand is simple, swap operands.
+ ICmpInst::Predicate SwappedRelation =
+ evaluateICmpRelation(V2, V1, isSigned);
+ if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
+ return ICmpInst::getSwappedPredicate(SwappedRelation);
+
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V1)) {
+ if (isa<ConstantExpr>(V2)) { // Swap as necessary.
+ ICmpInst::Predicate SwappedRelation =
+ evaluateICmpRelation(V2, V1, isSigned);
+ if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
+ return ICmpInst::getSwappedPredicate(SwappedRelation);
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+
+ // Now we know that the RHS is a GlobalValue, BlockAddress or simple
+ // constant (which, since the types must match, means that it's a
+ // ConstantPointerNull).
+ if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
+ return areGlobalsPotentiallyEqual(GV, GV2);
+ } else if (isa<BlockAddress>(V2)) {
+ return ICmpInst::ICMP_NE; // Globals never equal labels.
+ } else {
+ assert(isa<ConstantPointerNull>(V2) && "Canonicalization guarantee!");
+ // GlobalVals can never be null unless they have external weak linkage.
+ // We don't try to evaluate aliases here.
+ if (!GV->hasExternalWeakLinkage() && !isa<GlobalAlias>(GV))
+ return ICmpInst::ICMP_NE;
+ }
+ } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(V1)) {
+ if (isa<ConstantExpr>(V2)) { // Swap as necessary.
+ ICmpInst::Predicate SwappedRelation =
+ evaluateICmpRelation(V2, V1, isSigned);
+ if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
+ return ICmpInst::getSwappedPredicate(SwappedRelation);
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+
+ // Now we know that the RHS is a GlobalValue, BlockAddress or simple
+ // constant (which, since the types must match, means that it is a
+ // ConstantPointerNull).
+ if (const BlockAddress *BA2 = dyn_cast<BlockAddress>(V2)) {
+ // Block address in another function can't equal this one, but block
+ // addresses in the current function might be the same if blocks are
+ // empty.
+ if (BA2->getFunction() != BA->getFunction())
+ return ICmpInst::ICMP_NE;
+ } else {
+        // Block addresses aren't null and don't equal the address of globals.
+ assert((isa<ConstantPointerNull>(V2) || isa<GlobalValue>(V2)) &&
+ "Canonicalization guarantee!");
+ return ICmpInst::ICMP_NE;
+ }
+ } else {
+ // Ok, the LHS is known to be a constantexpr. The RHS can be any of a
+ // constantexpr, a global, block address, or a simple constant.
+ ConstantExpr *CE1 = cast<ConstantExpr>(V1);
+ Constant *CE1Op0 = CE1->getOperand(0);
+
+ switch (CE1->getOpcode()) {
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ break; // We can't evaluate floating point casts or truncations.
+
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::BitCast:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ // If the cast is not actually changing bits, and the second operand is a
+ // null pointer, do the comparison with the pre-casted value.
+ if (V2->isNullValue() &&
+ (CE1->getType()->isPointerTy() || CE1->getType()->isIntegerTy())) {
+ if (CE1->getOpcode() == Instruction::ZExt) isSigned = false;
+ if (CE1->getOpcode() == Instruction::SExt) isSigned = true;
+ return evaluateICmpRelation(CE1Op0,
+ Constant::getNullValue(CE1Op0->getType()),
+ isSigned);
+ }
+ break;
+
+ case Instruction::GetElementPtr: {
+ GEPOperator *CE1GEP = cast<GEPOperator>(CE1);
+ // Ok, since this is a getelementptr, we know that the constant has a
+ // pointer type. Check the various cases.
+ if (isa<ConstantPointerNull>(V2)) {
+ // If we are comparing a GEP to a null pointer, check to see if the base
+ // of the GEP equals the null pointer.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
+ if (GV->hasExternalWeakLinkage())
+              // Weak linkage GVals could be zero or not. We're comparing that
+              // to a null pointer, so it's greater-or-equal.
+              return isSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
+            else
+              // If it's not weak linkage, the GVal must have a non-zero
+              // address, so the result is greater-than.
+ return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ } else if (isa<ConstantPointerNull>(CE1Op0)) {
+ // If we are indexing from a null pointer, check to see if we have any
+ // non-zero indices.
+ for (unsigned i = 1, e = CE1->getNumOperands(); i != e; ++i)
+ if (!CE1->getOperand(i)->isNullValue())
+ // Offsetting from null, must not be equal.
+ return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ // Only zero indexes from null, must still be zero.
+ return ICmpInst::ICMP_EQ;
+ }
+ // Otherwise, we can't really say if the first operand is null or not.
+ } else if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
+ if (isa<ConstantPointerNull>(CE1Op0)) {
+ if (GV2->hasExternalWeakLinkage())
+              // Weak linkage GVals could be zero or not. We're comparing it to
+              // a null pointer, so it's less-or-equal.
+              return isSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
+            else
+              // If it's not weak linkage, the GVal must have a non-zero
+              // address, so the result is less-than.
+ return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
+ if (GV == GV2) {
+ // If this is a getelementptr of the same global, then it must be
+ // different. Because the types must match, the getelementptr could
+ // only have at most one index, and because we fold getelementptr's
+ // with a single zero index, it must be nonzero.
+ assert(CE1->getNumOperands() == 2 &&
+ !CE1->getOperand(1)->isNullValue() &&
+ "Surprising getelementptr!");
+ return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ } else {
+ if (CE1GEP->hasAllZeroIndices())
+ return areGlobalsPotentiallyEqual(GV, GV2);
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+ }
+ } else {
+ ConstantExpr *CE2 = cast<ConstantExpr>(V2);
+ Constant *CE2Op0 = CE2->getOperand(0);
+
+ // There are MANY other foldings that we could perform here. They will
+ // probably be added on demand, as they seem needed.
+ switch (CE2->getOpcode()) {
+ default: break;
+ case Instruction::GetElementPtr:
+ // By far the most common case to handle is when the base pointers are
+ // obviously to the same global.
+ if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) {
+ // Don't know relative ordering, but check for inequality.
+ if (CE1Op0 != CE2Op0) {
+ GEPOperator *CE2GEP = cast<GEPOperator>(CE2);
+ if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices())
+ return areGlobalsPotentiallyEqual(cast<GlobalValue>(CE1Op0),
+ cast<GlobalValue>(CE2Op0));
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+ // Ok, we know that both getelementptr instructions are based on the
+ // same global. From this, we can precisely determine the relative
+ // ordering of the resultant pointers.
+ unsigned i = 1;
+
+ // The logic below assumes that the result of the comparison
+ // can be determined by finding the first index that differs.
+ // This doesn't work if there is over-indexing in any
+ // subsequent indices, so check for that case first.
+ if (!CE1->isGEPWithNoNotionalOverIndexing() ||
+ !CE2->isGEPWithNoNotionalOverIndexing())
+ return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
+
+ // Compare all of the operands the GEP's have in common.
+ gep_type_iterator GTI = gep_type_begin(CE1);
+ for (;i != CE1->getNumOperands() && i != CE2->getNumOperands();
+ ++i, ++GTI)
+ switch (IdxCompare(CE1->getOperand(i),
+ CE2->getOperand(i), GTI.getIndexedType())) {
+ case -1: return isSigned ? ICmpInst::ICMP_SLT:ICmpInst::ICMP_ULT;
+ case 1: return isSigned ? ICmpInst::ICMP_SGT:ICmpInst::ICMP_UGT;
+ case -2: return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+
+ // Ok, we ran out of things they have in common. If any leftovers
+ // are non-zero then we have a difference, otherwise we are equal.
+ for (; i < CE1->getNumOperands(); ++i)
+ if (!CE1->getOperand(i)->isNullValue()) {
+ if (isa<ConstantInt>(CE1->getOperand(i)))
+ return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ else
+ return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
+ }
+
+ for (; i < CE2->getNumOperands(); ++i)
+ if (!CE2->getOperand(i)->isNullValue()) {
+ if (isa<ConstantInt>(CE2->getOperand(i)))
+ return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ else
+ return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
+ }
+ return ICmpInst::ICMP_EQ;
+ }
+ }
+ }
+ }
+ default:
+ break;
+ }
+ }
+
+ return ICmpInst::BAD_ICMP_PREDICATE;
+}
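+
+// For illustration, with @g an ordinary (non-weak, non-alias) global:
+//   evaluateICmpRelation(@g, null)         -> ICMP_NE
+//   evaluateICmpRelation(gep(@g, 1), null) -> ICMP_UGT (ICMP_SGT if signed)
+// since a non-weak global must have a non-zero address.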
+
+Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
+ Constant *C1, Constant *C2) {
+ Type *ResultTy;
+ if (VectorType *VT = dyn_cast<VectorType>(C1->getType()))
+ ResultTy = VectorType::get(Type::getInt1Ty(C1->getContext()),
+ VT->getNumElements());
+ else
+ ResultTy = Type::getInt1Ty(C1->getContext());
+
+ // Fold FCMP_FALSE/FCMP_TRUE unconditionally.
+ if (pred == FCmpInst::FCMP_FALSE)
+ return Constant::getNullValue(ResultTy);
+
+ if (pred == FCmpInst::FCMP_TRUE)
+ return Constant::getAllOnesValue(ResultTy);
+
+ // Handle some degenerate cases first
+ if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
+ CmpInst::Predicate Predicate = CmpInst::Predicate(pred);
+ bool isIntegerPredicate = ICmpInst::isIntPredicate(Predicate);
+ // For EQ and NE, we can always pick a value for the undef to make the
+ // predicate pass or fail, so we can return undef.
+ // Also, if both operands are undef, we can return undef for int comparison.
+ if (ICmpInst::isEquality(Predicate) || (isIntegerPredicate && C1 == C2))
+ return UndefValue::get(ResultTy);
+
+ // Otherwise, for integer compare, pick the same value as the non-undef
+ // operand, and fold it to true or false.
+ if (isIntegerPredicate)
+ return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(Predicate));
+
+ // Choosing NaN for the undef will always make unordered comparison succeed
+    // and ordered comparison fail.
+ return ConstantInt::get(ResultTy, CmpInst::isUnordered(Predicate));
+ }
+
+ // icmp eq/ne(null,GV) -> false/true
+ if (C1->isNullValue()) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C2))
+ // Don't try to evaluate aliases. External weak GV can be null.
+ if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage()) {
+ if (pred == ICmpInst::ICMP_EQ)
+ return ConstantInt::getFalse(C1->getContext());
+ else if (pred == ICmpInst::ICMP_NE)
+ return ConstantInt::getTrue(C1->getContext());
+ }
+ // icmp eq/ne(GV,null) -> false/true
+ } else if (C2->isNullValue()) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C1))
+ // Don't try to evaluate aliases. External weak GV can be null.
+ if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage()) {
+ if (pred == ICmpInst::ICMP_EQ)
+ return ConstantInt::getFalse(C1->getContext());
+ else if (pred == ICmpInst::ICMP_NE)
+ return ConstantInt::getTrue(C1->getContext());
+ }
+ }
+
+ // If the comparison is a comparison between two i1's, simplify it.
+ if (C1->getType()->isIntegerTy(1)) {
+ switch(pred) {
+ case ICmpInst::ICMP_EQ:
+ if (isa<ConstantInt>(C2))
+ return ConstantExpr::getXor(C1, ConstantExpr::getNot(C2));
+ return ConstantExpr::getXor(ConstantExpr::getNot(C1), C2);
+ case ICmpInst::ICMP_NE:
+ return ConstantExpr::getXor(C1, C2);
+ default:
+ break;
+ }
+ }
+
+ if (isa<ConstantInt>(C1) && isa<ConstantInt>(C2)) {
+ APInt V1 = cast<ConstantInt>(C1)->getValue();
+ APInt V2 = cast<ConstantInt>(C2)->getValue();
+ switch (pred) {
+ default: llvm_unreachable("Invalid ICmp Predicate");
+ case ICmpInst::ICMP_EQ: return ConstantInt::get(ResultTy, V1 == V2);
+ case ICmpInst::ICMP_NE: return ConstantInt::get(ResultTy, V1 != V2);
+ case ICmpInst::ICMP_SLT: return ConstantInt::get(ResultTy, V1.slt(V2));
+ case ICmpInst::ICMP_SGT: return ConstantInt::get(ResultTy, V1.sgt(V2));
+ case ICmpInst::ICMP_SLE: return ConstantInt::get(ResultTy, V1.sle(V2));
+ case ICmpInst::ICMP_SGE: return ConstantInt::get(ResultTy, V1.sge(V2));
+ case ICmpInst::ICMP_ULT: return ConstantInt::get(ResultTy, V1.ult(V2));
+ case ICmpInst::ICMP_UGT: return ConstantInt::get(ResultTy, V1.ugt(V2));
+ case ICmpInst::ICMP_ULE: return ConstantInt::get(ResultTy, V1.ule(V2));
+ case ICmpInst::ICMP_UGE: return ConstantInt::get(ResultTy, V1.uge(V2));
+ }
+ } else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) {
+ APFloat C1V = cast<ConstantFP>(C1)->getValueAPF();
+ APFloat C2V = cast<ConstantFP>(C2)->getValueAPF();
+ APFloat::cmpResult R = C1V.compare(C2V);
+ switch (pred) {
+ default: llvm_unreachable("Invalid FCmp Predicate");
+ case FCmpInst::FCMP_FALSE: return Constant::getNullValue(ResultTy);
+ case FCmpInst::FCMP_TRUE: return Constant::getAllOnesValue(ResultTy);
+ case FCmpInst::FCMP_UNO:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered);
+ case FCmpInst::FCMP_ORD:
+ return ConstantInt::get(ResultTy, R!=APFloat::cmpUnordered);
+ case FCmpInst::FCMP_UEQ:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
+ R==APFloat::cmpEqual);
+ case FCmpInst::FCMP_OEQ:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpEqual);
+ case FCmpInst::FCMP_UNE:
+ return ConstantInt::get(ResultTy, R!=APFloat::cmpEqual);
+ case FCmpInst::FCMP_ONE:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
+ R==APFloat::cmpGreaterThan);
+ case FCmpInst::FCMP_ULT:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
+ R==APFloat::cmpLessThan);
+ case FCmpInst::FCMP_OLT:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan);
+ case FCmpInst::FCMP_UGT:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
+ R==APFloat::cmpGreaterThan);
+ case FCmpInst::FCMP_OGT:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan);
+ case FCmpInst::FCMP_ULE:
+ return ConstantInt::get(ResultTy, R!=APFloat::cmpGreaterThan);
+ case FCmpInst::FCMP_OLE:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
+ R==APFloat::cmpEqual);
+ case FCmpInst::FCMP_UGE:
+ return ConstantInt::get(ResultTy, R!=APFloat::cmpLessThan);
+ case FCmpInst::FCMP_OGE:
+ return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan ||
+ R==APFloat::cmpEqual);
+ }
+ } else if (C1->getType()->isVectorTy()) {
+ // If we can constant fold the comparison of each element, constant fold
+ // the whole vector comparison.
+ SmallVector<Constant*, 4> ResElts;
+ Type *Ty = IntegerType::get(C1->getContext(), 32);
+ // Compare the elements, producing an i1 result or constant expr.
+ for (unsigned i = 0, e = C1->getType()->getVectorNumElements(); i != e;++i){
+ Constant *C1E =
+ ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
+ Constant *C2E =
+ ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i));
+
+ ResElts.push_back(ConstantExpr::getCompare(pred, C1E, C2E));
+ }
+
+ return ConstantVector::get(ResElts);
+ }
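+  // For example, 'icmp eq <2 x i32> <i32 1, i32 2>, <i32 1, i32 3>' is folded
+  // element by element into '<2 x i1> <i1 true, i1 false>'.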
+
+ if (C1->getType()->isFloatingPointTy() &&
+      // Only call evaluateFCmpRelation if we have a constant expr to avoid
+      // infinite recursion.
+ (isa<ConstantExpr>(C1) || isa<ConstantExpr>(C2))) {
+ int Result = -1; // -1 = unknown, 0 = known false, 1 = known true.
+ switch (evaluateFCmpRelation(C1, C2)) {
+ default: llvm_unreachable("Unknown relation!");
+ case FCmpInst::FCMP_UNO:
+ case FCmpInst::FCMP_ORD:
+ case FCmpInst::FCMP_UEQ:
+ case FCmpInst::FCMP_UNE:
+ case FCmpInst::FCMP_ULT:
+ case FCmpInst::FCMP_UGT:
+ case FCmpInst::FCMP_ULE:
+ case FCmpInst::FCMP_UGE:
+ case FCmpInst::FCMP_TRUE:
+ case FCmpInst::FCMP_FALSE:
+ case FCmpInst::BAD_FCMP_PREDICATE:
+ break; // Couldn't determine anything about these constants.
+ case FCmpInst::FCMP_OEQ: // We know that C1 == C2
+ Result = (pred == FCmpInst::FCMP_UEQ || pred == FCmpInst::FCMP_OEQ ||
+ pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE ||
+ pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
+ break;
+ case FCmpInst::FCMP_OLT: // We know that C1 < C2
+ Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
+ pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT ||
+ pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE);
+ break;
+ case FCmpInst::FCMP_OGT: // We know that C1 > C2
+ Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
+ pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT ||
+ pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
+ break;
+ case FCmpInst::FCMP_OLE: // We know that C1 <= C2
+ // We can only partially decide this relation.
+ if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
+ Result = 0;
+ else if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
+ Result = 1;
+ break;
+    case FCmpInst::FCMP_OGE: // We know that C1 >= C2
+ // We can only partially decide this relation.
+ if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
+ Result = 0;
+ else if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
+ Result = 1;
+ break;
+ case FCmpInst::FCMP_ONE: // We know that C1 != C2
+ // We can only partially decide this relation.
+ if (pred == FCmpInst::FCMP_OEQ || pred == FCmpInst::FCMP_UEQ)
+ Result = 0;
+ else if (pred == FCmpInst::FCMP_ONE || pred == FCmpInst::FCMP_UNE)
+ Result = 1;
+ break;
+ }
+
+ // If we evaluated the result, return it now.
+ if (Result != -1)
+ return ConstantInt::get(ResultTy, Result);
+
+ } else {
+ // Evaluate the relation between the two constants, per the predicate.
+ int Result = -1; // -1 = unknown, 0 = known false, 1 = known true.
+ switch (evaluateICmpRelation(C1, C2,
+ CmpInst::isSigned((CmpInst::Predicate)pred))) {
+    default: llvm_unreachable("Unknown relation!");
+ case ICmpInst::BAD_ICMP_PREDICATE:
+ break; // Couldn't determine anything about these constants.
+ case ICmpInst::ICMP_EQ: // We know the constants are equal!
+ // If we know the constants are equal, we can decide the result of this
+ // computation precisely.
+ Result = ICmpInst::isTrueWhenEqual((ICmpInst::Predicate)pred);
+ break;
+ case ICmpInst::ICMP_ULT:
+ switch (pred) {
+ case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_ULE:
+ Result = 1; break;
+ case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_UGE:
+ Result = 0; break;
+ }
+ break;
+ case ICmpInst::ICMP_SLT:
+ switch (pred) {
+ case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SLE:
+ Result = 1; break;
+ case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SGE:
+ Result = 0; break;
+ }
+ break;
+ case ICmpInst::ICMP_UGT:
+ switch (pred) {
+ case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_UGE:
+ Result = 1; break;
+ case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_ULE:
+ Result = 0; break;
+ }
+ break;
+ case ICmpInst::ICMP_SGT:
+ switch (pred) {
+ case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SGE:
+ Result = 1; break;
+ case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SLE:
+ Result = 0; break;
+ }
+ break;
+ case ICmpInst::ICMP_ULE:
+ if (pred == ICmpInst::ICMP_UGT) Result = 0;
+ if (pred == ICmpInst::ICMP_ULT || pred == ICmpInst::ICMP_ULE) Result = 1;
+ break;
+ case ICmpInst::ICMP_SLE:
+ if (pred == ICmpInst::ICMP_SGT) Result = 0;
+ if (pred == ICmpInst::ICMP_SLT || pred == ICmpInst::ICMP_SLE) Result = 1;
+ break;
+ case ICmpInst::ICMP_UGE:
+ if (pred == ICmpInst::ICMP_ULT) Result = 0;
+ if (pred == ICmpInst::ICMP_UGT || pred == ICmpInst::ICMP_UGE) Result = 1;
+ break;
+ case ICmpInst::ICMP_SGE:
+ if (pred == ICmpInst::ICMP_SLT) Result = 0;
+ if (pred == ICmpInst::ICMP_SGT || pred == ICmpInst::ICMP_SGE) Result = 1;
+ break;
+ case ICmpInst::ICMP_NE:
+ if (pred == ICmpInst::ICMP_EQ) Result = 0;
+ if (pred == ICmpInst::ICMP_NE) Result = 1;
+ break;
+ }
+
+ // If we evaluated the result, return it now.
+ if (Result != -1)
+ return ConstantInt::get(ResultTy, Result);
+
+ // If the right hand side is a bitcast, try using its inverse to simplify
+ // it by moving it to the left hand side. We can't do this if it would turn
+    // a vector compare into a scalar compare or vice versa.
+ if (ConstantExpr *CE2 = dyn_cast<ConstantExpr>(C2)) {
+ Constant *CE2Op0 = CE2->getOperand(0);
+ if (CE2->getOpcode() == Instruction::BitCast &&
+ CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy()) {
+ Constant *Inverse = ConstantExpr::getBitCast(C1, CE2Op0->getType());
+ return ConstantExpr::getICmp(pred, Inverse, CE2Op0);
+ }
+ }
+
+ // If the left hand side is an extension, try eliminating it.
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ if ((CE1->getOpcode() == Instruction::SExt &&
+ ICmpInst::isSigned((ICmpInst::Predicate)pred)) ||
+ (CE1->getOpcode() == Instruction::ZExt &&
+ !ICmpInst::isSigned((ICmpInst::Predicate)pred))){
+ Constant *CE1Op0 = CE1->getOperand(0);
+ Constant *CE1Inverse = ConstantExpr::getTrunc(CE1, CE1Op0->getType());
+ if (CE1Inverse == CE1Op0) {
+ // Check whether we can safely truncate the right hand side.
+ Constant *C2Inverse = ConstantExpr::getTrunc(C2, CE1Op0->getType());
+ if (ConstantExpr::getCast(CE1->getOpcode(), C2Inverse,
+ C2->getType()) == C2)
+ return ConstantExpr::getICmp(pred, CE1Inverse, C2Inverse);
+ }
+ }
+ }
+
+ if ((!isa<ConstantExpr>(C1) && isa<ConstantExpr>(C2)) ||
+ (C1->isNullValue() && !C2->isNullValue())) {
+ // If C2 is a constant expr and C1 isn't, flip them around and fold the
+ // other way if possible.
+ // Also, if C1 is null and C2 isn't, flip them around.
+ pred = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)pred);
+ return ConstantExpr::getICmp(pred, C2, C1);
+ }
+ }
+ return nullptr;
+}
+
+/// isInBoundsIndices - Test whether the given sequence of *normalized* indices
+/// is "inbounds".
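+/// For example, the normalized index lists (0, 3, 2) and (1, 0, 0) are both
+/// inbounds (the latter by the one-past-the-end rule), while (2, 0, 0) is not.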
+template<typename IndexTy>
+static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
+ // No indices means nothing that could be out of bounds.
+ if (Idxs.empty()) return true;
+
+ // If the first index is zero, it's in bounds.
+ if (cast<Constant>(Idxs[0])->isNullValue()) return true;
+
+ // If the first index is one and all the rest are zero, it's in bounds,
+ // by the one-past-the-end rule.
+ if (!cast<ConstantInt>(Idxs[0])->isOne())
+ return false;
+ for (unsigned i = 1, e = Idxs.size(); i != e; ++i)
+ if (!cast<Constant>(Idxs[i])->isNullValue())
+ return false;
+ return true;
+}
+
+/// \brief Test whether a given ConstantInt is in-range for a SequentialType.
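+/// For example, index 2 is in-range for [4 x i32] while index 4 is not; any
+/// index is considered in-range when indexing along a pointer.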
+static bool isIndexInRangeOfSequentialType(SequentialType *STy,
+ const ConstantInt *CI) {
+  // All indices are valid when indexing along a pointer.
+ if (isa<PointerType>(STy))
+ return true;
+
+ uint64_t NumElements = 0;
+ // Determine the number of elements in our sequential type.
+ if (auto *ATy = dyn_cast<ArrayType>(STy))
+ NumElements = ATy->getNumElements();
+ else if (auto *VTy = dyn_cast<VectorType>(STy))
+ NumElements = VTy->getNumElements();
+
+ assert((isa<ArrayType>(STy) || NumElements > 0) &&
+ "didn't expect non-array type to have zero elements!");
+
+ // We cannot bounds check the index if it doesn't fit in an int64_t.
+ if (CI->getValue().getActiveBits() > 64)
+ return false;
+
+ // A negative index or an index past the end of our sequential type is
+ // considered out-of-range.
+ int64_t IndexVal = CI->getSExtValue();
+ if (IndexVal < 0 || (NumElements > 0 && (uint64_t)IndexVal >= NumElements))
+ return false;
+
+ // Otherwise, it is in-range.
+ return true;
+}
+
+template<typename IndexTy>
+static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,
+ bool inBounds,
+ ArrayRef<IndexTy> Idxs) {
+ if (Idxs.empty()) return C;
+ Constant *Idx0 = cast<Constant>(Idxs[0]);
+ if ((Idxs.size() == 1 && Idx0->isNullValue()))
+ return C;
+
+ if (isa<UndefValue>(C)) {
+ PointerType *Ptr = cast<PointerType>(C->getType());
+ Type *Ty = GetElementPtrInst::getIndexedType(
+ cast<PointerType>(Ptr->getScalarType())->getElementType(), Idxs);
+ assert(Ty && "Invalid indices for GEP!");
+ return UndefValue::get(PointerType::get(Ty, Ptr->getAddressSpace()));
+ }
+
+ if (C->isNullValue()) {
+ bool isNull = true;
+ for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
+ if (!cast<Constant>(Idxs[i])->isNullValue()) {
+ isNull = false;
+ break;
+ }
+ if (isNull) {
+ PointerType *Ptr = cast<PointerType>(C->getType());
+ Type *Ty = GetElementPtrInst::getIndexedType(
+ cast<PointerType>(Ptr->getScalarType())->getElementType(), Idxs);
+ assert(Ty && "Invalid indices for GEP!");
+ return ConstantPointerNull::get(PointerType::get(Ty,
+ Ptr->getAddressSpace()));
+ }
+ }
+
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ // Combine Indices - If the source pointer to this getelementptr instruction
+ // is a getelementptr instruction, combine the indices of the two
+ // getelementptr instructions into a single instruction.
+ //
+ if (CE->getOpcode() == Instruction::GetElementPtr) {
+ Type *LastTy = nullptr;
+ for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
+ I != E; ++I)
+ LastTy = *I;
+
+ // We cannot combine indices if doing so would take us outside of an
+ // array or vector. Doing otherwise could trick us if we evaluated such a
+ // GEP as part of a load.
+ //
+ // e.g. Consider if the original GEP was:
+ // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
+ // i32 0, i32 0, i64 0)
+ //
+ // If we then tried to offset it by '8' to get to the third element,
+ // an i8, we should *not* get:
+ // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
+ // i32 0, i32 0, i64 8)
+ //
+    // This GEP tries to index array element '8', which is out of bounds.
+ // Subsequent evaluation would get confused and produce erroneous results.
+ //
+ // The following prohibits such a GEP from being formed by checking to see
+ // if the index is in-range with respect to an array or vector.
+ bool PerformFold = false;
+ if (Idx0->isNullValue())
+ PerformFold = true;
+ else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
+ PerformFold = isIndexInRangeOfSequentialType(STy, CI);
+
+ if (PerformFold) {
+ SmallVector<Value*, 16> NewIndices;
+ NewIndices.reserve(Idxs.size() + CE->getNumOperands());
+ NewIndices.append(CE->op_begin() + 1, CE->op_end() - 1);
+
+ // Add the last index of the source with the first index of the new GEP.
+ // Make sure to handle the case when they are actually different types.
+ Constant *Combined = CE->getOperand(CE->getNumOperands()-1);
+        // Only needed when the first new index is non-zero; a zero index
+        // leaves the source GEP's last index unchanged.
+ if (!Idx0->isNullValue()) {
+ Type *IdxTy = Combined->getType();
+ if (IdxTy != Idx0->getType()) {
+ unsigned CommonExtendedWidth =
+ std::max(IdxTy->getIntegerBitWidth(),
+ Idx0->getType()->getIntegerBitWidth());
+ CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
+
+ Type *CommonTy =
+ Type::getIntNTy(IdxTy->getContext(), CommonExtendedWidth);
+ Constant *C1 = ConstantExpr::getSExtOrBitCast(Idx0, CommonTy);
+ Constant *C2 = ConstantExpr::getSExtOrBitCast(Combined, CommonTy);
+ Combined = ConstantExpr::get(Instruction::Add, C1, C2);
+ } else {
+ Combined =
+ ConstantExpr::get(Instruction::Add, Idx0, Combined);
+ }
+ }
+
+ NewIndices.push_back(Combined);
+ NewIndices.append(Idxs.begin() + 1, Idxs.end());
+ return ConstantExpr::getGetElementPtr(
+ cast<GEPOperator>(CE)->getSourceElementType(), CE->getOperand(0),
+ NewIndices, inBounds && cast<GEPOperator>(CE)->isInBounds());
+ }
+ }
+
+ // Attempt to fold casts to the same type away. For example, folding:
+ //
+ // i32* getelementptr ([2 x i32]* bitcast ([3 x i32]* %X to [2 x i32]*),
+ // i64 0, i64 0)
+ // into:
+ //
+ // i32* getelementptr ([3 x i32]* %X, i64 0, i64 0)
+ //
+ // Don't fold if the cast is changing address spaces.
+ if (CE->isCast() && Idxs.size() > 1 && Idx0->isNullValue()) {
+ PointerType *SrcPtrTy =
+ dyn_cast<PointerType>(CE->getOperand(0)->getType());
+ PointerType *DstPtrTy = dyn_cast<PointerType>(CE->getType());
+ if (SrcPtrTy && DstPtrTy) {
+ ArrayType *SrcArrayTy =
+ dyn_cast<ArrayType>(SrcPtrTy->getElementType());
+ ArrayType *DstArrayTy =
+ dyn_cast<ArrayType>(DstPtrTy->getElementType());
+ if (SrcArrayTy && DstArrayTy
+ && SrcArrayTy->getElementType() == DstArrayTy->getElementType()
+ && SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
+ return ConstantExpr::getGetElementPtr(
+ SrcArrayTy, (Constant *)CE->getOperand(0), Idxs, inBounds);
+ }
+ }
+ }
+
+ // Check to see if any array indices are not within the corresponding
+ // notional array or vector bounds. If so, try to determine if they can be
+ // factored out into preceding dimensions.
+ SmallVector<Constant *, 8> NewIdxs;
+ Type *Ty = PointeeTy;
+ Type *Prev = C->getType();
+ bool Unknown = !isa<ConstantInt>(Idxs[0]);
+ for (unsigned i = 1, e = Idxs.size(); i != e;
+ Prev = Ty, Ty = cast<CompositeType>(Ty)->getTypeAtIndex(Idxs[i]), ++i) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
+ if (isa<ArrayType>(Ty) || isa<VectorType>(Ty))
+ if (CI->getSExtValue() > 0 &&
+ !isIndexInRangeOfSequentialType(cast<SequentialType>(Ty), CI)) {
+ if (isa<SequentialType>(Prev)) {
+ // It's out of range, but we can factor it into the prior
+ // dimension.
+ NewIdxs.resize(Idxs.size());
+ uint64_t NumElements = 0;
+ if (auto *ATy = dyn_cast<ArrayType>(Ty))
+ NumElements = ATy->getNumElements();
+ else
+ NumElements = cast<VectorType>(Ty)->getNumElements();
+
+ ConstantInt *Factor = ConstantInt::get(CI->getType(), NumElements);
+ NewIdxs[i] = ConstantExpr::getSRem(CI, Factor);
+
+ Constant *PrevIdx = cast<Constant>(Idxs[i-1]);
+ Constant *Div = ConstantExpr::getSDiv(CI, Factor);
+
+ unsigned CommonExtendedWidth =
+ std::max(PrevIdx->getType()->getIntegerBitWidth(),
+ Div->getType()->getIntegerBitWidth());
+ CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
+
+            // Before adding, extend both operands to at least i64 to avoid
+            // overflow trouble.
+ if (!PrevIdx->getType()->isIntegerTy(CommonExtendedWidth))
+ PrevIdx = ConstantExpr::getSExt(
+ PrevIdx,
+ Type::getIntNTy(Div->getContext(), CommonExtendedWidth));
+ if (!Div->getType()->isIntegerTy(CommonExtendedWidth))
+ Div = ConstantExpr::getSExt(
+ Div, Type::getIntNTy(Div->getContext(), CommonExtendedWidth));
+
+ NewIdxs[i-1] = ConstantExpr::getAdd(PrevIdx, Div);
+ } else {
+ // It's out of range, but the prior dimension is a struct
+ // so we can't do anything about it.
+ Unknown = true;
+ }
+ }
+ } else {
+ // We don't know if it's in range or not.
+ Unknown = true;
+ }
+ }
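+  // For example, with a hypothetical global @A, the out-of-range GEP
+  //   i32* getelementptr ([2 x [3 x i32]]* @A, i64 0, i64 0, i64 4)
+  // is adjusted to
+  //   i32* getelementptr ([2 x [3 x i32]]* @A, i64 0, i64 1, i64 1)
+  // since 4 srem 3 == 1 stays in the last dimension and 4 sdiv 3 == 1 is
+  // added to the previous index.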
+
+ // If we did any factoring, start over with the adjusted indices.
+ if (!NewIdxs.empty()) {
+ for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
+ if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]);
+ return ConstantExpr::getGetElementPtr(PointeeTy, C, NewIdxs, inBounds);
+ }
+
+ // If all indices are known integers and normalized, we can do a simple
+ // check for the "inbounds" property.
+ if (!Unknown && !inBounds)
+ if (auto *GV = dyn_cast<GlobalVariable>(C))
+ if (!GV->hasExternalWeakLinkage() && isInBoundsIndices(Idxs))
+ return ConstantExpr::getInBoundsGetElementPtr(PointeeTy, C, Idxs);
+
+ return nullptr;
+}
+
+Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
+ bool inBounds,
+ ArrayRef<Constant *> Idxs) {
+ return ConstantFoldGetElementPtrImpl(
+ cast<PointerType>(C->getType()->getScalarType())->getElementType(), C,
+ inBounds, Idxs);
+}
+
+Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
+ bool inBounds,
+ ArrayRef<Value *> Idxs) {
+ return ConstantFoldGetElementPtrImpl(
+ cast<PointerType>(C->getType()->getScalarType())->getElementType(), C,
+ inBounds, Idxs);
+}
+
+Constant *llvm::ConstantFoldGetElementPtr(Type *Ty, Constant *C,
+ bool inBounds,
+ ArrayRef<Constant *> Idxs) {
+ return ConstantFoldGetElementPtrImpl(Ty, C, inBounds, Idxs);
+}
+
+Constant *llvm::ConstantFoldGetElementPtr(Type *Ty, Constant *C,
+ bool inBounds,
+ ArrayRef<Value *> Idxs) {
+ return ConstantFoldGetElementPtrImpl(Ty, C, inBounds, Idxs);
+}
diff --git a/contrib/llvm/lib/IR/ConstantFold.h b/contrib/llvm/lib/IR/ConstantFold.h
new file mode 100644
index 0000000..42a9c6b
--- /dev/null
+++ b/contrib/llvm/lib/IR/ConstantFold.h
@@ -0,0 +1,60 @@
+//===-- ConstantFold.h - Internal Constant Folding Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the (internal) constant folding interfaces for LLVM. These
+// interfaces are used by the ConstantExpr::get* methods to automatically fold
+// constants when possible.
+//
+// These operators may return a null object if they don't know how to perform
+// the specified operation on the specified constant types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_CONSTANTFOLD_H
+#define LLVM_LIB_IR_CONSTANTFOLD_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+ class Value;
+ class Constant;
+ class Type;
+
+ // Constant fold various types of instruction...
+ Constant *ConstantFoldCastInstruction(
+ unsigned opcode, ///< The opcode of the cast
+ Constant *V, ///< The source constant
+ Type *DestTy ///< The destination type
+ );
+ Constant *ConstantFoldSelectInstruction(Constant *Cond,
+ Constant *V1, Constant *V2);
+ Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
+ Constant *ConstantFoldInsertElementInstruction(Constant *Val, Constant *Elt,
+ Constant *Idx);
+ Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
+ Constant *Mask);
+ Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
+ ArrayRef<unsigned> Idxs);
+ Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> Idxs);
+ Constant *ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1,
+ Constant *V2);
+ Constant *ConstantFoldCompareInstruction(unsigned short predicate,
+ Constant *C1, Constant *C2);
+ Constant *ConstantFoldGetElementPtr(Constant *C, bool inBounds,
+ ArrayRef<Constant *> Idxs);
+ Constant *ConstantFoldGetElementPtr(Constant *C, bool inBounds,
+ ArrayRef<Value *> Idxs);
+ Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool inBounds,
+ ArrayRef<Constant *> Idxs);
+ Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool inBounds,
+ ArrayRef<Value *> Idxs);
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/IR/ConstantRange.cpp b/contrib/llvm/lib/IR/ConstantRange.cpp
new file mode 100644
index 0000000..48f9b27
--- /dev/null
+++ b/contrib/llvm/lib/IR/ConstantRange.cpp
@@ -0,0 +1,824 @@
+//===-- ConstantRange.cpp - ConstantRange implementation ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Represent a range of possible values that may occur when the program is run
+// for an integral value. This keeps track of a lower and upper bound for the
+// constant, which MAY wrap around the end of the numeric range. To do this, it
+// keeps track of a [lower, upper) bound, which specifies an interval just like
+// STL iterators. When used with boolean values, the following are important
+// ranges (other integral ranges use min/max values for special range values):
+//
+// [F, F) = {} = Empty set
+// [T, F) = {T}
+// [F, T) = {F}
+// [T, T) = {F, T} = Full set
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+/// Initialize a full (the default) or empty set for the specified type.
+///
+ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
+ if (Full)
+ Lower = Upper = APInt::getMaxValue(BitWidth);
+ else
+ Lower = Upper = APInt::getMinValue(BitWidth);
+}
+
+/// Initialize a range to hold the single specified value.
+///
+ConstantRange::ConstantRange(APIntMoveTy V)
+ : Lower(std::move(V)), Upper(Lower + 1) {}
+
+ConstantRange::ConstantRange(APIntMoveTy L, APIntMoveTy U)
+ : Lower(std::move(L)), Upper(std::move(U)) {
+ assert(Lower.getBitWidth() == Upper.getBitWidth() &&
+ "ConstantRange with unequal bit widths");
+ assert((Lower != Upper || (Lower.isMaxValue() || Lower.isMinValue())) &&
+ "Lower == Upper, but they aren't min or max value!");
+}
+
+ConstantRange ConstantRange::makeAllowedICmpRegion(CmpInst::Predicate Pred,
+ const ConstantRange &CR) {
+ if (CR.isEmptySet())
+ return CR;
+
+ uint32_t W = CR.getBitWidth();
+ switch (Pred) {
+ default:
+ llvm_unreachable("Invalid ICmp predicate to makeAllowedICmpRegion()");
+ case CmpInst::ICMP_EQ:
+ return CR;
+ case CmpInst::ICMP_NE:
+ if (CR.isSingleElement())
+ return ConstantRange(CR.getUpper(), CR.getLower());
+ return ConstantRange(W);
+ case CmpInst::ICMP_ULT: {
+ APInt UMax(CR.getUnsignedMax());
+ if (UMax.isMinValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(APInt::getMinValue(W), UMax);
+ }
+ case CmpInst::ICMP_SLT: {
+ APInt SMax(CR.getSignedMax());
+ if (SMax.isMinSignedValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(APInt::getSignedMinValue(W), SMax);
+ }
+ case CmpInst::ICMP_ULE: {
+ APInt UMax(CR.getUnsignedMax());
+ if (UMax.isMaxValue())
+ return ConstantRange(W);
+ return ConstantRange(APInt::getMinValue(W), UMax + 1);
+ }
+ case CmpInst::ICMP_SLE: {
+ APInt SMax(CR.getSignedMax());
+ if (SMax.isMaxSignedValue())
+ return ConstantRange(W);
+ return ConstantRange(APInt::getSignedMinValue(W), SMax + 1);
+ }
+ case CmpInst::ICMP_UGT: {
+ APInt UMin(CR.getUnsignedMin());
+ if (UMin.isMaxValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(UMin + 1, APInt::getNullValue(W));
+ }
+ case CmpInst::ICMP_SGT: {
+ APInt SMin(CR.getSignedMin());
+ if (SMin.isMaxSignedValue())
+ return ConstantRange(W, /* empty */ false);
+ return ConstantRange(SMin + 1, APInt::getSignedMinValue(W));
+ }
+ case CmpInst::ICMP_UGE: {
+ APInt UMin(CR.getUnsignedMin());
+ if (UMin.isMinValue())
+ return ConstantRange(W);
+ return ConstantRange(UMin, APInt::getNullValue(W));
+ }
+ case CmpInst::ICMP_SGE: {
+ APInt SMin(CR.getSignedMin());
+ if (SMin.isMinSignedValue())
+ return ConstantRange(W);
+ return ConstantRange(SMin, APInt::getSignedMinValue(W));
+ }
+ }
+}
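+// For example, makeAllowedICmpRegion(ICMP_ULT, [5, 10)) is [0, 9): exactly
+// the values that are unsigned-less-than some element of [5, 10).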
+
+ConstantRange ConstantRange::makeSatisfyingICmpRegion(CmpInst::Predicate Pred,
+ const ConstantRange &CR) {
+  // Follows from De Morgan's laws:
+ //
+ // ~(~A union ~B) == A intersect B.
+ //
+ return makeAllowedICmpRegion(CmpInst::getInversePredicate(Pred), CR)
+ .inverse();
+}
+
+ConstantRange ConstantRange::makeNoWrapRegion(Instruction::BinaryOps BinOp,
+ const APInt &C,
+ unsigned NoWrapKind) {
+ typedef OverflowingBinaryOperator OBO;
+
+ // Computes the intersection of CR0 and CR1. It is different from
+ // intersectWith in that the ConstantRange returned will only contain elements
+ // in both CR0 and CR1 (i.e. SubsetIntersect(X, Y) is a *subset*, proper or
+ // not, of both X and Y).
+ auto SubsetIntersect =
+ [](const ConstantRange &CR0, const ConstantRange &CR1) {
+ return CR0.inverse().unionWith(CR1.inverse()).inverse();
+ };
+
+ assert(BinOp >= Instruction::BinaryOpsBegin &&
+ BinOp < Instruction::BinaryOpsEnd && "Binary operators only!");
+
+ assert((NoWrapKind == OBO::NoSignedWrap ||
+ NoWrapKind == OBO::NoUnsignedWrap ||
+ NoWrapKind == (OBO::NoUnsignedWrap | OBO::NoSignedWrap)) &&
+ "NoWrapKind invalid!");
+
+ unsigned BitWidth = C.getBitWidth();
+ if (BinOp != Instruction::Add)
+ // Conservative answer: empty set
+ return ConstantRange(BitWidth, false);
+
+ if (C.isMinValue())
+ // Full set: nothing signed / unsigned wraps when added to 0.
+ return ConstantRange(BitWidth);
+
+ ConstantRange Result(BitWidth);
+
+ if (NoWrapKind & OBO::NoUnsignedWrap)
+ Result = SubsetIntersect(Result,
+ ConstantRange(APInt::getNullValue(BitWidth), -C));
+
+ if (NoWrapKind & OBO::NoSignedWrap) {
+ if (C.isStrictlyPositive())
+ Result = SubsetIntersect(
+ Result, ConstantRange(APInt::getSignedMinValue(BitWidth),
+ APInt::getSignedMinValue(BitWidth) - C));
+ else
+ Result = SubsetIntersect(
+ Result, ConstantRange(APInt::getSignedMinValue(BitWidth) - C,
+ APInt::getSignedMinValue(BitWidth)));
+ }
+
+ return Result;
+}
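+// For example, for BinOp == Add, C == 1 and NoWrapKind == NoUnsignedWrap the
+// region is [0, -1): x + 1 wraps (unsigned) only when x == UINT_MAX.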
+
+/// isFullSet - Return true if this set contains all of the elements possible
+/// for this data-type
+bool ConstantRange::isFullSet() const {
+ return Lower == Upper && Lower.isMaxValue();
+}
+
+/// isEmptySet - Return true if this set contains no members.
+///
+bool ConstantRange::isEmptySet() const {
+ return Lower == Upper && Lower.isMinValue();
+}
+
+/// isWrappedSet - Return true if this set wraps around the top of the range,
+/// for example: [100, 8)
+///
+bool ConstantRange::isWrappedSet() const {
+ return Lower.ugt(Upper);
+}
+
+/// isSignWrappedSet - Return true if this set wraps around the INT_MIN of
+/// its bitwidth, for example: i8 [120, 140).
+///
+bool ConstantRange::isSignWrappedSet() const {
+ return contains(APInt::getSignedMaxValue(getBitWidth())) &&
+ contains(APInt::getSignedMinValue(getBitWidth()));
+}
+
+/// getSetSize - Return the number of elements in this set.
+///
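+/// For example, the wrapped i8 range [100, 8) has 164 elements. The result
+/// uses bitwidth + 1 bits so that the full set's size (here 256) fits.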
+APInt ConstantRange::getSetSize() const {
+ if (isFullSet()) {
+ APInt Size(getBitWidth()+1, 0);
+ Size.setBit(getBitWidth());
+ return Size;
+ }
+
+ // This is also correct for wrapped sets.
+ return (Upper - Lower).zext(getBitWidth()+1);
+}
+
+/// getUnsignedMax - Return the largest unsigned value contained in the
+/// ConstantRange.
+///
+APInt ConstantRange::getUnsignedMax() const {
+ if (isFullSet() || isWrappedSet())
+ return APInt::getMaxValue(getBitWidth());
+ return getUpper() - 1;
+}
+
+/// getUnsignedMin - Return the smallest unsigned value contained in the
+/// ConstantRange.
+///
+APInt ConstantRange::getUnsignedMin() const {
+ if (isFullSet() || (isWrappedSet() && getUpper() != 0))
+ return APInt::getMinValue(getBitWidth());
+ return getLower();
+}
+
+/// getSignedMax - Return the largest signed value contained in the
+/// ConstantRange.
+///
+APInt ConstantRange::getSignedMax() const {
+ APInt SignedMax(APInt::getSignedMaxValue(getBitWidth()));
+ if (!isWrappedSet()) {
+ if (getLower().sle(getUpper() - 1))
+ return getUpper() - 1;
+ return SignedMax;
+ }
+ if (getLower().isNegative() == getUpper().isNegative())
+ return SignedMax;
+ return getUpper() - 1;
+}
+
+/// getSignedMin - Return the smallest signed value contained in the
+/// ConstantRange.
+///
+APInt ConstantRange::getSignedMin() const {
+ APInt SignedMin(APInt::getSignedMinValue(getBitWidth()));
+ if (!isWrappedSet()) {
+ if (getLower().sle(getUpper() - 1))
+ return getLower();
+ return SignedMin;
+ }
+ if ((getUpper() - 1).slt(getLower())) {
+ if (getUpper() != SignedMin)
+ return SignedMin;
+ }
+ return getLower();
+}
+
+/// contains - Return true if the specified value is in the set.
+///
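+/// For example, the wrapped i8 set [100, 8) contains 200 and 5 but not 50.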
+bool ConstantRange::contains(const APInt &V) const {
+ if (Lower == Upper)
+ return isFullSet();
+
+ if (!isWrappedSet())
+ return Lower.ule(V) && V.ult(Upper);
+ return Lower.ule(V) || V.ult(Upper);
+}
+
+/// contains - Return true if the argument is a subset of this range.
+/// Two equal sets contain each other. The empty set is contained by all
+/// other sets.
+///
+bool ConstantRange::contains(const ConstantRange &Other) const {
+ if (isFullSet() || Other.isEmptySet()) return true;
+ if (isEmptySet() || Other.isFullSet()) return false;
+
+ if (!isWrappedSet()) {
+ if (Other.isWrappedSet())
+ return false;
+
+ return Lower.ule(Other.getLower()) && Other.getUpper().ule(Upper);
+ }
+
+ if (!Other.isWrappedSet())
+ return Other.getUpper().ule(Upper) ||
+ Lower.ule(Other.getLower());
+
+ return Other.getUpper().ule(Upper) && Lower.ule(Other.getLower());
+}
+
+/// subtract - Subtract the specified constant from the endpoints of this
+/// constant range.
+ConstantRange ConstantRange::subtract(const APInt &Val) const {
+ assert(Val.getBitWidth() == getBitWidth() && "Wrong bit width");
+ // If the set is empty or full, don't modify the endpoints.
+ if (Lower == Upper)
+ return *this;
+ return ConstantRange(Lower - Val, Upper - Val);
+}
+
+/// \brief Subtract the specified range from this range (aka relative complement
+/// of the sets).
+ConstantRange ConstantRange::difference(const ConstantRange &CR) const {
+ return intersectWith(CR.inverse());
+}
+
+/// intersectWith - Return the range that results from the intersection of this
+/// range with another range. The resultant range is guaranteed to include all
+/// elements contained in both input ranges, and to have the smallest possible
+/// set size that does so. Because there may be two intersections with the
+/// same set size, A.intersectWith(B) might not be equal to B.intersectWith(A).
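+/// For example, [0, 5) intersected with [3, 8) is [3, 5).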
+ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
+ assert(getBitWidth() == CR.getBitWidth() &&
+ "ConstantRange types don't agree!");
+
+ // Handle common cases.
+ if ( isEmptySet() || CR.isFullSet()) return *this;
+ if (CR.isEmptySet() || isFullSet()) return CR;
+
+ if (!isWrappedSet() && CR.isWrappedSet())
+ return CR.intersectWith(*this);
+
+ if (!isWrappedSet() && !CR.isWrappedSet()) {
+ if (Lower.ult(CR.Lower)) {
+ if (Upper.ule(CR.Lower))
+ return ConstantRange(getBitWidth(), false);
+
+ if (Upper.ult(CR.Upper))
+ return ConstantRange(CR.Lower, Upper);
+
+ return CR;
+ }
+ if (Upper.ult(CR.Upper))
+ return *this;
+
+ if (Lower.ult(CR.Upper))
+ return ConstantRange(Lower, CR.Upper);
+
+ return ConstantRange(getBitWidth(), false);
+ }
+
+ if (isWrappedSet() && !CR.isWrappedSet()) {
+ if (CR.Lower.ult(Upper)) {
+ if (CR.Upper.ult(Upper))
+ return CR;
+
+ if (CR.Upper.ule(Lower))
+ return ConstantRange(CR.Lower, Upper);
+
+ if (getSetSize().ult(CR.getSetSize()))
+ return *this;
+ return CR;
+ }
+ if (CR.Lower.ult(Lower)) {
+ if (CR.Upper.ule(Lower))
+ return ConstantRange(getBitWidth(), false);
+
+ return ConstantRange(Lower, CR.Upper);
+ }
+ return CR;
+ }
+
+ if (CR.Upper.ult(Upper)) {
+ if (CR.Lower.ult(Upper)) {
+ if (getSetSize().ult(CR.getSetSize()))
+ return *this;
+ return CR;
+ }
+
+ if (CR.Lower.ult(Lower))
+ return ConstantRange(Lower, CR.Upper);
+
+ return CR;
+ }
+ if (CR.Upper.ule(Lower)) {
+ if (CR.Lower.ult(Lower))
+ return *this;
+
+ return ConstantRange(CR.Lower, Upper);
+ }
+ if (getSetSize().ult(CR.getSetSize()))
+ return *this;
+ return CR;
+}
+
+
+/// unionWith - Return the range that results from the union of this range with
+/// another range. The resultant range is guaranteed to include the elements of
+/// both sets, but may contain more. For example, [3, 9) union [12,15) is
+/// [3, 15), which includes 9, 10, and 11, which were not included in either
+/// set before.
+///
+ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
+ assert(getBitWidth() == CR.getBitWidth() &&
+ "ConstantRange types don't agree!");
+
+ if ( isFullSet() || CR.isEmptySet()) return *this;
+ if (CR.isFullSet() || isEmptySet()) return CR;
+
+ if (!isWrappedSet() && CR.isWrappedSet()) return CR.unionWith(*this);
+
+ if (!isWrappedSet() && !CR.isWrappedSet()) {
+ if (CR.Upper.ult(Lower) || Upper.ult(CR.Lower)) {
+ // If the two ranges are disjoint, find the smaller gap and bridge it.
+ APInt d1 = CR.Lower - Upper, d2 = Lower - CR.Upper;
+ if (d1.ult(d2))
+ return ConstantRange(Lower, CR.Upper);
+ return ConstantRange(CR.Lower, Upper);
+ }
+
+ APInt L = Lower, U = Upper;
+ if (CR.Lower.ult(L))
+ L = CR.Lower;
+ if ((CR.Upper - 1).ugt(U - 1))
+ U = CR.Upper;
+
+ if (L == 0 && U == 0)
+ return ConstantRange(getBitWidth());
+
+ return ConstantRange(L, U);
+ }
+
+ if (!CR.isWrappedSet()) {
+ // ------U L----- and ------U L----- : this
+ // L--U L--U : CR
+ if (CR.Upper.ule(Upper) || CR.Lower.uge(Lower))
+ return *this;
+
+ // ------U L----- : this
+ // L---------U : CR
+ if (CR.Lower.ule(Upper) && Lower.ule(CR.Upper))
+ return ConstantRange(getBitWidth());
+
+ // ----U L---- : this
+ // L---U : CR
+ // <d1> <d2>
+ if (Upper.ule(CR.Lower) && CR.Upper.ule(Lower)) {
+ APInt d1 = CR.Lower - Upper, d2 = Lower - CR.Upper;
+ if (d1.ult(d2))
+ return ConstantRange(Lower, CR.Upper);
+ return ConstantRange(CR.Lower, Upper);
+ }
+
+ // ----U L----- : this
+ // L----U : CR
+ if (Upper.ult(CR.Lower) && Lower.ult(CR.Upper))
+ return ConstantRange(CR.Lower, Upper);
+
+ // ------U L---- : this
+ // L-----U : CR
+ assert(CR.Lower.ult(Upper) && CR.Upper.ult(Lower) &&
+ "ConstantRange::unionWith missed a case with one range wrapped");
+ return ConstantRange(Lower, CR.Upper);
+ }
+
+ // ------U L---- and ------U L---- : this
+ // -U L----------- and ------------U L : CR
+ if (CR.Lower.ule(Upper) || Lower.ule(CR.Upper))
+ return ConstantRange(getBitWidth());
+
+ APInt L = Lower, U = Upper;
+ if (CR.Upper.ugt(U))
+ U = CR.Upper;
+ if (CR.Lower.ult(L))
+ L = CR.Lower;
+
+ return ConstantRange(L, U);
+}
+
+/// zeroExtend - Return a new range in the specified integer type, which must
+/// be strictly larger than the current type. The returned range will
+/// correspond to the possible range of values as if the source range had been
+/// zero extended.
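+/// For example, zero extending the wrapped i8 range [250, 5) to i16 yields
+/// [0, 256), since the source values occupy both ends of the i8 range.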
+ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
+ if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
+
+ unsigned SrcTySize = getBitWidth();
+ assert(SrcTySize < DstTySize && "Not a value extension");
+ if (isFullSet() || isWrappedSet()) {
+ // Change into [0, 1 << src bit width)
+ APInt LowerExt(DstTySize, 0);
+ if (!Upper) // special case: [X, 0) -- not really wrapping around
+ LowerExt = Lower.zext(DstTySize);
+ return ConstantRange(LowerExt, APInt::getOneBitSet(DstTySize, SrcTySize));
+ }
+
+ return ConstantRange(Lower.zext(DstTySize), Upper.zext(DstTySize));
+}
+
+/// signExtend - Return a new range in the specified integer type, which must
+/// be strictly larger than the current type. The returned range will
+/// correspond to the possible range of values as if the source range had been
+/// sign extended.
+ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
+ if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
+
+ unsigned SrcTySize = getBitWidth();
+ assert(SrcTySize < DstTySize && "Not a value extension");
+
+ // special case: [X, INT_MIN) -- not really wrapping around
+ if (Upper.isMinSignedValue())
+ return ConstantRange(Lower.sext(DstTySize), Upper.zext(DstTySize));
+
+ if (isFullSet() || isSignWrappedSet()) {
+ return ConstantRange(APInt::getHighBitsSet(DstTySize,DstTySize-SrcTySize+1),
+ APInt::getLowBitsSet(DstTySize, SrcTySize-1) + 1);
+ }
+
+ return ConstantRange(Lower.sext(DstTySize), Upper.sext(DstTySize));
+}
+
+/// truncate - Return a new range in the specified integer type, which must be
+/// strictly smaller than the current type. The returned range will
+/// correspond to the possible range of values as if the source range had been
+/// truncated to the specified type.
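+/// For example, truncating the i16 range [100, 200) to i8 yields [100, 200),
+/// while truncating i16 [0, 300) yields the full i8 set because the source
+/// covers every residue modulo 256.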
+ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
+ assert(getBitWidth() > DstTySize && "Not a value truncation");
+ if (isEmptySet())
+ return ConstantRange(DstTySize, /*isFullSet=*/false);
+ if (isFullSet())
+ return ConstantRange(DstTySize, /*isFullSet=*/true);
+
+ APInt MaxValue = APInt::getMaxValue(DstTySize).zext(getBitWidth());
+ APInt MaxBitValue(getBitWidth(), 0);
+ MaxBitValue.setBit(DstTySize);
+
+ APInt LowerDiv(Lower), UpperDiv(Upper);
+ ConstantRange Union(DstTySize, /*isFullSet=*/false);
+
+ // Analyze wrapped sets in their two parts: [0, Upper) \/ [Lower, MaxValue]
+ // We use the non-wrapped set code to analyze the [Lower, MaxValue) part, and
+ // then we do the union with [MaxValue, Upper)
+ if (isWrappedSet()) {
+    // If Upper is greater than or equal to MaxValue, it covers the whole
+    // truncated range.
+ if (Upper.uge(MaxValue))
+ return ConstantRange(DstTySize, /*isFullSet=*/true);
+
+ Union = ConstantRange(APInt::getMaxValue(DstTySize),Upper.trunc(DstTySize));
+ UpperDiv = APInt::getMaxValue(getBitWidth());
+
+ // Union covers the MaxValue case, so return if the remaining range is just
+ // MaxValue.
+ if (LowerDiv == UpperDiv)
+ return Union;
+ }
+
+ // Chop off the most significant bits that are past the destination bitwidth.
+ if (LowerDiv.uge(MaxValue)) {
+ APInt Div(getBitWidth(), 0);
+ APInt::udivrem(LowerDiv, MaxBitValue, Div, LowerDiv);
+ UpperDiv = UpperDiv - MaxBitValue * Div;
+ }
+
+ if (UpperDiv.ule(MaxValue))
+ return ConstantRange(LowerDiv.trunc(DstTySize),
+ UpperDiv.trunc(DstTySize)).unionWith(Union);
+
+  // The truncated value wraps around. Check if we can do better than the
+  // full set.
+ APInt UpperModulo = UpperDiv - MaxBitValue;
+ if (UpperModulo.ult(LowerDiv))
+ return ConstantRange(LowerDiv.trunc(DstTySize),
+ UpperModulo.trunc(DstTySize)).unionWith(Union);
+
+ return ConstantRange(DstTySize, /*isFullSet=*/true);
+}
+
+/// zextOrTrunc - make this range have the bit width given by \p DstTySize. The
+/// value is zero extended, truncated, or left alone to make it that width.
+ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const {
+ unsigned SrcTySize = getBitWidth();
+ if (SrcTySize > DstTySize)
+ return truncate(DstTySize);
+ if (SrcTySize < DstTySize)
+ return zeroExtend(DstTySize);
+ return *this;
+}
+
+/// sextOrTrunc - make this range have the bit width given by \p DstTySize. The
+/// value is sign extended, truncated, or left alone to make it that width.
+ConstantRange ConstantRange::sextOrTrunc(uint32_t DstTySize) const {
+ unsigned SrcTySize = getBitWidth();
+ if (SrcTySize > DstTySize)
+ return truncate(DstTySize);
+ if (SrcTySize < DstTySize)
+ return signExtend(DstTySize);
+ return *this;
+}
+
+ConstantRange
+ConstantRange::add(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ if (isFullSet() || Other.isFullSet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ APInt Spread_X = getSetSize(), Spread_Y = Other.getSetSize();
+ APInt NewLower = getLower() + Other.getLower();
+ APInt NewUpper = getUpper() + Other.getUpper() - 1;
+ if (NewLower == NewUpper)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ ConstantRange X = ConstantRange(NewLower, NewUpper);
+ if (X.getSetSize().ult(Spread_X) || X.getSetSize().ult(Spread_Y))
+ // We've wrapped, therefore, full set.
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ return X;
+}
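+// For example, [1, 5) + [10, 20) is [11, 24): the smallest sum is 1 + 10 and
+// the largest is 4 + 19.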
+
+ConstantRange
+ConstantRange::sub(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ if (isFullSet() || Other.isFullSet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ APInt Spread_X = getSetSize(), Spread_Y = Other.getSetSize();
+ APInt NewLower = getLower() - Other.getUpper() + 1;
+ APInt NewUpper = getUpper() - Other.getLower();
+ if (NewLower == NewUpper)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ ConstantRange X = ConstantRange(NewLower, NewUpper);
+ if (X.getSetSize().ult(Spread_X) || X.getSetSize().ult(Spread_Y))
+ // We've wrapped, therefore, full set.
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ return X;
+}
+
+ConstantRange
+ConstantRange::multiply(const ConstantRange &Other) const {
+ // TODO: If either operand is a single element and the multiply is known to
+ // be non-wrapping, round the result min and max value to the appropriate
+ // multiple of that element. If wrapping is possible, at least adjust the
+ // range according to the greatest power-of-two factor of the single element.
+
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ // Multiplication is signedness-independent. However different ranges can be
+ // obtained depending on how the input ranges are treated. These different
+ // ranges are all conservatively correct, but one might be better than the
+ // other. We calculate two ranges; one treating the inputs as unsigned
+ // and the other signed, then return the smallest of these ranges.
+
+ // Unsigned range first.
+ APInt this_min = getUnsignedMin().zext(getBitWidth() * 2);
+ APInt this_max = getUnsignedMax().zext(getBitWidth() * 2);
+ APInt Other_min = Other.getUnsignedMin().zext(getBitWidth() * 2);
+ APInt Other_max = Other.getUnsignedMax().zext(getBitWidth() * 2);
+
+ ConstantRange Result_zext = ConstantRange(this_min * Other_min,
+ this_max * Other_max + 1);
+ ConstantRange UR = Result_zext.truncate(getBitWidth());
+
+ // Now the signed range. Because we could be dealing with negative numbers
+ // here, the lower bound is the smallest of the cartesian product of the
+ // lower and upper ranges; for example:
+ // [-1,4) * [-2,3) = min(-1*-2, -1*2, 3*-2, 3*2) = -6.
+ // Similarly for the upper bound, swapping min for max.
+
+ this_min = getSignedMin().sext(getBitWidth() * 2);
+ this_max = getSignedMax().sext(getBitWidth() * 2);
+ Other_min = Other.getSignedMin().sext(getBitWidth() * 2);
+ Other_max = Other.getSignedMax().sext(getBitWidth() * 2);
+
+ auto L = {this_min * Other_min, this_min * Other_max,
+ this_max * Other_min, this_max * Other_max};
+ auto Compare = [](const APInt &A, const APInt &B) { return A.slt(B); };
+ ConstantRange Result_sext(std::min(L, Compare), std::max(L, Compare) + 1);
+ ConstantRange SR = Result_sext.truncate(getBitWidth());
+
+ return UR.getSetSize().ult(SR.getSetSize()) ? UR : SR;
+}
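+// For example, [3, 5) * [2, 4) is [6, 13): products range from 3 * 2 = 6 up
+// to 4 * 3 = 12.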
+
+ConstantRange
+ConstantRange::smax(const ConstantRange &Other) const {
+ // X smax Y is: range(smax(X_smin, Y_smin),
+ // smax(X_smax, Y_smax))
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ APInt NewL = APIntOps::smax(getSignedMin(), Other.getSignedMin());
+ APInt NewU = APIntOps::smax(getSignedMax(), Other.getSignedMax()) + 1;
+ if (NewU == NewL)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ return ConstantRange(NewL, NewU);
+}
+
+ConstantRange
+ConstantRange::umax(const ConstantRange &Other) const {
+ // X umax Y is: range(umax(X_umin, Y_umin),
+ // umax(X_umax, Y_umax))
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ APInt NewL = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());
+ APInt NewU = APIntOps::umax(getUnsignedMax(), Other.getUnsignedMax()) + 1;
+ if (NewU == NewL)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ return ConstantRange(NewL, NewU);
+}
+
+ConstantRange
+ConstantRange::udiv(const ConstantRange &RHS) const {
+ if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax() == 0)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ if (RHS.isFullSet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ APInt Lower = getUnsignedMin().udiv(RHS.getUnsignedMax());
+
+ APInt RHS_umin = RHS.getUnsignedMin();
+ if (RHS_umin == 0) {
+ // We want the lowest value in RHS excluding zero. Usually that would be 1
+ // except for a range in the form of [X, 1) in which case it would be X.
+ if (RHS.getUpper() == 1)
+ RHS_umin = RHS.getLower();
+ else
+ RHS_umin = APInt(getBitWidth(), 1);
+ }
+
+ APInt Upper = getUnsignedMax().udiv(RHS_umin) + 1;
+
+  // This can occur if the LHS is full and the RHS is a wrapped interval
+  // containing 1; in that case the result is the full set.
+ if (Lower == Upper)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ return ConstantRange(Lower, Upper);
+}
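+// For example, [10, 20) udiv [2, 4) is [3, 10): the minimum quotient is
+// 10 / 3 == 3 and the maximum is 19 / 2 == 9.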
+
+ConstantRange
+ConstantRange::binaryAnd(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ // TODO: replace this with something less conservative
+
+ APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax());
+ if (umin.isAllOnesValue())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ return ConstantRange(APInt::getNullValue(getBitWidth()), umin + 1);
+}
+
+ConstantRange
+ConstantRange::binaryOr(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ // TODO: replace this with something less conservative
+
+ APInt umax = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());
+ if (umax.isMinValue())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ return ConstantRange(umax, APInt::getNullValue(getBitWidth()));
+}
+
+ConstantRange
+ConstantRange::shl(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ APInt min = getUnsignedMin().shl(Other.getUnsignedMin());
+ APInt max = getUnsignedMax().shl(Other.getUnsignedMax());
+
+  // There is no overflow if the number of leading zeros in the maximum value
+  // exceeds the largest shift amount.
+ APInt Zeros(getBitWidth(), getUnsignedMax().countLeadingZeros());
+ if (Zeros.ugt(Other.getUnsignedMax()))
+ return ConstantRange(min, max + 1);
+
+ // FIXME: implement the other tricky cases
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+}
+
+ConstantRange
+ConstantRange::lshr(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ APInt max = getUnsignedMax().lshr(Other.getUnsignedMin());
+ APInt min = getUnsignedMin().lshr(Other.getUnsignedMax());
+ if (min == max + 1)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ return ConstantRange(min, max + 1);
+}
+
+ConstantRange ConstantRange::inverse() const {
+ if (isFullSet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ if (isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ return ConstantRange(Upper, Lower);
+}
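+// For example, the inverse of [5, 10) is the wrapped set [10, 5).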
+
+/// print - Print out the bounds to a stream...
+///
+void ConstantRange::print(raw_ostream &OS) const {
+ if (isFullSet())
+ OS << "full-set";
+ else if (isEmptySet())
+ OS << "empty-set";
+ else
+ OS << "[" << Lower << "," << Upper << ")";
+}
+
+/// dump - Allow printing from a debugger easily...
+///
+void ConstantRange::dump() const {
+ print(dbgs());
+}
diff --git a/contrib/llvm/lib/IR/Constants.cpp b/contrib/llvm/lib/IR/Constants.cpp
new file mode 100644
index 0000000..0898bf6
--- /dev/null
+++ b/contrib/llvm/lib/IR/Constants.cpp
@@ -0,0 +1,3040 @@
+//===-- Constants.cpp - Implement Constant nodes --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Constant* classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Constants.h"
+#include "ConstantFold.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdarg>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Constant Class
+//===----------------------------------------------------------------------===//
+
+void Constant::anchor() { }
+
+bool Constant::isNegativeZeroValue() const {
+ // Floating point values have an explicit -0.0 value.
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->isZero() && CFP->isNegative();
+
+ // Equivalent for a vector of -0.0's.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (ConstantFP *SplatCFP = dyn_cast_or_null<ConstantFP>(CV->getSplatValue()))
+ if (SplatCFP && SplatCFP->isZero() && SplatCFP->isNegative())
+ return true;
+
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ if (ConstantFP *SplatCFP = dyn_cast_or_null<ConstantFP>(CV->getSplatValue()))
+ if (SplatCFP && SplatCFP->isZero() && SplatCFP->isNegative())
+ return true;
+
+  // We've already handled the true FP cases above; no other FP vector
+  // constant can represent -0.0.
+ if (getType()->isFPOrFPVectorTy())
+ return false;
+
+ // Otherwise, just use +0.0.
+ return isNullValue();
+}
+
+// Return true iff this constant is positive zero (floating point), negative
+// zero (floating point), or a null value.
+bool Constant::isZeroValue() const {
+ // Floating point values have an explicit -0.0 value.
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->isZero();
+
+ // Equivalent for a vector of -0.0's.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (ConstantFP *SplatCFP = dyn_cast_or_null<ConstantFP>(CV->getSplatValue()))
+ if (SplatCFP && SplatCFP->isZero())
+ return true;
+
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ if (ConstantFP *SplatCFP = dyn_cast_or_null<ConstantFP>(CV->getSplatValue()))
+ if (SplatCFP && SplatCFP->isZero())
+ return true;
+
+ // Otherwise, just use +0.0.
+ return isNullValue();
+}
+
+bool Constant::isNullValue() const {
+ // 0 is null.
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isZero();
+
+ // +0.0 is null.
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->isZero() && !CFP->isNegative();
+
+  // ConstantAggregateZero is null for aggregates, ConstantPointerNull is
+  // null for pointers, and ConstantTokenNone is null for tokens.
+ return isa<ConstantAggregateZero>(this) || isa<ConstantPointerNull>(this) ||
+ isa<ConstantTokenNone>(this);
+}
+
+bool Constant::isAllOnesValue() const {
+ // Check for -1 integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isMinusOne();
+
+ // Check for FP which are bitcasted from -1 integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isAllOnesValue();
+
+ // Check for constant vectors which are splats of -1 values.
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isAllOnesValue();
+
+ // Check for constant vectors which are splats of -1 values.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isAllOnesValue();
+
+ return false;
+}
+
+bool Constant::isOneValue() const {
+ // Check for 1 integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isOne();
+
+ // Check for FP which are bitcasted from 1 integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt() == 1;
+
+ // Check for constant vectors which are splats of 1 values.
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isOneValue();
+
+ // Check for constant vectors which are splats of 1 values.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isOneValue();
+
+ return false;
+}
+
+bool Constant::isMinSignedValue() const {
+ // Check for INT_MIN integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isMinValue(/*isSigned=*/true);
+
+ // Check for FP which are bitcasted from INT_MIN integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isMinSignedValue();
+
+ // Check for constant vectors which are splats of INT_MIN values.
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isMinSignedValue();
+
+ // Check for constant vectors which are splats of INT_MIN values.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isMinSignedValue();
+
+ return false;
+}
+
+bool Constant::isNotMinSignedValue() const {
+ // Check for INT_MIN integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return !CI->isMinValue(/*isSigned=*/true);
+
+ // Check for FP which are bitcasted from INT_MIN integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return !CFP->getValueAPF().bitcastToAPInt().isMinSignedValue();
+
+ // Check for constant vectors which are splats of INT_MIN values.
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isNotMinSignedValue();
+
+ // Check for constant vectors which are splats of INT_MIN values.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isNotMinSignedValue();
+
+ // It *may* contain INT_MIN, we can't tell.
+ return false;
+}
+
+// Factory function to create a '0' constant of arbitrary type...
+Constant *Constant::getNullValue(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ return ConstantInt::get(Ty, 0);
+ case Type::HalfTyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::IEEEhalf));
+ case Type::FloatTyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::IEEEsingle));
+ case Type::DoubleTyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::IEEEdouble));
+ case Type::X86_FP80TyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::x87DoubleExtended));
+ case Type::FP128TyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::IEEEquad));
+ case Type::PPC_FP128TyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat(APFloat::PPCDoubleDouble,
+ APInt::getNullValue(128)));
+ case Type::PointerTyID:
+ return ConstantPointerNull::get(cast<PointerType>(Ty));
+ case Type::StructTyID:
+ case Type::ArrayTyID:
+ case Type::VectorTyID:
+ return ConstantAggregateZero::get(Ty);
+ case Type::TokenTyID:
+ return ConstantTokenNone::get(Ty->getContext());
+ default:
+ // Function, Label, or Opaque type?
+ llvm_unreachable("Cannot create a null constant of that type!");
+ }
+}
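+
+// An editor's illustrative sketch (not part of the original file; Ctx is an
+// assumed LLVMContext): scalars get their obvious zero, while aggregates are
+// canonicalized to a single ConstantAggregateZero node.
+//   Constant *Z1 = Constant::getNullValue(Type::getInt32Ty(Ctx)); // i32 0
+//   Constant *Z2 = Constant::getNullValue(
+//       VectorType::get(Type::getInt32Ty(Ctx), 4)); // zeroinitializer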
+
+Constant *Constant::getIntegerValue(Type *Ty, const APInt &V) {
+ Type *ScalarTy = Ty->getScalarType();
+
+ // Create the base integer constant.
+ Constant *C = ConstantInt::get(Ty->getContext(), V);
+
+ // Convert an integer to a pointer, if necessary.
+ if (PointerType *PTy = dyn_cast<PointerType>(ScalarTy))
+ C = ConstantExpr::getIntToPtr(C, PTy);
+
+ // Broadcast a scalar to a vector, if necessary.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ C = ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
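+
+// Editor's hedged sketch (Ctx is an assumed LLVMContext): an APInt can be
+// materialized directly as a pointer constant via the inttoptr step above.
+//   Constant *P = Constant::getIntegerValue(Type::getInt8PtrTy(Ctx),
+//                                           APInt(64, 0x1000));
+//   // P is 'inttoptr (i64 4096 to i8*)'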
+
+Constant *Constant::getAllOnesValue(Type *Ty) {
+ if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
+ return ConstantInt::get(Ty->getContext(),
+ APInt::getAllOnesValue(ITy->getBitWidth()));
+
+ if (Ty->isFloatingPointTy()) {
+ APFloat FL = APFloat::getAllOnesValue(Ty->getPrimitiveSizeInBits(),
+ !Ty->isPPC_FP128Ty());
+ return ConstantFP::get(Ty->getContext(), FL);
+ }
+
+ VectorType *VTy = cast<VectorType>(Ty);
+ return ConstantVector::getSplat(VTy->getNumElements(),
+ getAllOnesValue(VTy->getElementType()));
+}
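+
+// Editor's sketch (Ctx assumed): for an integer type this is the -1 bit
+// pattern; for a vector it is a splat of that pattern.
+//   Constant *M = Constant::getAllOnesValue(Type::getInt8Ty(Ctx)); // i8 -1
+//   assert(M->isAllOnesValue());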
+
+/// getAggregateElement - For aggregates (struct/array/vector) return the
+/// constant that corresponds to the specified element if possible, or null if
+/// not. This can return null if the element index is a ConstantExpr, or if
+/// 'this' is a constant expr.
+Constant *Constant::getAggregateElement(unsigned Elt) const {
+ if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(this))
+ return Elt < CS->getNumOperands() ? CS->getOperand(Elt) : nullptr;
+
+ if (const ConstantArray *CA = dyn_cast<ConstantArray>(this))
+ return Elt < CA->getNumOperands() ? CA->getOperand(Elt) : nullptr;
+
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ return Elt < CV->getNumOperands() ? CV->getOperand(Elt) : nullptr;
+
+ if (const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(this))
+ return Elt < CAZ->getNumElements() ? CAZ->getElementValue(Elt) : nullptr;
+
+ if (const UndefValue *UV = dyn_cast<UndefValue>(this))
+ return Elt < UV->getNumElements() ? UV->getElementValue(Elt) : nullptr;
+
+  if (const ConstantDataSequential *CDS =
+          dyn_cast<ConstantDataSequential>(this))
+ return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt)
+ : nullptr;
+ return nullptr;
+}
+
+Constant *Constant::getAggregateElement(Constant *Elt) const {
+ assert(isa<IntegerType>(Elt->getType()) && "Index must be an integer");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Elt))
+ return getAggregateElement(CI->getZExtValue());
+ return nullptr;
+}
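+
+// Editor's sketch of indexed access (Ctx assumed): the element comes back as
+// a Constant regardless of how the aggregate is stored internally.
+//   uint32_t Vals[] = {1, 2, 3};
+//   Constant *CDA = ConstantDataArray::get(Ctx, Vals);
+//   Constant *E1 = CDA->getAggregateElement(1U); // i32 2
+//   Constant *E9 = CDA->getAggregateElement(9U); // nullptr: out of bounds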
+
+void Constant::destroyConstant() {
+  // First call destroyConstantImpl on the subclass. This gives the subclass
+  // a chance to remove the constant from any maps/pools it's contained in.
+ switch (getValueID()) {
+ default:
+ llvm_unreachable("Not a constant!");
+#define HANDLE_CONSTANT(Name) \
+ case Value::Name##Val: \
+ cast<Name>(this)->destroyConstantImpl(); \
+ break;
+#include "llvm/IR/Value.def"
+ }
+
+  // When a Constant is destroyed, there may be lingering references to it
+  // from other constants in the constant pool. These constants are
+  // implicitly dependent on the module that is being deleted, but they don't
+  // know that. Because we only find out when this constant is deleted, we
+  // must now notify all of our users (which should only be Constants) that
+  // they are, in fact, invalid now and should be deleted.
+  //
+ while (!use_empty()) {
+ Value *V = user_back();
+#ifndef NDEBUG // Only in builds with assertions enabled.
+ if (!isa<Constant>(V)) {
+ dbgs() << "While deleting: " << *this
+ << "\n\nUse still stuck around after Def is destroyed: " << *V
+ << "\n\n";
+ }
+#endif
+ assert(isa<Constant>(V) && "References remain to Constant being destroyed");
+ cast<Constant>(V)->destroyConstant();
+
+ // The constant should remove itself from our use list...
+ assert((use_empty() || user_back() != V) && "Constant not removed!");
+ }
+
+  // The value has no outstanding references; it is safe to delete it now.
+ delete this;
+}
+
+static bool canTrapImpl(const Constant *C,
+ SmallPtrSetImpl<const ConstantExpr *> &NonTrappingOps) {
+ assert(C->getType()->isFirstClassType() && "Cannot evaluate aggregate vals!");
+ // The only thing that could possibly trap are constant exprs.
+ const ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
+ if (!CE)
+ return false;
+
+ // ConstantExpr traps if any operands can trap.
+ for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
+ if (ConstantExpr *Op = dyn_cast<ConstantExpr>(CE->getOperand(i))) {
+ if (NonTrappingOps.insert(Op).second && canTrapImpl(Op, NonTrappingOps))
+ return true;
+ }
+ }
+
+ // Otherwise, only specific operations can trap.
+ switch (CE->getOpcode()) {
+ default:
+ return false;
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ // Div and rem can trap if the RHS is not known to be non-zero.
+    if (!isa<ConstantInt>(CE->getOperand(1)) ||
+        CE->getOperand(1)->isNullValue())
+ return true;
+ return false;
+ }
+}
+
+/// canTrap - Return true if evaluation of this constant could trap. This is
+/// true for things like constant expressions that could divide by zero.
+bool Constant::canTrap() const {
+ SmallPtrSet<const ConstantExpr *, 4> NonTrappingOps;
+ return canTrapImpl(this, NonTrappingOps);
+}
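+
+// Editor's IR-level illustration: a constant division whose divisor is not a
+// plain integer can trap when materialized, e.g. (with an assumed global @g)
+//   sdiv (i32 1, i32 ptrtoint (i32* @g to i32))
+// canTrap() is true here because ptrtoint(@g) is not known to be non-zero.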
+
+/// Check if C contains a GlobalValue for which Predicate is true.
+static bool
+ConstHasGlobalValuePredicate(const Constant *C,
+ bool (*Predicate)(const GlobalValue *)) {
+ SmallPtrSet<const Constant *, 8> Visited;
+ SmallVector<const Constant *, 8> WorkList;
+ WorkList.push_back(C);
+ Visited.insert(C);
+
+ while (!WorkList.empty()) {
+ const Constant *WorkItem = WorkList.pop_back_val();
+ if (const auto *GV = dyn_cast<GlobalValue>(WorkItem))
+ if (Predicate(GV))
+ return true;
+ for (const Value *Op : WorkItem->operands()) {
+ const Constant *ConstOp = dyn_cast<Constant>(Op);
+ if (!ConstOp)
+ continue;
+ if (Visited.insert(ConstOp).second)
+ WorkList.push_back(ConstOp);
+ }
+ }
+ return false;
+}
+
+/// Return true if the value can vary between threads.
+bool Constant::isThreadDependent() const {
+  auto ThreadLocalPredicate = [](const GlobalValue *GV) {
+    return GV->isThreadLocal();
+  };
+  return ConstHasGlobalValuePredicate(this, ThreadLocalPredicate);
+}
+
+bool Constant::isDLLImportDependent() const {
+ auto DLLImportPredicate = [](const GlobalValue *GV) {
+ return GV->hasDLLImportStorageClass();
+ };
+ return ConstHasGlobalValuePredicate(this, DLLImportPredicate);
+}
+
+/// Return true if the constant has users other than constant exprs and other
+/// dangling things.
+bool Constant::isConstantUsed() const {
+ for (const User *U : users()) {
+ const Constant *UC = dyn_cast<Constant>(U);
+ if (!UC || isa<GlobalValue>(UC))
+ return true;
+
+ if (UC->isConstantUsed())
+ return true;
+ }
+ return false;
+}
+
+bool Constant::needsRelocation() const {
+ if (isa<GlobalValue>(this))
+ return true; // Global reference.
+
+ if (const BlockAddress *BA = dyn_cast<BlockAddress>(this))
+ return BA->getFunction()->needsRelocation();
+
+ // While raw uses of blockaddress need to be relocated, differences between
+ // two of them don't when they are for labels in the same function. This is a
+ // common idiom when creating a table for the indirect goto extension, so we
+ // handle it efficiently here.
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(this))
+ if (CE->getOpcode() == Instruction::Sub) {
+ ConstantExpr *LHS = dyn_cast<ConstantExpr>(CE->getOperand(0));
+ ConstantExpr *RHS = dyn_cast<ConstantExpr>(CE->getOperand(1));
+ if (LHS && RHS && LHS->getOpcode() == Instruction::PtrToInt &&
+ RHS->getOpcode() == Instruction::PtrToInt &&
+ isa<BlockAddress>(LHS->getOperand(0)) &&
+ isa<BlockAddress>(RHS->getOperand(0)) &&
+ cast<BlockAddress>(LHS->getOperand(0))->getFunction() ==
+ cast<BlockAddress>(RHS->getOperand(0))->getFunction())
+ return false;
+ }
+
+ bool Result = false;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ Result |= cast<Constant>(getOperand(i))->needsRelocation();
+
+ return Result;
+}
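+
+// Editor's IR-level illustration of the label-difference idiom recognized
+// above:
+//   sub (i64 ptrtoint (i8* blockaddress(@f, %bb1) to i64),
+//        i64 ptrtoint (i8* blockaddress(@f, %bb2) to i64))
+// needs no relocation because both labels live in the same function @f.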
+
+/// removeDeadUsersOfConstant - If the specified constantexpr is dead, remove
+/// it. This involves recursively eliminating any dead users of the
+/// constantexpr.
+static bool removeDeadUsersOfConstant(const Constant *C) {
+ if (isa<GlobalValue>(C)) return false; // Cannot remove this
+
+ while (!C->use_empty()) {
+ const Constant *User = dyn_cast<Constant>(C->user_back());
+    if (!User) return false; // Non-constant usage.
+ if (!removeDeadUsersOfConstant(User))
+ return false; // Constant wasn't dead
+ }
+
+ const_cast<Constant*>(C)->destroyConstant();
+ return true;
+}
+
+
+/// removeDeadConstantUsers - If there are any dead constant users dangling
+/// off of this constant, remove them. This method is useful for clients
+/// that want to check to see if a global is unused, but don't want to deal
+/// with potentially dead constants hanging off of the globals.
+void Constant::removeDeadConstantUsers() const {
+ Value::const_user_iterator I = user_begin(), E = user_end();
+ Value::const_user_iterator LastNonDeadUser = E;
+ while (I != E) {
+ const Constant *User = dyn_cast<Constant>(*I);
+ if (!User) {
+ LastNonDeadUser = I;
+ ++I;
+ continue;
+ }
+
+ if (!removeDeadUsersOfConstant(User)) {
+ // If the constant wasn't dead, remember that this was the last live use
+ // and move on to the next constant.
+ LastNonDeadUser = I;
+ ++I;
+ continue;
+ }
+
+ // If the constant was dead, then the iterator is invalidated.
+ if (LastNonDeadUser == E) {
+ I = user_begin();
+ if (I == E) break;
+ } else {
+ I = LastNonDeadUser;
+ ++I;
+ }
+ }
+}
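+
+// Editor's sketch of the intended client pattern (GV is an assumed
+// GlobalVariable*): strip dead constantexpr users first so that the
+// use_empty() liveness check below is meaningful.
+//   GV->removeDeadConstantUsers();
+//   if (GV->use_empty())
+//     GV->eraseFromParent(); // no real users remained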
+
+
+
+//===----------------------------------------------------------------------===//
+// ConstantInt
+//===----------------------------------------------------------------------===//
+
+void ConstantInt::anchor() { }
+
+ConstantInt::ConstantInt(IntegerType *Ty, const APInt& V)
+ : Constant(Ty, ConstantIntVal, nullptr, 0), Val(V) {
+ assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type");
+}
+
+ConstantInt *ConstantInt::getTrue(LLVMContext &Context) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ if (!pImpl->TheTrueVal)
+ pImpl->TheTrueVal = ConstantInt::get(Type::getInt1Ty(Context), 1);
+ return pImpl->TheTrueVal;
+}
+
+ConstantInt *ConstantInt::getFalse(LLVMContext &Context) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ if (!pImpl->TheFalseVal)
+ pImpl->TheFalseVal = ConstantInt::get(Type::getInt1Ty(Context), 0);
+ return pImpl->TheFalseVal;
+}
+
+Constant *ConstantInt::getTrue(Type *Ty) {
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ if (!VTy) {
+ assert(Ty->isIntegerTy(1) && "True must be i1 or vector of i1.");
+ return ConstantInt::getTrue(Ty->getContext());
+ }
+ assert(VTy->getElementType()->isIntegerTy(1) &&
+ "True must be vector of i1 or i1.");
+ return ConstantVector::getSplat(VTy->getNumElements(),
+ ConstantInt::getTrue(Ty->getContext()));
+}
+
+Constant *ConstantInt::getFalse(Type *Ty) {
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ if (!VTy) {
+ assert(Ty->isIntegerTy(1) && "False must be i1 or vector of i1.");
+ return ConstantInt::getFalse(Ty->getContext());
+ }
+ assert(VTy->getElementType()->isIntegerTy(1) &&
+ "False must be vector of i1 or i1.");
+ return ConstantVector::getSplat(VTy->getNumElements(),
+ ConstantInt::getFalse(Ty->getContext()));
+}
+
+// Get a ConstantInt from an APInt.
+ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) {
+  // Get the existing constant for this value, or the slot to create one in.
+ LLVMContextImpl *pImpl = Context.pImpl;
+ ConstantInt *&Slot = pImpl->IntConstants[V];
+ if (!Slot) {
+ // Get the corresponding integer type for the bit width of the value.
+ IntegerType *ITy = IntegerType::get(Context, V.getBitWidth());
+ Slot = new ConstantInt(ITy, V);
+ }
+ assert(Slot->getType() == IntegerType::get(Context, V.getBitWidth()));
+ return Slot;
+}
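+
+// Editor's sketch (Ctx assumed): because ConstantInts are uniqued per
+// context, pointer equality is value equality.
+//   ConstantInt *A = ConstantInt::get(Ctx, APInt(32, 42));
+//   ConstantInt *B = ConstantInt::get(Ctx, APInt(32, 42));
+//   assert(A == B && "same value, same node");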
+
+Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
+ Constant *C = get(cast<IntegerType>(Ty->getScalarType()), V, isSigned);
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
+ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V,
+ bool isSigned) {
+ return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned));
+}
+
+ConstantInt *ConstantInt::getSigned(IntegerType *Ty, int64_t V) {
+ return get(Ty, V, true);
+}
+
+Constant *ConstantInt::getSigned(Type *Ty, int64_t V) {
+ return get(Ty, V, true);
+}
+
+Constant *ConstantInt::get(Type *Ty, const APInt& V) {
+ ConstantInt *C = get(Ty->getContext(), V);
+ assert(C->getType() == Ty->getScalarType() &&
+ "ConstantInt type doesn't match the type implied by its value!");
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
+ConstantInt *ConstantInt::get(IntegerType* Ty, StringRef Str,
+ uint8_t radix) {
+ return get(Ty->getContext(), APInt(Ty->getBitWidth(), Str, radix));
+}
+
+/// ConstantInts are uniqued in the LLVMContext and are never destroyed.
+void ConstantInt::destroyConstantImpl() {
+ llvm_unreachable("You can't ConstantInt->destroyConstantImpl()!");
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantFP
+//===----------------------------------------------------------------------===//
+
+static const fltSemantics *TypeToFloatSemantics(Type *Ty) {
+ if (Ty->isHalfTy())
+ return &APFloat::IEEEhalf;
+ if (Ty->isFloatTy())
+ return &APFloat::IEEEsingle;
+ if (Ty->isDoubleTy())
+ return &APFloat::IEEEdouble;
+ if (Ty->isX86_FP80Ty())
+ return &APFloat::x87DoubleExtended;
+  if (Ty->isFP128Ty())
+ return &APFloat::IEEEquad;
+
+ assert(Ty->isPPC_FP128Ty() && "Unknown FP format");
+ return &APFloat::PPCDoubleDouble;
+}
+
+void ConstantFP::anchor() { }
+
+/// get() - This returns a constant fp for the specified value in the
+/// specified type. This should only be used for simple constant values like
+/// 2.0/1.0, etc., that are known to be valid both as double and as the
+/// target format.
+Constant *ConstantFP::get(Type *Ty, double V) {
+ LLVMContext &Context = Ty->getContext();
+
+ APFloat FV(V);
+ bool ignored;
+ FV.convert(*TypeToFloatSemantics(Ty->getScalarType()),
+ APFloat::rmNearestTiesToEven, &ignored);
+ Constant *C = get(Context, FV);
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
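+
+// Editor's sketch (Ctx assumed): the double argument is converted into the
+// target semantics, and vector types are splatted.
+//   Constant *F = ConstantFP::get(Type::getFloatTy(Ctx), 0.5); // float 0.5
+//   Constant *V = ConstantFP::get(
+//       VectorType::get(Type::getFloatTy(Ctx), 4), 0.5); // <4 x float> splat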
+
+
+Constant *ConstantFP::get(Type *Ty, StringRef Str) {
+ LLVMContext &Context = Ty->getContext();
+
+ APFloat FV(*TypeToFloatSemantics(Ty->getScalarType()), Str);
+ Constant *C = get(Context, FV);
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getNaN(Type *Ty, bool Negative, unsigned Type) {
+ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
+ APFloat NaN = APFloat::getNaN(Semantics, Negative, Type);
+ Constant *C = get(Ty->getContext(), NaN);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getNegativeZero(Type *Ty) {
+ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
+ APFloat NegZero = APFloat::getZero(Semantics, /*Negative=*/true);
+ Constant *C = get(Ty->getContext(), NegZero);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
+
+Constant *ConstantFP::getZeroValueForNegation(Type *Ty) {
+ if (Ty->isFPOrFPVectorTy())
+ return getNegativeZero(Ty);
+
+ return Constant::getNullValue(Ty);
+}
+
+
+// ConstantFP accessors.
+ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
+ LLVMContextImpl* pImpl = Context.pImpl;
+
+ ConstantFP *&Slot = pImpl->FPConstants[V];
+
+ if (!Slot) {
+ Type *Ty;
+ if (&V.getSemantics() == &APFloat::IEEEhalf)
+ Ty = Type::getHalfTy(Context);
+ else if (&V.getSemantics() == &APFloat::IEEEsingle)
+ Ty = Type::getFloatTy(Context);
+ else if (&V.getSemantics() == &APFloat::IEEEdouble)
+ Ty = Type::getDoubleTy(Context);
+ else if (&V.getSemantics() == &APFloat::x87DoubleExtended)
+ Ty = Type::getX86_FP80Ty(Context);
+ else if (&V.getSemantics() == &APFloat::IEEEquad)
+ Ty = Type::getFP128Ty(Context);
+ else {
+ assert(&V.getSemantics() == &APFloat::PPCDoubleDouble &&
+ "Unknown FP format");
+ Ty = Type::getPPC_FP128Ty(Context);
+ }
+ Slot = new ConstantFP(Ty, V);
+ }
+
+ return Slot;
+}
+
+Constant *ConstantFP::getInfinity(Type *Ty, bool Negative) {
+ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
+ Constant *C = get(Ty->getContext(), APFloat::getInf(Semantics, Negative));
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
+ConstantFP::ConstantFP(Type *Ty, const APFloat& V)
+ : Constant(Ty, ConstantFPVal, nullptr, 0), Val(V) {
+ assert(&V.getSemantics() == TypeToFloatSemantics(Ty) &&
+ "FP type Mismatch");
+}
+
+bool ConstantFP::isExactlyValue(const APFloat &V) const {
+ return Val.bitwiseIsEqual(V);
+}
+
+/// ConstantFPs are uniqued in the LLVMContext and are never destroyed.
+void ConstantFP::destroyConstantImpl() {
+  llvm_unreachable("You can't ConstantFP->destroyConstantImpl()!");
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantAggregateZero Implementation
+//===----------------------------------------------------------------------===//
+
+/// getSequentialElement - If this CAZ has array or vector type, return a zero
+/// with the right element type.
+Constant *ConstantAggregateZero::getSequentialElement() const {
+ return Constant::getNullValue(getType()->getSequentialElementType());
+}
+
+/// getStructElement - If this CAZ has struct type, return a zero with the
+/// right element type for the specified element.
+Constant *ConstantAggregateZero::getStructElement(unsigned Elt) const {
+ return Constant::getNullValue(getType()->getStructElementType(Elt));
+}
+
+/// getElementValue - Return a zero of the right type for the specified GEP
+/// index if we can; otherwise return null (e.g. if C is a ConstantExpr).
+Constant *ConstantAggregateZero::getElementValue(Constant *C) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+/// getElementValue - Return a zero of the right type for the specified GEP
+/// index.
+Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(Idx);
+}
+
+unsigned ConstantAggregateZero::getNumElements() const {
+ Type *Ty = getType();
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
+ return AT->getNumElements();
+ if (auto *VT = dyn_cast<VectorType>(Ty))
+ return VT->getNumElements();
+ return Ty->getStructNumElements();
+}
+
+//===----------------------------------------------------------------------===//
+// UndefValue Implementation
+//===----------------------------------------------------------------------===//
+
+/// getSequentialElement - If this undef has array or vector type, return an
+/// undef with the right element type.
+UndefValue *UndefValue::getSequentialElement() const {
+ return UndefValue::get(getType()->getSequentialElementType());
+}
+
+/// getStructElement - If this undef has struct type, return an undef with the
+/// right element type for the specified element.
+UndefValue *UndefValue::getStructElement(unsigned Elt) const {
+ return UndefValue::get(getType()->getStructElementType(Elt));
+}
+
+/// getElementValue - Return an undef of the right type for the specified GEP
+/// index if we can; otherwise return null (e.g. if C is a ConstantExpr).
+UndefValue *UndefValue::getElementValue(Constant *C) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+/// getElementValue - Return an undef of the right type for the specified GEP
+/// index.
+UndefValue *UndefValue::getElementValue(unsigned Idx) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(Idx);
+}
+
+unsigned UndefValue::getNumElements() const {
+ Type *Ty = getType();
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
+ return AT->getNumElements();
+ if (auto *VT = dyn_cast<VectorType>(Ty))
+ return VT->getNumElements();
+ return Ty->getStructNumElements();
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantXXX Classes
+//===----------------------------------------------------------------------===//
+
+template <typename ItTy, typename EltTy>
+static bool rangeOnlyContains(ItTy Start, ItTy End, EltTy Elt) {
+ for (; Start != End; ++Start)
+ if (*Start != Elt)
+ return false;
+ return true;
+}
+
+template <typename SequentialTy, typename ElementTy>
+static Constant *getIntSequenceIfElementsMatch(ArrayRef<Constant *> V) {
+ assert(!V.empty() && "Cannot get empty int sequence.");
+
+ SmallVector<ElementTy, 16> Elts;
+ for (Constant *C : V)
+ if (auto *CI = dyn_cast<ConstantInt>(C))
+ Elts.push_back(CI->getZExtValue());
+ else
+ return nullptr;
+ return SequentialTy::get(V[0]->getContext(), Elts);
+}
+
+template <typename SequentialTy, typename ElementTy>
+static Constant *getFPSequenceIfElementsMatch(ArrayRef<Constant *> V) {
+ assert(!V.empty() && "Cannot get empty FP sequence.");
+
+ SmallVector<ElementTy, 16> Elts;
+ for (Constant *C : V)
+ if (auto *CFP = dyn_cast<ConstantFP>(C))
+ Elts.push_back(CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ else
+ return nullptr;
+ return SequentialTy::getFP(V[0]->getContext(), Elts);
+}
+
+template <typename SequenceTy>
+static Constant *getSequenceIfElementsMatch(Constant *C,
+ ArrayRef<Constant *> V) {
+ // We speculatively build the elements here even if it turns out that there is
+ // a constantexpr or something else weird, since it is so uncommon for that to
+ // happen.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+ if (CI->getType()->isIntegerTy(8))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint8_t>(V);
+ else if (CI->getType()->isIntegerTy(16))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint16_t>(V);
+ else if (CI->getType()->isIntegerTy(32))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint32_t>(V);
+ else if (CI->getType()->isIntegerTy(64))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint64_t>(V);
+ } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ if (CFP->getType()->isHalfTy())
+ return getFPSequenceIfElementsMatch<SequenceTy, uint16_t>(V);
+ else if (CFP->getType()->isFloatTy())
+ return getFPSequenceIfElementsMatch<SequenceTy, uint32_t>(V);
+ else if (CFP->getType()->isDoubleTy())
+ return getFPSequenceIfElementsMatch<SequenceTy, uint64_t>(V);
+ }
+
+ return nullptr;
+}
+
+ConstantArray::ConstantArray(ArrayType *T, ArrayRef<Constant *> V)
+ : Constant(T, ConstantArrayVal,
+ OperandTraits<ConstantArray>::op_end(this) - V.size(),
+ V.size()) {
+ assert(V.size() == T->getNumElements() &&
+ "Invalid initializer vector for constant array");
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ assert(V[i]->getType() == T->getElementType() &&
+ "Initializer for array element doesn't match array element type!");
+ std::copy(V.begin(), V.end(), op_begin());
+}
+
+Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) {
+ if (Constant *C = getImpl(Ty, V))
+ return C;
+ return Ty->getContext().pImpl->ArrayConstants.getOrCreate(Ty, V);
+}
+
+Constant *ConstantArray::getImpl(ArrayType *Ty, ArrayRef<Constant*> V) {
+ // Empty arrays are canonicalized to ConstantAggregateZero.
+ if (V.empty())
+ return ConstantAggregateZero::get(Ty);
+
+ for (unsigned i = 0, e = V.size(); i != e; ++i) {
+ assert(V[i]->getType() == Ty->getElementType() &&
+ "Wrong type in array element initializer");
+ }
+
+  // If this is an all-zero array, return a ConstantAggregateZero object. If
+  // all elements are undef, return an UndefValue; if all are "simple" values,
+  // return a ConstantDataArray.
+ Constant *C = V[0];
+ if (isa<UndefValue>(C) && rangeOnlyContains(V.begin(), V.end(), C))
+ return UndefValue::get(Ty);
+
+ if (C->isNullValue() && rangeOnlyContains(V.begin(), V.end(), C))
+ return ConstantAggregateZero::get(Ty);
+
+  // Check to see if all of the elements are ConstantFP or ConstantInt and if
+  // the element type is compatible with ConstantDataArray. If so, use it.
+ if (ConstantDataSequential::isElementTypeCompatible(C->getType()))
+ return getSequenceIfElementsMatch<ConstantDataArray>(C, V);
+
+ // Otherwise, we really do want to create a ConstantArray.
+ return nullptr;
+}
+
+/// getTypeForElements - Return an anonymous struct type to use for a constant
+/// with the specified set of elements. The list must not be empty.
+StructType *ConstantStruct::getTypeForElements(LLVMContext &Context,
+ ArrayRef<Constant*> V,
+ bool Packed) {
+ unsigned VecSize = V.size();
+ SmallVector<Type*, 16> EltTypes(VecSize);
+ for (unsigned i = 0; i != VecSize; ++i)
+ EltTypes[i] = V[i]->getType();
+
+ return StructType::get(Context, EltTypes, Packed);
+}
+
+
+StructType *ConstantStruct::getTypeForElements(ArrayRef<Constant*> V,
+ bool Packed) {
+ assert(!V.empty() &&
+ "ConstantStruct::getTypeForElements cannot be called on empty list");
+ return getTypeForElements(V[0]->getContext(), V, Packed);
+}
+
+
+ConstantStruct::ConstantStruct(StructType *T, ArrayRef<Constant *> V)
+ : Constant(T, ConstantStructVal,
+ OperandTraits<ConstantStruct>::op_end(this) - V.size(),
+ V.size()) {
+ assert(V.size() == T->getNumElements() &&
+ "Invalid initializer vector for constant structure");
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ assert((T->isOpaque() || V[i]->getType() == T->getElementType(i)) &&
+ "Initializer for struct element doesn't match struct element type!");
+ std::copy(V.begin(), V.end(), op_begin());
+}
+
+// ConstantStruct accessors.
+Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) {
+ assert((ST->isOpaque() || ST->getNumElements() == V.size()) &&
+ "Incorrect # elements specified to ConstantStruct::get");
+
+ // Create a ConstantAggregateZero value if all elements are zeros.
+ bool isZero = true;
+ bool isUndef = false;
+
+ if (!V.empty()) {
+ isUndef = isa<UndefValue>(V[0]);
+ isZero = V[0]->isNullValue();
+ if (isUndef || isZero) {
+ for (unsigned i = 0, e = V.size(); i != e; ++i) {
+ if (!V[i]->isNullValue())
+ isZero = false;
+ if (!isa<UndefValue>(V[i]))
+ isUndef = false;
+ }
+ }
+ }
+ if (isZero)
+ return ConstantAggregateZero::get(ST);
+ if (isUndef)
+ return UndefValue::get(ST);
+
+ return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V);
+}
+
+Constant *ConstantStruct::get(StructType *T, ...) {
+ va_list ap;
+ SmallVector<Constant*, 8> Values;
+ va_start(ap, T);
+ while (Constant *Val = va_arg(ap, llvm::Constant*))
+ Values.push_back(Val);
+ va_end(ap);
+ return get(T, Values);
+}
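+
+// Editor's usage sketch (ST, C1, C2 assumed): the vararg list must end with a
+// null Constant*, or the va_arg loop above will read past the arguments.
+//   Constant *S = ConstantStruct::get(ST, C1, C2, nullptr);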
+
+ConstantVector::ConstantVector(VectorType *T, ArrayRef<Constant *> V)
+ : Constant(T, ConstantVectorVal,
+ OperandTraits<ConstantVector>::op_end(this) - V.size(),
+ V.size()) {
+ for (size_t i = 0, e = V.size(); i != e; i++)
+ assert(V[i]->getType() == T->getElementType() &&
+ "Initializer for vector element doesn't match vector element type!");
+ std::copy(V.begin(), V.end(), op_begin());
+}
+
+// ConstantVector accessors.
+Constant *ConstantVector::get(ArrayRef<Constant*> V) {
+ if (Constant *C = getImpl(V))
+ return C;
+ VectorType *Ty = VectorType::get(V.front()->getType(), V.size());
+ return Ty->getContext().pImpl->VectorConstants.getOrCreate(Ty, V);
+}
+
+Constant *ConstantVector::getImpl(ArrayRef<Constant*> V) {
+ assert(!V.empty() && "Vectors can't be empty");
+ VectorType *T = VectorType::get(V.front()->getType(), V.size());
+
+  // If this is an all-zero or all-undef vector, return a
+  // ConstantAggregateZero or an UndefValue, respectively.
+ Constant *C = V[0];
+ bool isZero = C->isNullValue();
+ bool isUndef = isa<UndefValue>(C);
+
+ if (isZero || isUndef) {
+ for (unsigned i = 1, e = V.size(); i != e; ++i)
+ if (V[i] != C) {
+ isZero = isUndef = false;
+ break;
+ }
+ }
+
+ if (isZero)
+ return ConstantAggregateZero::get(T);
+ if (isUndef)
+ return UndefValue::get(T);
+
+ // Check to see if all of the elements are ConstantFP or ConstantInt and if
+ // the element type is compatible with ConstantDataVector. If so, use it.
+ if (ConstantDataSequential::isElementTypeCompatible(C->getType()))
+ return getSequenceIfElementsMatch<ConstantDataVector>(C, V);
+
+ // Otherwise, the element type isn't compatible with ConstantDataVector, or
+  // the operand list contains a ConstantExpr or something else strange.
+ return nullptr;
+}
+
+Constant *ConstantVector::getSplat(unsigned NumElts, Constant *V) {
+ // If this splat is compatible with ConstantDataVector, use it instead of
+ // ConstantVector.
+ if ((isa<ConstantFP>(V) || isa<ConstantInt>(V)) &&
+ ConstantDataSequential::isElementTypeCompatible(V->getType()))
+ return ConstantDataVector::getSplat(NumElts, V);
+
+ SmallVector<Constant*, 32> Elts(NumElts, V);
+ return get(Elts);
+}
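+
+// Editor's sketch (Ctx assumed): splats of simple scalars come back in the
+// packed ConstantDataVector form rather than as a ConstantVector.
+//   Constant *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
+//   Constant *Splat = ConstantVector::getSplat(4, One);
+//   // isa<ConstantDataVector>(Splat) holds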
+
+ConstantTokenNone *ConstantTokenNone::get(LLVMContext &Context) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ if (!pImpl->TheNoneToken)
+ pImpl->TheNoneToken.reset(new ConstantTokenNone(Context));
+ return pImpl->TheNoneToken.get();
+}
+
+/// ConstantTokenNone is a per-context singleton and is never destroyed.
+void ConstantTokenNone::destroyConstantImpl() {
+ llvm_unreachable("You can't ConstantTokenNone->destroyConstantImpl()!");
+}
+
+// Utility function for determining if a ConstantExpr is a CastOp. This can't
+// be inline because we don't want to #include Instruction.h into Constant.h.
+bool ConstantExpr::isCast() const {
+ return Instruction::isCast(getOpcode());
+}
+
+bool ConstantExpr::isCompare() const {
+ return getOpcode() == Instruction::ICmp || getOpcode() == Instruction::FCmp;
+}
+
+bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
+ if (getOpcode() != Instruction::GetElementPtr) return false;
+
+ gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
+ User::const_op_iterator OI = std::next(this->op_begin());
+
+ // Skip the first index, as it has no static limit.
+ ++GEPI;
+ ++OI;
+
+ // The remaining indices must be compile-time known integers within the
+ // bounds of the corresponding notional static array types.
+ for (; GEPI != E; ++GEPI, ++OI) {
+ ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
+ if (!CI) return false;
+ if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
+ if (CI->getValue().getActiveBits() > 64 ||
+ CI->getZExtValue() >= ATy->getNumElements())
+ return false;
+ }
+
+ // All the indices checked out.
+ return true;
+}
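+
+// Editor's IR-level illustration (with an assumed global @g of type
+// [4 x i32]):
+//   getelementptr ([4 x i32], [4 x i32]* @g, i32 0, i32 2) ; true: 2 < 4
+//   getelementptr ([4 x i32], [4 x i32]* @g, i32 0, i32 7) ; false: 7 >= 4
+// The first index (the leading 0) is deliberately exempt from the check.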
+
+bool ConstantExpr::hasIndices() const {
+ return getOpcode() == Instruction::ExtractValue ||
+ getOpcode() == Instruction::InsertValue;
+}
+
+ArrayRef<unsigned> ConstantExpr::getIndices() const {
+ if (const ExtractValueConstantExpr *EVCE =
+ dyn_cast<ExtractValueConstantExpr>(this))
+ return EVCE->Indices;
+
+ return cast<InsertValueConstantExpr>(this)->Indices;
+}
+
+unsigned ConstantExpr::getPredicate() const {
+ return cast<CompareConstantExpr>(this)->predicate;
+}
+
+/// getWithOperandReplaced - Return a constant expression identical to this
+/// one, but with the specified operand set to the specified value.
+Constant *
+ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const {
+ assert(Op->getType() == getOperand(OpNo)->getType() &&
+ "Replacing operand with value of different type!");
+ if (getOperand(OpNo) == Op)
+ return const_cast<ConstantExpr*>(this);
+
+ SmallVector<Constant*, 8> NewOps;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ NewOps.push_back(i == OpNo ? Op : getOperand(i));
+
+ return getWithOperands(NewOps);
+}
+
+/// getWithOperands - This returns the current constant expression with the
+/// operands replaced with the specified values. The specified array must
+/// have the same number of operands as our current one.
+Constant *ConstantExpr::getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
+ bool OnlyIfReduced, Type *SrcTy) const {
+ assert(Ops.size() == getNumOperands() && "Operand count mismatch!");
+
+  // If no operands changed, return self.
+ if (Ty == getType() && std::equal(Ops.begin(), Ops.end(), op_begin()))
+ return const_cast<ConstantExpr*>(this);
+
+ Type *OnlyIfReducedTy = OnlyIfReduced ? Ty : nullptr;
+ switch (getOpcode()) {
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ return ConstantExpr::getCast(getOpcode(), Ops[0], Ty, OnlyIfReduced);
+ case Instruction::Select:
+ return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2], OnlyIfReducedTy);
+ case Instruction::InsertElement:
+ return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2],
+ OnlyIfReducedTy);
+ case Instruction::ExtractElement:
+ return ConstantExpr::getExtractElement(Ops[0], Ops[1], OnlyIfReducedTy);
+ case Instruction::InsertValue:
+ return ConstantExpr::getInsertValue(Ops[0], Ops[1], getIndices(),
+ OnlyIfReducedTy);
+ case Instruction::ExtractValue:
+ return ConstantExpr::getExtractValue(Ops[0], getIndices(), OnlyIfReducedTy);
+ case Instruction::ShuffleVector:
+ return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2],
+ OnlyIfReducedTy);
+ case Instruction::GetElementPtr: {
+ auto *GEPO = cast<GEPOperator>(this);
+ assert(SrcTy || (Ops[0]->getType() == getOperand(0)->getType()));
+ return ConstantExpr::getGetElementPtr(
+ SrcTy ? SrcTy : GEPO->getSourceElementType(), Ops[0], Ops.slice(1),
+ GEPO->isInBounds(), OnlyIfReducedTy);
+ }
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ return ConstantExpr::getCompare(getPredicate(), Ops[0], Ops[1],
+ OnlyIfReducedTy);
+ default:
+    assert(getNumOperands() == 2 && "Must be a binary operator!");
+ return ConstantExpr::get(getOpcode(), Ops[0], Ops[1], SubclassOptionalData,
+ OnlyIfReducedTy);
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// isValueValidForType implementations
+
+bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) {
+  unsigned NumBits = Ty->getIntegerBitWidth(); // asserts if Ty isn't integer
+ if (Ty->isIntegerTy(1))
+ return Val == 0 || Val == 1;
+ if (NumBits >= 64)
+ return true; // always true, has to fit in largest type
+  uint64_t Max = (1ull << NumBits) - 1;
+ return Val <= Max;
+}
+
+bool ConstantInt::isValueValidForType(Type *Ty, int64_t Val) {
+ unsigned NumBits = Ty->getIntegerBitWidth();
+ if (Ty->isIntegerTy(1))
+ return Val == 0 || Val == 1 || Val == -1;
+ if (NumBits >= 64)
+ return true; // always true, has to fit in largest type
+ int64_t Min = -(1ll << (NumBits-1));
+ int64_t Max = (1ll << (NumBits-1)) - 1;
+ return (Val >= Min && Val <= Max);
+}
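+
+// Editor's worked example: for i8 the signed range is
+// [-(1 << 7), (1 << 7) - 1] = [-128, 127], so
+// isValueValidForType(i8, int64_t(-128)) holds while
+// isValueValidForType(i8, int64_t(128)) does not.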
+
+bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) {
+ // convert modifies in place, so make a copy.
+ APFloat Val2 = APFloat(Val);
+ bool losesInfo;
+ switch (Ty->getTypeID()) {
+ default:
+ return false; // These can't be represented as floating point!
+
+ // FIXME rounding mode needs to be more flexible
+ case Type::HalfTyID: {
+ if (&Val2.getSemantics() == &APFloat::IEEEhalf)
+ return true;
+ Val2.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
+ case Type::FloatTyID: {
+ if (&Val2.getSemantics() == &APFloat::IEEEsingle)
+ return true;
+ Val2.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
+ case Type::DoubleTyID: {
+ if (&Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble)
+ return true;
+ Val2.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
+ case Type::X86_FP80TyID:
+ return &Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble ||
+ &Val2.getSemantics() == &APFloat::x87DoubleExtended;
+ case Type::FP128TyID:
+ return &Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble ||
+ &Val2.getSemantics() == &APFloat::IEEEquad;
+ case Type::PPC_FP128TyID:
+ return &Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble ||
+ &Val2.getSemantics() == &APFloat::PPCDoubleDouble;
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Factory Function Implementation
+
+ConstantAggregateZero *ConstantAggregateZero::get(Type *Ty) {
+ assert((Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()) &&
+ "Cannot create an aggregate zero of non-aggregate type!");
+
+ ConstantAggregateZero *&Entry = Ty->getContext().pImpl->CAZConstants[Ty];
+ if (!Entry)
+ Entry = new ConstantAggregateZero(Ty);
+
+ return Entry;
+}
+
+/// destroyConstantImpl - Remove the constant from the constant table.
+///
+void ConstantAggregateZero::destroyConstantImpl() {
+ getContext().pImpl->CAZConstants.erase(getType());
+}
+
+/// destroyConstantImpl - Remove the constant from the constant table.
+///
+void ConstantArray::destroyConstantImpl() {
+ getType()->getContext().pImpl->ArrayConstants.remove(this);
+}
+
+
+//---- ConstantStruct::get() implementation...
+//
+
+// destroyConstantImpl - Remove the constant from the constant table.
+//
+void ConstantStruct::destroyConstantImpl() {
+ getType()->getContext().pImpl->StructConstants.remove(this);
+}
+
+// destroyConstantImpl - Remove the constant from the constant table.
+//
+void ConstantVector::destroyConstantImpl() {
+ getType()->getContext().pImpl->VectorConstants.remove(this);
+}
+
+/// getSplatValue - If this is a splat vector constant, meaning that all of
+/// the elements have the same value, return that value. Otherwise return null.
+Constant *Constant::getSplatValue() const {
+ assert(this->getType()->isVectorTy() && "Only valid for vectors!");
+ if (isa<ConstantAggregateZero>(this))
+ return getNullValue(this->getType()->getVectorElementType());
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ return CV->getSplatValue();
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ return CV->getSplatValue();
+ return nullptr;
+}
+
+/// getSplatValue - If this is a splat constant, where all of the
+/// elements have the same value, return that value. Otherwise return null.
+Constant *ConstantVector::getSplatValue() const {
+ // Check out first element.
+ Constant *Elt = getOperand(0);
+ // Then make sure all remaining elements point to the same value.
+ for (unsigned I = 1, E = getNumOperands(); I < E; ++I)
+ if (getOperand(I) != Elt)
+ return nullptr;
+ return Elt;
+}
+
+/// If C is a constant integer then return its value; otherwise C must be a
+/// vector of constant integers, all equal, and the common value is returned.
+const APInt &Constant::getUniqueInteger() const {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->getValue();
+ assert(this->getSplatValue() && "Doesn't contain a unique integer!");
+ const Constant *C = this->getAggregateElement(0U);
+ assert(C && isa<ConstantInt>(C) && "Not a vector of numbers!");
+ return cast<ConstantInt>(C)->getValue();
+}
+
+//---- ConstantPointerNull::get() implementation.
+//
+
+ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) {
+ ConstantPointerNull *&Entry = Ty->getContext().pImpl->CPNConstants[Ty];
+ if (!Entry)
+ Entry = new ConstantPointerNull(Ty);
+
+ return Entry;
+}
+
+// destroyConstantImpl - Remove the constant from the constant table.
+//
+void ConstantPointerNull::destroyConstantImpl() {
+ getContext().pImpl->CPNConstants.erase(getType());
+}
+
+
+//---- UndefValue::get() implementation.
+//
+
+UndefValue *UndefValue::get(Type *Ty) {
+ UndefValue *&Entry = Ty->getContext().pImpl->UVConstants[Ty];
+ if (!Entry)
+ Entry = new UndefValue(Ty);
+
+ return Entry;
+}
+
+// destroyConstantImpl - Remove the constant from the constant table.
+//
+void UndefValue::destroyConstantImpl() {
+ // Free the constant and any dangling references to it.
+ getContext().pImpl->UVConstants.erase(getType());
+}
+
+//---- BlockAddress::get() implementation.
+//
+
+BlockAddress *BlockAddress::get(BasicBlock *BB) {
+ assert(BB->getParent() && "Block must have a parent");
+ return get(BB->getParent(), BB);
+}
+
+BlockAddress *BlockAddress::get(Function *F, BasicBlock *BB) {
+ BlockAddress *&BA =
+ F->getContext().pImpl->BlockAddresses[std::make_pair(F, BB)];
+ if (!BA)
+ BA = new BlockAddress(F, BB);
+
+ assert(BA->getFunction() == F && "Basic block moved between functions");
+ return BA;
+}
+
+BlockAddress::BlockAddress(Function *F, BasicBlock *BB)
+: Constant(Type::getInt8PtrTy(F->getContext()), Value::BlockAddressVal,
+ &Op<0>(), 2) {
+ setOperand(0, F);
+ setOperand(1, BB);
+ BB->AdjustBlockAddressRefCount(1);
+}
+
+BlockAddress *BlockAddress::lookup(const BasicBlock *BB) {
+ if (!BB->hasAddressTaken())
+ return nullptr;
+
+ const Function *F = BB->getParent();
+ assert(F && "Block must have a parent");
+ BlockAddress *BA =
+ F->getContext().pImpl->BlockAddresses.lookup(std::make_pair(F, BB));
+ assert(BA && "Refcount and block address map disagree!");
+ return BA;
+}
+
+// destroyConstantImpl - Remove the constant from the constant table.
+//
+void BlockAddress::destroyConstantImpl() {
+ getFunction()->getType()->getContext().pImpl
+ ->BlockAddresses.erase(std::make_pair(getFunction(), getBasicBlock()));
+ getBasicBlock()->AdjustBlockAddressRefCount(-1);
+}
+
+Value *BlockAddress::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ // This could be replacing either the Basic Block or the Function. In either
+ // case, we have to remove the map entry.
+ Function *NewF = getFunction();
+ BasicBlock *NewBB = getBasicBlock();
+
+ if (U == &Op<0>())
+ NewF = cast<Function>(To->stripPointerCasts());
+ else
+ NewBB = cast<BasicBlock>(To);
+
+ // See if the 'new' entry already exists, if not, just update this in place
+ // and return early.
+ BlockAddress *&NewBA =
+ getContext().pImpl->BlockAddresses[std::make_pair(NewF, NewBB)];
+ if (NewBA)
+ return NewBA;
+
+ getBasicBlock()->AdjustBlockAddressRefCount(-1);
+
+  // Remove the old entry; this can't cause the map to rehash (just a
+ // tombstone will get added).
+ getContext().pImpl->BlockAddresses.erase(std::make_pair(getFunction(),
+ getBasicBlock()));
+ NewBA = this;
+ setOperand(0, NewF);
+ setOperand(1, NewBB);
+ getBasicBlock()->AdjustBlockAddressRefCount(1);
+
+ // If we just want to keep the existing value, then return null.
+ // Callers know that this means we shouldn't delete this value.
+ return nullptr;
+}
+
+//---- ConstantExpr::get() implementations.
+//
+
+/// This is a utility function to handle folding of casts and lookup of the
+/// cast in the ExprConstants map. It is used by the various get* methods below.
+static Constant *getFoldedCast(Instruction::CastOps opc, Constant *C, Type *Ty,
+ bool OnlyIfReduced = false) {
+ assert(Ty->isFirstClassType() && "Cannot cast to an aggregate type!");
+ // Fold a few common cases
+ if (Constant *FC = ConstantFoldCastInstruction(opc, C, Ty))
+ return FC;
+
+ if (OnlyIfReduced)
+ return nullptr;
+
+ LLVMContextImpl *pImpl = Ty->getContext().pImpl;
+
+ // Look up the constant in the table first to ensure uniqueness.
+ ConstantExprKeyType Key(opc, C);
+
+ return pImpl->ExprConstants.getOrCreate(Ty, Key);
+}
+
+Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty,
+ bool OnlyIfReduced) {
+ Instruction::CastOps opc = Instruction::CastOps(oc);
+ assert(Instruction::isCast(opc) && "opcode out of range");
+ assert(C && Ty && "Null arguments to getCast");
+ assert(CastInst::castIsValid(opc, C, Ty) && "Invalid constantexpr cast!");
+
+ switch (opc) {
+ default:
+ llvm_unreachable("Invalid cast opcode");
+ case Instruction::Trunc:
+ return getTrunc(C, Ty, OnlyIfReduced);
+ case Instruction::ZExt:
+ return getZExt(C, Ty, OnlyIfReduced);
+ case Instruction::SExt:
+ return getSExt(C, Ty, OnlyIfReduced);
+ case Instruction::FPTrunc:
+ return getFPTrunc(C, Ty, OnlyIfReduced);
+ case Instruction::FPExt:
+ return getFPExtend(C, Ty, OnlyIfReduced);
+ case Instruction::UIToFP:
+ return getUIToFP(C, Ty, OnlyIfReduced);
+ case Instruction::SIToFP:
+ return getSIToFP(C, Ty, OnlyIfReduced);
+ case Instruction::FPToUI:
+ return getFPToUI(C, Ty, OnlyIfReduced);
+ case Instruction::FPToSI:
+ return getFPToSI(C, Ty, OnlyIfReduced);
+ case Instruction::PtrToInt:
+ return getPtrToInt(C, Ty, OnlyIfReduced);
+ case Instruction::IntToPtr:
+ return getIntToPtr(C, Ty, OnlyIfReduced);
+ case Instruction::BitCast:
+ return getBitCast(C, Ty, OnlyIfReduced);
+ case Instruction::AddrSpaceCast:
+ return getAddrSpaceCast(C, Ty, OnlyIfReduced);
+ }
+}
+
+Constant *ConstantExpr::getZExtOrBitCast(Constant *C, Type *Ty) {
+ if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return getBitCast(C, Ty);
+ return getZExt(C, Ty);
+}
+
+Constant *ConstantExpr::getSExtOrBitCast(Constant *C, Type *Ty) {
+ if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return getBitCast(C, Ty);
+ return getSExt(C, Ty);
+}
+
+Constant *ConstantExpr::getTruncOrBitCast(Constant *C, Type *Ty) {
+ if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return getBitCast(C, Ty);
+ return getTrunc(C, Ty);
+}
+
+Constant *ConstantExpr::getPointerCast(Constant *S, Type *Ty) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
+ "Invalid cast");
+
+ if (Ty->isIntOrIntVectorTy())
+ return getPtrToInt(S, Ty);
+
+ unsigned SrcAS = S->getType()->getPointerAddressSpace();
+ if (Ty->isPtrOrPtrVectorTy() && SrcAS != Ty->getPointerAddressSpace())
+ return getAddrSpaceCast(S, Ty);
+
+ return getBitCast(S, Ty);
+}
+
+Constant *ConstantExpr::getPointerBitCastOrAddrSpaceCast(Constant *S,
+ Type *Ty) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return getAddrSpaceCast(S, Ty);
+
+ return getBitCast(S, Ty);
+}
+
+Constant *ConstantExpr::getIntegerCast(Constant *C, Type *Ty,
+ bool isSigned) {
+ assert(C->getType()->isIntOrIntVectorTy() &&
+ Ty->isIntOrIntVectorTy() && "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::Trunc :
+ (isSigned ? Instruction::SExt : Instruction::ZExt)));
+ return getCast(opcode, C, Ty);
+}
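+
+// Editor's sketch (Ctx assumed): the opcode is chosen from the relative bit
+// widths. Casts of plain ConstantInts are folded eagerly by getFoldedCast, so
+// this yields the ConstantInt 'i32 -1' rather than a sext expression.
+//   Constant *C8 = ConstantInt::get(Type::getInt8Ty(Ctx), -1,
+//                                   /*isSigned=*/true);
+//   Constant *W = ConstantExpr::getIntegerCast(C8, Type::getInt32Ty(Ctx),
+//                                              /*isSigned=*/true);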
+
+Constant *ConstantExpr::getFPCast(Constant *C, Type *Ty) {
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ if (SrcBits == DstBits)
+ return C; // Avoid a useless cast
+ Instruction::CastOps opcode =
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt);
+ return getCast(opcode, C, Ty);
+}
+
+Constant *ConstantExpr::getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && "Trunc operand must be integer");
+ assert(Ty->isIntOrIntVectorTy() && "Trunc produces only integral");
+  assert(C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits() &&
+ "SrcTy must be larger than DestTy for Trunc!");
+
+ return getFoldedCast(Instruction::Trunc, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getSExt(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && "SExt operand must be integral");
+ assert(Ty->isIntOrIntVectorTy() && "SExt produces only integer");
+  assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
+ "SrcTy must be smaller than DestTy for SExt!");
+
+ return getFoldedCast(Instruction::SExt, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getZExt(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+  assert(C->getType()->isIntOrIntVectorTy() && "ZExt operand must be integral");
+ assert(Ty->isIntOrIntVectorTy() && "ZExt produces only integer");
+  assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
+ "SrcTy must be smaller than DestTy for ZExt!");
+
+ return getFoldedCast(Instruction::ZExt, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPTrunc(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+         C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits() &&
+ "This is an illegal floating point truncation!");
+ return getFoldedCast(Instruction::FPTrunc, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPExtend(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+         C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
+ "This is an illegal floating point extension!");
+ return getFoldedCast(Instruction::FPExt, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "This is an illegal uint to floating point cast!");
+ return getFoldedCast(Instruction::UIToFP, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "This is an illegal sint to floating point cast!");
+ return getFoldedCast(Instruction::SIToFP, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "This is an illegal floating point to uint cast!");
+ return getFoldedCast(Instruction::FPToUI, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = C->getType()->getTypeID() == Type::VectorTyID;
+ bool toVec = Ty->getTypeID() == Type::VectorTyID;
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "This is an illegal floating point to sint cast!");
+ return getFoldedCast(Instruction::FPToSI, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(C->getType()->getScalarType()->isPointerTy() &&
+ "PtrToInt source must be pointer or pointer vector");
+ assert(DstTy->getScalarType()->isIntegerTy() &&
+ "PtrToInt destination must be integer or integer vector");
+ assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
+ if (isa<VectorType>(C->getType()))
+    assert(C->getType()->getVectorNumElements() ==
+               DstTy->getVectorNumElements() &&
+ "Invalid cast between a different number of vector elements");
+ return getFoldedCast(Instruction::PtrToInt, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(C->getType()->getScalarType()->isIntegerTy() &&
+ "IntToPtr source must be integer or integer vector");
+ assert(DstTy->getScalarType()->isPointerTy() &&
+ "IntToPtr destination must be a pointer or pointer vector");
+ assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
+ if (isa<VectorType>(C->getType()))
+    assert(C->getType()->getVectorNumElements() ==
+               DstTy->getVectorNumElements() &&
+ "Invalid cast between a different number of vector elements");
+ return getFoldedCast(Instruction::IntToPtr, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getBitCast(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(CastInst::castIsValid(Instruction::BitCast, C, DstTy) &&
+ "Invalid constantexpr bitcast!");
+
+  // It is common to ask for a bitcast of a value to its own type; handle
+  // this speedily.
+ if (C->getType() == DstTy) return C;
+
+ return getFoldedCast(Instruction::BitCast, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(CastInst::castIsValid(Instruction::AddrSpaceCast, C, DstTy) &&
+ "Invalid constantexpr addrspacecast!");
+
+ // Canonicalize addrspacecasts between different pointer types by first
+ // bitcasting the pointer type and then converting the address space.
+ PointerType *SrcScalarTy = cast<PointerType>(C->getType()->getScalarType());
+ PointerType *DstScalarTy = cast<PointerType>(DstTy->getScalarType());
+ Type *DstElemTy = DstScalarTy->getElementType();
+ if (SrcScalarTy->getElementType() != DstElemTy) {
+ Type *MidTy = PointerType::get(DstElemTy, SrcScalarTy->getAddressSpace());
+ if (VectorType *VT = dyn_cast<VectorType>(DstTy)) {
+ // Handle vectors of pointers.
+ MidTy = VectorType::get(MidTy, VT->getNumElements());
+ }
+ C = getBitCast(C, MidTy);
+ }
+ return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
+ unsigned Flags, Type *OnlyIfReducedTy) {
+ // Check the operands for consistency first.
+ assert(Opcode >= Instruction::BinaryOpsBegin &&
+ Opcode < Instruction::BinaryOpsEnd &&
+ "Invalid opcode in binary constant expression");
+ assert(C1->getType() == C2->getType() &&
+ "Operand types in binary constant expression should match");
+
+#ifndef NDEBUG
+ switch (Opcode) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create an integer operation on a non-integer type!");
+ break;
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create an arithmetic operation on a non-arithmetic type!");
+ break;
+ case Instruction::FDiv:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isFPOrFPVectorTy() &&
+ "Tried to create an arithmetic operation on a non-arithmetic type!");
+ break;
+ case Instruction::URem:
+ case Instruction::SRem:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create an arithmetic operation on a non-arithmetic type!");
+ break;
+ case Instruction::FRem:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isFPOrFPVectorTy() &&
+ "Tried to create an arithmetic operation on a non-arithmetic type!");
+ break;
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create a logical operation on a non-integral type!");
+ break;
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create a shift operation on a non-integer type!");
+ break;
+ default:
+ break;
+ }
+#endif
+
+ if (Constant *FC = ConstantFoldBinaryInstruction(Opcode, C1, C2))
+ return FC; // Fold a few common cases.
+
+ if (OnlyIfReducedTy == C1->getType())
+ return nullptr;
+
+ Constant *ArgVec[] = { C1, C2 };
+ ConstantExprKeyType Key(Opcode, ArgVec, 0, Flags);
+
+ LLVMContextImpl *pImpl = C1->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(C1->getType(), Key);
+}
+
+Constant *ConstantExpr::getSizeOf(Type* Ty) {
+ // sizeof is implemented as: (i64) gep (Ty*)null, 1
+ // Note that a non-inbounds gep is used, as null isn't within any object.
+ Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
+ Constant *GEP = getGetElementPtr(
+ Ty, Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
+}
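+
+// Illustrative expansion (sketch): getSizeOf(double) produces
+//   i64 ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
+// which the code generator folds to the target's size for double, so the
+// constant can be emitted before the DataLayout is known.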
+
+Constant *ConstantExpr::getAlignOf(Type* Ty) {
+ // alignof is implemented as: (i64) gep ({i1,Ty}*)null, 0, 1
+ // Note that a non-inbounds gep is used, as null isn't within any object.
+ Type *AligningTy =
+ StructType::get(Type::getInt1Ty(Ty->getContext()), Ty, nullptr);
+ Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo(0));
+ Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0);
+ Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
+ Constant *Indices[2] = { Zero, One };
+ Constant *GEP = getGetElementPtr(AligningTy, NullPtr, Indices);
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
+}
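+
+// Illustrative expansion (sketch): getAlignOf(double) produces
+//   i64 ptrtoint (double* getelementptr ({i1, double}, {i1, double}* null,
+//                                        i64 0, i32 1) to i64)
+// i.e. the offset of a double placed right after a minimal i1, which targets
+// fold to the ABI alignment of double.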
+
+Constant *ConstantExpr::getOffsetOf(StructType* STy, unsigned FieldNo) {
+ return getOffsetOf(STy, ConstantInt::get(Type::getInt32Ty(STy->getContext()),
+ FieldNo));
+}
+
+Constant *ConstantExpr::getOffsetOf(Type* Ty, Constant *FieldNo) {
+ // offsetof is implemented as: (i64) gep (Ty*)null, 0, FieldNo
+ // Note that a non-inbounds gep is used, as null isn't within any object.
+ Constant *GEPIdx[] = {
+ ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0),
+ FieldNo
+ };
+ Constant *GEP = getGetElementPtr(
+ Ty, Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
+}
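+
+// Illustrative expansion (sketch): with %T = type { i32, double },
+// getOffsetOf(%T, 1) produces
+//   i64 ptrtoint (double* getelementptr (%T, %T* null, i64 0, i32 1) to i64)
+// a target-independent stand-in for offsetof on field 1.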
+
+Constant *ConstantExpr::getCompare(unsigned short Predicate, Constant *C1,
+ Constant *C2, bool OnlyIfReduced) {
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+
+ switch (Predicate) {
+ default: llvm_unreachable("Invalid CmpInst predicate");
+ case CmpInst::FCMP_FALSE: case CmpInst::FCMP_OEQ: case CmpInst::FCMP_OGT:
+ case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLT: case CmpInst::FCMP_OLE:
+ case CmpInst::FCMP_ONE: case CmpInst::FCMP_ORD: case CmpInst::FCMP_UNO:
+ case CmpInst::FCMP_UEQ: case CmpInst::FCMP_UGT: case CmpInst::FCMP_UGE:
+ case CmpInst::FCMP_ULT: case CmpInst::FCMP_ULE: case CmpInst::FCMP_UNE:
+ case CmpInst::FCMP_TRUE:
+ return getFCmp(Predicate, C1, C2, OnlyIfReduced);
+
+ case CmpInst::ICMP_EQ: case CmpInst::ICMP_NE: case CmpInst::ICMP_UGT:
+ case CmpInst::ICMP_UGE: case CmpInst::ICMP_ULT: case CmpInst::ICMP_ULE:
+ case CmpInst::ICMP_SGT: case CmpInst::ICMP_SGE: case CmpInst::ICMP_SLT:
+ case CmpInst::ICMP_SLE:
+ return getICmp(Predicate, C1, C2, OnlyIfReduced);
+ }
+}
+
+Constant *ConstantExpr::getSelect(Constant *C, Constant *V1, Constant *V2,
+ Type *OnlyIfReducedTy) {
+  assert(!SelectInst::areInvalidOperands(C, V1, V2) &&
+         "Invalid select operands");
+
+ if (Constant *SC = ConstantFoldSelectInstruction(C, V1, V2))
+ return SC; // Fold common cases
+
+ if (OnlyIfReducedTy == V1->getType())
+ return nullptr;
+
+ Constant *ArgVec[] = { C, V1, V2 };
+ ConstantExprKeyType Key(Instruction::Select, ArgVec);
+
+ LLVMContextImpl *pImpl = C->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(V1->getType(), Key);
+}
+
+Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> Idxs, bool InBounds,
+ Type *OnlyIfReducedTy) {
+ if (!Ty)
+ Ty = cast<PointerType>(C->getType()->getScalarType())->getElementType();
+ else
+ assert(
+ Ty ==
+ cast<PointerType>(C->getType()->getScalarType())->getContainedType(0u));
+
+ if (Constant *FC = ConstantFoldGetElementPtr(Ty, C, InBounds, Idxs))
+ return FC; // Fold a few common cases.
+
+ // Get the result type of the getelementptr!
+ Type *DestTy = GetElementPtrInst::getIndexedType(Ty, Idxs);
+ assert(DestTy && "GEP indices invalid!");
+ unsigned AS = C->getType()->getPointerAddressSpace();
+ Type *ReqTy = DestTy->getPointerTo(AS);
+ if (VectorType *VecTy = dyn_cast<VectorType>(C->getType()))
+ ReqTy = VectorType::get(ReqTy, VecTy->getNumElements());
+
+ if (OnlyIfReducedTy == ReqTy)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ std::vector<Constant*> ArgVec;
+ ArgVec.reserve(1 + Idxs.size());
+ ArgVec.push_back(C);
+ for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
+    assert(Idxs[i]->getType()->isVectorTy() == ReqTy->isVectorTy() &&
+           "getelementptr index type mismatch");
+    assert((!Idxs[i]->getType()->isVectorTy() ||
+            ReqTy->getVectorNumElements() ==
+            Idxs[i]->getType()->getVectorNumElements()) &&
+           "getelementptr index type mismatch");
+ ArgVec.push_back(cast<Constant>(Idxs[i]));
+ }
+ const ConstantExprKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
+ InBounds ? GEPOperator::IsInBounds : 0, None,
+ Ty);
+
+ LLVMContextImpl *pImpl = C->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
+}
+
+Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
+ Constant *RHS, bool OnlyIfReduced) {
+ assert(LHS->getType() == RHS->getType());
+ assert(pred >= ICmpInst::FIRST_ICMP_PREDICATE &&
+ pred <= ICmpInst::LAST_ICMP_PREDICATE && "Invalid ICmp Predicate");
+
+ if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
+ return FC; // Fold a few common cases...
+
+ if (OnlyIfReduced)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { LHS, RHS };
+ // Get the key type with both the opcode and predicate
+ const ConstantExprKeyType Key(Instruction::ICmp, ArgVec, pred);
+
+ Type *ResultTy = Type::getInt1Ty(LHS->getContext());
+ if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
+ ResultTy = VectorType::get(ResultTy, VT->getNumElements());
+
+ LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ResultTy, Key);
+}
+
+Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
+ Constant *RHS, bool OnlyIfReduced) {
+ assert(LHS->getType() == RHS->getType());
+ assert(pred <= FCmpInst::LAST_FCMP_PREDICATE && "Invalid FCmp Predicate");
+
+ if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
+ return FC; // Fold a few common cases...
+
+ if (OnlyIfReduced)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { LHS, RHS };
+ // Get the key type with both the opcode and predicate
+ const ConstantExprKeyType Key(Instruction::FCmp, ArgVec, pred);
+
+ Type *ResultTy = Type::getInt1Ty(LHS->getContext());
+ if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
+ ResultTy = VectorType::get(ResultTy, VT->getNumElements());
+
+ LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ResultTy, Key);
+}
+
+Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx,
+ Type *OnlyIfReducedTy) {
+ assert(Val->getType()->isVectorTy() &&
+ "Tried to create extractelement operation on non-vector type!");
+ assert(Idx->getType()->isIntegerTy() &&
+ "Extractelement index must be an integer type!");
+
+ if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx))
+ return FC; // Fold a few common cases.
+
+ Type *ReqTy = Val->getType()->getVectorElementType();
+ if (OnlyIfReducedTy == ReqTy)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { Val, Idx };
+ const ConstantExprKeyType Key(Instruction::ExtractElement, ArgVec);
+
+ LLVMContextImpl *pImpl = Val->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
+}
+
+Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt,
+ Constant *Idx, Type *OnlyIfReducedTy) {
+ assert(Val->getType()->isVectorTy() &&
+ "Tried to create insertelement operation on non-vector type!");
+ assert(Elt->getType() == Val->getType()->getVectorElementType() &&
+ "Insertelement types must match!");
+ assert(Idx->getType()->isIntegerTy() &&
+ "Insertelement index must be i32 type!");
+
+ if (Constant *FC = ConstantFoldInsertElementInstruction(Val, Elt, Idx))
+ return FC; // Fold a few common cases.
+
+ if (OnlyIfReducedTy == Val->getType())
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { Val, Elt, Idx };
+ const ConstantExprKeyType Key(Instruction::InsertElement, ArgVec);
+
+ LLVMContextImpl *pImpl = Val->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(Val->getType(), Key);
+}
+
+Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
+ Constant *Mask, Type *OnlyIfReducedTy) {
+ assert(ShuffleVectorInst::isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector constant expr operands!");
+
+ if (Constant *FC = ConstantFoldShuffleVectorInstruction(V1, V2, Mask))
+ return FC; // Fold a few common cases.
+
+ unsigned NElts = Mask->getType()->getVectorNumElements();
+ Type *EltTy = V1->getType()->getVectorElementType();
+ Type *ShufTy = VectorType::get(EltTy, NElts);
+
+ if (OnlyIfReducedTy == ShufTy)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { V1, V2, Mask };
+ const ConstantExprKeyType Key(Instruction::ShuffleVector, ArgVec);
+
+ LLVMContextImpl *pImpl = ShufTy->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ShufTy, Key);
+}
+
+Constant *ConstantExpr::getInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> Idxs,
+ Type *OnlyIfReducedTy) {
+ assert(Agg->getType()->isFirstClassType() &&
+ "Non-first-class type for constant insertvalue expression");
+
+ assert(ExtractValueInst::getIndexedType(Agg->getType(),
+ Idxs) == Val->getType() &&
+ "insertvalue indices invalid!");
+ Type *ReqTy = Val->getType();
+
+ if (Constant *FC = ConstantFoldInsertValueInstruction(Agg, Val, Idxs))
+ return FC;
+
+ if (OnlyIfReducedTy == ReqTy)
+ return nullptr;
+
+ Constant *ArgVec[] = { Agg, Val };
+ const ConstantExprKeyType Key(Instruction::InsertValue, ArgVec, 0, 0, Idxs);
+
+ LLVMContextImpl *pImpl = Agg->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
+}
+
+Constant *ConstantExpr::getExtractValue(Constant *Agg, ArrayRef<unsigned> Idxs,
+ Type *OnlyIfReducedTy) {
+  assert(Agg->getType()->isFirstClassType() &&
+         "Non-first-class type for constant extractvalue expression");
+
+  Type *ReqTy = ExtractValueInst::getIndexedType(Agg->getType(), Idxs);
+  assert(ReqTy && "extractvalue indices invalid!");
+ if (Constant *FC = ConstantFoldExtractValueInstruction(Agg, Idxs))
+ return FC;
+
+ if (OnlyIfReducedTy == ReqTy)
+ return nullptr;
+
+ Constant *ArgVec[] = { Agg };
+ const ConstantExprKeyType Key(Instruction::ExtractValue, ArgVec, 0, 0, Idxs);
+
+ LLVMContextImpl *pImpl = Agg->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
+}
+
+Constant *ConstantExpr::getNeg(Constant *C, bool HasNUW, bool HasNSW) {
+ assert(C->getType()->isIntOrIntVectorTy() &&
+ "Cannot NEG a nonintegral value!");
+ return getSub(ConstantFP::getZeroValueForNegation(C->getType()),
+ C, HasNUW, HasNSW);
+}
+
+Constant *ConstantExpr::getFNeg(Constant *C) {
+ assert(C->getType()->isFPOrFPVectorTy() &&
+ "Cannot FNEG a non-floating-point value!");
+ return getFSub(ConstantFP::getZeroValueForNegation(C->getType()), C);
+}
+
+Constant *ConstantExpr::getNot(Constant *C) {
+ assert(C->getType()->isIntOrIntVectorTy() &&
+ "Cannot NOT a nonintegral value!");
+ return get(Instruction::Xor, C, Constant::getAllOnesValue(C->getType()));
+}
+
+Constant *ConstantExpr::getAdd(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Add, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getFAdd(Constant *C1, Constant *C2) {
+ return get(Instruction::FAdd, C1, C2);
+}
+
+Constant *ConstantExpr::getSub(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Sub, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getFSub(Constant *C1, Constant *C2) {
+ return get(Instruction::FSub, C1, C2);
+}
+
+Constant *ConstantExpr::getMul(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Mul, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getFMul(Constant *C1, Constant *C2) {
+ return get(Instruction::FMul, C1, C2);
+}
+
+Constant *ConstantExpr::getUDiv(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::UDiv, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
+}
+
+Constant *ConstantExpr::getSDiv(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::SDiv, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
+}
+
+Constant *ConstantExpr::getFDiv(Constant *C1, Constant *C2) {
+ return get(Instruction::FDiv, C1, C2);
+}
+
+Constant *ConstantExpr::getURem(Constant *C1, Constant *C2) {
+ return get(Instruction::URem, C1, C2);
+}
+
+Constant *ConstantExpr::getSRem(Constant *C1, Constant *C2) {
+ return get(Instruction::SRem, C1, C2);
+}
+
+Constant *ConstantExpr::getFRem(Constant *C1, Constant *C2) {
+ return get(Instruction::FRem, C1, C2);
+}
+
+Constant *ConstantExpr::getAnd(Constant *C1, Constant *C2) {
+ return get(Instruction::And, C1, C2);
+}
+
+Constant *ConstantExpr::getOr(Constant *C1, Constant *C2) {
+ return get(Instruction::Or, C1, C2);
+}
+
+Constant *ConstantExpr::getXor(Constant *C1, Constant *C2) {
+ return get(Instruction::Xor, C1, C2);
+}
+
+Constant *ConstantExpr::getShl(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Shl, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getLShr(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::LShr, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
+}
+
+Constant *ConstantExpr::getAShr(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::AShr, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
+}
+
+/// getBinOpIdentity - Return the identity for the given binary operation,
+/// i.e. a constant C such that X op C = X and C op X = X for every X. It
+/// returns null if the operator doesn't have an identity.
+Constant *ConstantExpr::getBinOpIdentity(unsigned Opcode, Type *Ty) {
+ switch (Opcode) {
+ default:
+ // Doesn't have an identity.
+ return nullptr;
+
+ case Instruction::Add:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return Constant::getNullValue(Ty);
+
+ case Instruction::Mul:
+ return ConstantInt::get(Ty, 1);
+
+ case Instruction::And:
+ return Constant::getAllOnesValue(Ty);
+ }
+}
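+
+// Illustrative results (sketch; I32 stands for a hypothetical i32 type):
+//   getBinOpIdentity(Instruction::Add, I32) --> i32 0
+//   getBinOpIdentity(Instruction::Mul, I32) --> i32 1
+//   getBinOpIdentity(Instruction::And, I32) --> i32 -1 (all ones)
+//   getBinOpIdentity(Instruction::Sub, I32) --> nullptr (0 is only a right
+//                                               identity, not a two-sided one)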
+
+/// getBinOpAbsorber - Return the absorbing element for the given binary
+/// operation, i.e. a constant C such that X op C = C and C op X = C for
+/// every X. For example, this returns zero for integer multiplication.
+/// It returns null if the operator doesn't have an absorbing element.
+Constant *ConstantExpr::getBinOpAbsorber(unsigned Opcode, Type *Ty) {
+ switch (Opcode) {
+ default:
+ // Doesn't have an absorber.
+ return nullptr;
+
+ case Instruction::Or:
+ return Constant::getAllOnesValue(Ty);
+
+ case Instruction::And:
+ case Instruction::Mul:
+ return Constant::getNullValue(Ty);
+ }
+}
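+
+// Illustrative results (sketch; I32 stands for a hypothetical i32 type):
+//   getBinOpAbsorber(Instruction::Mul, I32) --> i32 0
+//   getBinOpAbsorber(Instruction::And, I32) --> i32 0
+//   getBinOpAbsorber(Instruction::Or,  I32) --> i32 -1 (all ones)
+//   getBinOpAbsorber(Instruction::Add, I32) --> nullptr (no absorber)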
+
+// destroyConstant - Remove the constant from the constant table...
+//
+void ConstantExpr::destroyConstantImpl() {
+ getType()->getContext().pImpl->ExprConstants.remove(this);
+}
+
+const char *ConstantExpr::getOpcodeName() const {
+ return Instruction::getOpcodeName(getOpcode());
+}
+
+GetElementPtrConstantExpr::GetElementPtrConstantExpr(
+ Type *SrcElementTy, Constant *C, ArrayRef<Constant *> IdxList, Type *DestTy)
+ : ConstantExpr(DestTy, Instruction::GetElementPtr,
+ OperandTraits<GetElementPtrConstantExpr>::op_end(this) -
+ (IdxList.size() + 1),
+ IdxList.size() + 1),
+ SrcElementTy(SrcElementTy) {
+ Op<0>() = C;
+ Use *OperandList = getOperandList();
+ for (unsigned i = 0, E = IdxList.size(); i != E; ++i)
+ OperandList[i+1] = IdxList[i];
+}
+
+Type *GetElementPtrConstantExpr::getSourceElementType() const {
+ return SrcElementTy;
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantData* implementations
+
+void ConstantDataArray::anchor() {}
+void ConstantDataVector::anchor() {}
+
+/// getElementType - Return the element type of the array/vector.
+Type *ConstantDataSequential::getElementType() const {
+ return getType()->getElementType();
+}
+
+StringRef ConstantDataSequential::getRawDataValues() const {
+ return StringRef(DataElements, getNumElements()*getElementByteSize());
+}
+
+/// isElementTypeCompatible - Return true if a ConstantDataSequential can be
+/// formed with a vector or array of the specified element type.
+/// ConstantDataArray only works with normal float and int types that are
+/// stored densely in memory, not with things like i42 or x86_f80.
+bool ConstantDataSequential::isElementTypeCompatible(Type *Ty) {
+ if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) return true;
+ if (auto *IT = dyn_cast<IntegerType>(Ty)) {
+ switch (IT->getBitWidth()) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ return true;
+ default: break;
+ }
+ }
+ return false;
+}
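+
+// Illustrative results (sketch; Ctx is a hypothetical LLVMContext):
+//   isElementTypeCompatible(Type::getInt32Ty(Ctx))    --> true
+//   isElementTypeCompatible(Type::getIntNTy(Ctx, 42)) --> false (i42 is not
+//                                                       stored densely)
+//   isElementTypeCompatible(Type::getX86_FP80Ty(Ctx)) --> false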
+
+/// getNumElements - Return the number of elements in the array or vector.
+unsigned ConstantDataSequential::getNumElements() const {
+ if (ArrayType *AT = dyn_cast<ArrayType>(getType()))
+ return AT->getNumElements();
+ return getType()->getVectorNumElements();
+}
+
+
+/// getElementByteSize - Return the size in bytes of the elements in the data.
+uint64_t ConstantDataSequential::getElementByteSize() const {
+ return getElementType()->getPrimitiveSizeInBits()/8;
+}
+
+/// getElementPointer - Return the start of the specified element.
+const char *ConstantDataSequential::getElementPointer(unsigned Elt) const {
+ assert(Elt < getNumElements() && "Invalid Elt");
+ return DataElements+Elt*getElementByteSize();
+}
+
+
+/// isAllZeros - return true if the array is empty or all zeros.
+static bool isAllZeros(StringRef Arr) {
+ for (StringRef::iterator I = Arr.begin(), E = Arr.end(); I != E; ++I)
+ if (*I != 0)
+ return false;
+ return true;
+}
+
+/// getImpl - This is the underlying implementation of all of the
+/// ConstantDataSequential::get methods. They all thunk down to here, providing
+/// the correct element type. We take the bytes in as a StringRef because
+/// we *want* an underlying "char*" to avoid TBAA type punning violations.
+Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) {
+ assert(isElementTypeCompatible(Ty->getSequentialElementType()));
+ // If the elements are all zero or there are no elements, return a CAZ, which
+ // is more dense and canonical.
+ if (isAllZeros(Elements))
+ return ConstantAggregateZero::get(Ty);
+
+ // Do a lookup to see if we have already formed one of these.
+ auto &Slot =
+ *Ty->getContext()
+ .pImpl->CDSConstants.insert(std::make_pair(Elements, nullptr))
+ .first;
+
+  // The bucket can point to a linked list of different CDS's that have the
+  // same body but different types. For example, 0,0,0,1 could be a 4-element
+  // array of i8, or a 1-element array of i32. They'll both end up in the same
+  // StringMap bucket, linked up by their Next pointers. Walk the list.
+ ConstantDataSequential **Entry = &Slot.second;
+ for (ConstantDataSequential *Node = *Entry; Node;
+ Entry = &Node->Next, Node = *Entry)
+ if (Node->getType() == Ty)
+ return Node;
+
+ // Okay, we didn't get a hit. Create a node of the right class, link it in,
+ // and return it.
+ if (isa<ArrayType>(Ty))
+ return *Entry = new ConstantDataArray(Ty, Slot.first().data());
+
+ assert(isa<VectorType>(Ty));
+ return *Entry = new ConstantDataVector(Ty, Slot.first().data());
+}
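+
+// Illustrative sharing (sketch; Ctx is a hypothetical LLVMContext): on a
+// little-endian host,
+//   uint8_t  B[] = {0, 0, 0, 1};  // [4 x i8]
+//   uint32_t W[] = {0x01000000};  // [1 x i32], the same raw bytes
+// passed to ConstantDataArray::get(Ctx, ...) land in the same StringMap
+// bucket and are told apart only by the type check in the Next-chain walk.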
+
+void ConstantDataSequential::destroyConstantImpl() {
+ // Remove the constant from the StringMap.
+ StringMap<ConstantDataSequential*> &CDSConstants =
+ getType()->getContext().pImpl->CDSConstants;
+
+ StringMap<ConstantDataSequential*>::iterator Slot =
+ CDSConstants.find(getRawDataValues());
+
+ assert(Slot != CDSConstants.end() && "CDS not found in uniquing table");
+
+ ConstantDataSequential **Entry = &Slot->getValue();
+
+ // Remove the entry from the hash table.
+ if (!(*Entry)->Next) {
+ // If there is only one value in the bucket (common case) it must be this
+ // entry, and removing the entry should remove the bucket completely.
+ assert((*Entry) == this && "Hash mismatch in ConstantDataSequential");
+ getContext().pImpl->CDSConstants.erase(Slot);
+ } else {
+ // Otherwise, there are multiple entries linked off the bucket, unlink the
+ // node we care about but keep the bucket around.
+ for (ConstantDataSequential *Node = *Entry; ;
+ Entry = &Node->Next, Node = *Entry) {
+ assert(Node && "Didn't find entry in its uniquing hash table!");
+ // If we found our entry, unlink it from the list and we're done.
+ if (Node == this) {
+ *Entry = Node->Next;
+ break;
+ }
+ }
+ }
+
+ // If we were part of a list, make sure that we don't delete the list that is
+ // still owned by the uniquing map.
+ Next = nullptr;
+}
+
+/// get() constructors - Return a constant with array type with an element
+/// count and element type matching the ArrayRef passed in. Note that this
+/// can return a ConstantAggregateZero object.
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint8_t> Elts) {
+ Type *Ty = ArrayType::get(Type::getInt8Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
+ Type *Ty = ArrayType::get(Type::getInt16Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
+ Type *Ty = ArrayType::get(Type::getInt32Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
+ Type *Ty = ArrayType::get(Type::getInt64Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<float> Elts) {
+ Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) {
+ Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+}
+
+/// getFP() constructors - Return a constant with array type with an element
+/// count and element type of floating point with precision matching the
+/// number of bits in the ArrayRef passed in (i.e. half for 16 bits, float
+/// for 32 bits, double for 64 bits). Note that this can return a
+/// ConstantAggregateZero object.
+Constant *ConstantDataArray::getFP(LLVMContext &Context,
+ ArrayRef<uint16_t> Elts) {
+ Type *Ty = ArrayType::get(Type::getHalfTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+}
+Constant *ConstantDataArray::getFP(LLVMContext &Context,
+ ArrayRef<uint32_t> Elts) {
+ Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+}
+Constant *ConstantDataArray::getFP(LLVMContext &Context,
+ ArrayRef<uint64_t> Elts) {
+ Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+}
+
+/// getString - This method constructs a CDS and initializes it with a text
+/// string. The default behavior (AddNull==true) causes a null terminator to
+/// be placed at the end of the array (increasing the length of the string by
+/// one more than the StringRef would normally indicate). Pass AddNull=false
+/// to disable this behavior.
+Constant *ConstantDataArray::getString(LLVMContext &Context,
+ StringRef Str, bool AddNull) {
+ if (!AddNull) {
+ const uint8_t *Data = reinterpret_cast<const uint8_t *>(Str.data());
+ return get(Context, makeArrayRef(const_cast<uint8_t *>(Data),
+ Str.size()));
+ }
+
+ SmallVector<uint8_t, 64> ElementVals;
+ ElementVals.append(Str.begin(), Str.end());
+ ElementVals.push_back(0);
+ return get(Context, ElementVals);
+}
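+
+// Illustrative results (sketch; Ctx is a hypothetical LLVMContext):
+//   getString(Ctx, "hi")        --> [3 x i8] c"hi\00" (nul appended)
+//   getString(Ctx, "hi", false) --> [2 x i8] c"hi"    (raw bytes only)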
+
+/// get() constructors - Return a constant with vector type with an element
+/// count and element type matching the ArrayRef passed in. Note that this
+/// can return a ConstantAggregateZero object.
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt8Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt16Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt32Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt64Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) {
+ Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) {
+ Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+}
+
+/// getFP() constructors - Return a constant with vector type with an element
+/// count and element type of floating point with precision matching the
+/// number of bits in the ArrayRef passed in (i.e. half for 16 bits, float
+/// for 32 bits, double for 64 bits). Note that this can return a
+/// ConstantAggregateZero object.
+Constant *ConstantDataVector::getFP(LLVMContext &Context,
+ ArrayRef<uint16_t> Elts) {
+ Type *Ty = VectorType::get(Type::getHalfTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+}
+Constant *ConstantDataVector::getFP(LLVMContext &Context,
+ ArrayRef<uint32_t> Elts) {
+ Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+}
+Constant *ConstantDataVector::getFP(LLVMContext &Context,
+ ArrayRef<uint64_t> Elts) {
+ Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+}
+
+Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) {
+ assert(isElementTypeCompatible(V->getType()) &&
+ "Element type not compatible with ConstantData");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ if (CI->getType()->isIntegerTy(8)) {
+ SmallVector<uint8_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ if (CI->getType()->isIntegerTy(16)) {
+ SmallVector<uint16_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ if (CI->getType()->isIntegerTy(32)) {
+ SmallVector<uint32_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ assert(CI->getType()->isIntegerTy(64) && "Unsupported ConstantData type");
+ SmallVector<uint64_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
+ if (CFP->getType()->isHalfTy()) {
+ SmallVector<uint16_t, 16> Elts(
+ NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ return getFP(V->getContext(), Elts);
+ }
+ if (CFP->getType()->isFloatTy()) {
+ SmallVector<uint32_t, 16> Elts(
+ NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ return getFP(V->getContext(), Elts);
+ }
+ if (CFP->getType()->isDoubleTy()) {
+ SmallVector<uint64_t, 16> Elts(
+ NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ return getFP(V->getContext(), Elts);
+ }
+ }
+ return ConstantVector::getSplat(NumElts, V);
+}
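+
+// Illustrative use (sketch; Ctx is a hypothetical LLVMContext):
+//   Constant *Four = ConstantInt::get(Type::getInt32Ty(Ctx), 4);
+//   Constant *V = ConstantDataVector::getSplat(8, Four);
+// yields the dense constant <8 x i32> with every lane equal to 4, rather
+// than an eight-operand ConstantVector.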
+
+
+/// getElementAsInteger - If this is a sequential container of integers (of
+/// any size), return the specified element in the low bits of a uint64_t.
+uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const {
+ assert(isa<IntegerType>(getElementType()) &&
+ "Accessor can only be used when element is an integer");
+ const char *EltPtr = getElementPointer(Elt);
+
+ // The data is stored in host byte order, make sure to cast back to the right
+ // type to load with the right endianness.
+ switch (getElementType()->getIntegerBitWidth()) {
+ default: llvm_unreachable("Invalid bitwidth for CDS");
+  case 8:
+    return *reinterpret_cast<const uint8_t *>(EltPtr);
+  case 16:
+    return *reinterpret_cast<const uint16_t *>(EltPtr);
+  case 32:
+    return *reinterpret_cast<const uint32_t *>(EltPtr);
+  case 64:
+    return *reinterpret_cast<const uint64_t *>(EltPtr);
+ }
+}
+
+/// getElementAsAPFloat - If this is a sequential container of floating point
+/// type, return the specified element as an APFloat.
+APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
+ const char *EltPtr = getElementPointer(Elt);
+
+ switch (getElementType()->getTypeID()) {
+ default:
+ llvm_unreachable("Accessor can only be used when element is float/double!");
+ case Type::HalfTyID: {
+ auto EltVal = *reinterpret_cast<const uint16_t *>(EltPtr);
+ return APFloat(APFloat::IEEEhalf, APInt(16, EltVal));
+ }
+ case Type::FloatTyID: {
+ auto EltVal = *reinterpret_cast<const uint32_t *>(EltPtr);
+ return APFloat(APFloat::IEEEsingle, APInt(32, EltVal));
+ }
+ case Type::DoubleTyID: {
+ auto EltVal = *reinterpret_cast<const uint64_t *>(EltPtr);
+ return APFloat(APFloat::IEEEdouble, APInt(64, EltVal));
+ }
+ }
+}
+
+/// getElementAsFloat - If this is a sequential container of floats, return
+/// the specified element as a float.
+float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
+  assert(getElementType()->isFloatTy() &&
+         "Accessor can only be used when element is a 'float'");
+  const float *EltPtr =
+      reinterpret_cast<const float *>(getElementPointer(Elt));
+  return *EltPtr;
+}
+
+/// getElementAsDouble - If this is a sequential container of doubles, return
+/// the specified element as a double.
+double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
+  assert(getElementType()->isDoubleTy() &&
+         "Accessor can only be used when element is a 'double'");
+  const double *EltPtr =
+      reinterpret_cast<const double *>(getElementPointer(Elt));
+  return *EltPtr;
+}
+
+/// getElementAsConstant - Return a Constant for a specified index's element.
+/// Note that this has to compute a new constant to return, so it isn't as
+/// efficient as getElementAsInteger/Float/Double.
+Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
+ if (getElementType()->isHalfTy() || getElementType()->isFloatTy() ||
+ getElementType()->isDoubleTy())
+ return ConstantFP::get(getContext(), getElementAsAPFloat(Elt));
+
+ return ConstantInt::get(getElementType(), getElementAsInteger(Elt));
+}
+
+/// isString - This method returns true if this is an array of i8.
+bool ConstantDataSequential::isString() const {
+ return isa<ArrayType>(getType()) && getElementType()->isIntegerTy(8);
+}
+
+/// isCString - This method returns true if the array "isString", ends with a
+/// nul byte, and does not contain any other nul bytes.
+bool ConstantDataSequential::isCString() const {
+ if (!isString())
+ return false;
+
+ StringRef Str = getAsString();
+
+ // The last value must be nul.
+ if (Str.back() != 0) return false;
+
+ // Other elements must be non-nul.
+ return Str.drop_back().find(0) == StringRef::npos;
+}
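+
+// Illustrative results (sketch; Ctx is a hypothetical LLVMContext):
+//   getString(Ctx, "ab")                    --> isCString() == true
+//   getString(Ctx, "ab", /*AddNull=*/false) --> false (no trailing nul)
+//   getString(Ctx, StringRef("a\0b", 3))    --> false (interior nul byte)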
+
+/// getSplatValue - If this is a splat constant, meaning that all of the
+/// elements have the same value, return that value. Otherwise return nullptr.
+Constant *ConstantDataVector::getSplatValue() const {
+ const char *Base = getRawDataValues().data();
+
+ // Compare elements 1+ to the 0'th element.
+ unsigned EltSize = getElementByteSize();
+ for (unsigned i = 1, e = getNumElements(); i != e; ++i)
+ if (memcmp(Base, Base+i*EltSize, EltSize))
+ return nullptr;
+
+ // If they're all the same, return the 0th one as a representative.
+ return getElementAsConstant(0);
+}
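+
+// Illustrative behavior (sketch): for <4 x i8> <1, 1, 1, 1> every element's
+// bytes match element 0, so getSplatValue() returns i8 1; for
+// <4 x i8> <1, 1, 1, 2> the memcmp fails and it returns nullptr.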
+
+//===----------------------------------------------------------------------===//
+// handleOperandChange implementations
+
+/// Update this constant array to change uses of
+/// 'From' to be uses of 'To'. This must update the uniquing data structures
+/// etc.
+///
+/// Note that we intentionally replace all uses of From with To here. Consider
+/// a large array that uses 'From' 1000 times. By handling this case all here,
+/// ConstantArray::handleOperandChange is only invoked once, and that
+/// single invocation handles all 1000 uses. Handling them one at a time would
+/// work, but would be really slow because it would have to unique each updated
+/// array instance.
+///
+void Constant::handleOperandChange(Value *From, Value *To, Use *U) {
+ Value *Replacement = nullptr;
+ switch (getValueID()) {
+ default:
+ llvm_unreachable("Not a constant!");
+#define HANDLE_CONSTANT(Name) \
+ case Value::Name##Val: \
+ Replacement = cast<Name>(this)->handleOperandChangeImpl(From, To, U); \
+ break;
+#include "llvm/IR/Value.def"
+ }
+
+ // If handleOperandChangeImpl returned nullptr, then it handled
+ // replacing itself and we don't want to delete or replace anything else here.
+ if (!Replacement)
+ return;
+
+  // We do need to replace this constant with an existing value.
+ assert(Replacement != this && "I didn't contain From!");
+
+ // Everyone using this now uses the replacement.
+ replaceAllUsesWith(Replacement);
+
+ // Delete the old constant!
+ destroyConstant();
+}
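+
+// Illustrative scenario (sketch): if @g appears 1000 times in
+//   @a = constant [1000 x i32*] [i32* @g, i32* @g, ...]
+// then @g->replaceAllUsesWith(@h) reaches ConstantArray::handleOperandChange
+// once, which rebuilds the whole operand list with @h and re-uniques a
+// single time instead of once per updated use.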
+
+Value *ConstantInt::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+Value *ConstantFP::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+Value *ConstantTokenNone::handleOperandChangeImpl(Value *From, Value *To,
+ Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+Value *UndefValue::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+Value *ConstantPointerNull::handleOperandChangeImpl(Value *From, Value *To,
+ Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+Value *ConstantAggregateZero::handleOperandChangeImpl(Value *From, Value *To,
+ Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+Value *ConstantDataSequential::handleOperandChangeImpl(Value *From, Value *To,
+ Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+Value *ConstantArray::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
+ Constant *ToC = cast<Constant>(To);
+
+ SmallVector<Constant*, 8> Values;
+ Values.reserve(getNumOperands()); // Build replacement array.
+
+ // Fill values with the modified operands of the constant array. Also,
+ // compute whether this turns into an all-zeros array.
+ unsigned NumUpdated = 0;
+
+ // Keep track of whether all the values in the array are "ToC".
+ bool AllSame = true;
+ Use *OperandList = getOperandList();
+ for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
+ Constant *Val = cast<Constant>(O->get());
+ if (Val == From) {
+ Val = ToC;
+ ++NumUpdated;
+ }
+ Values.push_back(Val);
+ AllSame &= Val == ToC;
+ }
+
+ if (AllSame && ToC->isNullValue())
+ return ConstantAggregateZero::get(getType());
+
+ if (AllSame && isa<UndefValue>(ToC))
+ return UndefValue::get(getType());
+
+ // Check for any other type of constant-folding.
+ if (Constant *C = getImpl(getType(), Values))
+ return C;
+
+ // Update to the new value.
+ return getContext().pImpl->ArrayConstants.replaceOperandsInPlace(
+ Values, this, From, ToC, NumUpdated, U - OperandList);
+}
+
+Value *ConstantStruct::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
+ Constant *ToC = cast<Constant>(To);
+
+ Use *OperandList = getOperandList();
+ unsigned OperandToUpdate = U-OperandList;
+ assert(getOperand(OperandToUpdate) == From && "ReplaceAllUsesWith broken!");
+
+ SmallVector<Constant*, 8> Values;
+ Values.reserve(getNumOperands()); // Build replacement struct.
+
+ // Fill values with the modified operands of the constant struct. Also,
+ // compute whether this turns into an all-zeros struct.
+ bool isAllZeros = false;
+ bool isAllUndef = false;
+ if (ToC->isNullValue()) {
+ isAllZeros = true;
+ for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
+ Constant *Val = cast<Constant>(O->get());
+ Values.push_back(Val);
+ if (isAllZeros) isAllZeros = Val->isNullValue();
+ }
+ } else if (isa<UndefValue>(ToC)) {
+ isAllUndef = true;
+ for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
+ Constant *Val = cast<Constant>(O->get());
+ Values.push_back(Val);
+ if (isAllUndef) isAllUndef = isa<UndefValue>(Val);
+ }
+ } else {
+ for (Use *O = OperandList, *E = OperandList + getNumOperands(); O != E; ++O)
+ Values.push_back(cast<Constant>(O->get()));
+ }
+ Values[OperandToUpdate] = ToC;
+
+ if (isAllZeros)
+ return ConstantAggregateZero::get(getType());
+
+ if (isAllUndef)
+ return UndefValue::get(getType());
+
+ // Update to the new value.
+ return getContext().pImpl->StructConstants.replaceOperandsInPlace(
+ Values, this, From, ToC);
+}
+
+Value *ConstantVector::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
+ Constant *ToC = cast<Constant>(To);
+
+ SmallVector<Constant*, 8> Values;
+ Values.reserve(getNumOperands()); // Build replacement array...
+ unsigned NumUpdated = 0;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ Constant *Val = getOperand(i);
+ if (Val == From) {
+ ++NumUpdated;
+ Val = ToC;
+ }
+ Values.push_back(Val);
+ }
+
+ if (Constant *C = getImpl(Values))
+ return C;
+
+ // Update to the new value.
+ Use *OperandList = getOperandList();
+ return getContext().pImpl->VectorConstants.replaceOperandsInPlace(
+ Values, this, From, ToC, NumUpdated, U - OperandList);
+}
+
+Value *ConstantExpr::handleOperandChangeImpl(Value *From, Value *ToV, Use *U) {
+ assert(isa<Constant>(ToV) && "Cannot make Constant refer to non-constant!");
+ Constant *To = cast<Constant>(ToV);
+
+ SmallVector<Constant*, 8> NewOps;
+ unsigned NumUpdated = 0;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ Constant *Op = getOperand(i);
+ if (Op == From) {
+ ++NumUpdated;
+ Op = To;
+ }
+ NewOps.push_back(Op);
+ }
+ assert(NumUpdated && "I didn't contain From!");
+
+ if (Constant *C = getWithOperands(NewOps, getType(), true))
+ return C;
+
+ // Update to the new value.
+ Use *OperandList = getOperandList();
+ return getContext().pImpl->ExprConstants.replaceOperandsInPlace(
+ NewOps, this, From, To, NumUpdated, U - OperandList);
+}
+
+Instruction *ConstantExpr::getAsInstruction() {
+ SmallVector<Value *, 4> ValueOperands(op_begin(), op_end());
+ ArrayRef<Value*> Ops(ValueOperands);
+
+ switch (getOpcode()) {
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ return CastInst::Create((Instruction::CastOps)getOpcode(),
+ Ops[0], getType());
+ case Instruction::Select:
+ return SelectInst::Create(Ops[0], Ops[1], Ops[2]);
+ case Instruction::InsertElement:
+ return InsertElementInst::Create(Ops[0], Ops[1], Ops[2]);
+ case Instruction::ExtractElement:
+ return ExtractElementInst::Create(Ops[0], Ops[1]);
+ case Instruction::InsertValue:
+ return InsertValueInst::Create(Ops[0], Ops[1], getIndices());
+ case Instruction::ExtractValue:
+ return ExtractValueInst::Create(Ops[0], getIndices());
+ case Instruction::ShuffleVector:
+ return new ShuffleVectorInst(Ops[0], Ops[1], Ops[2]);
+
+ case Instruction::GetElementPtr: {
+ const auto *GO = cast<GEPOperator>(this);
+ if (GO->isInBounds())
+ return GetElementPtrInst::CreateInBounds(GO->getSourceElementType(),
+ Ops[0], Ops.slice(1));
+ return GetElementPtrInst::Create(GO->getSourceElementType(), Ops[0],
+ Ops.slice(1));
+ }
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ return CmpInst::Create((Instruction::OtherOps)getOpcode(),
+ (CmpInst::Predicate)getPredicate(), Ops[0], Ops[1]);
+
+ default:
+ assert(getNumOperands() == 2 && "Must be binary operator?");
+ BinaryOperator *BO =
+ BinaryOperator::Create((Instruction::BinaryOps)getOpcode(),
+ Ops[0], Ops[1]);
+ if (isa<OverflowingBinaryOperator>(BO)) {
+ BO->setHasNoUnsignedWrap(SubclassOptionalData &
+ OverflowingBinaryOperator::NoUnsignedWrap);
+ BO->setHasNoSignedWrap(SubclassOptionalData &
+ OverflowingBinaryOperator::NoSignedWrap);
+ }
+ if (isa<PossiblyExactOperator>(BO))
+ BO->setIsExact(SubclassOptionalData & PossiblyExactOperator::IsExact);
+ return BO;
+ }
+}
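+
+// Illustrative use (sketch; CE and InsertPt are hypothetical): a pass that
+// wants to expand a constant expression into a real instruction can write
+//   Instruction *I = CE->getAsInstruction();
+//   I->insertBefore(InsertPt);
+// The returned instruction is freshly created and is not uniqued or linked
+// into any basic block until the caller inserts it.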
diff --git a/contrib/llvm/lib/IR/ConstantsContext.h b/contrib/llvm/lib/IR/ConstantsContext.h
new file mode 100644
index 0000000..13fcbd2
--- /dev/null
+++ b/contrib/llvm/lib/IR/ConstantsContext.h
@@ -0,0 +1,671 @@
+//===-- ConstantsContext.h - Constants-related Context Internals ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various helper methods and classes used by
+// LLVMContextImpl for creating and managing constants.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_CONSTANTSCONTEXT_H
+#define LLVM_LIB_IR_CONSTANTSCONTEXT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+#include <tuple>
+
+#define DEBUG_TYPE "ir"
+
+namespace llvm {
+
+/// UnaryConstantExpr - This class is private to Constants.cpp, and is used
+/// behind the scenes to implement unary constant exprs.
+class UnaryConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly one operand
+ void *operator new(size_t s) {
+ return User::operator new(s, 1);
+ }
+ UnaryConstantExpr(unsigned Opcode, Constant *C, Type *Ty)
+ : ConstantExpr(Ty, Opcode, &Op<0>(), 1) {
+ Op<0>() = C;
+ }
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+
+/// BinaryConstantExpr - This class is private to Constants.cpp, and is used
+/// behind the scenes to implement binary constant exprs.
+class BinaryConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+ BinaryConstantExpr(unsigned Opcode, Constant *C1, Constant *C2,
+ unsigned Flags)
+ : ConstantExpr(C1->getType(), Opcode, &Op<0>(), 2) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ SubclassOptionalData = Flags;
+ }
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+
+/// SelectConstantExpr - This class is private to Constants.cpp, and is used
+/// behind the scenes to implement select constant exprs.
+class SelectConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly three operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 3);
+ }
+ SelectConstantExpr(Constant *C1, Constant *C2, Constant *C3)
+ : ConstantExpr(C2->getType(), Instruction::Select, &Op<0>(), 3) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ Op<2>() = C3;
+ }
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+
+/// ExtractElementConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// extractelement constant exprs.
+class ExtractElementConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+ ExtractElementConstantExpr(Constant *C1, Constant *C2)
+ : ConstantExpr(cast<VectorType>(C1->getType())->getElementType(),
+ Instruction::ExtractElement, &Op<0>(), 2) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ }
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+
+/// InsertElementConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// insertelement constant exprs.
+class InsertElementConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly three operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 3);
+ }
+ InsertElementConstantExpr(Constant *C1, Constant *C2, Constant *C3)
+ : ConstantExpr(C1->getType(), Instruction::InsertElement,
+ &Op<0>(), 3) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ Op<2>() = C3;
+ }
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+
+/// ShuffleVectorConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// shufflevector constant exprs.
+class ShuffleVectorConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly three operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 3);
+ }
+ ShuffleVectorConstantExpr(Constant *C1, Constant *C2, Constant *C3)
+ : ConstantExpr(VectorType::get(
+ cast<VectorType>(C1->getType())->getElementType(),
+ cast<VectorType>(C3->getType())->getNumElements()),
+ Instruction::ShuffleVector,
+ &Op<0>(), 3) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ Op<2>() = C3;
+ }
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+
+/// ExtractValueConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// extractvalue constant exprs.
+class ExtractValueConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly one operand
+ void *operator new(size_t s) {
+ return User::operator new(s, 1);
+ }
+ ExtractValueConstantExpr(Constant *Agg, ArrayRef<unsigned> IdxList,
+ Type *DestTy)
+ : ConstantExpr(DestTy, Instruction::ExtractValue, &Op<0>(), 1),
+ Indices(IdxList.begin(), IdxList.end()) {
+ Op<0>() = Agg;
+ }
+
+ /// Indices - These identify which value to extract.
+ const SmallVector<unsigned, 4> Indices;
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::ExtractValue;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// InsertValueConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// insertvalue constant exprs.
+class InsertValueConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+  // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+ InsertValueConstantExpr(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList, Type *DestTy)
+ : ConstantExpr(DestTy, Instruction::InsertValue, &Op<0>(), 2),
+ Indices(IdxList.begin(), IdxList.end()) {
+ Op<0>() = Agg;
+ Op<1>() = Val;
+ }
+
+ /// Indices - These identify the position for the insertion.
+ const SmallVector<unsigned, 4> Indices;
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::InsertValue;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// GetElementPtrConstantExpr - This class is private to Constants.cpp, and is
+/// used behind the scenes to implement getelementptr constant exprs.
+class GetElementPtrConstantExpr : public ConstantExpr {
+ Type *SrcElementTy;
+ void anchor() override;
+ GetElementPtrConstantExpr(Type *SrcElementTy, Constant *C,
+ ArrayRef<Constant *> IdxList, Type *DestTy);
+
+public:
+ static GetElementPtrConstantExpr *Create(Constant *C,
+ ArrayRef<Constant*> IdxList,
+ Type *DestTy,
+ unsigned Flags) {
+ return Create(
+ cast<PointerType>(C->getType()->getScalarType())->getElementType(), C,
+ IdxList, DestTy, Flags);
+ }
+ static GetElementPtrConstantExpr *Create(Type *SrcElementTy, Constant *C,
+ ArrayRef<Constant *> IdxList,
+ Type *DestTy, unsigned Flags) {
+ GetElementPtrConstantExpr *Result = new (IdxList.size() + 1)
+ GetElementPtrConstantExpr(SrcElementTy, C, IdxList, DestTy);
+ Result->SubclassOptionalData = Flags;
+ return Result;
+ }
+ Type *getSourceElementType() const;
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::GetElementPtr;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+// CompareConstantExpr - This class is private to Constants.cpp, and is used
+// behind the scenes to implement ICmp and FCmp constant expressions. This is
+// needed in order to store the predicate value for these instructions.
+class CompareConstantExpr : public ConstantExpr {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+public:
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+ unsigned short predicate;
+ CompareConstantExpr(Type *ty, Instruction::OtherOps opc,
+ unsigned short pred, Constant* LHS, Constant* RHS)
+ : ConstantExpr(ty, opc, &Op<0>(), 2), predicate(pred) {
+ Op<0>() = LHS;
+ Op<1>() = RHS;
+ }
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::ICmp ||
+ CE->getOpcode() == Instruction::FCmp;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+template <>
+struct OperandTraits<UnaryConstantExpr>
+ : public FixedNumOperandTraits<UnaryConstantExpr, 1> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryConstantExpr, Value)
+
+template <>
+struct OperandTraits<BinaryConstantExpr>
+ : public FixedNumOperandTraits<BinaryConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryConstantExpr, Value)
+
+template <>
+struct OperandTraits<SelectConstantExpr>
+ : public FixedNumOperandTraits<SelectConstantExpr, 3> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectConstantExpr, Value)
+
+template <>
+struct OperandTraits<ExtractElementConstantExpr>
+ : public FixedNumOperandTraits<ExtractElementConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementConstantExpr, Value)
+
+template <>
+struct OperandTraits<InsertElementConstantExpr>
+ : public FixedNumOperandTraits<InsertElementConstantExpr, 3> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementConstantExpr, Value)
+
+template <>
+struct OperandTraits<ShuffleVectorConstantExpr>
+ : public FixedNumOperandTraits<ShuffleVectorConstantExpr, 3> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorConstantExpr, Value)
+
+template <>
+struct OperandTraits<ExtractValueConstantExpr>
+ : public FixedNumOperandTraits<ExtractValueConstantExpr, 1> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractValueConstantExpr, Value)
+
+template <>
+struct OperandTraits<InsertValueConstantExpr>
+ : public FixedNumOperandTraits<InsertValueConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueConstantExpr, Value)
+
+template <>
+struct OperandTraits<GetElementPtrConstantExpr>
+ : public VariadicOperandTraits<GetElementPtrConstantExpr, 1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrConstantExpr, Value)
+
+template <>
+struct OperandTraits<CompareConstantExpr>
+ : public FixedNumOperandTraits<CompareConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CompareConstantExpr, Value)
+
+template <class ConstantClass> struct ConstantAggrKeyType;
+struct InlineAsmKeyType;
+struct ConstantExprKeyType;
+
+template <class ConstantClass> struct ConstantInfo;
+template <> struct ConstantInfo<ConstantExpr> {
+ typedef ConstantExprKeyType ValType;
+ typedef Type TypeClass;
+};
+template <> struct ConstantInfo<InlineAsm> {
+ typedef InlineAsmKeyType ValType;
+ typedef PointerType TypeClass;
+};
+template <> struct ConstantInfo<ConstantArray> {
+ typedef ConstantAggrKeyType<ConstantArray> ValType;
+ typedef ArrayType TypeClass;
+};
+template <> struct ConstantInfo<ConstantStruct> {
+ typedef ConstantAggrKeyType<ConstantStruct> ValType;
+ typedef StructType TypeClass;
+};
+template <> struct ConstantInfo<ConstantVector> {
+ typedef ConstantAggrKeyType<ConstantVector> ValType;
+ typedef VectorType TypeClass;
+};
+
+template <class ConstantClass> struct ConstantAggrKeyType {
+ ArrayRef<Constant *> Operands;
+ ConstantAggrKeyType(ArrayRef<Constant *> Operands) : Operands(Operands) {}
+ ConstantAggrKeyType(ArrayRef<Constant *> Operands, const ConstantClass *)
+ : Operands(Operands) {}
+ ConstantAggrKeyType(const ConstantClass *C,
+ SmallVectorImpl<Constant *> &Storage) {
+ assert(Storage.empty() && "Expected empty storage");
+ for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
+ Storage.push_back(C->getOperand(I));
+ Operands = Storage;
+ }
+
+ bool operator==(const ConstantAggrKeyType &X) const {
+ return Operands == X.Operands;
+ }
+ bool operator==(const ConstantClass *C) const {
+ if (Operands.size() != C->getNumOperands())
+ return false;
+ for (unsigned I = 0, E = Operands.size(); I != E; ++I)
+ if (Operands[I] != C->getOperand(I))
+ return false;
+ return true;
+ }
+ unsigned getHash() const {
+ return hash_combine_range(Operands.begin(), Operands.end());
+ }
+
+ typedef typename ConstantInfo<ConstantClass>::TypeClass TypeClass;
+ ConstantClass *create(TypeClass *Ty) const {
+ return new (Operands.size()) ConstantClass(Ty, Operands);
+ }
+};
+
+struct InlineAsmKeyType {
+ StringRef AsmString;
+ StringRef Constraints;
+ FunctionType *FTy;
+ bool HasSideEffects;
+ bool IsAlignStack;
+ InlineAsm::AsmDialect AsmDialect;
+
+ InlineAsmKeyType(StringRef AsmString, StringRef Constraints,
+ FunctionType *FTy, bool HasSideEffects, bool IsAlignStack,
+ InlineAsm::AsmDialect AsmDialect)
+ : AsmString(AsmString), Constraints(Constraints), FTy(FTy),
+ HasSideEffects(HasSideEffects), IsAlignStack(IsAlignStack),
+ AsmDialect(AsmDialect) {}
+ InlineAsmKeyType(const InlineAsm *Asm, SmallVectorImpl<Constant *> &)
+ : AsmString(Asm->getAsmString()), Constraints(Asm->getConstraintString()),
+ FTy(Asm->getFunctionType()), HasSideEffects(Asm->hasSideEffects()),
+ IsAlignStack(Asm->isAlignStack()), AsmDialect(Asm->getDialect()) {}
+
+ bool operator==(const InlineAsmKeyType &X) const {
+ return HasSideEffects == X.HasSideEffects &&
+ IsAlignStack == X.IsAlignStack && AsmDialect == X.AsmDialect &&
+ AsmString == X.AsmString && Constraints == X.Constraints &&
+ FTy == X.FTy;
+ }
+ bool operator==(const InlineAsm *Asm) const {
+ return HasSideEffects == Asm->hasSideEffects() &&
+ IsAlignStack == Asm->isAlignStack() &&
+ AsmDialect == Asm->getDialect() &&
+ AsmString == Asm->getAsmString() &&
+ Constraints == Asm->getConstraintString() &&
+ FTy == Asm->getFunctionType();
+ }
+ unsigned getHash() const {
+ return hash_combine(AsmString, Constraints, HasSideEffects, IsAlignStack,
+ AsmDialect, FTy);
+ }
+
+ typedef ConstantInfo<InlineAsm>::TypeClass TypeClass;
+ InlineAsm *create(TypeClass *Ty) const {
+ assert(PointerType::getUnqual(FTy) == Ty);
+ return new InlineAsm(FTy, AsmString, Constraints, HasSideEffects,
+ IsAlignStack, AsmDialect);
+ }
+};
+
+struct ConstantExprKeyType {
+ uint8_t Opcode;
+ uint8_t SubclassOptionalData;
+ uint16_t SubclassData;
+ ArrayRef<Constant *> Ops;
+ ArrayRef<unsigned> Indexes;
+ Type *ExplicitTy;
+
+ ConstantExprKeyType(unsigned Opcode, ArrayRef<Constant *> Ops,
+ unsigned short SubclassData = 0,
+ unsigned short SubclassOptionalData = 0,
+ ArrayRef<unsigned> Indexes = None,
+ Type *ExplicitTy = nullptr)
+ : Opcode(Opcode), SubclassOptionalData(SubclassOptionalData),
+ SubclassData(SubclassData), Ops(Ops), Indexes(Indexes),
+ ExplicitTy(ExplicitTy) {}
+  ConstantExprKeyType(ArrayRef<Constant *> Operands, const ConstantExpr *CE)
+      : Opcode(CE->getOpcode()),
+        SubclassOptionalData(CE->getRawSubclassOptionalData()),
+        SubclassData(CE->isCompare() ? CE->getPredicate() : 0), Ops(Operands),
+        Indexes(CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()),
+        ExplicitTy(nullptr) {}
+  ConstantExprKeyType(const ConstantExpr *CE,
+                      SmallVectorImpl<Constant *> &Storage)
+      : Opcode(CE->getOpcode()),
+        SubclassOptionalData(CE->getRawSubclassOptionalData()),
+        SubclassData(CE->isCompare() ? CE->getPredicate() : 0),
+        Indexes(CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()),
+        ExplicitTy(nullptr) {
+ assert(Storage.empty() && "Expected empty storage");
+ for (unsigned I = 0, E = CE->getNumOperands(); I != E; ++I)
+ Storage.push_back(CE->getOperand(I));
+ Ops = Storage;
+ }
+
+ bool operator==(const ConstantExprKeyType &X) const {
+ return Opcode == X.Opcode && SubclassData == X.SubclassData &&
+ SubclassOptionalData == X.SubclassOptionalData && Ops == X.Ops &&
+ Indexes == X.Indexes;
+ }
+
+ bool operator==(const ConstantExpr *CE) const {
+ if (Opcode != CE->getOpcode())
+ return false;
+ if (SubclassOptionalData != CE->getRawSubclassOptionalData())
+ return false;
+ if (Ops.size() != CE->getNumOperands())
+ return false;
+ if (SubclassData != (CE->isCompare() ? CE->getPredicate() : 0))
+ return false;
+ for (unsigned I = 0, E = Ops.size(); I != E; ++I)
+ if (Ops[I] != CE->getOperand(I))
+ return false;
+ if (Indexes != (CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()))
+ return false;
+ return true;
+ }
+
+ unsigned getHash() const {
+ return hash_combine(Opcode, SubclassOptionalData, SubclassData,
+ hash_combine_range(Ops.begin(), Ops.end()),
+ hash_combine_range(Indexes.begin(), Indexes.end()));
+ }
+
+ typedef ConstantInfo<ConstantExpr>::TypeClass TypeClass;
+ ConstantExpr *create(TypeClass *Ty) const {
+ switch (Opcode) {
+ default:
+ if (Instruction::isCast(Opcode))
+ return new UnaryConstantExpr(Opcode, Ops[0], Ty);
+ if ((Opcode >= Instruction::BinaryOpsBegin &&
+ Opcode < Instruction::BinaryOpsEnd))
+ return new BinaryConstantExpr(Opcode, Ops[0], Ops[1],
+ SubclassOptionalData);
+ llvm_unreachable("Invalid ConstantExpr!");
+ case Instruction::Select:
+ return new SelectConstantExpr(Ops[0], Ops[1], Ops[2]);
+ case Instruction::ExtractElement:
+ return new ExtractElementConstantExpr(Ops[0], Ops[1]);
+ case Instruction::InsertElement:
+ return new InsertElementConstantExpr(Ops[0], Ops[1], Ops[2]);
+ case Instruction::ShuffleVector:
+ return new ShuffleVectorConstantExpr(Ops[0], Ops[1], Ops[2]);
+ case Instruction::InsertValue:
+ return new InsertValueConstantExpr(Ops[0], Ops[1], Indexes, Ty);
+ case Instruction::ExtractValue:
+ return new ExtractValueConstantExpr(Ops[0], Indexes, Ty);
+ case Instruction::GetElementPtr:
+ return GetElementPtrConstantExpr::Create(
+ ExplicitTy ? ExplicitTy
+ : cast<PointerType>(Ops[0]->getType()->getScalarType())
+ ->getElementType(),
+ Ops[0], Ops.slice(1), Ty, SubclassOptionalData);
+ case Instruction::ICmp:
+ return new CompareConstantExpr(Ty, Instruction::ICmp, SubclassData,
+ Ops[0], Ops[1]);
+ case Instruction::FCmp:
+ return new CompareConstantExpr(Ty, Instruction::FCmp, SubclassData,
+ Ops[0], Ops[1]);
+ }
+ }
+};
+
+template <class ConstantClass> class ConstantUniqueMap {
+public:
+ typedef typename ConstantInfo<ConstantClass>::ValType ValType;
+ typedef typename ConstantInfo<ConstantClass>::TypeClass TypeClass;
+ typedef std::pair<TypeClass *, ValType> LookupKey;
+
+private:
+ struct MapInfo {
+ typedef DenseMapInfo<ConstantClass *> ConstantClassInfo;
+ static inline ConstantClass *getEmptyKey() {
+ return ConstantClassInfo::getEmptyKey();
+ }
+ static inline ConstantClass *getTombstoneKey() {
+ return ConstantClassInfo::getTombstoneKey();
+ }
+ static unsigned getHashValue(const ConstantClass *CP) {
+ SmallVector<Constant *, 8> Storage;
+ return getHashValue(LookupKey(CP->getType(), ValType(CP, Storage)));
+ }
+ static bool isEqual(const ConstantClass *LHS, const ConstantClass *RHS) {
+ return LHS == RHS;
+ }
+ static unsigned getHashValue(const LookupKey &Val) {
+ return hash_combine(Val.first, Val.second.getHash());
+ }
+ static bool isEqual(const LookupKey &LHS, const ConstantClass *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ if (LHS.first != RHS->getType())
+ return false;
+ return LHS.second == RHS;
+ }
+ };
+
+public:
+ typedef DenseMap<ConstantClass *, char, MapInfo> MapTy;
+
+private:
+ MapTy Map;
+
+public:
+ typename MapTy::iterator map_begin() { return Map.begin(); }
+ typename MapTy::iterator map_end() { return Map.end(); }
+
+ void freeConstants() {
+ for (auto &I : Map)
+ // Asserts that use_empty().
+ delete I.first;
+ }
+
+private:
+ ConstantClass *create(TypeClass *Ty, ValType V) {
+ ConstantClass *Result = V.create(Ty);
+
+ assert(Result->getType() == Ty && "Type specified is not correct!");
+ insert(Result);
+
+ return Result;
+ }
+
+public:
+ /// Return the specified constant from the map, creating it if necessary.
+ ConstantClass *getOrCreate(TypeClass *Ty, ValType V) {
+ LookupKey Lookup(Ty, V);
+ ConstantClass *Result = nullptr;
+
+ auto I = find(Lookup);
+ if (I == Map.end())
+ Result = create(Ty, V);
+ else
+ Result = I->first;
+ assert(Result && "Unexpected nullptr");
+
+ return Result;
+ }
+
+ /// Find the constant by lookup key.
+ typename MapTy::iterator find(LookupKey Lookup) {
+ return Map.find_as(Lookup);
+ }
+
+ /// Insert the constant into its proper slot.
+ void insert(ConstantClass *CP) { Map[CP] = '\0'; }
+
+  /// Remove this constant from the map.
+ void remove(ConstantClass *CP) {
+ typename MapTy::iterator I = Map.find(CP);
+ assert(I != Map.end() && "Constant not found in constant table!");
+ assert(I->first == CP && "Didn't find correct element?");
+ Map.erase(I);
+ }
+
+ ConstantClass *replaceOperandsInPlace(ArrayRef<Constant *> Operands,
+ ConstantClass *CP, Value *From,
+ Constant *To, unsigned NumUpdated = 0,
+ unsigned OperandNo = ~0u) {
+ LookupKey Lookup(CP->getType(), ValType(Operands, CP));
+ auto I = find(Lookup);
+ if (I != Map.end())
+ return I->first;
+
+ // Update to the new value. Optimize for the case when we have a single
+ // operand that we're changing, but handle bulk updates efficiently.
+ remove(CP);
+ if (NumUpdated == 1) {
+ assert(OperandNo < CP->getNumOperands() && "Invalid index");
+ assert(CP->getOperand(OperandNo) != To && "I didn't contain From!");
+ CP->setOperand(OperandNo, To);
+ } else {
+ for (unsigned I = 0, E = CP->getNumOperands(); I != E; ++I)
+ if (CP->getOperand(I) == From)
+ CP->setOperand(I, To);
+ }
+ insert(CP);
+ return nullptr;
+ }
+
+  void dump() const { DEBUG(dbgs() << "Constants.cpp: ConstantUniqueMap\n"); }
+};
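+
+// Usage sketch (illustrative; it mirrors how Constants.cpp drives these maps
+// through LLVMContextImpl, e.g. its ExprConstants member). A creation helper
+// builds a key describing the desired node and defers to getOrCreate, so
+// structurally identical constants collapse to a single object:
+//
+//   ConstantExprKeyType Key(Instruction::Add, {LHS, RHS},
+//                           /*SubclassData=*/0,
+//                           /*SubclassOptionalData=*/Flags);
+//   return Context.pImpl->ExprConstants.getOrCreate(LHS->getType(), Key);
+//
+// Lookup hashes the key with getHash() and compares against existing nodes
+// with operator==(const ConstantExpr *), so no temporary constant is
+// allocated unless the node is genuinely new.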
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/IR/Core.cpp b/contrib/llvm/lib/IR/Core.cpp
new file mode 100644
index 0000000..591dafa
--- /dev/null
+++ b/contrib/llvm/lib/IR/Core.cpp
@@ -0,0 +1,2952 @@
+//===-- Core.cpp ----------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the common infrastructure (including the C bindings)
+// for libLLVMCore.a, which implements the LLVM intermediate representation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/Core.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <system_error>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ir"
+
+void llvm::initializeCore(PassRegistry &Registry) {
+ initializeDominatorTreeWrapperPassPass(Registry);
+ initializePrintModulePassWrapperPass(Registry);
+ initializePrintFunctionPassWrapperPass(Registry);
+ initializePrintBasicBlockPassPass(Registry);
+ initializeVerifierLegacyPassPass(Registry);
+}
+
+void LLVMInitializeCore(LLVMPassRegistryRef R) {
+ initializeCore(*unwrap(R));
+}
+
+void LLVMShutdown() {
+ llvm_shutdown();
+}
+
+/*===-- Error handling ----------------------------------------------------===*/
+
+char *LLVMCreateMessage(const char *Message) {
+ return strdup(Message);
+}
+
+void LLVMDisposeMessage(char *Message) {
+ free(Message);
+}
+
+
+/*===-- Operations on contexts --------------------------------------------===*/
+
+LLVMContextRef LLVMContextCreate() {
+ return wrap(new LLVMContext());
+}
+
+LLVMContextRef LLVMGetGlobalContext() {
+ return wrap(&getGlobalContext());
+}
+
+void LLVMContextSetDiagnosticHandler(LLVMContextRef C,
+ LLVMDiagnosticHandler Handler,
+ void *DiagnosticContext) {
+ unwrap(C)->setDiagnosticHandler(
+ LLVM_EXTENSION reinterpret_cast<LLVMContext::DiagnosticHandlerTy>(Handler),
+ DiagnosticContext);
+}
+
+void LLVMContextSetYieldCallback(LLVMContextRef C, LLVMYieldCallback Callback,
+ void *OpaqueHandle) {
+ auto YieldCallback =
+ LLVM_EXTENSION reinterpret_cast<LLVMContext::YieldCallbackTy>(Callback);
+ unwrap(C)->setYieldCallback(YieldCallback, OpaqueHandle);
+}
+
+void LLVMContextDispose(LLVMContextRef C) {
+ delete unwrap(C);
+}
+
+unsigned LLVMGetMDKindIDInContext(LLVMContextRef C, const char* Name,
+ unsigned SLen) {
+ return unwrap(C)->getMDKindID(StringRef(Name, SLen));
+}
+
+unsigned LLVMGetMDKindID(const char* Name, unsigned SLen) {
+ return LLVMGetMDKindIDInContext(LLVMGetGlobalContext(), Name, SLen);
+}
+
+char *LLVMGetDiagInfoDescription(LLVMDiagnosticInfoRef DI) {
+ std::string MsgStorage;
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+
+ unwrap(DI)->print(DP);
+ Stream.flush();
+
+ return LLVMCreateMessage(MsgStorage.c_str());
+}
+
+LLVMDiagnosticSeverity LLVMGetDiagInfoSeverity(LLVMDiagnosticInfoRef DI){
+ LLVMDiagnosticSeverity severity;
+
+ switch(unwrap(DI)->getSeverity()) {
+ default:
+ severity = LLVMDSError;
+ break;
+ case DS_Warning:
+ severity = LLVMDSWarning;
+ break;
+ case DS_Remark:
+ severity = LLVMDSRemark;
+ break;
+ case DS_Note:
+ severity = LLVMDSNote;
+ break;
+ }
+
+ return severity;
+}
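+
+// Illustrative usage sketch, not part of the upstream file: a client installs
+// a callback with LLVMContextSetDiagnosticHandler and owns every string it
+// obtains from LLVMGetDiagInfoDescription. The example* names below are
+// hypothetical.
+static void exampleDiagnosticCallback(LLVMDiagnosticInfoRef DI, void *) {
+  char *Msg = LLVMGetDiagInfoDescription(DI);
+  LLVMDiagnosticSeverity Sev = LLVMGetDiagInfoSeverity(DI);
+  (void)Sev;               // e.g. treat LLVMDSError as fatal, log the rest
+  LLVMDisposeMessage(Msg); // description strings are caller-owned
+}
+static void exampleInstallDiagnosticHandler(LLVMContextRef C) {
+  LLVMContextSetDiagnosticHandler(C, exampleDiagnosticCallback, nullptr);
+}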
+
+/*===-- Operations on modules ---------------------------------------------===*/
+
+LLVMModuleRef LLVMModuleCreateWithName(const char *ModuleID) {
+ return wrap(new Module(ModuleID, getGlobalContext()));
+}
+
+LLVMModuleRef LLVMModuleCreateWithNameInContext(const char *ModuleID,
+ LLVMContextRef C) {
+ return wrap(new Module(ModuleID, *unwrap(C)));
+}
+
+void LLVMDisposeModule(LLVMModuleRef M) {
+ delete unwrap(M);
+}
+
+/*--.. Data layout .........................................................--*/
+const char * LLVMGetDataLayout(LLVMModuleRef M) {
+ return unwrap(M)->getDataLayoutStr().c_str();
+}
+
+void LLVMSetDataLayout(LLVMModuleRef M, const char *Triple) {
+ unwrap(M)->setDataLayout(Triple);
+}
+
+/*--.. Target triple .......................................................--*/
+const char * LLVMGetTarget(LLVMModuleRef M) {
+ return unwrap(M)->getTargetTriple().c_str();
+}
+
+void LLVMSetTarget(LLVMModuleRef M, const char *Triple) {
+ unwrap(M)->setTargetTriple(Triple);
+}
+
+void LLVMDumpModule(LLVMModuleRef M) {
+ unwrap(M)->dump();
+}
+
+LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
+ char **ErrorMessage) {
+ std::error_code EC;
+ raw_fd_ostream dest(Filename, EC, sys::fs::F_Text);
+ if (EC) {
+ *ErrorMessage = strdup(EC.message().c_str());
+ return true;
+ }
+
+ unwrap(M)->print(dest, nullptr);
+
+ dest.close();
+
+ if (dest.has_error()) {
+ *ErrorMessage = strdup("Error printing to file");
+ return true;
+ }
+
+ return false;
+}
+
+char *LLVMPrintModuleToString(LLVMModuleRef M) {
+ std::string buf;
+ raw_string_ostream os(buf);
+
+ unwrap(M)->print(os, nullptr);
+ os.flush();
+
+ return strdup(buf.c_str());
+}
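+
+// Illustrative usage sketch, not part of the upstream file: printing a
+// freshly built module to a caller-owned string. exampleDumpModuleText is a
+// hypothetical name; the triple is only a placeholder.
+static void exampleDumpModuleText(LLVMContextRef C) {
+  LLVMModuleRef M = LLVMModuleCreateWithNameInContext("demo", C);
+  LLVMSetTarget(M, "x86_64-unknown-linux-gnu");
+  char *IR = LLVMPrintModuleToString(M); // strdup'd textual IR
+  LLVMDisposeMessage(IR);                // release like any other message
+  LLVMDisposeModule(M);
+}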
+
+/*--.. Operations on inline assembler ......................................--*/
+void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm) {
+ unwrap(M)->setModuleInlineAsm(StringRef(Asm));
+}
+
+
+/*--.. Operations on module contexts ......................................--*/
+LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M) {
+ return wrap(&unwrap(M)->getContext());
+}
+
+
+/*===-- Operations on types -----------------------------------------------===*/
+
+/*--.. Operations on all types (mostly) ....................................--*/
+
+LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
+ switch (unwrap(Ty)->getTypeID()) {
+ case Type::VoidTyID:
+ return LLVMVoidTypeKind;
+ case Type::HalfTyID:
+ return LLVMHalfTypeKind;
+ case Type::FloatTyID:
+ return LLVMFloatTypeKind;
+ case Type::DoubleTyID:
+ return LLVMDoubleTypeKind;
+ case Type::X86_FP80TyID:
+ return LLVMX86_FP80TypeKind;
+ case Type::FP128TyID:
+ return LLVMFP128TypeKind;
+ case Type::PPC_FP128TyID:
+ return LLVMPPC_FP128TypeKind;
+ case Type::LabelTyID:
+ return LLVMLabelTypeKind;
+ case Type::MetadataTyID:
+ return LLVMMetadataTypeKind;
+ case Type::IntegerTyID:
+ return LLVMIntegerTypeKind;
+ case Type::FunctionTyID:
+ return LLVMFunctionTypeKind;
+ case Type::StructTyID:
+ return LLVMStructTypeKind;
+ case Type::ArrayTyID:
+ return LLVMArrayTypeKind;
+ case Type::PointerTyID:
+ return LLVMPointerTypeKind;
+ case Type::VectorTyID:
+ return LLVMVectorTypeKind;
+ case Type::X86_MMXTyID:
+ return LLVMX86_MMXTypeKind;
+ case Type::TokenTyID:
+ return LLVMTokenTypeKind;
+ }
+ llvm_unreachable("Unhandled TypeID.");
+}
+
+LLVMBool LLVMTypeIsSized(LLVMTypeRef Ty)
+{
+ return unwrap(Ty)->isSized();
+}
+
+LLVMContextRef LLVMGetTypeContext(LLVMTypeRef Ty) {
+ return wrap(&unwrap(Ty)->getContext());
+}
+
+void LLVMDumpType(LLVMTypeRef Ty) {
+ return unwrap(Ty)->dump();
+}
+
+char *LLVMPrintTypeToString(LLVMTypeRef Ty) {
+ std::string buf;
+ raw_string_ostream os(buf);
+
+ if (unwrap(Ty))
+ unwrap(Ty)->print(os);
+ else
+ os << "Printing <null> Type";
+
+ os.flush();
+
+ return strdup(buf.c_str());
+}
+
+/*--.. Operations on integer types .........................................--*/
+
+LLVMTypeRef LLVMInt1TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt1Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt8TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt8Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt16TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt16Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt32TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt32Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt64TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt64Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt128TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt128Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMIntTypeInContext(LLVMContextRef C, unsigned NumBits) {
+ return wrap(IntegerType::get(*unwrap(C), NumBits));
+}
+
+LLVMTypeRef LLVMInt1Type(void) {
+ return LLVMInt1TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt8Type(void) {
+ return LLVMInt8TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt16Type(void) {
+ return LLVMInt16TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt32Type(void) {
+ return LLVMInt32TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt64Type(void) {
+ return LLVMInt64TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt128Type(void) {
+ return LLVMInt128TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMIntType(unsigned NumBits) {
+ return LLVMIntTypeInContext(LLVMGetGlobalContext(), NumBits);
+}
+
+unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy) {
+ return unwrap<IntegerType>(IntegerTy)->getBitWidth();
+}
+
+/*--.. Operations on real types ............................................--*/
+
+LLVMTypeRef LLVMHalfTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getHalfTy(*unwrap(C));
+}
+LLVMTypeRef LLVMFloatTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getFloatTy(*unwrap(C));
+}
+LLVMTypeRef LLVMDoubleTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getDoubleTy(*unwrap(C));
+}
+LLVMTypeRef LLVMX86FP80TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getX86_FP80Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMFP128TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getFP128Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getPPC_FP128Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getX86_MMXTy(*unwrap(C));
+}
+LLVMTypeRef LLVMTokenTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getTokenTy(*unwrap(C));
+}
+
+LLVMTypeRef LLVMHalfType(void) {
+ return LLVMHalfTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMFloatType(void) {
+ return LLVMFloatTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMDoubleType(void) {
+ return LLVMDoubleTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMX86FP80Type(void) {
+ return LLVMX86FP80TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMFP128Type(void) {
+ return LLVMFP128TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMPPCFP128Type(void) {
+ return LLVMPPCFP128TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMX86MMXType(void) {
+ return LLVMX86MMXTypeInContext(LLVMGetGlobalContext());
+}
+
+/*--.. Operations on function types ........................................--*/
+
+LLVMTypeRef LLVMFunctionType(LLVMTypeRef ReturnType,
+ LLVMTypeRef *ParamTypes, unsigned ParamCount,
+ LLVMBool IsVarArg) {
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ return wrap(FunctionType::get(unwrap(ReturnType), Tys, IsVarArg != 0));
+}
+
+LLVMBool LLVMIsFunctionVarArg(LLVMTypeRef FunctionTy) {
+ return unwrap<FunctionType>(FunctionTy)->isVarArg();
+}
+
+LLVMTypeRef LLVMGetReturnType(LLVMTypeRef FunctionTy) {
+ return wrap(unwrap<FunctionType>(FunctionTy)->getReturnType());
+}
+
+unsigned LLVMCountParamTypes(LLVMTypeRef FunctionTy) {
+ return unwrap<FunctionType>(FunctionTy)->getNumParams();
+}
+
+void LLVMGetParamTypes(LLVMTypeRef FunctionTy, LLVMTypeRef *Dest) {
+ FunctionType *Ty = unwrap<FunctionType>(FunctionTy);
+ for (FunctionType::param_iterator I = Ty->param_begin(),
+ E = Ty->param_end(); I != E; ++I)
+ *Dest++ = wrap(*I);
+}
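+
+// Illustrative usage sketch, not part of the upstream file: building an
+// i32 (i32, i32) signature and reading its pieces back. exampleFunctionType
+// is a hypothetical name.
+static void exampleFunctionType(LLVMContextRef C) {
+  LLVMTypeRef I32 = LLVMInt32TypeInContext(C);
+  LLVMTypeRef Params[] = {I32, I32};
+  LLVMTypeRef FnTy = LLVMFunctionType(I32, Params, 2, /*IsVarArg=*/0);
+  LLVMTypeRef Fetched[2];
+  LLVMGetParamTypes(FnTy, Fetched); // caller supplies a large-enough buffer
+  assert(LLVMCountParamTypes(FnTy) == 2 && !LLVMIsFunctionVarArg(FnTy));
+  (void)Fetched;
+}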
+
+/*--.. Operations on struct types ..........................................--*/
+
+LLVMTypeRef LLVMStructTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed) {
+ ArrayRef<Type*> Tys(unwrap(ElementTypes), ElementCount);
+ return wrap(StructType::get(*unwrap(C), Tys, Packed != 0));
+}
+
+LLVMTypeRef LLVMStructType(LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed) {
+ return LLVMStructTypeInContext(LLVMGetGlobalContext(), ElementTypes,
+ ElementCount, Packed);
+}
+
+LLVMTypeRef LLVMStructCreateNamed(LLVMContextRef C, const char *Name)
+{
+ return wrap(StructType::create(*unwrap(C), Name));
+}
+
+const char *LLVMGetStructName(LLVMTypeRef Ty)
+{
+ StructType *Type = unwrap<StructType>(Ty);
+ if (!Type->hasName())
+ return nullptr;
+ return Type->getName().data();
+}
+
+void LLVMStructSetBody(LLVMTypeRef StructTy, LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed) {
+ ArrayRef<Type*> Tys(unwrap(ElementTypes), ElementCount);
+ unwrap<StructType>(StructTy)->setBody(Tys, Packed != 0);
+}
+
+unsigned LLVMCountStructElementTypes(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->getNumElements();
+}
+
+void LLVMGetStructElementTypes(LLVMTypeRef StructTy, LLVMTypeRef *Dest) {
+ StructType *Ty = unwrap<StructType>(StructTy);
+ for (StructType::element_iterator I = Ty->element_begin(),
+ E = Ty->element_end(); I != E; ++I)
+ *Dest++ = wrap(*I);
+}
+
+LLVMTypeRef LLVMStructGetTypeAtIndex(LLVMTypeRef StructTy, unsigned i) {
+ StructType *Ty = unwrap<StructType>(StructTy);
+ return wrap(Ty->getTypeAtIndex(i));
+}
+
+LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->isPacked();
+}
+
+LLVMBool LLVMIsOpaqueStruct(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->isOpaque();
+}
+
+LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name) {
+ return wrap(unwrap(M)->getTypeByName(Name));
+}
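+
+// Illustrative usage sketch, not part of the upstream file: a named struct
+// starts opaque and is completed later with LLVMStructSetBody, which makes
+// self-referential types expressible. exampleNamedStruct is a hypothetical
+// name.
+static void exampleNamedStruct(LLVMContextRef C) {
+  LLVMTypeRef Node = LLVMStructCreateNamed(C, "Node"); // opaque for now
+  LLVMTypeRef Fields[] = {LLVMInt32TypeInContext(C),
+                          LLVMPointerType(Node, 0)};   // next pointer
+  LLVMStructSetBody(Node, Fields, 2, /*Packed=*/0);
+  assert(!LLVMIsOpaqueStruct(Node) && LLVMCountStructElementTypes(Node) == 2);
+}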
+
+/*--.. Operations on array, pointer, and vector types (sequence types) .....--*/
+
+LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount) {
+ return wrap(ArrayType::get(unwrap(ElementType), ElementCount));
+}
+
+LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace) {
+ return wrap(PointerType::get(unwrap(ElementType), AddressSpace));
+}
+
+LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount) {
+ return wrap(VectorType::get(unwrap(ElementType), ElementCount));
+}
+
+LLVMTypeRef LLVMGetElementType(LLVMTypeRef Ty) {
+ return wrap(unwrap<SequentialType>(Ty)->getElementType());
+}
+
+unsigned LLVMGetArrayLength(LLVMTypeRef ArrayTy) {
+ return unwrap<ArrayType>(ArrayTy)->getNumElements();
+}
+
+unsigned LLVMGetPointerAddressSpace(LLVMTypeRef PointerTy) {
+ return unwrap<PointerType>(PointerTy)->getAddressSpace();
+}
+
+unsigned LLVMGetVectorSize(LLVMTypeRef VectorTy) {
+ return unwrap<VectorType>(VectorTy)->getNumElements();
+}
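+
+// Illustrative usage sketch, not part of the upstream file: all three
+// sequence kinds answer LLVMGetElementType, while the size queries are
+// per-kind. exampleSequenceTypes is a hypothetical name.
+static void exampleSequenceTypes(LLVMContextRef C) {
+  LLVMTypeRef F = LLVMFloatTypeInContext(C);
+  LLVMTypeRef A = LLVMArrayType(F, 16);  // [16 x float]
+  LLVMTypeRef V = LLVMVectorType(F, 4);  // <4 x float>
+  LLVMTypeRef P = LLVMPointerType(F, 0); // float* in address space 0
+  assert(LLVMGetElementType(A) == F && LLVMGetArrayLength(A) == 16);
+  assert(LLVMGetVectorSize(V) == 4 && LLVMGetPointerAddressSpace(P) == 0);
+}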
+
+/*--.. Operations on other types ...........................................--*/
+
+LLVMTypeRef LLVMVoidTypeInContext(LLVMContextRef C) {
+ return wrap(Type::getVoidTy(*unwrap(C)));
+}
+LLVMTypeRef LLVMLabelTypeInContext(LLVMContextRef C) {
+ return wrap(Type::getLabelTy(*unwrap(C)));
+}
+
+LLVMTypeRef LLVMVoidType(void) {
+ return LLVMVoidTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMLabelType(void) {
+ return LLVMLabelTypeInContext(LLVMGetGlobalContext());
+}
+
+/*===-- Operations on values ----------------------------------------------===*/
+
+/*--.. Operations on all values ............................................--*/
+
+LLVMTypeRef LLVMTypeOf(LLVMValueRef Val) {
+ return wrap(unwrap(Val)->getType());
+}
+
+const char *LLVMGetValueName(LLVMValueRef Val) {
+ return unwrap(Val)->getName().data();
+}
+
+void LLVMSetValueName(LLVMValueRef Val, const char *Name) {
+ unwrap(Val)->setName(Name);
+}
+
+void LLVMDumpValue(LLVMValueRef Val) {
+ unwrap(Val)->dump();
+}
+
+char* LLVMPrintValueToString(LLVMValueRef Val) {
+ std::string buf;
+ raw_string_ostream os(buf);
+
+ if (unwrap(Val))
+ unwrap(Val)->print(os);
+ else
+ os << "Printing <null> Value";
+
+ os.flush();
+
+ return strdup(buf.c_str());
+}
+
+void LLVMReplaceAllUsesWith(LLVMValueRef OldVal, LLVMValueRef NewVal) {
+ unwrap(OldVal)->replaceAllUsesWith(unwrap(NewVal));
+}
+
+int LLVMHasMetadata(LLVMValueRef Inst) {
+ return unwrap<Instruction>(Inst)->hasMetadata();
+}
+
+LLVMValueRef LLVMGetMetadata(LLVMValueRef Inst, unsigned KindID) {
+ auto *I = unwrap<Instruction>(Inst);
+ assert(I && "Expected instruction");
+ if (auto *MD = I->getMetadata(KindID))
+ return wrap(MetadataAsValue::get(I->getContext(), MD));
+ return nullptr;
+}
+
+// MetadataAsValue uses a canonical format which strips the actual MDNode for
+// an MDNode with just a single constant value, storing only the
+// ConstantAsMetadata. This undoes that canonicalization, reconstructing the
+// MDNode.
+static MDNode *extractMDNode(MetadataAsValue *MAV) {
+ Metadata *MD = MAV->getMetadata();
+ assert((isa<MDNode>(MD) || isa<ConstantAsMetadata>(MD)) &&
+ "Expected a metadata node or a canonicalized constant");
+
+ if (MDNode *N = dyn_cast<MDNode>(MD))
+ return N;
+
+ return MDNode::get(MAV->getContext(), MD);
+}
+
+void LLVMSetMetadata(LLVMValueRef Inst, unsigned KindID, LLVMValueRef Val) {
+ MDNode *N = Val ? extractMDNode(unwrap<MetadataAsValue>(Val)) : nullptr;
+
+ unwrap<Instruction>(Inst)->setMetadata(KindID, N);
+}
+
+/*--.. Conversion functions ................................................--*/
+
+#define LLVM_DEFINE_VALUE_CAST(name) \
+ LLVMValueRef LLVMIsA##name(LLVMValueRef Val) { \
+ return wrap(static_cast<Value*>(dyn_cast_or_null<name>(unwrap(Val)))); \
+ }
+
+LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DEFINE_VALUE_CAST)
+
+LLVMValueRef LLVMIsAMDNode(LLVMValueRef Val) {
+ if (auto *MD = dyn_cast_or_null<MetadataAsValue>(unwrap(Val)))
+ if (isa<MDNode>(MD->getMetadata()) ||
+ isa<ValueAsMetadata>(MD->getMetadata()))
+ return Val;
+ return nullptr;
+}
+
+LLVMValueRef LLVMIsAMDString(LLVMValueRef Val) {
+ if (auto *MD = dyn_cast_or_null<MetadataAsValue>(unwrap(Val)))
+ if (isa<MDString>(MD->getMetadata()))
+ return Val;
+ return nullptr;
+}
+
+/*--.. Operations on Uses ..................................................--*/
+LLVMUseRef LLVMGetFirstUse(LLVMValueRef Val) {
+ Value *V = unwrap(Val);
+ Value::use_iterator I = V->use_begin();
+ if (I == V->use_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMUseRef LLVMGetNextUse(LLVMUseRef U) {
+ Use *Next = unwrap(U)->getNext();
+ if (Next)
+ return wrap(Next);
+ return nullptr;
+}
+
+LLVMValueRef LLVMGetUser(LLVMUseRef U) {
+ return wrap(unwrap(U)->getUser());
+}
+
+LLVMValueRef LLVMGetUsedValue(LLVMUseRef U) {
+ return wrap(unwrap(U)->get());
+}
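+
+// Illustrative usage sketch, not part of the upstream file: walking a
+// value's use list with the accessors above. exampleCountUses is a
+// hypothetical name.
+static unsigned exampleCountUses(LLVMValueRef Val) {
+  unsigned N = 0;
+  for (LLVMUseRef U = LLVMGetFirstUse(Val); U; U = LLVMGetNextUse(U)) {
+    LLVMValueRef User = LLVMGetUser(U); // the instruction/constant using Val
+    (void)User;
+    ++N;
+  }
+  return N;
+}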
+
+/*--.. Operations on Users .................................................--*/
+
+static LLVMValueRef getMDNodeOperandImpl(LLVMContext &Context, const MDNode *N,
+ unsigned Index) {
+ Metadata *Op = N->getOperand(Index);
+ if (!Op)
+ return nullptr;
+ if (auto *C = dyn_cast<ConstantAsMetadata>(Op))
+ return wrap(C->getValue());
+ return wrap(MetadataAsValue::get(Context, Op));
+}
+
+LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index) {
+ Value *V = unwrap(Val);
+ if (auto *MD = dyn_cast<MetadataAsValue>(V)) {
+ if (auto *L = dyn_cast<ValueAsMetadata>(MD->getMetadata())) {
+ assert(Index == 0 && "Function-local metadata can only have one operand");
+ return wrap(L->getValue());
+ }
+ return getMDNodeOperandImpl(V->getContext(),
+ cast<MDNode>(MD->getMetadata()), Index);
+ }
+
+ return wrap(cast<User>(V)->getOperand(Index));
+}
+
+LLVMUseRef LLVMGetOperandUse(LLVMValueRef Val, unsigned Index) {
+ Value *V = unwrap(Val);
+ return wrap(&cast<User>(V)->getOperandUse(Index));
+}
+
+void LLVMSetOperand(LLVMValueRef Val, unsigned Index, LLVMValueRef Op) {
+ unwrap<User>(Val)->setOperand(Index, unwrap(Op));
+}
+
+int LLVMGetNumOperands(LLVMValueRef Val) {
+ Value *V = unwrap(Val);
+ if (isa<MetadataAsValue>(V))
+ return LLVMGetMDNodeNumOperands(Val);
+
+ return cast<User>(V)->getNumOperands();
+}
+
+/*--.. Operations on constants of any type .................................--*/
+
+LLVMValueRef LLVMConstNull(LLVMTypeRef Ty) {
+ return wrap(Constant::getNullValue(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMConstAllOnes(LLVMTypeRef Ty) {
+ return wrap(Constant::getAllOnesValue(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMGetUndef(LLVMTypeRef Ty) {
+ return wrap(UndefValue::get(unwrap(Ty)));
+}
+
+LLVMBool LLVMIsConstant(LLVMValueRef Val) {
+  return isa<Constant>(unwrap(Val));
+}
+
+LLVMBool LLVMIsNull(LLVMValueRef Val) {
+ if (Constant *C = dyn_cast<Constant>(unwrap(Val)))
+ return C->isNullValue();
+ return false;
+}
+
+LLVMBool LLVMIsUndef(LLVMValueRef Val) {
+ return isa<UndefValue>(unwrap(Val));
+}
+
+LLVMValueRef LLVMConstPointerNull(LLVMTypeRef Ty) {
+  return wrap(ConstantPointerNull::get(unwrap<PointerType>(Ty)));
+}
+
+/*--.. Operations on metadata nodes ........................................--*/
+
+LLVMValueRef LLVMMDStringInContext(LLVMContextRef C, const char *Str,
+ unsigned SLen) {
+ LLVMContext &Context = *unwrap(C);
+ return wrap(MetadataAsValue::get(
+ Context, MDString::get(Context, StringRef(Str, SLen))));
+}
+
+LLVMValueRef LLVMMDString(const char *Str, unsigned SLen) {
+ return LLVMMDStringInContext(LLVMGetGlobalContext(), Str, SLen);
+}
+
+LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals,
+ unsigned Count) {
+ LLVMContext &Context = *unwrap(C);
+ SmallVector<Metadata *, 8> MDs;
+ for (auto *OV : makeArrayRef(Vals, Count)) {
+ Value *V = unwrap(OV);
+ Metadata *MD;
+ if (!V)
+ MD = nullptr;
+ else if (auto *C = dyn_cast<Constant>(V))
+ MD = ConstantAsMetadata::get(C);
+ else if (auto *MDV = dyn_cast<MetadataAsValue>(V)) {
+ MD = MDV->getMetadata();
+ assert(!isa<LocalAsMetadata>(MD) && "Unexpected function-local metadata "
+ "outside of direct argument to call");
+ } else {
+ // This is function-local metadata. Pretend to make an MDNode.
+ assert(Count == 1 &&
+ "Expected only one operand to function-local metadata");
+ return wrap(MetadataAsValue::get(Context, LocalAsMetadata::get(V)));
+ }
+
+ MDs.push_back(MD);
+ }
+ return wrap(MetadataAsValue::get(Context, MDNode::get(Context, MDs)));
+}
+
+LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count) {
+ return LLVMMDNodeInContext(LLVMGetGlobalContext(), Vals, Count);
+}
+
+const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len) {
+ if (const auto *MD = dyn_cast<MetadataAsValue>(unwrap(V)))
+ if (const MDString *S = dyn_cast<MDString>(MD->getMetadata())) {
+ *Len = S->getString().size();
+ return S->getString().data();
+ }
+ *Len = 0;
+ return nullptr;
+}
+
+unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V)
+{
+ auto *MD = cast<MetadataAsValue>(unwrap(V));
+ if (isa<ValueAsMetadata>(MD->getMetadata()))
+ return 1;
+ return cast<MDNode>(MD->getMetadata())->getNumOperands();
+}
+
+void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest)
+{
+ auto *MD = cast<MetadataAsValue>(unwrap(V));
+ if (auto *MDV = dyn_cast<ValueAsMetadata>(MD->getMetadata())) {
+ *Dest = wrap(MDV->getValue());
+ return;
+ }
+ const auto *N = cast<MDNode>(MD->getMetadata());
+ const unsigned numOperands = N->getNumOperands();
+ LLVMContext &Context = unwrap(V)->getContext();
+ for (unsigned i = 0; i < numOperands; i++)
+ Dest[i] = getMDNodeOperandImpl(Context, N, i);
+}
+
+unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name)
+{
+ if (NamedMDNode *N = unwrap(M)->getNamedMetadata(name)) {
+ return N->getNumOperands();
+ }
+ return 0;
+}
+
+void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRef *Dest)
+{
+ NamedMDNode *N = unwrap(M)->getNamedMetadata(name);
+ if (!N)
+ return;
+ LLVMContext &Context = unwrap(M)->getContext();
+  for (unsigned i = 0; i < N->getNumOperands(); i++)
+ Dest[i] = wrap(MetadataAsValue::get(Context, N->getOperand(i)));
+}
+
+void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char* name,
+ LLVMValueRef Val)
+{
+ NamedMDNode *N = unwrap(M)->getOrInsertNamedMetadata(name);
+ if (!N)
+ return;
+ if (!Val)
+ return;
+ N->addOperand(extractMDNode(unwrap<MetadataAsValue>(Val)));
+}
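+
+// Illustrative usage sketch, not part of the upstream file: attaching a
+// string node to module-level named metadata and counting the operands back.
+// exampleTagModule is a hypothetical name.
+static void exampleTagModule(LLVMModuleRef M, LLVMContextRef C) {
+  LLVMValueRef Str = LLVMMDStringInContext(C, "demo producer", 13);
+  LLVMValueRef Node = LLVMMDNodeInContext(C, &Str, 1);
+  LLVMAddNamedMetadataOperand(M, "llvm.ident", Node);
+  assert(LLVMGetNamedMetadataNumOperands(M, "llvm.ident") >= 1);
+}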
+
+/*--.. Operations on scalar constants ......................................--*/
+
+LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
+ LLVMBool SignExtend) {
+ return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), N, SignExtend != 0));
+}
+
+LLVMValueRef LLVMConstIntOfArbitraryPrecision(LLVMTypeRef IntTy,
+ unsigned NumWords,
+ const uint64_t Words[]) {
+ IntegerType *Ty = unwrap<IntegerType>(IntTy);
+ return wrap(ConstantInt::get(Ty->getContext(),
+ APInt(Ty->getBitWidth(),
+ makeArrayRef(Words, NumWords))));
+}
+
+LLVMValueRef LLVMConstIntOfString(LLVMTypeRef IntTy, const char Str[],
+ uint8_t Radix) {
+ return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), StringRef(Str),
+ Radix));
+}
+
+LLVMValueRef LLVMConstIntOfStringAndSize(LLVMTypeRef IntTy, const char Str[],
+ unsigned SLen, uint8_t Radix) {
+ return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), StringRef(Str, SLen),
+ Radix));
+}
+
+LLVMValueRef LLVMConstReal(LLVMTypeRef RealTy, double N) {
+ return wrap(ConstantFP::get(unwrap(RealTy), N));
+}
+
+LLVMValueRef LLVMConstRealOfString(LLVMTypeRef RealTy, const char *Text) {
+ return wrap(ConstantFP::get(unwrap(RealTy), StringRef(Text)));
+}
+
+LLVMValueRef LLVMConstRealOfStringAndSize(LLVMTypeRef RealTy, const char Str[],
+ unsigned SLen) {
+ return wrap(ConstantFP::get(unwrap(RealTy), StringRef(Str, SLen)));
+}
+
+unsigned long long LLVMConstIntGetZExtValue(LLVMValueRef ConstantVal) {
+ return unwrap<ConstantInt>(ConstantVal)->getZExtValue();
+}
+
+long long LLVMConstIntGetSExtValue(LLVMValueRef ConstantVal) {
+ return unwrap<ConstantInt>(ConstantVal)->getSExtValue();
+}
+
+double LLVMConstRealGetDouble(LLVMValueRef ConstantVal, LLVMBool *LosesInfo) {
+  ConstantFP *cFP = unwrap<ConstantFP>(ConstantVal);
+ Type *Ty = cFP->getType();
+
+ if (Ty->isFloatTy()) {
+ *LosesInfo = false;
+ return cFP->getValueAPF().convertToFloat();
+ }
+
+ if (Ty->isDoubleTy()) {
+ *LosesInfo = false;
+ return cFP->getValueAPF().convertToDouble();
+ }
+
+ bool APFLosesInfo;
+ APFloat APF = cFP->getValueAPF();
+ APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &APFLosesInfo);
+ *LosesInfo = APFLosesInfo;
+ return APF.convertToDouble();
+}
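+
+// Illustrative usage sketch, not part of the upstream file: SignExtend
+// controls how the 64-bit payload is widened or narrowed on creation, and
+// LosesInfo reports narrowing on extraction. exampleScalarConstants is a
+// hypothetical name.
+static void exampleScalarConstants(LLVMContextRef C) {
+  LLVMTypeRef I8 = LLVMInt8TypeInContext(C);
+  LLVMValueRef MinusOne = LLVMConstInt(I8, ~0ULL, /*SignExtend=*/1);
+  assert(LLVMConstIntGetSExtValue(MinusOne) == -1);
+  LLVMValueRef Pi = LLVMConstReal(LLVMDoubleTypeInContext(C), 3.141592);
+  LLVMBool LosesInfo;
+  double D = LLVMConstRealGetDouble(Pi, &LosesInfo); // exact for doubles
+  (void)D;
+}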
+
+/*--.. Operations on composite constants ...................................--*/
+
+LLVMValueRef LLVMConstStringInContext(LLVMContextRef C, const char *Str,
+ unsigned Length,
+ LLVMBool DontNullTerminate) {
+ /* Inverted the sense of AddNull because ', 0)' is a
+ better mnemonic for null termination than ', 1)'. */
+ return wrap(ConstantDataArray::getString(*unwrap(C), StringRef(Str, Length),
+ DontNullTerminate == 0));
+}
+LLVMValueRef LLVMConstStructInContext(LLVMContextRef C,
+ LLVMValueRef *ConstantVals,
+ unsigned Count, LLVMBool Packed) {
+ Constant **Elements = unwrap<Constant>(ConstantVals, Count);
+ return wrap(ConstantStruct::getAnon(*unwrap(C), makeArrayRef(Elements, Count),
+ Packed != 0));
+}
+
+LLVMValueRef LLVMConstString(const char *Str, unsigned Length,
+ LLVMBool DontNullTerminate) {
+ return LLVMConstStringInContext(LLVMGetGlobalContext(), Str, Length,
+ DontNullTerminate);
+}
+
+LLVMValueRef LLVMGetElementAsConstant(LLVMValueRef c, unsigned idx) {
+ return wrap(static_cast<ConstantDataSequential*>(unwrap(c))->getElementAsConstant(idx));
+}
+
+LLVMBool LLVMIsConstantString(LLVMValueRef c) {
+ return static_cast<ConstantDataSequential*>(unwrap(c))->isString();
+}
+
+const char *LLVMGetAsString(LLVMValueRef c, size_t* Length) {
+ StringRef str = static_cast<ConstantDataSequential*>(unwrap(c))->getAsString();
+ *Length = str.size();
+ return str.data();
+}
+
+LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy,
+ LLVMValueRef *ConstantVals, unsigned Length) {
+ ArrayRef<Constant*> V(unwrap<Constant>(ConstantVals, Length), Length);
+ return wrap(ConstantArray::get(ArrayType::get(unwrap(ElementTy), Length), V));
+}
+
+LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count,
+ LLVMBool Packed) {
+ return LLVMConstStructInContext(LLVMGetGlobalContext(), ConstantVals, Count,
+ Packed);
+}
+
+LLVMValueRef LLVMConstNamedStruct(LLVMTypeRef StructTy,
+ LLVMValueRef *ConstantVals,
+ unsigned Count) {
+ Constant **Elements = unwrap<Constant>(ConstantVals, Count);
+ StructType *Ty = cast<StructType>(unwrap(StructTy));
+
+ return wrap(ConstantStruct::get(Ty, makeArrayRef(Elements, Count)));
+}
+
+LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size) {
+ return wrap(ConstantVector::get(makeArrayRef(
+ unwrap<Constant>(ScalarConstantVals, Size), Size)));
+}
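+
+// Illustrative usage sketch, not part of the upstream file: composing a
+// constant array from scalar constants. exampleConstArray is a hypothetical
+// name.
+static void exampleConstArray(LLVMContextRef C) {
+  LLVMTypeRef I32 = LLVMInt32TypeInContext(C);
+  LLVMValueRef Elts[] = {LLVMConstInt(I32, 1, 0), LLVMConstInt(I32, 2, 0),
+                         LLVMConstInt(I32, 3, 0)};
+  LLVMValueRef Arr = LLVMConstArray(I32, Elts, 3); // [3 x i32] [1, 2, 3]
+  (void)Arr;
+}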
+
+/*--.. Opcode mapping .....................................................--*/
+
+static LLVMOpcode map_to_llvmopcode(int opcode)
+{
+ switch (opcode) {
+ default: llvm_unreachable("Unhandled Opcode.");
+#define HANDLE_INST(num, opc, clas) case num: return LLVM##opc;
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_INST
+ }
+}
+
+static int map_from_llvmopcode(LLVMOpcode code)
+{
+ switch (code) {
+#define HANDLE_INST(num, opc, clas) case LLVM##opc: return num;
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_INST
+ }
+ llvm_unreachable("Unhandled Opcode.");
+}
+
+/*--.. Constant expressions ................................................--*/
+
+LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal) {
+ return map_to_llvmopcode(unwrap<ConstantExpr>(ConstantVal)->getOpcode());
+}
+
+LLVMValueRef LLVMAlignOf(LLVMTypeRef Ty) {
+ return wrap(ConstantExpr::getAlignOf(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty) {
+ return wrap(ConstantExpr::getSizeOf(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMConstNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNeg(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstNSWNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNSWNeg(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNUWNeg(unwrap<Constant>(ConstantVal)));
+}
+
+
+LLVMValueRef LLVMConstFNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getFNeg(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstNot(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNot(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getAdd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNSWAdd(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNSWAdd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNUWAdd(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNUWAdd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstFAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getFAdd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getSub(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNSWSub(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNSWSub(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNUWSub(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNUWSub(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstFSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getFSub(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getMul(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNSWMul(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNSWMul(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNUWMul(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNUWMul(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstFMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getFMul(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstUDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getUDiv(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getSDiv(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstExactSDiv(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getExactSDiv(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstFDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getFDiv(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstURem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getURem(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstSRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getSRem(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstFRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getFRem(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstAnd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getAnd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstOr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getOr(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstXor(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getXor(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstICmp(LLVMIntPredicate Predicate,
+ LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getICmp(Predicate,
+ unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstFCmp(LLVMRealPredicate Predicate,
+ LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getFCmp(Predicate,
+ unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstShl(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getShl(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getLShr(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getAShr(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices, unsigned NumIndices) {
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ return wrap(ConstantExpr::getGetElementPtr(
+ nullptr, unwrap<Constant>(ConstantVal), IdxList));
+}
+
+LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices) {
+ Constant* Val = unwrap<Constant>(ConstantVal);
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ return wrap(ConstantExpr::getInBoundsGetElementPtr(nullptr, Val, IdxList));
+}
+
+LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getTrunc(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getSExt(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getZExt(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPTrunc(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPExtend(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstUIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getUIToFP(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getSIToFP(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPToUI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPToUI(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPToSI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPToSI(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstPtrToInt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getPtrToInt(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstIntToPtr(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getIntToPtr(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstBitCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstAddrSpaceCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getAddrSpaceCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstZExtOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getZExtOrBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSExtOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getSExtOrBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstTruncOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getTruncOrBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstPointerCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getPointerCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstIntCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType,
+ LLVMBool isSigned) {
+ return wrap(ConstantExpr::getIntegerCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType), isSigned));
+}
+
+LLVMValueRef LLVMConstFPCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSelect(LLVMValueRef ConstantCondition,
+ LLVMValueRef ConstantIfTrue,
+ LLVMValueRef ConstantIfFalse) {
+ return wrap(ConstantExpr::getSelect(unwrap<Constant>(ConstantCondition),
+ unwrap<Constant>(ConstantIfTrue),
+ unwrap<Constant>(ConstantIfFalse)));
+}
+
+LLVMValueRef LLVMConstExtractElement(LLVMValueRef VectorConstant,
+ LLVMValueRef IndexConstant) {
+ return wrap(ConstantExpr::getExtractElement(unwrap<Constant>(VectorConstant),
+ unwrap<Constant>(IndexConstant)));
+}
+
+LLVMValueRef LLVMConstInsertElement(LLVMValueRef VectorConstant,
+ LLVMValueRef ElementValueConstant,
+ LLVMValueRef IndexConstant) {
+ return wrap(ConstantExpr::getInsertElement(unwrap<Constant>(VectorConstant),
+ unwrap<Constant>(ElementValueConstant),
+ unwrap<Constant>(IndexConstant)));
+}
+
+LLVMValueRef LLVMConstShuffleVector(LLVMValueRef VectorAConstant,
+ LLVMValueRef VectorBConstant,
+ LLVMValueRef MaskConstant) {
+ return wrap(ConstantExpr::getShuffleVector(unwrap<Constant>(VectorAConstant),
+ unwrap<Constant>(VectorBConstant),
+ unwrap<Constant>(MaskConstant)));
+}
+
+LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList,
+ unsigned NumIdx) {
+ return wrap(ConstantExpr::getExtractValue(unwrap<Constant>(AggConstant),
+ makeArrayRef(IdxList, NumIdx)));
+}
+
+LLVMValueRef LLVMConstInsertValue(LLVMValueRef AggConstant,
+ LLVMValueRef ElementValueConstant,
+ unsigned *IdxList, unsigned NumIdx) {
+ return wrap(ConstantExpr::getInsertValue(unwrap<Constant>(AggConstant),
+ unwrap<Constant>(ElementValueConstant),
+ makeArrayRef(IdxList, NumIdx)));
+}
+
+LLVMValueRef LLVMConstInlineAsm(LLVMTypeRef Ty, const char *AsmString,
+ const char *Constraints,
+ LLVMBool HasSideEffects,
+ LLVMBool IsAlignStack) {
+ return wrap(InlineAsm::get(dyn_cast<FunctionType>(unwrap(Ty)), AsmString,
+ Constraints, HasSideEffects, IsAlignStack));
+}
+
+LLVMValueRef LLVMBlockAddress(LLVMValueRef F, LLVMBasicBlockRef BB) {
+ return wrap(BlockAddress::get(unwrap<Function>(F), unwrap(BB)));
+}
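+
+// Illustrative usage sketch, not part of the upstream file: these wrappers
+// go through ConstantExpr::get*, which folds eagerly, so combining two
+// ConstantInts yields a plain ConstantInt rather than an add expression.
+// exampleConstFold is a hypothetical name.
+static void exampleConstFold(LLVMContextRef C) {
+  LLVMTypeRef I32 = LLVMInt32TypeInContext(C);
+  LLVMValueRef Sum =
+      LLVMConstAdd(LLVMConstInt(I32, 2, 0), LLVMConstInt(I32, 3, 0));
+  assert(LLVMConstIntGetZExtValue(Sum) == 5); // folded at construction
+}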
+
+/*--.. Operations on global variables, functions, and aliases (globals) ....--*/
+
+LLVMModuleRef LLVMGetGlobalParent(LLVMValueRef Global) {
+ return wrap(unwrap<GlobalValue>(Global)->getParent());
+}
+
+LLVMBool LLVMIsDeclaration(LLVMValueRef Global) {
+ return unwrap<GlobalValue>(Global)->isDeclaration();
+}
+
+LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
+ switch (unwrap<GlobalValue>(Global)->getLinkage()) {
+ case GlobalValue::ExternalLinkage:
+ return LLVMExternalLinkage;
+ case GlobalValue::AvailableExternallyLinkage:
+ return LLVMAvailableExternallyLinkage;
+ case GlobalValue::LinkOnceAnyLinkage:
+ return LLVMLinkOnceAnyLinkage;
+ case GlobalValue::LinkOnceODRLinkage:
+ return LLVMLinkOnceODRLinkage;
+ case GlobalValue::WeakAnyLinkage:
+ return LLVMWeakAnyLinkage;
+ case GlobalValue::WeakODRLinkage:
+ return LLVMWeakODRLinkage;
+ case GlobalValue::AppendingLinkage:
+ return LLVMAppendingLinkage;
+ case GlobalValue::InternalLinkage:
+ return LLVMInternalLinkage;
+ case GlobalValue::PrivateLinkage:
+ return LLVMPrivateLinkage;
+ case GlobalValue::ExternalWeakLinkage:
+ return LLVMExternalWeakLinkage;
+ case GlobalValue::CommonLinkage:
+ return LLVMCommonLinkage;
+ }
+
+ llvm_unreachable("Invalid GlobalValue linkage!");
+}
+
+void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
+ GlobalValue *GV = unwrap<GlobalValue>(Global);
+
+ switch (Linkage) {
+ case LLVMExternalLinkage:
+ GV->setLinkage(GlobalValue::ExternalLinkage);
+ break;
+ case LLVMAvailableExternallyLinkage:
+ GV->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ break;
+ case LLVMLinkOnceAnyLinkage:
+ GV->setLinkage(GlobalValue::LinkOnceAnyLinkage);
+ break;
+ case LLVMLinkOnceODRLinkage:
+ GV->setLinkage(GlobalValue::LinkOnceODRLinkage);
+ break;
+ case LLVMLinkOnceODRAutoHideLinkage:
+ DEBUG(errs() << "LLVMSetLinkage(): LLVMLinkOnceODRAutoHideLinkage is no "
+ "longer supported.");
+ break;
+ case LLVMWeakAnyLinkage:
+ GV->setLinkage(GlobalValue::WeakAnyLinkage);
+ break;
+ case LLVMWeakODRLinkage:
+ GV->setLinkage(GlobalValue::WeakODRLinkage);
+ break;
+ case LLVMAppendingLinkage:
+ GV->setLinkage(GlobalValue::AppendingLinkage);
+ break;
+ case LLVMInternalLinkage:
+ GV->setLinkage(GlobalValue::InternalLinkage);
+ break;
+ case LLVMPrivateLinkage:
+ GV->setLinkage(GlobalValue::PrivateLinkage);
+ break;
+ case LLVMLinkerPrivateLinkage:
+ GV->setLinkage(GlobalValue::PrivateLinkage);
+ break;
+ case LLVMLinkerPrivateWeakLinkage:
+ GV->setLinkage(GlobalValue::PrivateLinkage);
+ break;
+ case LLVMDLLImportLinkage:
+ DEBUG(errs()
+ << "LLVMSetLinkage(): LLVMDLLImportLinkage is no longer supported.");
+ break;
+ case LLVMDLLExportLinkage:
+ DEBUG(errs()
+ << "LLVMSetLinkage(): LLVMDLLExportLinkage is no longer supported.");
+ break;
+ case LLVMExternalWeakLinkage:
+ GV->setLinkage(GlobalValue::ExternalWeakLinkage);
+ break;
+ case LLVMGhostLinkage:
+ DEBUG(errs()
+ << "LLVMSetLinkage(): LLVMGhostLinkage is no longer supported.");
+ break;
+ case LLVMCommonLinkage:
+ GV->setLinkage(GlobalValue::CommonLinkage);
+ break;
+ }
+}
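+
+/* Usage sketch (illustrative only, not part of this file): round-tripping a
+   global's linkage through the two wrappers above. `G` is an assumed
+   LLVMValueRef naming a global value.
+
+     if (LLVMGetLinkage(G) == LLVMExternalLinkage)
+       LLVMSetLinkage(G, LLVMInternalLinkage);
+*/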
+
+const char *LLVMGetSection(LLVMValueRef Global) {
+ return unwrap<GlobalValue>(Global)->getSection();
+}
+
+void LLVMSetSection(LLVMValueRef Global, const char *Section) {
+ unwrap<GlobalObject>(Global)->setSection(Section);
+}
+
+LLVMVisibility LLVMGetVisibility(LLVMValueRef Global) {
+ return static_cast<LLVMVisibility>(
+ unwrap<GlobalValue>(Global)->getVisibility());
+}
+
+void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz) {
+ unwrap<GlobalValue>(Global)
+ ->setVisibility(static_cast<GlobalValue::VisibilityTypes>(Viz));
+}
+
+LLVMDLLStorageClass LLVMGetDLLStorageClass(LLVMValueRef Global) {
+ return static_cast<LLVMDLLStorageClass>(
+ unwrap<GlobalValue>(Global)->getDLLStorageClass());
+}
+
+void LLVMSetDLLStorageClass(LLVMValueRef Global, LLVMDLLStorageClass Class) {
+ unwrap<GlobalValue>(Global)->setDLLStorageClass(
+ static_cast<GlobalValue::DLLStorageClassTypes>(Class));
+}
+
+LLVMBool LLVMHasUnnamedAddr(LLVMValueRef Global) {
+ return unwrap<GlobalValue>(Global)->hasUnnamedAddr();
+}
+
+void LLVMSetUnnamedAddr(LLVMValueRef Global, LLVMBool HasUnnamedAddr) {
+ unwrap<GlobalValue>(Global)->setUnnamedAddr(HasUnnamedAddr);
+}
+
+/*--.. Operations on global variables, load and store instructions .........--*/
+
+unsigned LLVMGetAlignment(LLVMValueRef V) {
+ Value *P = unwrap<Value>(V);
+ if (GlobalValue *GV = dyn_cast<GlobalValue>(P))
+ return GV->getAlignment();
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
+ return AI->getAlignment();
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->getAlignment();
+ if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ return SI->getAlignment();
+
+ llvm_unreachable(
+ "only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");
+}
+
+void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) {
+ Value *P = unwrap<Value>(V);
+ if (GlobalObject *GV = dyn_cast<GlobalObject>(P))
+ GV->setAlignment(Bytes);
+ else if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
+ AI->setAlignment(Bytes);
+ else if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ LI->setAlignment(Bytes);
+ else if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ SI->setAlignment(Bytes);
+ else
+ llvm_unreachable(
+ "only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");
+}
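+
+/* Usage sketch (illustrative only, not part of this file): the alignment
+   wrappers above accept globals as well as allocas, loads, and stores. `B`
+   and `Ty` are an assumed builder and type.
+
+     LLVMValueRef Slot = LLVMBuildAlloca(B, Ty, "slot");
+     LLVMSetAlignment(Slot, 16);
+     unsigned A = LLVMGetAlignment(Slot);   // A == 16
+*/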
+
+/*--.. Operations on global variables ......................................--*/
+
+LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name) {
+ return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
+ GlobalValue::ExternalLinkage, nullptr, Name));
+}
+
+LLVMValueRef LLVMAddGlobalInAddressSpace(LLVMModuleRef M, LLVMTypeRef Ty,
+ const char *Name,
+ unsigned AddressSpace) {
+ return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
+ GlobalValue::ExternalLinkage, nullptr, Name,
+ nullptr, GlobalVariable::NotThreadLocal,
+ AddressSpace));
+}
+
+LLVMValueRef LLVMGetNamedGlobal(LLVMModuleRef M, const char *Name) {
+ return wrap(unwrap(M)->getNamedGlobal(Name));
+}
+
+LLVMValueRef LLVMGetFirstGlobal(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::global_iterator I = Mod->global_begin();
+ if (I == Mod->global_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastGlobal(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::global_iterator I = Mod->global_end();
+ if (I == Mod->global_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextGlobal(LLVMValueRef GlobalVar) {
+ GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+ Module::global_iterator I(GV);
+ if (++I == GV->getParent()->global_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousGlobal(LLVMValueRef GlobalVar) {
+ GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+ Module::global_iterator I(GV);
+ if (I == GV->getParent()->global_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
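+
+/* Usage sketch (illustrative only, not part of this file): walking a
+   module's globals with the getter pairs above; iteration stops when the
+   next getter returns null. `M` is an assumed module.
+
+     for (LLVMValueRef G = LLVMGetFirstGlobal(M); G; G = LLVMGetNextGlobal(G))
+       LLVMSetLinkage(G, LLVMInternalLinkage);
+*/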
+
+void LLVMDeleteGlobal(LLVMValueRef GlobalVar) {
+ unwrap<GlobalVariable>(GlobalVar)->eraseFromParent();
+}
+
+LLVMValueRef LLVMGetInitializer(LLVMValueRef GlobalVar) {
+  GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+  if (!GV->hasInitializer())
+    return nullptr;
+ return wrap(GV->getInitializer());
+}
+
+void LLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal) {
+ unwrap<GlobalVariable>(GlobalVar)
+ ->setInitializer(unwrap<Constant>(ConstantVal));
+}
+
+LLVMBool LLVMIsThreadLocal(LLVMValueRef GlobalVar) {
+ return unwrap<GlobalVariable>(GlobalVar)->isThreadLocal();
+}
+
+void LLVMSetThreadLocal(LLVMValueRef GlobalVar, LLVMBool IsThreadLocal) {
+ unwrap<GlobalVariable>(GlobalVar)->setThreadLocal(IsThreadLocal != 0);
+}
+
+LLVMBool LLVMIsGlobalConstant(LLVMValueRef GlobalVar) {
+ return unwrap<GlobalVariable>(GlobalVar)->isConstant();
+}
+
+void LLVMSetGlobalConstant(LLVMValueRef GlobalVar, LLVMBool IsConstant) {
+ unwrap<GlobalVariable>(GlobalVar)->setConstant(IsConstant != 0);
+}
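+
+/* Usage sketch (illustrative only, not part of this file): defining an
+   initialized constant through the wrappers above. `M` is an assumed module;
+   the name "answer" is made up.
+
+     LLVMValueRef G = LLVMAddGlobal(M, LLVMInt32Type(), "answer");
+     LLVMSetInitializer(G, LLVMConstInt(LLVMInt32Type(), 42, 0));
+     LLVMSetGlobalConstant(G, 1);
+*/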
+
+LLVMThreadLocalMode LLVMGetThreadLocalMode(LLVMValueRef GlobalVar) {
+ switch (unwrap<GlobalVariable>(GlobalVar)->getThreadLocalMode()) {
+ case GlobalVariable::NotThreadLocal:
+ return LLVMNotThreadLocal;
+ case GlobalVariable::GeneralDynamicTLSModel:
+ return LLVMGeneralDynamicTLSModel;
+ case GlobalVariable::LocalDynamicTLSModel:
+ return LLVMLocalDynamicTLSModel;
+ case GlobalVariable::InitialExecTLSModel:
+ return LLVMInitialExecTLSModel;
+ case GlobalVariable::LocalExecTLSModel:
+ return LLVMLocalExecTLSModel;
+ }
+
+ llvm_unreachable("Invalid GlobalVariable thread local mode");
+}
+
+void LLVMSetThreadLocalMode(LLVMValueRef GlobalVar, LLVMThreadLocalMode Mode) {
+ GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+
+ switch (Mode) {
+ case LLVMNotThreadLocal:
+ GV->setThreadLocalMode(GlobalVariable::NotThreadLocal);
+ break;
+ case LLVMGeneralDynamicTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::GeneralDynamicTLSModel);
+ break;
+ case LLVMLocalDynamicTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::LocalDynamicTLSModel);
+ break;
+ case LLVMInitialExecTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
+ break;
+ case LLVMLocalExecTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::LocalExecTLSModel);
+ break;
+ }
+}
+
+LLVMBool LLVMIsExternallyInitialized(LLVMValueRef GlobalVar) {
+ return unwrap<GlobalVariable>(GlobalVar)->isExternallyInitialized();
+}
+
+void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit) {
+ unwrap<GlobalVariable>(GlobalVar)->setExternallyInitialized(IsExtInit);
+}
+
+/*--.. Operations on aliases ...............................................--*/
+
+LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
+ const char *Name) {
+ auto *PTy = cast<PointerType>(unwrap(Ty));
+ return wrap(GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
+ GlobalValue::ExternalLinkage, Name,
+ unwrap<Constant>(Aliasee), unwrap(M)));
+}
+
+/*--.. Operations on functions .............................................--*/
+
+LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name,
+ LLVMTypeRef FunctionTy) {
+ return wrap(Function::Create(unwrap<FunctionType>(FunctionTy),
+ GlobalValue::ExternalLinkage, Name, unwrap(M)));
+}
+
+LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name) {
+ return wrap(unwrap(M)->getFunction(Name));
+}
+
+LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::iterator I = Mod->begin();
+ if (I == Mod->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::iterator I = Mod->end();
+ if (I == Mod->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Module::iterator I(Func);
+ if (++I == Func->getParent()->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Module::iterator I(Func);
+ if (I == Func->getParent()->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
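+
+/* Usage sketch (illustrative only, not part of this file): visiting every
+   function in an assumed module `M`, declarations included.
+
+     for (LLVMValueRef F = LLVMGetFirstFunction(M); F;
+          F = LLVMGetNextFunction(F))
+       if (!LLVMIsDeclaration(F))
+         LLVMSetFunctionCallConv(F, LLVMFastCallConv);
+*/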
+
+void LLVMDeleteFunction(LLVMValueRef Fn) {
+ unwrap<Function>(Fn)->eraseFromParent();
+}
+
+LLVMValueRef LLVMGetPersonalityFn(LLVMValueRef Fn) {
+ return wrap(unwrap<Function>(Fn)->getPersonalityFn());
+}
+
+void LLVMSetPersonalityFn(LLVMValueRef Fn, LLVMValueRef PersonalityFn) {
+ unwrap<Function>(Fn)->setPersonalityFn(unwrap<Constant>(PersonalityFn));
+}
+
+unsigned LLVMGetIntrinsicID(LLVMValueRef Fn) {
+ if (Function *F = dyn_cast<Function>(unwrap(Fn)))
+ return F->getIntrinsicID();
+ return 0;
+}
+
+unsigned LLVMGetFunctionCallConv(LLVMValueRef Fn) {
+ return unwrap<Function>(Fn)->getCallingConv();
+}
+
+void LLVMSetFunctionCallConv(LLVMValueRef Fn, unsigned CC) {
+ return unwrap<Function>(Fn)->setCallingConv(
+ static_cast<CallingConv::ID>(CC));
+}
+
+const char *LLVMGetGC(LLVMValueRef Fn) {
+ Function *F = unwrap<Function>(Fn);
+  return F->hasGC() ? F->getGC().c_str() : nullptr;
+}
+
+void LLVMSetGC(LLVMValueRef Fn, const char *GC) {
+ Function *F = unwrap<Function>(Fn);
+ if (GC)
+ F->setGC(GC);
+ else
+ F->clearGC();
+}
+
+void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) {
+ Function *Func = unwrap<Function>(Fn);
+ const AttributeSet PAL = Func->getAttributes();
+ AttrBuilder B(PA);
+ const AttributeSet PALnew =
+ PAL.addAttributes(Func->getContext(), AttributeSet::FunctionIndex,
+ AttributeSet::get(Func->getContext(),
+ AttributeSet::FunctionIndex, B));
+ Func->setAttributes(PALnew);
+}
+
+void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A,
+ const char *V) {
+ Function *Func = unwrap<Function>(Fn);
+ AttributeSet::AttrIndex Idx =
+ AttributeSet::AttrIndex(AttributeSet::FunctionIndex);
+ AttrBuilder B;
+
+ B.addAttribute(A, V);
+ AttributeSet Set = AttributeSet::get(Func->getContext(), Idx, B);
+ Func->addAttributes(Idx, Set);
+}
+
+void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) {
+ Function *Func = unwrap<Function>(Fn);
+ const AttributeSet PAL = Func->getAttributes();
+ AttrBuilder B(PA);
+ const AttributeSet PALnew =
+ PAL.removeAttributes(Func->getContext(), AttributeSet::FunctionIndex,
+ AttributeSet::get(Func->getContext(),
+ AttributeSet::FunctionIndex, B));
+ Func->setAttributes(PALnew);
+}
+
+LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ const AttributeSet PAL = Func->getAttributes();
+ return (LLVMAttribute)PAL.Raw(AttributeSet::FunctionIndex);
+}
+
+/*--.. Operations on parameters ............................................--*/
+
+unsigned LLVMCountParams(LLVMValueRef FnRef) {
+ // This function is strictly redundant to
+ // LLVMCountParamTypes(LLVMGetElementType(LLVMTypeOf(FnRef)))
+ return unwrap<Function>(FnRef)->arg_size();
+}
+
+void LLVMGetParams(LLVMValueRef FnRef, LLVMValueRef *ParamRefs) {
+ Function *Fn = unwrap<Function>(FnRef);
+ for (Function::arg_iterator I = Fn->arg_begin(),
+ E = Fn->arg_end(); I != E; I++)
+ *ParamRefs++ = wrap(&*I);
+}
+
+LLVMValueRef LLVMGetParam(LLVMValueRef FnRef, unsigned index) {
+ Function::arg_iterator AI = unwrap<Function>(FnRef)->arg_begin();
+  while (index-- > 0)
+    ++AI;
+ return wrap(&*AI);
+}
+
+LLVMValueRef LLVMGetParamParent(LLVMValueRef V) {
+ return wrap(unwrap<Argument>(V)->getParent());
+}
+
+LLVMValueRef LLVMGetFirstParam(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::arg_iterator I = Func->arg_begin();
+ if (I == Func->arg_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::arg_iterator I = Func->arg_end();
+ if (I == Func->arg_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg) {
+ Argument *A = unwrap<Argument>(Arg);
+ Function::arg_iterator I(A);
+ if (++I == A->getParent()->arg_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
+ Argument *A = unwrap<Argument>(Arg);
+ Function::arg_iterator I(A);
+ if (I == A->getParent()->arg_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
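+
+/* Usage sketch (illustrative only, not part of this file): sequential and
+   random access to the parameters of an assumed function `Fn`.
+
+     for (LLVMValueRef P = LLVMGetFirstParam(Fn); P; P = LLVMGetNextParam(P))
+       LLVMSetValueName(P, "arg");          // made-up name
+     LLVMValueRef First = LLVMGetParam(Fn, 0);
+*/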
+
+void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA) {
+ Argument *A = unwrap<Argument>(Arg);
+ AttrBuilder B(PA);
+ A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
+}
+
+void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA) {
+ Argument *A = unwrap<Argument>(Arg);
+ AttrBuilder B(PA);
+ A->removeAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
+}
+
+LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) {
+ Argument *A = unwrap<Argument>(Arg);
+  return (LLVMAttribute)A->getParent()->getAttributes().Raw(
+      A->getArgNo() + 1);
+}
+
+void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) {
+ Argument *A = unwrap<Argument>(Arg);
+ AttrBuilder B;
+ B.addAlignmentAttr(align);
+  A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
+}
+
+/*--.. Operations on basic blocks ..........................................--*/
+
+LLVMValueRef LLVMBasicBlockAsValue(LLVMBasicBlockRef BB) {
+ return wrap(static_cast<Value*>(unwrap(BB)));
+}
+
+LLVMBool LLVMValueIsBasicBlock(LLVMValueRef Val) {
+ return isa<BasicBlock>(unwrap(Val));
+}
+
+LLVMBasicBlockRef LLVMValueAsBasicBlock(LLVMValueRef Val) {
+ return wrap(unwrap<BasicBlock>(Val));
+}
+
+LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB) {
+ return wrap(unwrap(BB)->getParent());
+}
+
+LLVMValueRef LLVMGetBasicBlockTerminator(LLVMBasicBlockRef BB) {
+ return wrap(unwrap(BB)->getTerminator());
+}
+
+unsigned LLVMCountBasicBlocks(LLVMValueRef FnRef) {
+ return unwrap<Function>(FnRef)->size();
+}
+
+void LLVMGetBasicBlocks(LLVMValueRef FnRef, LLVMBasicBlockRef *BasicBlocksRefs){
+ Function *Fn = unwrap<Function>(FnRef);
+ for (Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++)
+ *BasicBlocksRefs++ = wrap(&*I);
+}
+
+LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn) {
+ return wrap(&unwrap<Function>(Fn)->getEntryBlock());
+}
+
+LLVMBasicBlockRef LLVMGetFirstBasicBlock(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::iterator I = Func->begin();
+ if (I == Func->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMBasicBlockRef LLVMGetLastBasicBlock(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::iterator I = Func->end();
+ if (I == Func->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMBasicBlockRef LLVMGetNextBasicBlock(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ Function::iterator I(Block);
+ if (++I == Block->getParent()->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ Function::iterator I(Block);
+ if (I == Block->getParent()->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
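+
+/* Usage sketch (illustrative only, not part of this file): walking the basic
+   blocks of an assumed function `Fn` in layout order.
+
+     unsigned N = 0;
+     for (LLVMBasicBlockRef BB = LLVMGetFirstBasicBlock(Fn); BB;
+          BB = LLVMGetNextBasicBlock(BB))
+       ++N;                                 // N == LLVMCountBasicBlocks(Fn)
+*/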
+
+LLVMBasicBlockRef LLVMAppendBasicBlockInContext(LLVMContextRef C,
+ LLVMValueRef FnRef,
+ const char *Name) {
+ return wrap(BasicBlock::Create(*unwrap(C), Name, unwrap<Function>(FnRef)));
+}
+
+LLVMBasicBlockRef LLVMAppendBasicBlock(LLVMValueRef FnRef, const char *Name) {
+ return LLVMAppendBasicBlockInContext(LLVMGetGlobalContext(), FnRef, Name);
+}
+
+LLVMBasicBlockRef LLVMInsertBasicBlockInContext(LLVMContextRef C,
+ LLVMBasicBlockRef BBRef,
+ const char *Name) {
+ BasicBlock *BB = unwrap(BBRef);
+ return wrap(BasicBlock::Create(*unwrap(C), Name, BB->getParent(), BB));
+}
+
+LLVMBasicBlockRef LLVMInsertBasicBlock(LLVMBasicBlockRef BBRef,
+ const char *Name) {
+ return LLVMInsertBasicBlockInContext(LLVMGetGlobalContext(), BBRef, Name);
+}
+
+void LLVMDeleteBasicBlock(LLVMBasicBlockRef BBRef) {
+ unwrap(BBRef)->eraseFromParent();
+}
+
+void LLVMRemoveBasicBlockFromParent(LLVMBasicBlockRef BBRef) {
+ unwrap(BBRef)->removeFromParent();
+}
+
+void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) {
+ unwrap(BB)->moveBefore(unwrap(MovePos));
+}
+
+void LLVMMoveBasicBlockAfter(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) {
+ unwrap(BB)->moveAfter(unwrap(MovePos));
+}
+
+/*--.. Operations on instructions ..........................................--*/
+
+LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst) {
+ return wrap(unwrap<Instruction>(Inst)->getParent());
+}
+
+LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ BasicBlock::iterator I = Block->begin();
+ if (I == Block->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastInstruction(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ BasicBlock::iterator I = Block->end();
+ if (I == Block->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst) {
+ Instruction *Instr = unwrap<Instruction>(Inst);
+ BasicBlock::iterator I(Instr);
+ if (++I == Instr->getParent()->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst) {
+ Instruction *Instr = unwrap<Instruction>(Inst);
+ BasicBlock::iterator I(Instr);
+ if (I == Instr->getParent()->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
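+
+/* Usage sketch (illustrative only, not part of this file): the innermost
+   level of the module/function/block walk, over an assumed block `BB`.
+
+     unsigned Calls = 0;
+     for (LLVMValueRef I = LLVMGetFirstInstruction(BB); I;
+          I = LLVMGetNextInstruction(I))
+       if (LLVMGetInstructionOpcode(I) == LLVMCall)
+         ++Calls;
+*/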
+
+void LLVMInstructionEraseFromParent(LLVMValueRef Inst) {
+ unwrap<Instruction>(Inst)->eraseFromParent();
+}
+
+LLVMIntPredicate LLVMGetICmpPredicate(LLVMValueRef Inst) {
+ if (ICmpInst *I = dyn_cast<ICmpInst>(unwrap(Inst)))
+ return (LLVMIntPredicate)I->getPredicate();
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(unwrap(Inst)))
+ if (CE->getOpcode() == Instruction::ICmp)
+ return (LLVMIntPredicate)CE->getPredicate();
+ return (LLVMIntPredicate)0;
+}
+
+LLVMRealPredicate LLVMGetFCmpPredicate(LLVMValueRef Inst) {
+ if (FCmpInst *I = dyn_cast<FCmpInst>(unwrap(Inst)))
+ return (LLVMRealPredicate)I->getPredicate();
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(unwrap(Inst)))
+ if (CE->getOpcode() == Instruction::FCmp)
+ return (LLVMRealPredicate)CE->getPredicate();
+ return (LLVMRealPredicate)0;
+}
+
+LLVMOpcode LLVMGetInstructionOpcode(LLVMValueRef Inst) {
+ if (Instruction *C = dyn_cast<Instruction>(unwrap(Inst)))
+ return map_to_llvmopcode(C->getOpcode());
+ return (LLVMOpcode)0;
+}
+
+LLVMValueRef LLVMInstructionClone(LLVMValueRef Inst) {
+ if (Instruction *C = dyn_cast<Instruction>(unwrap(Inst)))
+ return wrap(C->clone());
+ return nullptr;
+}
+
+/*--.. Call and invoke instructions ........................................--*/
+
+unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr) {
+ Value *V = unwrap(Instr);
+ if (CallInst *CI = dyn_cast<CallInst>(V))
+ return CI->getCallingConv();
+ if (InvokeInst *II = dyn_cast<InvokeInst>(V))
+ return II->getCallingConv();
+ llvm_unreachable("LLVMGetInstructionCallConv applies only to call and invoke!");
+}
+
+void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
+ Value *V = unwrap(Instr);
+ if (CallInst *CI = dyn_cast<CallInst>(V))
+ return CI->setCallingConv(static_cast<CallingConv::ID>(CC));
+ else if (InvokeInst *II = dyn_cast<InvokeInst>(V))
+ return II->setCallingConv(static_cast<CallingConv::ID>(CC));
+ llvm_unreachable("LLVMSetInstructionCallConv applies only to call and invoke!");
+}
+
+void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index,
+ LLVMAttribute PA) {
+ CallSite Call = CallSite(unwrap<Instruction>(Instr));
+ AttrBuilder B(PA);
+ Call.setAttributes(
+ Call.getAttributes().addAttributes(Call->getContext(), index,
+ AttributeSet::get(Call->getContext(),
+ index, B)));
+}
+
+void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index,
+ LLVMAttribute PA) {
+ CallSite Call = CallSite(unwrap<Instruction>(Instr));
+ AttrBuilder B(PA);
+ Call.setAttributes(Call.getAttributes()
+ .removeAttributes(Call->getContext(), index,
+ AttributeSet::get(Call->getContext(),
+ index, B)));
+}
+
+void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
+ unsigned align) {
+ CallSite Call = CallSite(unwrap<Instruction>(Instr));
+ AttrBuilder B;
+ B.addAlignmentAttr(align);
+ Call.setAttributes(Call.getAttributes()
+ .addAttributes(Call->getContext(), index,
+ AttributeSet::get(Call->getContext(),
+ index, B)));
+}
+
+/*--.. Operations on call instructions (only) ..............................--*/
+
+LLVMBool LLVMIsTailCall(LLVMValueRef Call) {
+ return unwrap<CallInst>(Call)->isTailCall();
+}
+
+void LLVMSetTailCall(LLVMValueRef Call, LLVMBool isTailCall) {
+ unwrap<CallInst>(Call)->setTailCall(isTailCall);
+}
+
+/*--.. Operations on terminators ...........................................--*/
+
+unsigned LLVMGetNumSuccessors(LLVMValueRef Term) {
+ return unwrap<TerminatorInst>(Term)->getNumSuccessors();
+}
+
+LLVMBasicBlockRef LLVMGetSuccessor(LLVMValueRef Term, unsigned i) {
+ return wrap(unwrap<TerminatorInst>(Term)->getSuccessor(i));
+}
+
+void LLVMSetSuccessor(LLVMValueRef Term, unsigned i, LLVMBasicBlockRef block) {
+ return unwrap<TerminatorInst>(Term)->setSuccessor(i,unwrap(block));
+}
+
+/*--.. Operations on branch instructions (only) ............................--*/
+
+LLVMBool LLVMIsConditional(LLVMValueRef Branch) {
+ return unwrap<BranchInst>(Branch)->isConditional();
+}
+
+LLVMValueRef LLVMGetCondition(LLVMValueRef Branch) {
+ return wrap(unwrap<BranchInst>(Branch)->getCondition());
+}
+
+void LLVMSetCondition(LLVMValueRef Branch, LLVMValueRef Cond) {
+ return unwrap<BranchInst>(Branch)->setCondition(unwrap(Cond));
+}
+
+/*--.. Operations on switch instructions (only) ............................--*/
+
+LLVMBasicBlockRef LLVMGetSwitchDefaultDest(LLVMValueRef Switch) {
+ return wrap(unwrap<SwitchInst>(Switch)->getDefaultDest());
+}
+
+/*--.. Operations on phi nodes .............................................--*/
+
+void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues,
+ LLVMBasicBlockRef *IncomingBlocks, unsigned Count) {
+ PHINode *PhiVal = unwrap<PHINode>(PhiNode);
+ for (unsigned I = 0; I != Count; ++I)
+ PhiVal->addIncoming(unwrap(IncomingValues[I]), unwrap(IncomingBlocks[I]));
+}
+
+unsigned LLVMCountIncoming(LLVMValueRef PhiNode) {
+ return unwrap<PHINode>(PhiNode)->getNumIncomingValues();
+}
+
+LLVMValueRef LLVMGetIncomingValue(LLVMValueRef PhiNode, unsigned Index) {
+ return wrap(unwrap<PHINode>(PhiNode)->getIncomingValue(Index));
+}
+
+LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index) {
+ return wrap(unwrap<PHINode>(PhiNode)->getIncomingBlock(Index));
+}
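+
+/* Usage sketch (illustrative only, not part of this file): wiring a two-way
+   phi with the helpers above. `B`, `ThenV`, `ElseV`, `ThenBB`, and `ElseBB`
+   are assumed to exist.
+
+     LLVMValueRef Phi = LLVMBuildPhi(B, LLVMInt32Type(), "merge");
+     LLVMValueRef Vals[] = { ThenV, ElseV };
+     LLVMBasicBlockRef BBs[] = { ThenBB, ElseBB };
+     LLVMAddIncoming(Phi, Vals, BBs, 2);
+*/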
+
+
+/*===-- Instruction builders ----------------------------------------------===*/
+
+LLVMBuilderRef LLVMCreateBuilderInContext(LLVMContextRef C) {
+ return wrap(new IRBuilder<>(*unwrap(C)));
+}
+
+LLVMBuilderRef LLVMCreateBuilder(void) {
+ return LLVMCreateBuilderInContext(LLVMGetGlobalContext());
+}
+
+void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block,
+ LLVMValueRef Instr) {
+  BasicBlock *BB = unwrap(Block);
+  // Avoid materializing a bogus Instruction* from the end() sentinel when no
+  // instruction is given; positioning at the block alone inserts at its end.
+  if (Instr)
+    unwrap(Builder)->SetInsertPoint(BB,
+                                    unwrap<Instruction>(Instr)->getIterator());
+  else
+    unwrap(Builder)->SetInsertPoint(BB);
+}
+
+void LLVMPositionBuilderBefore(LLVMBuilderRef Builder, LLVMValueRef Instr) {
+ Instruction *I = unwrap<Instruction>(Instr);
+ unwrap(Builder)->SetInsertPoint(I->getParent(), I->getIterator());
+}
+
+void LLVMPositionBuilderAtEnd(LLVMBuilderRef Builder, LLVMBasicBlockRef Block) {
+ BasicBlock *BB = unwrap(Block);
+ unwrap(Builder)->SetInsertPoint(BB);
+}
+
+LLVMBasicBlockRef LLVMGetInsertBlock(LLVMBuilderRef Builder) {
+ return wrap(unwrap(Builder)->GetInsertBlock());
+}
+
+void LLVMClearInsertionPosition(LLVMBuilderRef Builder) {
+ unwrap(Builder)->ClearInsertionPoint();
+}
+
+void LLVMInsertIntoBuilder(LLVMBuilderRef Builder, LLVMValueRef Instr) {
+ unwrap(Builder)->Insert(unwrap<Instruction>(Instr));
+}
+
+void LLVMInsertIntoBuilderWithName(LLVMBuilderRef Builder, LLVMValueRef Instr,
+ const char *Name) {
+ unwrap(Builder)->Insert(unwrap<Instruction>(Instr), Name);
+}
+
+void LLVMDisposeBuilder(LLVMBuilderRef Builder) {
+ delete unwrap(Builder);
+}
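+
+/* Usage sketch (illustrative only, not part of this file): the typical
+   builder lifecycle around the functions above. `BB` is an assumed block.
+
+     LLVMBuilderRef B = LLVMCreateBuilder();
+     LLVMPositionBuilderAtEnd(B, BB);
+     // ... emit instructions through the LLVMBuild* wrappers below ...
+     LLVMDisposeBuilder(B);
+*/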
+
+/*--.. Metadata builders ...................................................--*/
+
+void LLVMSetCurrentDebugLocation(LLVMBuilderRef Builder, LLVMValueRef L) {
+ MDNode *Loc =
+ L ? cast<MDNode>(unwrap<MetadataAsValue>(L)->getMetadata()) : nullptr;
+ unwrap(Builder)->SetCurrentDebugLocation(DebugLoc(Loc));
+}
+
+LLVMValueRef LLVMGetCurrentDebugLocation(LLVMBuilderRef Builder) {
+ LLVMContext &Context = unwrap(Builder)->getContext();
+ return wrap(MetadataAsValue::get(
+ Context, unwrap(Builder)->getCurrentDebugLocation().getAsMDNode()));
+}
+
+void LLVMSetInstDebugLocation(LLVMBuilderRef Builder, LLVMValueRef Inst) {
+ unwrap(Builder)->SetInstDebugLocation(unwrap<Instruction>(Inst));
+}
+
+
+/*--.. Instruction builders ................................................--*/
+
+LLVMValueRef LLVMBuildRetVoid(LLVMBuilderRef B) {
+ return wrap(unwrap(B)->CreateRetVoid());
+}
+
+LLVMValueRef LLVMBuildRet(LLVMBuilderRef B, LLVMValueRef V) {
+ return wrap(unwrap(B)->CreateRet(unwrap(V)));
+}
+
+LLVMValueRef LLVMBuildAggregateRet(LLVMBuilderRef B, LLVMValueRef *RetVals,
+ unsigned N) {
+ return wrap(unwrap(B)->CreateAggregateRet(unwrap(RetVals), N));
+}
+
+LLVMValueRef LLVMBuildBr(LLVMBuilderRef B, LLVMBasicBlockRef Dest) {
+ return wrap(unwrap(B)->CreateBr(unwrap(Dest)));
+}
+
+LLVMValueRef LLVMBuildCondBr(LLVMBuilderRef B, LLVMValueRef If,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Else) {
+ return wrap(unwrap(B)->CreateCondBr(unwrap(If), unwrap(Then), unwrap(Else)));
+}
+
+LLVMValueRef LLVMBuildSwitch(LLVMBuilderRef B, LLVMValueRef V,
+ LLVMBasicBlockRef Else, unsigned NumCases) {
+ return wrap(unwrap(B)->CreateSwitch(unwrap(V), unwrap(Else), NumCases));
+}
+
+LLVMValueRef LLVMBuildIndirectBr(LLVMBuilderRef B, LLVMValueRef Addr,
+ unsigned NumDests) {
+ return wrap(unwrap(B)->CreateIndirectBr(unwrap(Addr), NumDests));
+}
+
+LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateInvoke(unwrap(Fn), unwrap(Then), unwrap(Catch),
+ makeArrayRef(unwrap(Args), NumArgs),
+ Name));
+}
+
+LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef PersFn, unsigned NumClauses,
+ const char *Name) {
+ // The personality used to live on the landingpad instruction, but now it
+ // lives on the parent function. For compatibility, take the provided
+ // personality and put it on the parent function.
+ if (PersFn)
+ unwrap(B)->GetInsertBlock()->getParent()->setPersonalityFn(
+ cast<Function>(unwrap(PersFn)));
+ return wrap(unwrap(B)->CreateLandingPad(unwrap(Ty), NumClauses, Name));
+}
+
+LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn) {
+ return wrap(unwrap(B)->CreateResume(unwrap(Exn)));
+}
+
+LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef B) {
+ return wrap(unwrap(B)->CreateUnreachable());
+}
+
+void LLVMAddCase(LLVMValueRef Switch, LLVMValueRef OnVal,
+ LLVMBasicBlockRef Dest) {
+ unwrap<SwitchInst>(Switch)->addCase(unwrap<ConstantInt>(OnVal), unwrap(Dest));
+}
+
+void LLVMAddDestination(LLVMValueRef IndirectBr, LLVMBasicBlockRef Dest) {
+ unwrap<IndirectBrInst>(IndirectBr)->addDestination(unwrap(Dest));
+}
+
+void LLVMAddClause(LLVMValueRef LandingPad, LLVMValueRef ClauseVal) {
+ unwrap<LandingPadInst>(LandingPad)->
+ addClause(cast<Constant>(unwrap(ClauseVal)));
+}
+
+void LLVMSetCleanup(LLVMValueRef LandingPad, LLVMBool Val) {
+ unwrap<LandingPadInst>(LandingPad)->setCleanup(Val);
+}
+
+/*--.. Arithmetic ..........................................................--*/
+
+LLVMValueRef LLVMBuildAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNSWAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNUWAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNSWSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNUWSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNSWMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNUWMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildUDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateUDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildSDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildExactSDiv(LLVMBuilderRef B, LLVMValueRef LHS,
+ LLVMValueRef RHS, const char *Name) {
+ return wrap(unwrap(B)->CreateExactSDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildURem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateURem(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildSRem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSRem(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFRem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFRem(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildShl(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateShl(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildLShr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateLShr(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildAShr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAShr(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildAnd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAnd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildOr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateOr(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildXor(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateXor(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildBinOp(LLVMBuilderRef B, LLVMOpcode Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+  return wrap(unwrap(B)->CreateBinOp(
+      Instruction::BinaryOps(map_from_llvmopcode(Op)), unwrap(LHS),
+      unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNeg(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
+ return wrap(unwrap(B)->CreateNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildNSWNeg(LLVMBuilderRef B, LLVMValueRef V,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildNUWNeg(LLVMBuilderRef B, LLVMValueRef V,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildFNeg(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
+ return wrap(unwrap(B)->CreateFNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildNot(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
+ return wrap(unwrap(B)->CreateNot(unwrap(V), Name));
+}
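+
+/* Usage sketch (illustrative only, not part of this file): emitting a
+   no-signed-wrap sum with the wrappers above. `B` is an assumed builder
+   positioned in an assumed function `Fn` taking two i32 parameters.
+
+     LLVMValueRef L = LLVMGetParam(Fn, 0), R = LLVMGetParam(Fn, 1);
+     LLVMValueRef Sum = LLVMBuildNSWAdd(B, L, R, "sum");
+     LLVMBuildRet(B, Sum);
+*/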
+
+/*--.. Memory ..............................................................--*/
+
+LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
+ const char *Name) {
+ Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
+ Constant* AllocSize = ConstantExpr::getSizeOf(unwrap(Ty));
+ AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
+ Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
+ ITy, unwrap(Ty), AllocSize,
+ nullptr, nullptr, "");
+ return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
+}
+
+LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Val, const char *Name) {
+ Type* ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
+ Constant* AllocSize = ConstantExpr::getSizeOf(unwrap(Ty));
+ AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
+ Instruction* Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
+ ITy, unwrap(Ty), AllocSize,
+ unwrap(Val), nullptr, "");
+ return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
+}
+
+LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef B, LLVMTypeRef Ty,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), nullptr, Name));
+}
+
+LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Val, const char *Name) {
+ return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), unwrap(Val), Name));
+}
+
+LLVMValueRef LLVMBuildFree(LLVMBuilderRef B, LLVMValueRef PointerVal) {
+ return wrap(unwrap(B)->Insert(
+ CallInst::CreateFree(unwrap(PointerVal), unwrap(B)->GetInsertBlock())));
+}
+
+LLVMValueRef LLVMBuildLoad(LLVMBuilderRef B, LLVMValueRef PointerVal,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateLoad(unwrap(PointerVal), Name));
+}
+
+LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMValueRef PointerVal) {
+ return wrap(unwrap(B)->CreateStore(unwrap(Val), unwrap(PointerVal)));
+}
+
+static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
+ switch (Ordering) {
+ case LLVMAtomicOrderingNotAtomic: return NotAtomic;
+ case LLVMAtomicOrderingUnordered: return Unordered;
+ case LLVMAtomicOrderingMonotonic: return Monotonic;
+ case LLVMAtomicOrderingAcquire: return Acquire;
+ case LLVMAtomicOrderingRelease: return Release;
+ case LLVMAtomicOrderingAcquireRelease: return AcquireRelease;
+ case LLVMAtomicOrderingSequentiallyConsistent:
+ return SequentiallyConsistent;
+ }
+
+ llvm_unreachable("Invalid LLVMAtomicOrdering value!");
+}
+
+static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ case NotAtomic: return LLVMAtomicOrderingNotAtomic;
+ case Unordered: return LLVMAtomicOrderingUnordered;
+ case Monotonic: return LLVMAtomicOrderingMonotonic;
+ case Acquire: return LLVMAtomicOrderingAcquire;
+ case Release: return LLVMAtomicOrderingRelease;
+ case AcquireRelease: return LLVMAtomicOrderingAcquireRelease;
+ case SequentiallyConsistent:
+ return LLVMAtomicOrderingSequentiallyConsistent;
+ }
+
+ llvm_unreachable("Invalid AtomicOrdering value!");
+}
+
+LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
+ LLVMBool isSingleThread, const char *Name) {
+ return wrap(
+ unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering),
+ isSingleThread ? SingleThread : CrossThread,
+ Name));
+}
+
+LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices, unsigned NumIndices,
+ const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ return wrap(unwrap(B)->CreateGEP(nullptr, unwrap(Pointer), IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices, unsigned NumIndices,
+ const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ return wrap(
+ unwrap(B)->CreateInBoundsGEP(nullptr, unwrap(Pointer), IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ unsigned Idx, const char *Name) {
+ return wrap(unwrap(B)->CreateStructGEP(nullptr, unwrap(Pointer), Idx, Name));
+}
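+
+/* Usage sketch (illustrative only, not part of this file): indexing element
+   4 of an assumed global array `Arr` through the GEP wrappers above; the
+   leading zero steps through the pointer to the array object itself.
+
+     LLVMValueRef Idx[] = { LLVMConstInt(LLVMInt32Type(), 0, 0),
+                            LLVMConstInt(LLVMInt32Type(), 4, 0) };
+     LLVMValueRef Elt = LLVMBuildInBoundsGEP(B, Arr, Idx, 2, "elt");
+*/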
+
+LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateGlobalString(Str, Name));
+}
+
+LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateGlobalStringPtr(Str, Name));
+}
+
+LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->isVolatile();
+ return cast<StoreInst>(P)->isVolatile();
+}
+
+void LLVMSetVolatile(LLVMValueRef MemAccessInst, LLVMBool isVolatile) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->setVolatile(isVolatile);
+ return cast<StoreInst>(P)->setVolatile(isVolatile);
+}
+
+LLVMAtomicOrdering LLVMGetOrdering(LLVMValueRef MemAccessInst) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ AtomicOrdering O;
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ O = LI->getOrdering();
+ else
+ O = cast<StoreInst>(P)->getOrdering();
+ return mapToLLVMOrdering(O);
+}
+
+void LLVMSetOrdering(LLVMValueRef MemAccessInst, LLVMAtomicOrdering Ordering) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ AtomicOrdering O = mapFromLLVMOrdering(Ordering);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->setOrdering(O);
+ return cast<StoreInst>(P)->setOrdering(O);
+}
+
+/*--.. Casts ...............................................................--*/
+
+LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateTrunc(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildZExt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateZExt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildSExt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateSExt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPToUI(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPToUI(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPToSI(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPToSI(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildUIToFP(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateUIToFP(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildSIToFP(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateSIToFP(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPTrunc(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPTrunc(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPExt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPExt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildPtrToInt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreatePtrToInt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildIntToPtr(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateIntToPtr(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateBitCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildAddrSpaceCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateAddrSpaceCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildZExtOrBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateZExtOrBitCast(unwrap(Val), unwrap(DestTy),
+ Name));
+}
+
+LLVMValueRef LLVMBuildSExtOrBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateSExtOrBitCast(unwrap(Val), unwrap(DestTy),
+ Name));
+}
+
+LLVMValueRef LLVMBuildTruncOrBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateTruncOrBitCast(unwrap(Val), unwrap(DestTy),
+ Name));
+}
+
+LLVMValueRef LLVMBuildCast(LLVMBuilderRef B, LLVMOpcode Op, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+  return wrap(unwrap(B)->CreateCast(
+      Instruction::CastOps(map_from_llvmopcode(Op)), unwrap(Val),
+      unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildPointerCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreatePointerCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy),
+ /*isSigned*/true, Name));
+}
+
+LLVMValueRef LLVMBuildFPCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+/*--.. Comparisons .........................................................--*/
+
+LLVMValueRef LLVMBuildICmp(LLVMBuilderRef B, LLVMIntPredicate Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateICmp(static_cast<ICmpInst::Predicate>(Op),
+ unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFCmp(LLVMBuilderRef B, LLVMRealPredicate Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFCmp(static_cast<FCmpInst::Predicate>(Op),
+ unwrap(LHS), unwrap(RHS), Name));
+}
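+
+/* Usage sketch (illustrative only, not part of this file): an i1 produced by
+   the comparison wrappers above feeding a conditional branch. `B`, `L`, `R`,
+   `ThenBB`, and `ElseBB` are assumed.
+
+     LLVMValueRef Cond = LLVMBuildICmp(B, LLVMIntSLT, L, R, "cond");
+     LLVMBuildCondBr(B, Cond, ThenBB, ElseBB);
+*/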
+
+/*--.. Miscellaneous instructions ..........................................--*/
+
+LLVMValueRef LLVMBuildPhi(LLVMBuilderRef B, LLVMTypeRef Ty, const char *Name) {
+ return wrap(unwrap(B)->CreatePHI(unwrap(Ty), 0, Name));
+}
+
+LLVMValueRef LLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateCall(unwrap(Fn),
+ makeArrayRef(unwrap(Args), NumArgs),
+ Name));
+}
+
+LLVMValueRef LLVMBuildSelect(LLVMBuilderRef B, LLVMValueRef If,
+ LLVMValueRef Then, LLVMValueRef Else,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSelect(unwrap(If), unwrap(Then), unwrap(Else),
+ Name));
+}
+
+LLVMValueRef LLVMBuildVAArg(LLVMBuilderRef B, LLVMValueRef List,
+ LLVMTypeRef Ty, const char *Name) {
+ return wrap(unwrap(B)->CreateVAArg(unwrap(List), unwrap(Ty), Name));
+}
+
+LLVMValueRef LLVMBuildExtractElement(LLVMBuilderRef B, LLVMValueRef VecVal,
+ LLVMValueRef Index, const char *Name) {
+ return wrap(unwrap(B)->CreateExtractElement(unwrap(VecVal), unwrap(Index),
+ Name));
+}
+
+LLVMValueRef LLVMBuildInsertElement(LLVMBuilderRef B, LLVMValueRef VecVal,
+ LLVMValueRef EltVal, LLVMValueRef Index,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateInsertElement(unwrap(VecVal), unwrap(EltVal),
+ unwrap(Index), Name));
+}
+
+LLVMValueRef LLVMBuildShuffleVector(LLVMBuilderRef B, LLVMValueRef V1,
+ LLVMValueRef V2, LLVMValueRef Mask,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateShuffleVector(unwrap(V1), unwrap(V2),
+ unwrap(Mask), Name));
+}
+
+LLVMValueRef LLVMBuildExtractValue(LLVMBuilderRef B, LLVMValueRef AggVal,
+ unsigned Index, const char *Name) {
+ return wrap(unwrap(B)->CreateExtractValue(unwrap(AggVal), Index, Name));
+}
+
+LLVMValueRef LLVMBuildInsertValue(LLVMBuilderRef B, LLVMValueRef AggVal,
+ LLVMValueRef EltVal, unsigned Index,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateInsertValue(unwrap(AggVal), unwrap(EltVal),
+ Index, Name));
+}
+
+LLVMValueRef LLVMBuildIsNull(LLVMBuilderRef B, LLVMValueRef Val,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateIsNull(unwrap(Val), Name));
+}
+
+LLVMValueRef LLVMBuildIsNotNull(LLVMBuilderRef B, LLVMValueRef Val,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateIsNotNull(unwrap(Val), Name));
+}
+
+LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef B, LLVMValueRef LHS,
+ LLVMValueRef RHS, const char *Name) {
+ return wrap(unwrap(B)->CreatePtrDiff(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
+ LLVMValueRef PTR, LLVMValueRef Val,
+ LLVMAtomicOrdering ordering,
+ LLVMBool singleThread) {
+ AtomicRMWInst::BinOp intop;
+ switch (op) {
+ case LLVMAtomicRMWBinOpXchg: intop = AtomicRMWInst::Xchg; break;
+ case LLVMAtomicRMWBinOpAdd: intop = AtomicRMWInst::Add; break;
+ case LLVMAtomicRMWBinOpSub: intop = AtomicRMWInst::Sub; break;
+ case LLVMAtomicRMWBinOpAnd: intop = AtomicRMWInst::And; break;
+ case LLVMAtomicRMWBinOpNand: intop = AtomicRMWInst::Nand; break;
+ case LLVMAtomicRMWBinOpOr: intop = AtomicRMWInst::Or; break;
+ case LLVMAtomicRMWBinOpXor: intop = AtomicRMWInst::Xor; break;
+ case LLVMAtomicRMWBinOpMax: intop = AtomicRMWInst::Max; break;
+ case LLVMAtomicRMWBinOpMin: intop = AtomicRMWInst::Min; break;
+ case LLVMAtomicRMWBinOpUMax: intop = AtomicRMWInst::UMax; break;
+ case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break;
+ }
+  return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val),
+                                          mapFromLLVMOrdering(ordering),
+                                          singleThread ? SingleThread
+                                                       : CrossThread));
+}
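+
+/* Usage sketch (illustrative only, not part of this file): a sequentially
+   consistent fetch-and-add on an assumed i32 pointer `Ptr`; the final 0
+   selects cross-thread synchronization.
+
+     LLVMValueRef One = LLVMConstInt(LLVMInt32Type(), 1, 0);
+     LLVMValueRef Old = LLVMBuildAtomicRMW(B, LLVMAtomicRMWBinOpAdd, Ptr, One,
+         LLVMAtomicOrderingSequentiallyConsistent, 0);
+*/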
+
+
+/*===-- Module providers --------------------------------------------------===*/
+
+LLVMModuleProviderRef
+LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M) {
+ return reinterpret_cast<LLVMModuleProviderRef>(M);
+}
+
+void LLVMDisposeModuleProvider(LLVMModuleProviderRef MP) {
+ delete unwrap(MP);
+}
+
+
+/*===-- Memory buffers ----------------------------------------------------===*/
+
+LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(
+ const char *Path,
+ LLVMMemoryBufferRef *OutMemBuf,
+ char **OutMessage) {
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getFile(Path);
+ if (std::error_code EC = MBOrErr.getError()) {
+ *OutMessage = strdup(EC.message().c_str());
+ return 1;
+ }
+ *OutMemBuf = wrap(MBOrErr.get().release());
+ return 0;
+}
+
+LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
+ char **OutMessage) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getSTDIN();
+ if (std::error_code EC = MBOrErr.getError()) {
+ *OutMessage = strdup(EC.message().c_str());
+ return 1;
+ }
+ *OutMemBuf = wrap(MBOrErr.get().release());
+ return 0;
+}
+
+LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRange(
+ const char *InputData,
+ size_t InputDataLength,
+ const char *BufferName,
+ LLVMBool RequiresNullTerminator) {
+
+ return wrap(MemoryBuffer::getMemBuffer(StringRef(InputData, InputDataLength),
+ StringRef(BufferName),
+ RequiresNullTerminator).release());
+}
+
+LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRangeCopy(
+ const char *InputData,
+ size_t InputDataLength,
+ const char *BufferName) {
+
+ return wrap(
+ MemoryBuffer::getMemBufferCopy(StringRef(InputData, InputDataLength),
+ StringRef(BufferName)).release());
+}
+
+const char *LLVMGetBufferStart(LLVMMemoryBufferRef MemBuf) {
+ return unwrap(MemBuf)->getBufferStart();
+}
+
+size_t LLVMGetBufferSize(LLVMMemoryBufferRef MemBuf) {
+ return unwrap(MemBuf)->getBufferSize();
+}
+
+void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) {
+ delete unwrap(MemBuf);
+}
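+
+/* Usage sketch (illustrative only, not part of this file): the error
+   protocol of the file-reading constructor above; on failure the message is
+   heap-allocated and owned by the caller. "input.bc" is a made-up path.
+
+     LLVMMemoryBufferRef Buf;
+     char *Msg = NULL;
+     if (LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &Buf, &Msg)) {
+       fprintf(stderr, "%s\n", Msg);
+       LLVMDisposeMessage(Msg);
+     } else {
+       LLVMDisposeMemoryBuffer(Buf);
+     }
+*/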
+
+/*===-- Pass Registry -----------------------------------------------------===*/
+
+LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void) {
+ return wrap(PassRegistry::getPassRegistry());
+}
+
+/*===-- Pass Manager ------------------------------------------------------===*/
+
+LLVMPassManagerRef LLVMCreatePassManager() {
+ return wrap(new legacy::PassManager());
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M) {
+ return wrap(new legacy::FunctionPassManager(unwrap(M)));
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef P) {
+ return LLVMCreateFunctionPassManagerForModule(
+ reinterpret_cast<LLVMModuleRef>(P));
+}
+
+LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) {
+ return unwrap<legacy::PassManager>(PM)->run(*unwrap(M));
+}
+
+LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM) {
+ return unwrap<legacy::FunctionPassManager>(FPM)->doInitialization();
+}
+
+LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F) {
+ return unwrap<legacy::FunctionPassManager>(FPM)->run(*unwrap<Function>(F));
+}
+
+LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM) {
+ return unwrap<legacy::FunctionPassManager>(FPM)->doFinalization();
+}
+
+void LLVMDisposePassManager(LLVMPassManagerRef PM) {
+ delete unwrap(PM);
+}
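+
+/* Usage sketch (illustrative only, not part of this file): the function pass
+   manager protocol above, run over an assumed module `M`; each step reports
+   whether anything was changed.
+
+     LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
+     LLVMInitializeFunctionPassManager(FPM);
+     for (LLVMValueRef F = LLVMGetFirstFunction(M); F;
+          F = LLVMGetNextFunction(F))
+       LLVMRunFunctionPassManager(FPM, F);
+     LLVMFinalizeFunctionPassManager(FPM);
+     LLVMDisposePassManager(FPM);
+*/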
+
+/*===-- Threading ------------------------------------------------------===*/
+
+LLVMBool LLVMStartMultithreaded() {
+ return LLVMIsMultithreaded();
+}
+
+void LLVMStopMultithreaded() {
+}
+
+LLVMBool LLVMIsMultithreaded() {
+ return llvm_is_multithreaded();
+}
diff --git a/contrib/llvm/lib/IR/DIBuilder.cpp b/contrib/llvm/lib/IR/DIBuilder.cpp
new file mode 100644
index 0000000..b7841fe
--- /dev/null
+++ b/contrib/llvm/lib/IR/DIBuilder.cpp
@@ -0,0 +1,907 @@
+//===--- DIBuilder.cpp - Debug Information Builder ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DIBuilder.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Dwarf.h"
+
+using namespace llvm;
+using namespace llvm::dwarf;
+
+namespace {
+class HeaderBuilder {
+ /// \brief Whether there are any fields yet.
+ ///
+ /// Note that this is not equivalent to \c Chars.empty(), since \a concat()
+ /// may have been called already with an empty string.
+ bool IsEmpty;
+ SmallVector<char, 256> Chars;
+
+public:
+ HeaderBuilder() : IsEmpty(true) {}
+ HeaderBuilder(const HeaderBuilder &X) : IsEmpty(X.IsEmpty), Chars(X.Chars) {}
+ HeaderBuilder(HeaderBuilder &&X)
+ : IsEmpty(X.IsEmpty), Chars(std::move(X.Chars)) {}
+
+ template <class Twineable> HeaderBuilder &concat(Twineable &&X) {
+ if (IsEmpty)
+ IsEmpty = false;
+ else
+ Chars.push_back(0);
+ Twine(X).toVector(Chars);
+ return *this;
+ }
+
+ MDString *get(LLVMContext &Context) const {
+ return MDString::get(Context, StringRef(Chars.begin(), Chars.size()));
+ }
+
+ static HeaderBuilder get(unsigned Tag) {
+ return HeaderBuilder().concat("0x" + Twine::utohexstr(Tag));
+ }
+};
+}
+
+DIBuilder::DIBuilder(Module &m, bool AllowUnresolvedNodes)
+ : M(m), VMContext(M.getContext()), CUNode(nullptr),
+ DeclareFn(nullptr), ValueFn(nullptr),
+ AllowUnresolvedNodes(AllowUnresolvedNodes) {}
+
+void DIBuilder::trackIfUnresolved(MDNode *N) {
+ if (!N)
+ return;
+ if (N->isResolved())
+ return;
+
+ assert(AllowUnresolvedNodes && "Cannot handle unresolved nodes");
+ UnresolvedNodes.emplace_back(N);
+}
+
+void DIBuilder::finalize() {
+ if (!CUNode) {
+ assert(!AllowUnresolvedNodes &&
+ "creating type nodes without a CU is not supported");
+ return;
+ }
+
+ CUNode->replaceEnumTypes(MDTuple::get(VMContext, AllEnumTypes));
+
+ SmallVector<Metadata *, 16> RetainValues;
+ // Declarations and definitions of the same type may be retained. Some
+ // clients RAUW these pairs, leaving duplicates in the retained types
+ // list. Use a set to remove the duplicates while we transform the
+ // TrackingVHs back into Values.
+ SmallPtrSet<Metadata *, 16> RetainSet;
+ for (unsigned I = 0, E = AllRetainTypes.size(); I < E; I++)
+ if (RetainSet.insert(AllRetainTypes[I]).second)
+ RetainValues.push_back(AllRetainTypes[I]);
+
+ if (!RetainValues.empty())
+ CUNode->replaceRetainedTypes(MDTuple::get(VMContext, RetainValues));
+
+ DISubprogramArray SPs = MDTuple::get(VMContext, AllSubprograms);
+ if (!AllSubprograms.empty())
+ CUNode->replaceSubprograms(SPs.get());
+
+ for (auto *SP : SPs) {
+ if (MDTuple *Temp = SP->getVariables().get()) {
+ const auto &PV = PreservedVariables.lookup(SP);
+ SmallVector<Metadata *, 4> Variables(PV.begin(), PV.end());
+ DINodeArray AV = getOrCreateArray(Variables);
+ TempMDTuple(Temp)->replaceAllUsesWith(AV.get());
+ }
+ }
+
+ if (!AllGVs.empty())
+ CUNode->replaceGlobalVariables(MDTuple::get(VMContext, AllGVs));
+
+ if (!AllImportedModules.empty())
+ CUNode->replaceImportedEntities(MDTuple::get(
+ VMContext, SmallVector<Metadata *, 16>(AllImportedModules.begin(),
+ AllImportedModules.end())));
+
+ // Now that all temp nodes have been replaced or deleted, resolve remaining
+ // cycles.
+ for (const auto &N : UnresolvedNodes)
+ if (N && !N->isResolved())
+ N->resolveCycles();
+ UnresolvedNodes.clear();
+
+ // Can't handle unresolved nodes anymore.
+ AllowUnresolvedNodes = false;
+}
+
+/// If N is a compile unit, return NULL; otherwise return N.
+static DIScope *getNonCompileUnitScope(DIScope *N) {
+ if (!N || isa<DICompileUnit>(N))
+ return nullptr;
+ return cast<DIScope>(N);
+}
+
+DICompileUnit *DIBuilder::createCompileUnit(
+ unsigned Lang, StringRef Filename, StringRef Directory, StringRef Producer,
+ bool isOptimized, StringRef Flags, unsigned RunTimeVer, StringRef SplitName,
+ DebugEmissionKind Kind, uint64_t DWOId, bool EmitDebugInfo) {
+
+ assert(((Lang <= dwarf::DW_LANG_Fortran08 && Lang >= dwarf::DW_LANG_C89) ||
+ (Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) &&
+ "Invalid Language tag");
+ assert(!Filename.empty() &&
+ "Unable to create compile unit without filename");
+
+ assert(!CUNode && "Can only make one compile unit per DIBuilder instance");
+ CUNode = DICompileUnit::getDistinct(
+ VMContext, Lang, DIFile::get(VMContext, Filename, Directory), Producer,
+ isOptimized, Flags, RunTimeVer, SplitName, Kind, nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr, DWOId);
+
+  // Create a named metadata node so that it is easier to find the CU in a
+  // module. Note that we only generate this when the caller wants to actually
+  // emit debug information. When we are only interested in tracking source
+  // line locations throughout the backend, we prevent codegen from emitting
+  // debug info in the final output by not generating llvm.dbg.cu.
+ if (EmitDebugInfo) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu");
+ NMD->addOperand(CUNode);
+ }
+
+ trackIfUnresolved(CUNode);
+ return CUNode;
+}
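+
+// Usage sketch (illustrative only, not part of this file): the protocol the
+// assertions above imply -- one compile unit per DIBuilder, created before
+// any other debug nodes, with finalize() run once at the end. The file names
+// and producer string are made up; trailing arguments are left defaulted.
+//
+//   DIBuilder DIB(M);
+//   DIB.createCompileUnit(dwarf::DW_LANG_C99, "a.c", "/tmp", "producer",
+//                         /*isOptimized=*/false, "", /*RunTimeVer=*/0);
+//   // ... createFile/createBasicType/etc. ...
+//   DIB.finalize();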
+
+static DIImportedEntity *
+createImportedModule(LLVMContext &C, dwarf::Tag Tag, DIScope *Context,
+ Metadata *NS, unsigned Line, StringRef Name,
+ SmallVectorImpl<TrackingMDNodeRef> &AllImportedModules) {
+ auto *M = DIImportedEntity::get(C, Tag, Context, DINodeRef(NS), Line, Name);
+ AllImportedModules.emplace_back(M);
+ return M;
+}
+
+DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context,
+ DINamespace *NS,
+ unsigned Line) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, NS, Line, StringRef(), AllImportedModules);
+}
+
+DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context,
+ DIImportedEntity *NS,
+ unsigned Line) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, NS, Line, StringRef(), AllImportedModules);
+}
+
+DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context, DIModule *M,
+ unsigned Line) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, M, Line, StringRef(), AllImportedModules);
+}
+
+DIImportedEntity *DIBuilder::createImportedDeclaration(DIScope *Context,
+ DINode *Decl,
+ unsigned Line,
+ StringRef Name) {
+ // Make sure to use the unique-identifier-based metadata reference for
+ // types that have one.
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_declaration,
+ Context, DINodeRef::get(Decl), Line, Name,
+ AllImportedModules);
+}
+
+DIFile *DIBuilder::createFile(StringRef Filename, StringRef Directory) {
+ return DIFile::get(VMContext, Filename, Directory);
+}
+
+DIEnumerator *DIBuilder::createEnumerator(StringRef Name, int64_t Val) {
+ assert(!Name.empty() && "Unable to create enumerator without name");
+ return DIEnumerator::get(VMContext, Val, Name);
+}
+
+DIBasicType *DIBuilder::createUnspecifiedType(StringRef Name) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIBasicType::get(VMContext, dwarf::DW_TAG_unspecified_type, Name);
+}
+
+DIBasicType *DIBuilder::createNullPtrType() {
+ return createUnspecifiedType("decltype(nullptr)");
+}
+
+DIBasicType *DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits,
+ unsigned Encoding) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIBasicType::get(VMContext, dwarf::DW_TAG_base_type, Name, SizeInBits,
+ AlignInBits, Encoding);
+}
+
+DIDerivedType *DIBuilder::createQualifiedType(unsigned Tag, DIType *FromTy) {
+ return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr,
+ DITypeRef::get(FromTy), 0, 0, 0, 0);
+}
+
+DIDerivedType *DIBuilder::createPointerType(DIType *PointeeTy,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits,
+ StringRef Name) {
+ // FIXME: Why is there a name here?
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_pointer_type, Name,
+ nullptr, 0, nullptr, DITypeRef::get(PointeeTy),
+ SizeInBits, AlignInBits, 0, 0);
+}
+
+DIDerivedType *DIBuilder::createMemberPointerType(DIType *PointeeTy,
+ DIType *Base,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits) {
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_ptr_to_member_type, "",
+ nullptr, 0, nullptr, DITypeRef::get(PointeeTy),
+ SizeInBits, AlignInBits, 0, 0,
+ DITypeRef::get(Base));
+}
+
+DIDerivedType *DIBuilder::createReferenceType(unsigned Tag, DIType *RTy,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits) {
+ assert(RTy && "Unable to create reference type");
+ return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr,
+ DITypeRef::get(RTy), SizeInBits, AlignInBits, 0, 0);
+}
+
+DIDerivedType *DIBuilder::createTypedef(DIType *Ty, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ DIScope *Context) {
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_typedef, Name, File,
+ LineNo,
+ DIScopeRef::get(getNonCompileUnitScope(Context)),
+ DITypeRef::get(Ty), 0, 0, 0, 0);
+}
+
+DIDerivedType *DIBuilder::createFriend(DIType *Ty, DIType *FriendTy) {
+ assert(Ty && "Invalid type!");
+ assert(FriendTy && "Invalid friend type!");
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_friend, "", nullptr, 0,
+ DITypeRef::get(Ty), DITypeRef::get(FriendTy), 0, 0,
+ 0, 0);
+}
+
+DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy,
+ uint64_t BaseOffset,
+ unsigned Flags) {
+ assert(Ty && "Unable to create inheritance");
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_inheritance, "", nullptr,
+ 0, DITypeRef::get(Ty), DITypeRef::get(BaseTy), 0, 0,
+ BaseOffset, Flags);
+}
+
+DIDerivedType *DIBuilder::createMemberType(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits,
+ uint64_t OffsetInBits,
+ unsigned Flags, DIType *Ty) {
+ return DIDerivedType::get(
+ VMContext, dwarf::DW_TAG_member, Name, File, LineNumber,
+ DIScopeRef::get(getNonCompileUnitScope(Scope)), DITypeRef::get(Ty),
+ SizeInBits, AlignInBits, OffsetInBits, Flags);
+}
+
+static ConstantAsMetadata *getConstantOrNull(Constant *C) {
+ if (C)
+ return ConstantAsMetadata::get(C);
+ return nullptr;
+}
+
+DIDerivedType *DIBuilder::createStaticMemberType(DIScope *Scope, StringRef Name,
+ DIFile *File,
+ unsigned LineNumber,
+ DIType *Ty, unsigned Flags,
+ llvm::Constant *Val) {
+ Flags |= DINode::FlagStaticMember;
+ return DIDerivedType::get(
+ VMContext, dwarf::DW_TAG_member, Name, File, LineNumber,
+ DIScopeRef::get(getNonCompileUnitScope(Scope)), DITypeRef::get(Ty), 0, 0,
+ 0, Flags, getConstantOrNull(Val));
+}
+
+DIDerivedType *DIBuilder::createObjCIVar(StringRef Name, DIFile *File,
+ unsigned LineNumber,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType *Ty, MDNode *PropertyNode) {
+ return DIDerivedType::get(
+ VMContext, dwarf::DW_TAG_member, Name, File, LineNumber,
+ DIScopeRef::get(getNonCompileUnitScope(File)), DITypeRef::get(Ty),
+ SizeInBits, AlignInBits, OffsetInBits, Flags, PropertyNode);
+}
+
+DIObjCProperty *
+DIBuilder::createObjCProperty(StringRef Name, DIFile *File, unsigned LineNumber,
+ StringRef GetterName, StringRef SetterName,
+ unsigned PropertyAttributes, DIType *Ty) {
+ return DIObjCProperty::get(VMContext, Name, File, LineNumber, GetterName,
+ SetterName, PropertyAttributes,
+ DITypeRef::get(Ty));
+}
+
+DITemplateTypeParameter *
+DIBuilder::createTemplateTypeParameter(DIScope *Context, StringRef Name,
+ DIType *Ty) {
+ assert((!Context || isa<DICompileUnit>(Context)) && "Expected compile unit");
+ return DITemplateTypeParameter::get(VMContext, Name, DITypeRef::get(Ty));
+}
+
+static DITemplateValueParameter *
+createTemplateValueParameterHelper(LLVMContext &VMContext, unsigned Tag,
+ DIScope *Context, StringRef Name, DIType *Ty,
+ Metadata *MD) {
+ assert((!Context || isa<DICompileUnit>(Context)) && "Expected compile unit");
+ return DITemplateValueParameter::get(VMContext, Tag, Name, DITypeRef::get(Ty),
+ MD);
+}
+
+DITemplateValueParameter *
+DIBuilder::createTemplateValueParameter(DIScope *Context, StringRef Name,
+ DIType *Ty, Constant *Val) {
+ return createTemplateValueParameterHelper(
+ VMContext, dwarf::DW_TAG_template_value_parameter, Context, Name, Ty,
+ getConstantOrNull(Val));
+}
+
+DITemplateValueParameter *
+DIBuilder::createTemplateTemplateParameter(DIScope *Context, StringRef Name,
+ DIType *Ty, StringRef Val) {
+ return createTemplateValueParameterHelper(
+ VMContext, dwarf::DW_TAG_GNU_template_template_param, Context, Name, Ty,
+ MDString::get(VMContext, Val));
+}
+
+DITemplateValueParameter *
+DIBuilder::createTemplateParameterPack(DIScope *Context, StringRef Name,
+ DIType *Ty, DINodeArray Val) {
+ return createTemplateValueParameterHelper(
+ VMContext, dwarf::DW_TAG_GNU_template_parameter_pack, Context, Name, Ty,
+ Val.get());
+}
+
+DICompositeType *DIBuilder::createClassType(
+ DIScope *Context, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DIType *DerivedFrom, DINodeArray Elements,
+ DIType *VTableHolder, MDNode *TemplateParams, StringRef UniqueIdentifier) {
+ assert((!Context || isa<DIScope>(Context)) &&
+ "createClassType should be called with a valid Context");
+
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_structure_type, Name, File, LineNumber,
+ DIScopeRef::get(getNonCompileUnitScope(Context)),
+ DITypeRef::get(DerivedFrom), SizeInBits, AlignInBits, OffsetInBits, Flags,
+ Elements, 0, DITypeRef::get(VTableHolder),
+ cast_or_null<MDTuple>(TemplateParams), UniqueIdentifier);
+ if (!UniqueIdentifier.empty())
+ retainType(R);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *DIBuilder::createStructType(
+ DIScope *Context, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags,
+ DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang,
+ DIType *VTableHolder, StringRef UniqueIdentifier) {
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_structure_type, Name, File, LineNumber,
+ DIScopeRef::get(getNonCompileUnitScope(Context)),
+ DITypeRef::get(DerivedFrom), SizeInBits, AlignInBits, 0, Flags, Elements,
+ RunTimeLang, DITypeRef::get(VTableHolder), nullptr, UniqueIdentifier);
+ if (!UniqueIdentifier.empty())
+ retainType(R);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *DIBuilder::createUnionType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags,
+ DINodeArray Elements, unsigned RunTimeLang, StringRef UniqueIdentifier) {
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_union_type, Name, File, LineNumber,
+ DIScopeRef::get(getNonCompileUnitScope(Scope)), nullptr, SizeInBits,
+ AlignInBits, 0, Flags, Elements, RunTimeLang, nullptr, nullptr,
+ UniqueIdentifier);
+ if (!UniqueIdentifier.empty())
+ retainType(R);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DISubroutineType *DIBuilder::createSubroutineType(DITypeRefArray ParameterTypes,
+ unsigned Flags) {
+ return DISubroutineType::get(VMContext, Flags, ParameterTypes);
+}
+
+DICompositeType *DIBuilder::createExternalTypeRef(unsigned Tag, DIFile *File,
+ StringRef UniqueIdentifier) {
+ assert(!UniqueIdentifier.empty() && "external type ref without uid");
+ auto *CTy =
+ DICompositeType::get(VMContext, Tag, "", nullptr, 0, nullptr, nullptr, 0,
+ 0, 0, DINode::FlagExternalTypeRef, nullptr, 0,
+ nullptr, nullptr, UniqueIdentifier);
+ // Types with unique IDs need to be in the type map.
+ retainType(CTy);
+ return CTy;
+}
+
+DICompositeType *DIBuilder::createEnumerationType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, DINodeArray Elements,
+ DIType *UnderlyingType, StringRef UniqueIdentifier) {
+ auto *CTy = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_enumeration_type, Name, File, LineNumber,
+ DIScopeRef::get(getNonCompileUnitScope(Scope)),
+ DITypeRef::get(UnderlyingType), SizeInBits, AlignInBits, 0, 0, Elements,
+ 0, nullptr, nullptr, UniqueIdentifier);
+ AllEnumTypes.push_back(CTy);
+ if (!UniqueIdentifier.empty())
+ retainType(CTy);
+ trackIfUnresolved(CTy);
+ return CTy;
+}
+
+DICompositeType *DIBuilder::createArrayType(uint64_t Size, uint64_t AlignInBits,
+ DIType *Ty,
+ DINodeArray Subscripts) {
+ auto *R = DICompositeType::get(VMContext, dwarf::DW_TAG_array_type, "",
+ nullptr, 0, nullptr, DITypeRef::get(Ty), Size,
+ AlignInBits, 0, 0, Subscripts, 0, nullptr);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *DIBuilder::createVectorType(uint64_t Size,
+ uint64_t AlignInBits, DIType *Ty,
+ DINodeArray Subscripts) {
+ auto *R =
+ DICompositeType::get(VMContext, dwarf::DW_TAG_array_type, "", nullptr, 0,
+ nullptr, DITypeRef::get(Ty), Size, AlignInBits, 0,
+ DINode::FlagVector, Subscripts, 0, nullptr);
+ trackIfUnresolved(R);
+ return R;
+}
+
+static DIType *createTypeWithFlags(LLVMContext &Context, DIType *Ty,
+ unsigned FlagsToSet) {
+ auto NewTy = Ty->clone();
+ NewTy->setFlags(NewTy->getFlags() | FlagsToSet);
+ return MDNode::replaceWithUniqued(std::move(NewTy));
+}
+
+DIType *DIBuilder::createArtificialType(DIType *Ty) {
+ // FIXME: Restrict this to the nodes where it's valid.
+ if (Ty->isArtificial())
+ return Ty;
+ return createTypeWithFlags(VMContext, Ty, DINode::FlagArtificial);
+}
+
+DIType *DIBuilder::createObjectPointerType(DIType *Ty) {
+ // FIXME: Restrict this to the nodes where it's valid.
+ if (Ty->isObjectPointer())
+ return Ty;
+ unsigned Flags = DINode::FlagObjectPointer | DINode::FlagArtificial;
+ return createTypeWithFlags(VMContext, Ty, Flags);
+}
+
+void DIBuilder::retainType(DIType *T) {
+ assert(T && "Expected non-null type");
+ AllRetainTypes.emplace_back(T);
+}
+
+DIBasicType *DIBuilder::createUnspecifiedParameter() { return nullptr; }
+
+DICompositeType *
+DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIScope *Scope,
+ DIFile *F, unsigned Line, unsigned RuntimeLang,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ StringRef UniqueIdentifier) {
+ // FIXME: Define in terms of createReplaceableForwardDecl() by calling
+ // replaceWithUniqued().
+ auto *RetTy = DICompositeType::get(
+ VMContext, Tag, Name, F, Line,
+ DIScopeRef::get(getNonCompileUnitScope(Scope)), nullptr, SizeInBits,
+ AlignInBits, 0, DINode::FlagFwdDecl, nullptr, RuntimeLang, nullptr,
+ nullptr, UniqueIdentifier);
+ if (!UniqueIdentifier.empty())
+ retainType(RetTy);
+ trackIfUnresolved(RetTy);
+ return RetTy;
+}
+
+DICompositeType *DIBuilder::createReplaceableCompositeType(
+ unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line,
+ unsigned RuntimeLang, uint64_t SizeInBits, uint64_t AlignInBits,
+ unsigned Flags, StringRef UniqueIdentifier) {
+ auto *RetTy = DICompositeType::getTemporary(
+ VMContext, Tag, Name, F, Line,
+ DIScopeRef::get(getNonCompileUnitScope(Scope)), nullptr,
+ SizeInBits, AlignInBits, 0, Flags, nullptr, RuntimeLang,
+ nullptr, nullptr, UniqueIdentifier)
+ .release();
+ if (!UniqueIdentifier.empty())
+ retainType(RetTy);
+ trackIfUnresolved(RetTy);
+ return RetTy;
+}
+
+DINodeArray DIBuilder::getOrCreateArray(ArrayRef<Metadata *> Elements) {
+ return MDTuple::get(VMContext, Elements);
+}
+
+DITypeRefArray DIBuilder::getOrCreateTypeArray(ArrayRef<Metadata *> Elements) {
+ SmallVector<llvm::Metadata *, 16> Elts;
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ if (Elements[i] && isa<MDNode>(Elements[i]))
+ Elts.push_back(DITypeRef::get(cast<DIType>(Elements[i])));
+ else
+ Elts.push_back(Elements[i]);
+ }
+ return DITypeRefArray(MDNode::get(VMContext, Elts));
+}
+
+DISubrange *DIBuilder::getOrCreateSubrange(int64_t Lo, int64_t Count) {
+ return DISubrange::get(VMContext, Count, Lo);
+}
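+
+// Illustrative sketch (assuming a DIBuilder `DIB` and a 32-bit basic type
+// `IntTy`): the debug type for `int a[10]` combines a subrange with
+// createArrayType() above:
+//
+//   Metadata *Subs[] = {DIB.getOrCreateSubrange(0, 10)};
+//   DICompositeType *ArrTy = DIB.createArrayType(
+//       /*Size=*/320, /*AlignInBits=*/32, IntTy, DIB.getOrCreateArray(Subs));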
+
+static void checkGlobalVariableScope(DIScope *Context) {
+#ifndef NDEBUG
+ if (auto *CT =
+ dyn_cast_or_null<DICompositeType>(getNonCompileUnitScope(Context)))
+ assert(CT->getIdentifier().empty() &&
+ "Context of a global variable should not be a type with identifier");
+#endif
+}
+
+DIGlobalVariable *DIBuilder::createGlobalVariable(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
+ unsigned LineNumber, DIType *Ty, bool isLocalToUnit, Constant *Val,
+ MDNode *Decl) {
+ checkGlobalVariableScope(Context);
+
+ auto *N = DIGlobalVariable::get(VMContext, cast_or_null<DIScope>(Context),
+ Name, LinkageName, F, LineNumber,
+ DITypeRef::get(Ty), isLocalToUnit, true, Val,
+ cast_or_null<DIDerivedType>(Decl));
+ AllGVs.push_back(N);
+ return N;
+}
+
+DIGlobalVariable *DIBuilder::createTempGlobalVariableFwdDecl(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
+ unsigned LineNumber, DIType *Ty, bool isLocalToUnit, Constant *Val,
+ MDNode *Decl) {
+ checkGlobalVariableScope(Context);
+
+ return DIGlobalVariable::getTemporary(
+ VMContext, cast_or_null<DIScope>(Context), Name, LinkageName, F,
+ LineNumber, DITypeRef::get(Ty), isLocalToUnit, false, Val,
+ cast_or_null<DIDerivedType>(Decl))
+ .release();
+}
+
+static DILocalVariable *createLocalVariable(
+ LLVMContext &VMContext,
+ DenseMap<MDNode *, std::vector<TrackingMDNodeRef>> &PreservedVariables,
+ DIScope *Scope, StringRef Name, unsigned ArgNo, DIFile *File,
+ unsigned LineNo, DIType *Ty, bool AlwaysPreserve, unsigned Flags) {
+ // FIXME: Why getNonCompileUnitScope()?
+ // FIXME: Why is "!Context" okay here?
+ // FIXME: Why doesn't this check for a subprogram or lexical block (AFAICT
+ // the only valid scopes)?
+ DIScope *Context = getNonCompileUnitScope(Scope);
+
+ auto *Node =
+ DILocalVariable::get(VMContext, cast_or_null<DILocalScope>(Context), Name,
+ File, LineNo, DITypeRef::get(Ty), ArgNo, Flags);
+ if (AlwaysPreserve) {
+ // The optimizer may remove local variables. If there is an interest in
+ // preserving variable info in such a situation, stash it in a named
+ // mdnode.
+ DISubprogram *Fn = getDISubprogram(Scope);
+ assert(Fn && "Missing subprogram for local variable");
+ PreservedVariables[Fn].emplace_back(Node);
+ }
+ return Node;
+}
+
+DILocalVariable *DIBuilder::createAutoVariable(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ DIType *Ty, bool AlwaysPreserve,
+ unsigned Flags) {
+ return createLocalVariable(VMContext, PreservedVariables, Scope, Name,
+ /* ArgNo */ 0, File, LineNo, Ty, AlwaysPreserve,
+ Flags);
+}
+
+DILocalVariable *DIBuilder::createParameterVariable(
+ DIScope *Scope, StringRef Name, unsigned ArgNo, DIFile *File,
+ unsigned LineNo, DIType *Ty, bool AlwaysPreserve, unsigned Flags) {
+ assert(ArgNo && "Expected non-zero argument number for parameter");
+ return createLocalVariable(VMContext, PreservedVariables, Scope, Name, ArgNo,
+ File, LineNo, Ty, AlwaysPreserve, Flags);
+}
+
+DIExpression *DIBuilder::createExpression(ArrayRef<uint64_t> Addr) {
+ return DIExpression::get(VMContext, Addr);
+}
+
+DIExpression *DIBuilder::createExpression(ArrayRef<int64_t> Signed) {
+ // TODO: Remove the callers of this signed version and delete it.
+ SmallVector<uint64_t, 8> Addr(Signed.begin(), Signed.end());
+ return createExpression(Addr);
+}
+
+DIExpression *DIBuilder::createBitPieceExpression(unsigned OffsetInBytes,
+ unsigned SizeInBytes) {
+ uint64_t Addr[] = {dwarf::DW_OP_bit_piece, OffsetInBytes, SizeInBytes};
+ return DIExpression::get(VMContext, Addr);
+}
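+
+// Illustrative sketch (assuming a DIBuilder `DIB`): describing the piece of
+// a variable starting at offset 4 with size 4, e.g. the upper half of a
+// 64-bit value that has been split across two locations:
+//
+//   DIExpression *Hi = DIB.createBitPieceExpression(/*OffsetInBytes=*/4,
+//                                                   /*SizeInBytes=*/4);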
+
+DISubprogram *DIBuilder::createFunction(
+ DIScopeRef Context, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+ bool isDefinition, unsigned ScopeLine, unsigned Flags, bool isOptimized,
+ DITemplateParameterArray TParams, DISubprogram *Decl) {
+ // dragonegg does not generate identifier for types, so using an empty map
+ // to resolve the context should be fine.
+ DITypeIdentifierMap EmptyMap;
+ return createFunction(Context.resolve(EmptyMap), Name, LinkageName, File,
+ LineNo, Ty, isLocalToUnit, isDefinition, ScopeLine,
+ Flags, isOptimized, TParams, Decl);
+}
+
+template <class... Ts>
+static DISubprogram *getSubprogram(bool IsDistinct, Ts &&... Args) {
+ if (IsDistinct)
+ return DISubprogram::getDistinct(std::forward<Ts>(Args)...);
+ return DISubprogram::get(std::forward<Ts>(Args)...);
+}
+
+DISubprogram *DIBuilder::createFunction(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+ bool isDefinition, unsigned ScopeLine, unsigned Flags, bool isOptimized,
+ DITemplateParameterArray TParams, DISubprogram *Decl) {
+ auto *Node =
+ getSubprogram(/* IsDistinct = */ isDefinition, VMContext,
+ DIScopeRef::get(getNonCompileUnitScope(Context)), Name,
+ LinkageName, File, LineNo, Ty, isLocalToUnit, isDefinition,
+ ScopeLine, nullptr, 0, 0, Flags, isOptimized, TParams, Decl,
+ MDTuple::getTemporary(VMContext, None).release());
+
+ if (isDefinition)
+ AllSubprograms.push_back(Node);
+ trackIfUnresolved(Node);
+ return Node;
+}
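+
+// Illustrative sketch (assuming a DIBuilder `DIB`, a compile unit `CU`, a
+// DIFile `File`, and a DISubroutineType `SubTy` from createSubroutineType()):
+//
+//   DISubprogram *SP = DIB.createFunction(
+//       CU, "main", /*LinkageName=*/"", File, /*LineNo=*/1, SubTy,
+//       /*isLocalToUnit=*/false, /*isDefinition=*/true, /*ScopeLine=*/1,
+//       /*Flags=*/0, /*isOptimized=*/false, /*TParams=*/nullptr,
+//       /*Decl=*/nullptr);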
+
+DISubprogram *DIBuilder::createTempFunctionFwdDecl(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+ bool isDefinition, unsigned ScopeLine, unsigned Flags, bool isOptimized,
+ DITemplateParameterArray TParams, DISubprogram *Decl) {
+ return DISubprogram::getTemporary(
+ VMContext, DIScopeRef::get(getNonCompileUnitScope(Context)), Name,
+ LinkageName, File, LineNo, Ty, isLocalToUnit, isDefinition,
+ ScopeLine, nullptr, 0, 0, Flags, isOptimized, TParams, Decl,
+ nullptr)
+ .release();
+}
+
+DISubprogram *
+DIBuilder::createMethod(DIScope *Context, StringRef Name, StringRef LinkageName,
+ DIFile *F, unsigned LineNo, DISubroutineType *Ty,
+ bool isLocalToUnit, bool isDefinition, unsigned VK,
+ unsigned VIndex, DIType *VTableHolder, unsigned Flags,
+ bool isOptimized, DITemplateParameterArray TParams) {
+ assert(getNonCompileUnitScope(Context) &&
+ "Methods should have both a Context and a context that isn't "
+ "the compile unit.");
+ // FIXME: Do we want to use different scope/lines?
+ auto *SP = getSubprogram(
+ /* IsDistinct = */ isDefinition, VMContext,
+ DIScopeRef::get(cast<DIScope>(Context)), Name, LinkageName, F, LineNo, Ty,
+ isLocalToUnit, isDefinition, LineNo, DITypeRef::get(VTableHolder), VK,
+ VIndex, Flags, isOptimized, TParams, nullptr, nullptr);
+
+ if (isDefinition)
+ AllSubprograms.push_back(SP);
+ trackIfUnresolved(SP);
+ return SP;
+}
+
+DINamespace *DIBuilder::createNameSpace(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNo) {
+ return DINamespace::get(VMContext, getNonCompileUnitScope(Scope), File, Name,
+ LineNo);
+}
+
+DIModule *DIBuilder::createModule(DIScope *Scope, StringRef Name,
+ StringRef ConfigurationMacros,
+ StringRef IncludePath,
+ StringRef ISysRoot) {
+ return DIModule::get(VMContext, getNonCompileUnitScope(Scope), Name,
+ ConfigurationMacros, IncludePath, ISysRoot);
+}
+
+DILexicalBlockFile *DIBuilder::createLexicalBlockFile(DIScope *Scope,
+ DIFile *File,
+ unsigned Discriminator) {
+ return DILexicalBlockFile::get(VMContext, Scope, File, Discriminator);
+}
+
+DILexicalBlock *DIBuilder::createLexicalBlock(DIScope *Scope, DIFile *File,
+ unsigned Line, unsigned Col) {
+ // Make these distinct, to avoid merging two lexical blocks on the same
+ // file/line/column.
+ return DILexicalBlock::getDistinct(VMContext, getNonCompileUnitScope(Scope),
+ File, Line, Col);
+}
+
+static Value *getDbgIntrinsicValueImpl(LLVMContext &VMContext, Value *V) {
+ assert(V && "no value passed to dbg intrinsic");
+ return MetadataAsValue::get(VMContext, ValueAsMetadata::get(V));
+}
+
+static Instruction *withDebugLoc(Instruction *I, const DILocation *DL) {
+ I->setDebugLoc(const_cast<DILocation *>(DL));
+ return I;
+}
+
+Instruction *DIBuilder::insertDeclare(Value *Storage, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ Instruction *InsertBefore) {
+ assert(VarInfo && "empty or invalid DILocalVariable* passed to dbg.declare");
+ assert(DL && "Expected debug loc");
+ assert(DL->getScope()->getSubprogram() ==
+ VarInfo->getScope()->getSubprogram() &&
+ "Expected matching subprograms");
+ if (!DeclareFn)
+ DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
+
+ trackIfUnresolved(VarInfo);
+ trackIfUnresolved(Expr);
+ Value *Args[] = {getDbgIntrinsicValueImpl(VMContext, Storage),
+ MetadataAsValue::get(VMContext, VarInfo),
+ MetadataAsValue::get(VMContext, Expr)};
+ return withDebugLoc(CallInst::Create(DeclareFn, Args, "", InsertBefore), DL);
+}
+
+Instruction *DIBuilder::insertDeclare(Value *Storage, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ BasicBlock *InsertAtEnd) {
+ assert(VarInfo && "empty or invalid DILocalVariable* passed to dbg.declare");
+ assert(DL && "Expected debug loc");
+ assert(DL->getScope()->getSubprogram() ==
+ VarInfo->getScope()->getSubprogram() &&
+ "Expected matching subprograms");
+ if (!DeclareFn)
+ DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
+
+ trackIfUnresolved(VarInfo);
+ trackIfUnresolved(Expr);
+ Value *Args[] = {getDbgIntrinsicValueImpl(VMContext, Storage),
+ MetadataAsValue::get(VMContext, VarInfo),
+ MetadataAsValue::get(VMContext, Expr)};
+
+ // If this block already has a terminator then insert this intrinsic
+ // before the terminator.
+ if (TerminatorInst *T = InsertAtEnd->getTerminator())
+ return withDebugLoc(CallInst::Create(DeclareFn, Args, "", T), DL);
+ return withDebugLoc(CallInst::Create(DeclareFn, Args, "", InsertAtEnd), DL);
+}
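+
+// Illustrative sketch (assuming a DIBuilder `DIB`, an AllocaInst `A`, a
+// DILocalVariable `Var` from createAutoVariable(), and a DILocation `Loc`
+// whose scope belongs to the same subprogram as Var):
+//
+//   DIB.insertDeclare(A, Var, DIB.createExpression(), Loc, A->getParent());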
+
+Instruction *DIBuilder::insertDbgValueIntrinsic(Value *V, uint64_t Offset,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ Instruction *InsertBefore) {
+ assert(V && "no value passed to dbg.value");
+ assert(VarInfo && "empty or invalid DILocalVariable* passed to dbg.value");
+ assert(DL && "Expected debug loc");
+ assert(DL->getScope()->getSubprogram() ==
+ VarInfo->getScope()->getSubprogram() &&
+ "Expected matching subprograms");
+ if (!ValueFn)
+ ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
+
+ trackIfUnresolved(VarInfo);
+ trackIfUnresolved(Expr);
+ Value *Args[] = {getDbgIntrinsicValueImpl(VMContext, V),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Offset),
+ MetadataAsValue::get(VMContext, VarInfo),
+ MetadataAsValue::get(VMContext, Expr)};
+ return withDebugLoc(CallInst::Create(ValueFn, Args, "", InsertBefore), DL);
+}
+
+Instruction *DIBuilder::insertDbgValueIntrinsic(Value *V, uint64_t Offset,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertAtEnd) {
+ assert(V && "no value passed to dbg.value");
+ assert(VarInfo && "empty or invalid DILocalVariable* passed to dbg.value");
+ assert(DL && "Expected debug loc");
+ assert(DL->getScope()->getSubprogram() ==
+ VarInfo->getScope()->getSubprogram() &&
+ "Expected matching subprograms");
+ if (!ValueFn)
+ ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
+
+ trackIfUnresolved(VarInfo);
+ trackIfUnresolved(Expr);
+ Value *Args[] = {getDbgIntrinsicValueImpl(VMContext, V),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Offset),
+ MetadataAsValue::get(VMContext, VarInfo),
+ MetadataAsValue::get(VMContext, Expr)};
+
+ return withDebugLoc(CallInst::Create(ValueFn, Args, "", InsertAtEnd), DL);
+}
+
+void DIBuilder::replaceVTableHolder(DICompositeType *&T,
+ DICompositeType *VTableHolder) {
+ {
+ TypedTrackingMDRef<DICompositeType> N(T);
+ N->replaceVTableHolder(DITypeRef::get(VTableHolder));
+ T = N.get();
+ }
+
+ // If this didn't create a self-reference, just return.
+ if (T != VTableHolder)
+ return;
+
+ // Look for unresolved operands. T will drop RAUW support, orphaning any
+ // cycles underneath it.
+ if (T->isResolved())
+ for (const MDOperand &O : T->operands())
+ if (auto *N = dyn_cast_or_null<MDNode>(O))
+ trackIfUnresolved(N);
+}
+
+void DIBuilder::replaceArrays(DICompositeType *&T, DINodeArray Elements,
+ DINodeArray TParams) {
+ {
+ TypedTrackingMDRef<DICompositeType> N(T);
+ if (Elements)
+ N->replaceElements(Elements);
+ if (TParams)
+ N->replaceTemplateParams(DITemplateParameterArray(TParams));
+ T = N.get();
+ }
+
+ // If T isn't resolved, there's no problem.
+ if (!T->isResolved())
+ return;
+
+ // If T is resolved, it may be due to a self-reference cycle. Track the
+ // arrays explicitly if they're unresolved, or else the cycles will be
+ // orphaned.
+ if (Elements)
+ trackIfUnresolved(Elements.get());
+ if (TParams)
+ trackIfUnresolved(TParams.get());
+}
diff --git a/contrib/llvm/lib/IR/DataLayout.cpp b/contrib/llvm/lib/IR/DataLayout.cpp
new file mode 100644
index 0000000..5468f47
--- /dev/null
+++ b/contrib/llvm/lib/IR/DataLayout.cpp
@@ -0,0 +1,793 @@
+//===-- DataLayout.cpp - Data size & alignment routines --------------------==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines layout properties related to datatype size/offset/alignment
+// information.
+//
+// This structure should be created once, filled in if the defaults are not
+// correct, and then passed around by const&. None of the member functions
+// require modification of the object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DataLayout.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdlib>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Support for StructLayout
+//===----------------------------------------------------------------------===//
+
+StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
+ assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
+ StructAlignment = 0;
+ StructSize = 0;
+ IsPadded = false;
+ NumElements = ST->getNumElements();
+
+ // Loop over each of the elements, placing them in memory.
+ for (unsigned i = 0, e = NumElements; i != e; ++i) {
+ Type *Ty = ST->getElementType(i);
+ unsigned TyAlign = ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty);
+
+ // Add padding if necessary to align the data element properly.
+ if ((StructSize & (TyAlign-1)) != 0) {
+ IsPadded = true;
+ StructSize = RoundUpToAlignment(StructSize, TyAlign);
+ }
+
+ // Keep track of maximum alignment constraint.
+ StructAlignment = std::max(TyAlign, StructAlignment);
+
+ MemberOffsets[i] = StructSize;
+ StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item
+ }
+
+ // Empty structures have alignment of 1 byte.
+ if (StructAlignment == 0) StructAlignment = 1;
+
+ // Add padding to the end of the struct so that it could be put in an array
+ // and all array elements would be aligned correctly.
+ if ((StructSize & (StructAlignment-1)) != 0) {
+ IsPadded = true;
+ StructSize = RoundUpToAlignment(StructSize, StructAlignment);
+ }
+}
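+
+// Worked example: for { i32, i8 } with a 4-byte ABI alignment for i32, the
+// loop above places the i32 at offset 0 and the i8 at offset 4, giving a
+// raw size of 5; the tail-padding step then rounds the size up to 8 so the
+// struct tiles correctly in an array, leaving StructAlignment == 4 and
+// IsPadded == true.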
+
+
+/// getElementContainingOffset - Given a valid offset into the structure,
+/// return the structure index that contains it.
+unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
+ const uint64_t *SI =
+ std::upper_bound(&MemberOffsets[0], &MemberOffsets[NumElements], Offset);
+ assert(SI != &MemberOffsets[0] && "Offset not in structure type!");
+ --SI;
+ assert(*SI <= Offset && "upper_bound didn't work");
+ assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) &&
+ (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) &&
+ "Upper bound didn't work!");
+
+ // Multiple fields can have the same offset if any of them are zero sized.
+ // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop
+ // at the i32 element, because it is the last element at that offset. This is
+ // the right one to return, because anything after it will have a higher
+ // offset, implying that this element is non-empty.
+ return SI-&MemberOffsets[0];
+}
+
+//===----------------------------------------------------------------------===//
+// LayoutAlignElem, LayoutAlign support
+//===----------------------------------------------------------------------===//
+
+LayoutAlignElem
+LayoutAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width) {
+ assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+ LayoutAlignElem retval;
+ retval.AlignType = align_type;
+ retval.ABIAlign = abi_align;
+ retval.PrefAlign = pref_align;
+ retval.TypeBitWidth = bit_width;
+ return retval;
+}
+
+bool
+LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
+ return (AlignType == rhs.AlignType
+ && ABIAlign == rhs.ABIAlign
+ && PrefAlign == rhs.PrefAlign
+ && TypeBitWidth == rhs.TypeBitWidth);
+}
+
+const LayoutAlignElem
+DataLayout::InvalidAlignmentElem = { INVALID_ALIGN, 0, 0, 0 };
+
+//===----------------------------------------------------------------------===//
+// PointerAlignElem, PointerAlign support
+//===----------------------------------------------------------------------===//
+
+PointerAlignElem
+PointerAlignElem::get(uint32_t AddressSpace, unsigned ABIAlign,
+ unsigned PrefAlign, uint32_t TypeByteWidth) {
+ assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
+ PointerAlignElem retval;
+ retval.AddressSpace = AddressSpace;
+ retval.ABIAlign = ABIAlign;
+ retval.PrefAlign = PrefAlign;
+ retval.TypeByteWidth = TypeByteWidth;
+ return retval;
+}
+
+bool
+PointerAlignElem::operator==(const PointerAlignElem &rhs) const {
+ return (ABIAlign == rhs.ABIAlign
+ && AddressSpace == rhs.AddressSpace
+ && PrefAlign == rhs.PrefAlign
+ && TypeByteWidth == rhs.TypeByteWidth);
+}
+
+const PointerAlignElem
+DataLayout::InvalidPointerElem = { 0U, 0U, 0U, ~0U };
+
+//===----------------------------------------------------------------------===//
+// DataLayout Class Implementation
+//===----------------------------------------------------------------------===//
+
+const char *DataLayout::getManglingComponent(const Triple &T) {
+ if (T.isOSBinFormatMachO())
+ return "-m:o";
+ if (T.isOSWindows() && T.isOSBinFormatCOFF())
+ return T.getArch() == Triple::x86 ? "-m:x" : "-m:w";
+ return "-m:e";
+}
+
+static const LayoutAlignElem DefaultAlignments[] = {
+ { INTEGER_ALIGN, 1, 1, 1 }, // i1
+ { INTEGER_ALIGN, 8, 1, 1 }, // i8
+ { INTEGER_ALIGN, 16, 2, 2 }, // i16
+ { INTEGER_ALIGN, 32, 4, 4 }, // i32
+ { INTEGER_ALIGN, 64, 4, 8 }, // i64
+ { FLOAT_ALIGN, 16, 2, 2 }, // half
+ { FLOAT_ALIGN, 32, 4, 4 }, // float
+ { FLOAT_ALIGN, 64, 8, 8 }, // double
+ { FLOAT_ALIGN, 128, 16, 16 }, // ppcf128, quad, ...
+ { VECTOR_ALIGN, 64, 8, 8 }, // v2i32, v1i64, ...
+ { VECTOR_ALIGN, 128, 16, 16 }, // v16i8, v8i16, v4i32, ...
+ { AGGREGATE_ALIGN, 0, 0, 8 } // struct
+};
+
+void DataLayout::reset(StringRef Desc) {
+ clear();
+
+ LayoutMap = nullptr;
+ BigEndian = false;
+ StackNaturalAlign = 0;
+ ManglingMode = MM_None;
+
+ // Default alignments
+ for (const LayoutAlignElem &E : DefaultAlignments) {
+ setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign, E.PrefAlign,
+ E.TypeBitWidth);
+ }
+ setPointerAlignment(0, 8, 8, 8);
+
+ parseSpecifier(Desc);
+}
+
+/// Checked version of split, to ensure mandatory subparts.
+static std::pair<StringRef, StringRef> split(StringRef Str, char Separator) {
+ assert(!Str.empty() && "parse error, string can't be empty here");
+ std::pair<StringRef, StringRef> Split = Str.split(Separator);
+ if (Split.second.empty() && Split.first != Str)
+ report_fatal_error("Trailing separator in datalayout string");
+ if (!Split.second.empty() && Split.first.empty())
+ report_fatal_error("Expected token before separator in datalayout string");
+ return Split;
+}
+
+/// Get an unsigned integer, including error checks.
+static unsigned getInt(StringRef R) {
+ unsigned Result;
+ bool error = R.getAsInteger(10, Result); (void)error;
+ if (error)
+ report_fatal_error("not a number, or does not fit in an unsigned int");
+ return Result;
+}
+
+/// Convert bits into bytes. Fatal error if not a byte-width multiple.
+static unsigned inBytes(unsigned Bits) {
+ if (Bits % 8)
+ report_fatal_error("number of bits must be a byte width multiple");
+ return Bits / 8;
+}
+
+void DataLayout::parseSpecifier(StringRef Desc) {
+ StringRepresentation = Desc;
+ while (!Desc.empty()) {
+ // Split at '-'.
+ std::pair<StringRef, StringRef> Split = split(Desc, '-');
+ Desc = Split.second;
+
+ // Split at ':'.
+ Split = split(Split.first, ':');
+
+ // Aliases used below.
+ StringRef &Tok = Split.first; // Current token.
+ StringRef &Rest = Split.second; // The rest of the string.
+
+ char Specifier = Tok.front();
+ Tok = Tok.substr(1);
+
+ switch (Specifier) {
+ case 's':
+ // Ignored for backward compatibility.
+ // FIXME: remove this on LLVM 4.0.
+ break;
+ case 'E':
+ BigEndian = true;
+ break;
+ case 'e':
+ BigEndian = false;
+ break;
+ case 'p': {
+ // Address space.
+ unsigned AddrSpace = Tok.empty() ? 0 : getInt(Tok);
+ if (!isUInt<24>(AddrSpace))
+ report_fatal_error("Invalid address space, must be a 24bit integer");
+
+ // Size.
+ if (Rest.empty())
+ report_fatal_error(
+ "Missing size specification for pointer in datalayout string");
+ Split = split(Rest, ':');
+ unsigned PointerMemSize = inBytes(getInt(Tok));
+ if (!PointerMemSize)
+ report_fatal_error("Invalid pointer size of 0 bytes");
+
+ // ABI alignment.
+ if (Rest.empty())
+ report_fatal_error(
+ "Missing alignment specification for pointer in datalayout string");
+ Split = split(Rest, ':');
+ unsigned PointerABIAlign = inBytes(getInt(Tok));
+ if (!isPowerOf2_64(PointerABIAlign))
+ report_fatal_error(
+ "Pointer ABI alignment must be a power of 2");
+
+ // Preferred alignment.
+ unsigned PointerPrefAlign = PointerABIAlign;
+ if (!Rest.empty()) {
+ Split = split(Rest, ':');
+ PointerPrefAlign = inBytes(getInt(Tok));
+ if (!isPowerOf2_64(PointerPrefAlign))
+ report_fatal_error(
+ "Pointer preferred alignment must be a power of 2");
+ }
+
+ setPointerAlignment(AddrSpace, PointerABIAlign, PointerPrefAlign,
+ PointerMemSize);
+ break;
+ }
+ case 'i':
+ case 'v':
+ case 'f':
+ case 'a': {
+ AlignTypeEnum AlignType;
+ switch (Specifier) {
+ default:
+ case 'i': AlignType = INTEGER_ALIGN; break;
+ case 'v': AlignType = VECTOR_ALIGN; break;
+ case 'f': AlignType = FLOAT_ALIGN; break;
+ case 'a': AlignType = AGGREGATE_ALIGN; break;
+ }
+
+ // Bit size.
+ unsigned Size = Tok.empty() ? 0 : getInt(Tok);
+
+ if (AlignType == AGGREGATE_ALIGN && Size != 0)
+ report_fatal_error(
+ "Sized aggregate specification in datalayout string");
+
+ // ABI alignment.
+ if (Rest.empty())
+ report_fatal_error(
+ "Missing alignment specification in datalayout string");
+ Split = split(Rest, ':');
+ unsigned ABIAlign = inBytes(getInt(Tok));
+ if (AlignType != AGGREGATE_ALIGN && !ABIAlign)
+ report_fatal_error(
+ "ABI alignment specification must be >0 for non-aggregate types");
+
+ // Preferred alignment.
+ unsigned PrefAlign = ABIAlign;
+ if (!Rest.empty()) {
+ Split = split(Rest, ':');
+ PrefAlign = inBytes(getInt(Tok));
+ }
+
+ setAlignment(AlignType, ABIAlign, PrefAlign, Size);
+
+ break;
+ }
+ case 'n': // Native integer types.
+ for (;;) {
+ unsigned Width = getInt(Tok);
+ if (Width == 0)
+ report_fatal_error(
+ "Zero width native integer type in datalayout string");
+ LegalIntWidths.push_back(Width);
+ if (Rest.empty())
+ break;
+ Split = split(Rest, ':');
+ }
+ break;
+ case 'S': { // Stack natural alignment.
+ StackNaturalAlign = inBytes(getInt(Tok));
+ break;
+ }
+ case 'm':
+ if (!Tok.empty())
+ report_fatal_error("Unexpected trailing characters after mangling specifier in datalayout string");
+ if (Rest.empty())
+ report_fatal_error("Expected mangling specifier in datalayout string");
+ if (Rest.size() > 1)
+ report_fatal_error("Unknown mangling specifier in datalayout string");
+ switch(Rest[0]) {
+ default:
+ report_fatal_error("Unknown mangling in datalayout string");
+ case 'e':
+ ManglingMode = MM_ELF;
+ break;
+ case 'o':
+ ManglingMode = MM_MachO;
+ break;
+ case 'm':
+ ManglingMode = MM_Mips;
+ break;
+ case 'w':
+ ManglingMode = MM_WinCOFF;
+ break;
+ case 'x':
+ ManglingMode = MM_WinCOFFX86;
+ break;
+ }
+ break;
+ default:
+ report_fatal_error("Unknown specifier in datalayout string");
+ break;
+ }
+ }
+}
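+
+// Example: the common x86-64 ELF string
+//
+//   "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+//
+// parses as little-endian ('e'), ELF mangling ("m:e"), i64 ABI-aligned to
+// 64 bits, x86_fp80 aligned to 128 bits, native integer widths of 8, 16,
+// 32, and 64 bits, and a natural stack alignment of 128 bits.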
+
+DataLayout::DataLayout(const Module *M) : LayoutMap(nullptr) {
+ init(M);
+}
+
+void DataLayout::init(const Module *M) { *this = M->getDataLayout(); }
+
+bool DataLayout::operator==(const DataLayout &Other) const {
+ bool Ret = BigEndian == Other.BigEndian &&
+ StackNaturalAlign == Other.StackNaturalAlign &&
+ ManglingMode == Other.ManglingMode &&
+ LegalIntWidths == Other.LegalIntWidths &&
+ Alignments == Other.Alignments && Pointers == Other.Pointers;
+ // Note: getStringRepresentation() might differ; it is not canonicalized.
+ return Ret;
+}
+
+void
+DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width) {
+ if (!isUInt<24>(bit_width))
+ report_fatal_error("Invalid bit width, must be a 24bit integer");
+ if (!isUInt<16>(abi_align))
+ report_fatal_error("Invalid ABI alignment, must be a 16bit integer");
+ if (!isUInt<16>(pref_align))
+ report_fatal_error("Invalid preferred alignment, must be a 16bit integer");
+ if (abi_align != 0 && !isPowerOf2_64(abi_align))
+ report_fatal_error("Invalid ABI alignment, must be a power of 2");
+ if (pref_align != 0 && !isPowerOf2_64(pref_align))
+ report_fatal_error("Invalid preferred alignment, must be a power of 2");
+
+ if (pref_align < abi_align)
+ report_fatal_error(
+ "Preferred alignment cannot be less than the ABI alignment");
+
+ for (LayoutAlignElem &Elem : Alignments) {
+ if (Elem.AlignType == (unsigned)align_type &&
+ Elem.TypeBitWidth == bit_width) {
+ // Update the abi, preferred alignments.
+ Elem.ABIAlign = abi_align;
+ Elem.PrefAlign = pref_align;
+ return;
+ }
+ }
+
+ Alignments.push_back(LayoutAlignElem::get(align_type, abi_align,
+ pref_align, bit_width));
+}
+
+DataLayout::PointersTy::iterator
+DataLayout::findPointerLowerBound(uint32_t AddressSpace) {
+ return std::lower_bound(Pointers.begin(), Pointers.end(), AddressSpace,
+ [](const PointerAlignElem &A, uint32_t AddressSpace) {
+ return A.AddressSpace < AddressSpace;
+ });
+}
+
+void DataLayout::setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
+ unsigned PrefAlign,
+ uint32_t TypeByteWidth) {
+ if (PrefAlign < ABIAlign)
+ report_fatal_error(
+ "Preferred alignment cannot be less than the ABI alignment");
+
+ PointersTy::iterator I = findPointerLowerBound(AddrSpace);
+ if (I == Pointers.end() || I->AddressSpace != AddrSpace) {
+ Pointers.insert(I, PointerAlignElem::get(AddrSpace, ABIAlign, PrefAlign,
+ TypeByteWidth));
+ } else {
+ I->ABIAlign = ABIAlign;
+ I->PrefAlign = PrefAlign;
+ I->TypeByteWidth = TypeByteWidth;
+ }
+}
+
+/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
+/// preferred if ABIInfo = false) the layout wants for the specified datatype.
+unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
+ uint32_t BitWidth, bool ABIInfo,
+ Type *Ty) const {
+ // Check to see if we have an exact match and remember the best match we see.
+ int BestMatchIdx = -1;
+ int LargestInt = -1;
+ for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
+ if (Alignments[i].AlignType == (unsigned)AlignType &&
+ Alignments[i].TypeBitWidth == BitWidth)
+ return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
+
+ // The best match so far depends on what we're looking for.
+ if (AlignType == INTEGER_ALIGN &&
+ Alignments[i].AlignType == INTEGER_ALIGN) {
+ // The "best match" for integers is the smallest size that is larger than
+ // the BitWidth requested.
+ if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
+ Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth))
+ BestMatchIdx = i;
+ // However, if there isn't one that's larger, then we must use the
+ // largest one we have (see below).
+ if (LargestInt == -1 ||
+ Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth)
+ LargestInt = i;
+ }
+ }
+
+ // Okay, we didn't find an exact solution. Fall back here depending on what
+ // is being looked for.
+ if (BestMatchIdx == -1) {
+ // If we didn't find an integer alignment, fall back on most conservative.
+ if (AlignType == INTEGER_ALIGN) {
+ BestMatchIdx = LargestInt;
+ } else if (AlignType == VECTOR_ALIGN) {
+ // By default, use natural alignment for vector types. This is consistent
+ // with what clang and llvm-gcc do.
+ unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+ Align *= cast<VectorType>(Ty)->getNumElements();
+ // If the alignment is not a power of 2, round up to the next power of 2.
+ // This happens for non-power-of-2 length vectors.
+ if (Align & (Align-1))
+ Align = NextPowerOf2(Align);
+ return Align;
+ }
+ }
+
+ // If we still couldn't find a reasonable default alignment, fall back
+ // to a simple heuristic that the alignment is the first power of two
+ // greater-or-equal to the store size of the type. This is a reasonable
+ // approximation of reality, and if the user wanted something less
+ // conservative, they should have specified it explicitly in the data
+ // layout.
+ if (BestMatchIdx == -1) {
+ unsigned Align = getTypeStoreSize(Ty);
+ if (Align & (Align-1))
+ Align = NextPowerOf2(Align);
+ return Align;
+ }
+
+ // Since we got a "best match" index, just return it.
+ return ABIInfo ? Alignments[BestMatchIdx].ABIAlign
+ : Alignments[BestMatchIdx].PrefAlign;
+}
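+
+// Example: with only the default integer entries (i1 through i64), a query
+// for i128 finds neither an exact match nor a larger integer entry, so
+// LargestInt selects the i64 entry and i128 is given i64's ABI or preferred
+// alignment.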
+
+namespace {
+
+class StructLayoutMap {
+ typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy;
+ LayoutInfoTy LayoutInfo;
+
+public:
+ ~StructLayoutMap() {
+ // Remove any layouts.
+ for (const auto &I : LayoutInfo) {
+ StructLayout *Value = I.second;
+ Value->~StructLayout();
+ free(Value);
+ }
+ }
+
+ StructLayout *&operator[](StructType *STy) {
+ return LayoutInfo[STy];
+ }
+};
+
+} // end anonymous namespace
+
+void DataLayout::clear() {
+ LegalIntWidths.clear();
+ Alignments.clear();
+ Pointers.clear();
+ delete static_cast<StructLayoutMap *>(LayoutMap);
+ LayoutMap = nullptr;
+}
+
+DataLayout::~DataLayout() {
+ clear();
+}
+
+const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
+ if (!LayoutMap)
+ LayoutMap = new StructLayoutMap();
+
+ StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap);
+ StructLayout *&SL = (*STM)[Ty];
+ if (SL) return SL;
+
+ // Otherwise, create the struct layout. Because it is variable length, we
+ // malloc it, then use placement new.
+ int NumElts = Ty->getNumElements();
+ StructLayout *L =
+ (StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t));
+
+ // Set SL before calling StructLayout's ctor. The ctor could cause other
+ // entries to be added to the map, invalidating our reference.
+ SL = L;
+
+ new (L) StructLayout(Ty, *this);
+
+ return L;
+}
+
+
+unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
+ PointersTy::const_iterator I = findPointerLowerBound(AS);
+ if (I == Pointers.end() || I->AddressSpace != AS) {
+ I = findPointerLowerBound(0);
+ assert(I->AddressSpace == 0);
+ }
+ return I->ABIAlign;
+}
+
+unsigned DataLayout::getPointerPrefAlignment(unsigned AS) const {
+ PointersTy::const_iterator I = findPointerLowerBound(AS);
+ if (I == Pointers.end() || I->AddressSpace != AS) {
+ I = findPointerLowerBound(0);
+ assert(I->AddressSpace == 0);
+ }
+ return I->PrefAlign;
+}
+
+unsigned DataLayout::getPointerSize(unsigned AS) const {
+ PointersTy::const_iterator I = findPointerLowerBound(AS);
+ if (I == Pointers.end() || I->AddressSpace != AS) {
+ I = findPointerLowerBound(0);
+ assert(I->AddressSpace == 0);
+ }
+ return I->TypeByteWidth;
+}
+
+unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "This should only be called with a pointer or pointer vector type");
+
+ if (Ty->isPointerTy())
+ return getTypeSizeInBits(Ty);
+
+ return getTypeSizeInBits(Ty->getScalarType());
+}
+
+/*!
+ \param abi_or_pref Flag that determines which alignment is returned. true
+ returns the ABI alignment, false returns the preferred alignment.
+ \param Ty The underlying type for which alignment is determined.
+
+ Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
+ == false) for the requested type \a Ty.
+ */
+unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+ int AlignType = -1;
+
+ assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
+ switch (Ty->getTypeID()) {
+ // Early escape for the non-numeric types.
+ case Type::LabelTyID:
+ return (abi_or_pref
+ ? getPointerABIAlignment(0)
+ : getPointerPrefAlignment(0));
+ case Type::PointerTyID: {
+ unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
+ return (abi_or_pref
+ ? getPointerABIAlignment(AS)
+ : getPointerPrefAlignment(AS));
+ }
+ case Type::ArrayTyID:
+ return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref);
+
+ case Type::StructTyID: {
+ // Packed structure types always have an ABI alignment of one.
+ if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
+ return 1;
+
+ // Get the layout annotation... which is lazily created on demand.
+ const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
+ unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
+ return std::max(Align, Layout->getAlignment());
+ }
+ case Type::IntegerTyID:
+ AlignType = INTEGER_ALIGN;
+ break;
+ case Type::HalfTyID:
+ case Type::FloatTyID:
+ case Type::DoubleTyID:
+ // PPC_FP128TyID and FP128TyID have different data contents, but the
+ // same size and alignment, so they look the same here.
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID:
+ case Type::X86_FP80TyID:
+ AlignType = FLOAT_ALIGN;
+ break;
+ case Type::X86_MMXTyID:
+ case Type::VectorTyID:
+ AlignType = VECTOR_ALIGN;
+ break;
+ default:
+ llvm_unreachable("Bad type for getAlignment!!!");
+ }
+
+ return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
+ abi_or_pref, Ty);
+}
+
+unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
+ return getAlignment(Ty, true);
+}
+
+/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
+/// an integer type of the specified bitwidth.
+unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+ return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
+}
+
+unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
+ return getAlignment(Ty, false);
+}
+
+unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const {
+ unsigned Align = getPrefTypeAlignment(Ty);
+ assert(!(Align & (Align-1)) && "Alignment is not a power of two!");
+ return Log2_32(Align);
+}
+
+IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
+ unsigned AddressSpace) const {
+ return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
+}
+
+Type *DataLayout::getIntPtrType(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "Expected a pointer or pointer vector type.");
+ unsigned NumBits = getPointerTypeSizeInBits(Ty);
+ IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
+ if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
+ return VectorType::get(IntTy, VecTy->getNumElements());
+ return IntTy;
+}
+
+Type *DataLayout::getSmallestLegalIntType(LLVMContext &C, unsigned Width) const {
+ for (unsigned LegalIntWidth : LegalIntWidths)
+ if (Width <= LegalIntWidth)
+ return Type::getIntNTy(C, LegalIntWidth);
+ return nullptr;
+}
+
+unsigned DataLayout::getLargestLegalIntTypeSize() const {
+ auto Max = std::max_element(LegalIntWidths.begin(), LegalIntWidths.end());
+ return Max != LegalIntWidths.end() ? *Max : 0;
+}
+
+uint64_t DataLayout::getIndexedOffset(Type *ptrTy,
+ ArrayRef<Value *> Indices) const {
+ Type *Ty = ptrTy;
+ assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()");
+ uint64_t Result = 0;
+
+ generic_gep_type_iterator<Value* const*>
+ TI = gep_type_begin(ptrTy, Indices);
+ for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX;
+ ++CurIDX, ++TI) {
+ if (StructType *STy = dyn_cast<StructType>(*TI)) {
+ assert(Indices[CurIDX]->getType() ==
+ Type::getInt32Ty(ptrTy->getContext()) &&
+ "Illegal struct idx");
+ unsigned FieldNo = cast<ConstantInt>(Indices[CurIDX])->getZExtValue();
+
+ // Get structure layout information...
+ const StructLayout *Layout = getStructLayout(STy);
+
+ // Add in the offset, as calculated by the structure layout info...
+ Result += Layout->getElementOffset(FieldNo);
+
+ // Update Ty to refer to current element
+ Ty = STy->getElementType(FieldNo);
+ } else {
+ // Update Ty to refer to current element
+ Ty = cast<SequentialType>(Ty)->getElementType();
+
+ // Get the array index and the size of each array element.
+ if (int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue())
+ Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty);
+ }
+ }
+
+ return Result;
+}
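+
+// Worked example: for %T = type { i32, i32 } and indices {i32 0, i32 1},
+// mirroring `getelementptr %T* %p, i32 0, i32 1`, the first index scales by
+// the size of %T and contributes 0, and the second selects field 1 of the
+// struct layout at offset 4, so the function returns 4.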
+
+/// getPreferredAlignment - Return the preferred alignment of the specified
+/// global. This includes an explicitly requested alignment (if the global
+/// has one).
+unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
+ Type *ElemType = GV->getType()->getElementType();
+ unsigned Alignment = getPrefTypeAlignment(ElemType);
+ unsigned GVAlignment = GV->getAlignment();
+ if (GVAlignment >= Alignment) {
+ Alignment = GVAlignment;
+ } else if (GVAlignment != 0) {
+ Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType));
+ }
+
+ if (GV->hasInitializer() && GVAlignment == 0) {
+ if (Alignment < 16) {
+ // If the global is not external, see if it is large. If so, give it a
+ // larger alignment.
+ if (getTypeSizeInBits(ElemType) > 128)
+ Alignment = 16; // 16-byte alignment.
+ }
+ }
+ return Alignment;
+}
+
+/// getPreferredAlignmentLog - Return the preferred alignment of the
+/// specified global, returned in log form. This includes an explicitly
+/// requested alignment (if the global has one).
+unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
+ return Log2_32(getPreferredAlignment(GV));
+}
+
diff --git a/contrib/llvm/lib/IR/DebugInfo.cpp b/contrib/llvm/lib/IR/DebugInfo.cpp
new file mode 100644
index 0000000..a2443be
--- /dev/null
+++ b/contrib/llvm/lib/IR/DebugInfo.cpp
@@ -0,0 +1,365 @@
+//===--- DebugInfo.cpp - Debug Information Helper Classes -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the helper classes used to build and interpret debug
+// information in LLVM IR form.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DebugInfo.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+using namespace llvm::dwarf;
+
+DISubprogram *llvm::getDISubprogram(const MDNode *Scope) {
+ if (auto *LocalScope = dyn_cast_or_null<DILocalScope>(Scope))
+ return LocalScope->getSubprogram();
+ return nullptr;
+}
+
+DISubprogram *llvm::getDISubprogram(const Function *F) {
+ // We look for the first instr that has a debug annotation leading back to F.
+ for (auto &BB : *F) {
+ auto Inst = std::find_if(BB.begin(), BB.end(), [](const Instruction &Inst) {
+ return Inst.getDebugLoc();
+ });
+ if (Inst == BB.end())
+ continue;
+ DebugLoc DLoc = Inst->getDebugLoc();
+ const MDNode *Scope = DLoc.getInlinedAtScope();
+ auto *Subprogram = getDISubprogram(Scope);
+ return Subprogram->describes(F) ? Subprogram : nullptr;
+ }
+
+ return nullptr;
+}
+
+DITypeIdentifierMap
+llvm::generateDITypeIdentifierMap(const NamedMDNode *CU_Nodes) {
+ DITypeIdentifierMap Map;
+ for (unsigned CUi = 0, CUe = CU_Nodes->getNumOperands(); CUi != CUe; ++CUi) {
+ auto *CU = cast<DICompileUnit>(CU_Nodes->getOperand(CUi));
+ DINodeArray Retain = CU->getRetainedTypes();
+ for (unsigned Ti = 0, Te = Retain.size(); Ti != Te; ++Ti) {
+ if (!isa<DICompositeType>(Retain[Ti]))
+ continue;
+ auto *Ty = cast<DICompositeType>(Retain[Ti]);
+ if (MDString *TypeId = Ty->getRawIdentifier()) {
+ // Definition has priority over declaration.
+ // Try to insert (TypeId, Ty) to Map.
+ std::pair<DITypeIdentifierMap::iterator, bool> P =
+ Map.insert(std::make_pair(TypeId, Ty));
+ // If TypeId already exists in Map and this is a definition, replace
+ // whatever we had (declaration or definition) with the definition.
+ if (!P.second && !Ty->isForwardDecl())
+ P.first->second = Ty;
+ }
+ }
+ }
+ return Map;
+}
+
+//===----------------------------------------------------------------------===//
+// DebugInfoFinder implementations.
+//===----------------------------------------------------------------------===//
+
+void DebugInfoFinder::reset() {
+ CUs.clear();
+ SPs.clear();
+ GVs.clear();
+ TYs.clear();
+ Scopes.clear();
+ NodesSeen.clear();
+ TypeIdentifierMap.clear();
+ TypeMapInitialized = false;
+}
+
+void DebugInfoFinder::InitializeTypeMap(const Module &M) {
+ if (!TypeMapInitialized)
+ if (NamedMDNode *CU_Nodes = M.getNamedMetadata("llvm.dbg.cu")) {
+ TypeIdentifierMap = generateDITypeIdentifierMap(CU_Nodes);
+ TypeMapInitialized = true;
+ }
+}
+
+void DebugInfoFinder::processModule(const Module &M) {
+ InitializeTypeMap(M);
+ if (NamedMDNode *CU_Nodes = M.getNamedMetadata("llvm.dbg.cu")) {
+ for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
+ auto *CU = cast<DICompileUnit>(CU_Nodes->getOperand(i));
+ addCompileUnit(CU);
+ for (auto *DIG : CU->getGlobalVariables()) {
+ if (addGlobalVariable(DIG)) {
+ processScope(DIG->getScope());
+ processType(DIG->getType().resolve(TypeIdentifierMap));
+ }
+ }
+ for (auto *SP : CU->getSubprograms())
+ processSubprogram(SP);
+ for (auto *ET : CU->getEnumTypes())
+ processType(ET);
+ for (auto *RT : CU->getRetainedTypes())
+ processType(RT);
+ for (auto *Import : CU->getImportedEntities()) {
+ auto *Entity = Import->getEntity().resolve(TypeIdentifierMap);
+ if (auto *T = dyn_cast<DIType>(Entity))
+ processType(T);
+ else if (auto *SP = dyn_cast<DISubprogram>(Entity))
+ processSubprogram(SP);
+ else if (auto *NS = dyn_cast<DINamespace>(Entity))
+ processScope(NS->getScope());
+ else if (auto *M = dyn_cast<DIModule>(Entity))
+ processScope(M->getScope());
+ }
+ }
+ }
+}
+
+void DebugInfoFinder::processLocation(const Module &M, const DILocation *Loc) {
+ if (!Loc)
+ return;
+ InitializeTypeMap(M);
+ processScope(Loc->getScope());
+ processLocation(M, Loc->getInlinedAt());
+}
+
+void DebugInfoFinder::processType(DIType *DT) {
+ if (!addType(DT))
+ return;
+ processScope(DT->getScope().resolve(TypeIdentifierMap));
+ if (auto *ST = dyn_cast<DISubroutineType>(DT)) {
+ for (DITypeRef Ref : ST->getTypeArray())
+ processType(Ref.resolve(TypeIdentifierMap));
+ return;
+ }
+ if (auto *DCT = dyn_cast<DICompositeType>(DT)) {
+ processType(DCT->getBaseType().resolve(TypeIdentifierMap));
+ for (Metadata *D : DCT->getElements()) {
+ if (auto *T = dyn_cast<DIType>(D))
+ processType(T);
+ else if (auto *SP = dyn_cast<DISubprogram>(D))
+ processSubprogram(SP);
+ }
+ return;
+ }
+ if (auto *DDT = dyn_cast<DIDerivedType>(DT)) {
+ processType(DDT->getBaseType().resolve(TypeIdentifierMap));
+ }
+}
+
+void DebugInfoFinder::processScope(DIScope *Scope) {
+ if (!Scope)
+ return;
+ if (auto *Ty = dyn_cast<DIType>(Scope)) {
+ processType(Ty);
+ return;
+ }
+ if (auto *CU = dyn_cast<DICompileUnit>(Scope)) {
+ addCompileUnit(CU);
+ return;
+ }
+ if (auto *SP = dyn_cast<DISubprogram>(Scope)) {
+ processSubprogram(SP);
+ return;
+ }
+ if (!addScope(Scope))
+ return;
+ if (auto *LB = dyn_cast<DILexicalBlockBase>(Scope)) {
+ processScope(LB->getScope());
+ } else if (auto *NS = dyn_cast<DINamespace>(Scope)) {
+ processScope(NS->getScope());
+ } else if (auto *M = dyn_cast<DIModule>(Scope)) {
+ processScope(M->getScope());
+ }
+}
+
+void DebugInfoFinder::processSubprogram(DISubprogram *SP) {
+ if (!addSubprogram(SP))
+ return;
+ processScope(SP->getScope().resolve(TypeIdentifierMap));
+ processType(SP->getType());
+ for (auto *Element : SP->getTemplateParams()) {
+ if (auto *TType = dyn_cast<DITemplateTypeParameter>(Element)) {
+ processType(TType->getType().resolve(TypeIdentifierMap));
+ } else if (auto *TVal = dyn_cast<DITemplateValueParameter>(Element)) {
+ processType(TVal->getType().resolve(TypeIdentifierMap));
+ }
+ }
+}
+
+void DebugInfoFinder::processDeclare(const Module &M,
+ const DbgDeclareInst *DDI) {
+ auto *N = dyn_cast<MDNode>(DDI->getVariable());
+ if (!N)
+ return;
+ InitializeTypeMap(M);
+
+ auto *DV = dyn_cast<DILocalVariable>(N);
+ if (!DV)
+ return;
+
+ if (!NodesSeen.insert(DV).second)
+ return;
+ processScope(DV->getScope());
+ processType(DV->getType().resolve(TypeIdentifierMap));
+}
+
+void DebugInfoFinder::processValue(const Module &M, const DbgValueInst *DVI) {
+ auto *N = dyn_cast<MDNode>(DVI->getVariable());
+ if (!N)
+ return;
+ InitializeTypeMap(M);
+
+ auto *DV = dyn_cast<DILocalVariable>(N);
+ if (!DV)
+ return;
+
+ if (!NodesSeen.insert(DV).second)
+ return;
+ processScope(DV->getScope());
+ processType(DV->getType().resolve(TypeIdentifierMap));
+}
+
+bool DebugInfoFinder::addType(DIType *DT) {
+ if (!DT)
+ return false;
+
+ if (!NodesSeen.insert(DT).second)
+ return false;
+
+  TYs.push_back(DT);
+ return true;
+}
+
+bool DebugInfoFinder::addCompileUnit(DICompileUnit *CU) {
+ if (!CU)
+ return false;
+ if (!NodesSeen.insert(CU).second)
+ return false;
+
+ CUs.push_back(CU);
+ return true;
+}
+
+bool DebugInfoFinder::addGlobalVariable(DIGlobalVariable *DIG) {
+ if (!DIG)
+ return false;
+
+ if (!NodesSeen.insert(DIG).second)
+ return false;
+
+ GVs.push_back(DIG);
+ return true;
+}
+
+bool DebugInfoFinder::addSubprogram(DISubprogram *SP) {
+ if (!SP)
+ return false;
+
+ if (!NodesSeen.insert(SP).second)
+ return false;
+
+ SPs.push_back(SP);
+ return true;
+}
+
+bool DebugInfoFinder::addScope(DIScope *Scope) {
+ if (!Scope)
+ return false;
+  // FIXME: The OCaml bindings generate scopes with no content; we treat such
+  // a scope as null for now.
+ if (Scope->getNumOperands() == 0)
+ return false;
+ if (!NodesSeen.insert(Scope).second)
+ return false;
+ Scopes.push_back(Scope);
+ return true;
+}
+
+bool llvm::stripDebugInfo(Function &F) {
+ bool Changed = false;
+ if (F.getSubprogram()) {
+ Changed = true;
+ F.setSubprogram(nullptr);
+ }
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ if (I.getDebugLoc()) {
+ Changed = true;
+ I.setDebugLoc(DebugLoc());
+ }
+ }
+ }
+ return Changed;
+}
+
+bool llvm::StripDebugInfo(Module &M) {
+ bool Changed = false;
+
+ // Remove all of the calls to the debugger intrinsics, and remove them from
+ // the module.
+ if (Function *Declare = M.getFunction("llvm.dbg.declare")) {
+ while (!Declare->use_empty()) {
+ CallInst *CI = cast<CallInst>(Declare->user_back());
+ CI->eraseFromParent();
+ }
+ Declare->eraseFromParent();
+ Changed = true;
+ }
+
+ if (Function *DbgVal = M.getFunction("llvm.dbg.value")) {
+ while (!DbgVal->use_empty()) {
+ CallInst *CI = cast<CallInst>(DbgVal->user_back());
+ CI->eraseFromParent();
+ }
+ DbgVal->eraseFromParent();
+ Changed = true;
+ }
+
+ for (Module::named_metadata_iterator NMI = M.named_metadata_begin(),
+ NME = M.named_metadata_end(); NMI != NME;) {
+ NamedMDNode *NMD = &*NMI;
+ ++NMI;
+ if (NMD->getName().startswith("llvm.dbg.")) {
+ NMD->eraseFromParent();
+ Changed = true;
+ }
+ }
+
+ for (Function &F : M)
+ Changed |= stripDebugInfo(F);
+
+ if (GVMaterializer *Materializer = M.getMaterializer())
+ Materializer->setStripDebugInfo();
+
+ return Changed;
+}
+
+unsigned llvm::getDebugMetadataVersionFromModule(const Module &M) {
+ if (auto *Val = mdconst::dyn_extract_or_null<ConstantInt>(
+ M.getModuleFlag("Debug Info Version")))
+ return Val->getZExtValue();
+ return 0;
+}
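+
+// A typical caller pairs the two entry points above; roughly what the bitcode
+// auto-upgrader does (a sketch, not code in this file):
+//
+//   if (getDebugMetadataVersionFromModule(M) != DEBUG_METADATA_VERSION)
+//     StripDebugInfo(M);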
diff --git a/contrib/llvm/lib/IR/DebugInfoMetadata.cpp b/contrib/llvm/lib/IR/DebugInfoMetadata.cpp
new file mode 100644
index 0000000..58e0abd
--- /dev/null
+++ b/contrib/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -0,0 +1,580 @@
+//===- DebugInfoMetadata.cpp - Implement debug info metadata --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the debug info Metadata classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "LLVMContextImpl.h"
+#include "MetadataImpl.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/IR/Function.h"
+
+using namespace llvm;
+
+DILocation::DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned Column, ArrayRef<Metadata *> MDs)
+ : MDNode(C, DILocationKind, Storage, MDs) {
+ assert((MDs.size() == 1 || MDs.size() == 2) &&
+ "Expected a scope and optional inlined-at");
+
+ // Set line and column.
+ assert(Column < (1u << 16) && "Expected 16-bit column");
+
+ SubclassData32 = Line;
+ SubclassData16 = Column;
+}
+
+static void adjustColumn(unsigned &Column) {
+ // Set to unknown on overflow. We only have 16 bits to play with here.
+ if (Column >= (1u << 16))
+ Column = 0;
+}
+
+DILocation *DILocation::getImpl(LLVMContext &Context, unsigned Line,
+ unsigned Column, Metadata *Scope,
+ Metadata *InlinedAt, StorageType Storage,
+ bool ShouldCreate) {
+ // Fixup column.
+ adjustColumn(Column);
+
+ assert(Scope && "Expected scope");
+ if (Storage == Uniqued) {
+ if (auto *N =
+ getUniqued(Context.pImpl->DILocations,
+ DILocationInfo::KeyTy(Line, Column, Scope, InlinedAt)))
+ return N;
+ if (!ShouldCreate)
+ return nullptr;
+ } else {
+ assert(ShouldCreate && "Expected non-uniqued nodes to always be created");
+ }
+
+ SmallVector<Metadata *, 2> Ops;
+ Ops.push_back(Scope);
+ if (InlinedAt)
+ Ops.push_back(InlinedAt);
+ return storeImpl(new (Ops.size())
+ DILocation(Context, Storage, Line, Column, Ops),
+ Storage, Context.pImpl->DILocations);
+}
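+
+// Uniqued storage makes locations pointer-comparable: asking for the same
+// (line, column, scope, inlined-at) tuple twice yields the same node. For
+// example (illustrative):
+//
+//   auto *L1 = DILocation::get(Ctx, 7, 3, Scope);
+//   auto *L2 = DILocation::get(Ctx, 7, 3, Scope);
+//   assert(L1 == L2);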
+
+unsigned DILocation::computeNewDiscriminator() const {
+ // FIXME: This seems completely wrong.
+ //
+ // 1. If two modules are generated in the same context, then the second
+ // Module will get different discriminators than it would have if it were
+ // generated in its own context.
+ // 2. If this function is called after round-tripping to bitcode instead of
+ // before, it will give a different (and potentially incorrect!) return.
+ //
+ // The discriminator should instead be calculated from local information
+ // where it's actually needed. This logic should be moved to
+ // AddDiscriminators::runOnFunction(), where it doesn't pollute the
+ // LLVMContext.
+ std::pair<const char *, unsigned> Key(getFilename().data(), getLine());
+ return ++getContext().pImpl->DiscriminatorTable[Key];
+}
+
+unsigned DINode::getFlag(StringRef Flag) {
+ return StringSwitch<unsigned>(Flag)
+#define HANDLE_DI_FLAG(ID, NAME) .Case("DIFlag" #NAME, Flag##NAME)
+#include "llvm/IR/DebugInfoFlags.def"
+ .Default(0);
+}
+
+const char *DINode::getFlagString(unsigned Flag) {
+ switch (Flag) {
+ default:
+ return "";
+#define HANDLE_DI_FLAG(ID, NAME) \
+ case Flag##NAME: \
+ return "DIFlag" #NAME;
+#include "llvm/IR/DebugInfoFlags.def"
+ }
+}
+
+unsigned DINode::splitFlags(unsigned Flags,
+ SmallVectorImpl<unsigned> &SplitFlags) {
+ // Accessibility flags need to be specially handled, since they're packed
+ // together.
+ if (unsigned A = Flags & FlagAccessibility) {
+ if (A == FlagPrivate)
+ SplitFlags.push_back(FlagPrivate);
+ else if (A == FlagProtected)
+ SplitFlags.push_back(FlagProtected);
+ else
+ SplitFlags.push_back(FlagPublic);
+ Flags &= ~A;
+ }
+
+#define HANDLE_DI_FLAG(ID, NAME) \
+ if (unsigned Bit = Flags & ID) { \
+ SplitFlags.push_back(Bit); \
+ Flags &= ~Bit; \
+ }
+#include "llvm/IR/DebugInfoFlags.def"
+
+ return Flags;
+}
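+
+// For example (illustrative):
+//
+//   SmallVector<unsigned, 8> Split;
+//   unsigned Rest =
+//       DINode::splitFlags(DINode::FlagPublic | DINode::FlagVirtual, Split);
+//   // Now Split == {FlagPublic, FlagVirtual} and Rest == 0.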
+
+DIScopeRef DIScope::getScope() const {
+ if (auto *T = dyn_cast<DIType>(this))
+ return T->getScope();
+
+ if (auto *SP = dyn_cast<DISubprogram>(this))
+ return SP->getScope();
+
+ if (auto *LB = dyn_cast<DILexicalBlockBase>(this))
+ return DIScopeRef(LB->getScope());
+
+ if (auto *NS = dyn_cast<DINamespace>(this))
+ return DIScopeRef(NS->getScope());
+
+ if (auto *M = dyn_cast<DIModule>(this))
+ return DIScopeRef(M->getScope());
+
+ assert((isa<DIFile>(this) || isa<DICompileUnit>(this)) &&
+ "Unhandled type of scope.");
+ return nullptr;
+}
+
+StringRef DIScope::getName() const {
+ if (auto *T = dyn_cast<DIType>(this))
+ return T->getName();
+ if (auto *SP = dyn_cast<DISubprogram>(this))
+ return SP->getName();
+ if (auto *NS = dyn_cast<DINamespace>(this))
+ return NS->getName();
+ if (auto *M = dyn_cast<DIModule>(this))
+ return M->getName();
+ assert((isa<DILexicalBlockBase>(this) || isa<DIFile>(this) ||
+ isa<DICompileUnit>(this)) &&
+ "Unhandled type of scope.");
+ return "";
+}
+
+static StringRef getString(const MDString *S) {
+ if (S)
+ return S->getString();
+ return StringRef();
+}
+
+#ifndef NDEBUG
+static bool isCanonical(const MDString *S) {
+ return !S || !S->getString().empty();
+}
+#endif
+
+GenericDINode *GenericDINode::getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Header,
+ ArrayRef<Metadata *> DwarfOps,
+ StorageType Storage, bool ShouldCreate) {
+ unsigned Hash = 0;
+ if (Storage == Uniqued) {
+ GenericDINodeInfo::KeyTy Key(Tag, getString(Header), DwarfOps);
+ if (auto *N = getUniqued(Context.pImpl->GenericDINodes, Key))
+ return N;
+ if (!ShouldCreate)
+ return nullptr;
+ Hash = Key.getHash();
+ } else {
+ assert(ShouldCreate && "Expected non-uniqued nodes to always be created");
+ }
+
+ // Use a nullptr for empty headers.
+ assert(isCanonical(Header) && "Expected canonical MDString");
+ Metadata *PreOps[] = {Header};
+ return storeImpl(new (DwarfOps.size() + 1) GenericDINode(
+ Context, Storage, Hash, Tag, PreOps, DwarfOps),
+ Storage, Context.pImpl->GenericDINodes);
+}
+
+void GenericDINode::recalculateHash() {
+ setHash(GenericDINodeInfo::KeyTy::calculateHash(this));
+}
+
+#define UNWRAP_ARGS_IMPL(...) __VA_ARGS__
+#define UNWRAP_ARGS(ARGS) UNWRAP_ARGS_IMPL ARGS
+#define DEFINE_GETIMPL_LOOKUP(CLASS, ARGS) \
+ do { \
+ if (Storage == Uniqued) { \
+ if (auto *N = getUniqued(Context.pImpl->CLASS##s, \
+ CLASS##Info::KeyTy(UNWRAP_ARGS(ARGS)))) \
+ return N; \
+ if (!ShouldCreate) \
+ return nullptr; \
+ } else { \
+ assert(ShouldCreate && \
+ "Expected non-uniqued nodes to always be created"); \
+ } \
+ } while (false)
+#define DEFINE_GETIMPL_STORE(CLASS, ARGS, OPS) \
+ return storeImpl(new (ArrayRef<Metadata *>(OPS).size()) \
+ CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \
+ Storage, Context.pImpl->CLASS##s)
+#define DEFINE_GETIMPL_STORE_NO_OPS(CLASS, ARGS) \
+ return storeImpl(new (0u) CLASS(Context, Storage, UNWRAP_ARGS(ARGS)), \
+ Storage, Context.pImpl->CLASS##s)
+#define DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(CLASS, OPS) \
+ return storeImpl(new (ArrayRef<Metadata *>(OPS).size()) \
+ CLASS(Context, Storage, OPS), \
+ Storage, Context.pImpl->CLASS##s)
+
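+// For example, DEFINE_GETIMPL_LOOKUP(DISubrange, (Count, Lo)) expands roughly
+// to the uniquing lookup:
+//
+//   if (Storage == Uniqued) {
+//     if (auto *N = getUniqued(Context.pImpl->DISubranges,
+//                              DISubrangeInfo::KeyTy(Count, Lo)))
+//       return N;
+//     if (!ShouldCreate)
+//       return nullptr;
+//   }
+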
+DISubrange *DISubrange::getImpl(LLVMContext &Context, int64_t Count, int64_t Lo,
+ StorageType Storage, bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DISubrange, (Count, Lo));
+ DEFINE_GETIMPL_STORE_NO_OPS(DISubrange, (Count, Lo));
+}
+
+DIEnumerator *DIEnumerator::getImpl(LLVMContext &Context, int64_t Value,
+ MDString *Name, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIEnumerator, (Value, getString(Name)));
+ Metadata *Ops[] = {Name};
+ DEFINE_GETIMPL_STORE(DIEnumerator, (Value), Ops);
+}
+
+DIBasicType *DIBasicType::getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(
+ DIBasicType, (Tag, getString(Name), SizeInBits, AlignInBits, Encoding));
+ Metadata *Ops[] = {nullptr, nullptr, Name};
+ DEFINE_GETIMPL_STORE(DIBasicType, (Tag, SizeInBits, AlignInBits, Encoding),
+ Ops);
+}
+
+DIDerivedType *DIDerivedType::getImpl(
+ LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ Metadata *ExtraData, StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIDerivedType, (Tag, getString(Name), File, Line, Scope,
+ BaseType, SizeInBits, AlignInBits,
+ OffsetInBits, Flags, ExtraData));
+ Metadata *Ops[] = {File, Scope, Name, BaseType, ExtraData};
+ DEFINE_GETIMPL_STORE(
+ DIDerivedType, (Tag, Line, SizeInBits, AlignInBits, OffsetInBits, Flags),
+ Ops);
+}
+
+DICompositeType *DICompositeType::getImpl(
+ LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ Metadata *Elements, unsigned RuntimeLang, Metadata *VTableHolder,
+ Metadata *TemplateParams, MDString *Identifier, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DICompositeType,
+ (Tag, getString(Name), File, Line, Scope, BaseType,
+ SizeInBits, AlignInBits, OffsetInBits, Flags, Elements,
+ RuntimeLang, VTableHolder, TemplateParams,
+ getString(Identifier)));
+ Metadata *Ops[] = {File, Scope, Name, BaseType,
+ Elements, VTableHolder, TemplateParams, Identifier};
+ DEFINE_GETIMPL_STORE(DICompositeType, (Tag, Line, RuntimeLang, SizeInBits,
+ AlignInBits, OffsetInBits, Flags),
+ Ops);
+}
+
+DISubroutineType *DISubroutineType::getImpl(LLVMContext &Context,
+ unsigned Flags, Metadata *TypeArray,
+ StorageType Storage,
+ bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DISubroutineType, (Flags, TypeArray));
+ Metadata *Ops[] = {nullptr, nullptr, nullptr, TypeArray};
+ DEFINE_GETIMPL_STORE(DISubroutineType, (Flags), Ops);
+}
+
+DIFile *DIFile::getImpl(LLVMContext &Context, MDString *Filename,
+ MDString *Directory, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Filename) && "Expected canonical MDString");
+ assert(isCanonical(Directory) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIFile, (getString(Filename), getString(Directory)));
+ Metadata *Ops[] = {Filename, Directory};
+ DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DIFile, Ops);
+}
+
+DICompileUnit *DICompileUnit::getImpl(
+ LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
+ MDString *Producer, bool IsOptimized, MDString *Flags,
+ unsigned RuntimeVersion, MDString *SplitDebugFilename,
+ unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
+ Metadata *Subprograms, Metadata *GlobalVariables,
+ Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId,
+ StorageType Storage, bool ShouldCreate) {
+ assert(Storage != Uniqued && "Cannot unique DICompileUnit");
+ assert(isCanonical(Producer) && "Expected canonical MDString");
+ assert(isCanonical(Flags) && "Expected canonical MDString");
+ assert(isCanonical(SplitDebugFilename) && "Expected canonical MDString");
+
+ Metadata *Ops[] = {File, Producer, Flags, SplitDebugFilename, EnumTypes,
+ RetainedTypes, Subprograms, GlobalVariables,
+ ImportedEntities, Macros};
+ return storeImpl(new (ArrayRef<Metadata *>(Ops).size()) DICompileUnit(
+ Context, Storage, SourceLanguage, IsOptimized,
+ RuntimeVersion, EmissionKind, DWOId, Ops),
+ Storage);
+}
+
+DISubprogram *DILocalScope::getSubprogram() const {
+ if (auto *Block = dyn_cast<DILexicalBlockBase>(this))
+ return Block->getScope()->getSubprogram();
+ return const_cast<DISubprogram *>(cast<DISubprogram>(this));
+}
+
+DISubprogram *DISubprogram::getImpl(
+ LLVMContext &Context, Metadata *Scope, MDString *Name,
+ MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
+ Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
+ unsigned Flags, bool IsOptimized, Metadata *TemplateParams,
+ Metadata *Declaration, Metadata *Variables, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ assert(isCanonical(LinkageName) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DISubprogram,
+ (Scope, getString(Name), getString(LinkageName), File,
+ Line, Type, IsLocalToUnit, IsDefinition, ScopeLine,
+ ContainingType, Virtuality, VirtualIndex, Flags,
+ IsOptimized, TemplateParams, Declaration, Variables));
+ Metadata *Ops[] = {File, Scope, Name, Name,
+ LinkageName, Type, ContainingType, TemplateParams,
+ Declaration, Variables};
+ DEFINE_GETIMPL_STORE(DISubprogram,
+ (Line, ScopeLine, Virtuality, VirtualIndex, Flags,
+ IsLocalToUnit, IsDefinition, IsOptimized),
+ Ops);
+}
+
+bool DISubprogram::describes(const Function *F) const {
+ assert(F && "Invalid function");
+ if (F->getSubprogram() == this)
+ return true;
+ StringRef Name = getLinkageName();
+ if (Name.empty())
+ Name = getName();
+ return F->getName() == Name;
+}
+
+DILexicalBlock *DILexicalBlock::getImpl(LLVMContext &Context, Metadata *Scope,
+ Metadata *File, unsigned Line,
+ unsigned Column, StorageType Storage,
+ bool ShouldCreate) {
+ // Fixup column.
+ adjustColumn(Column);
+
+ assert(Scope && "Expected scope");
+ DEFINE_GETIMPL_LOOKUP(DILexicalBlock, (Scope, File, Line, Column));
+ Metadata *Ops[] = {File, Scope};
+ DEFINE_GETIMPL_STORE(DILexicalBlock, (Line, Column), Ops);
+}
+
+DILexicalBlockFile *DILexicalBlockFile::getImpl(LLVMContext &Context,
+ Metadata *Scope, Metadata *File,
+ unsigned Discriminator,
+ StorageType Storage,
+ bool ShouldCreate) {
+ assert(Scope && "Expected scope");
+ DEFINE_GETIMPL_LOOKUP(DILexicalBlockFile, (Scope, File, Discriminator));
+ Metadata *Ops[] = {File, Scope};
+ DEFINE_GETIMPL_STORE(DILexicalBlockFile, (Discriminator), Ops);
+}
+
+DINamespace *DINamespace::getImpl(LLVMContext &Context, Metadata *Scope,
+ Metadata *File, MDString *Name, unsigned Line,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DINamespace, (Scope, File, getString(Name), Line));
+ Metadata *Ops[] = {File, Scope, Name};
+ DEFINE_GETIMPL_STORE(DINamespace, (Line), Ops);
+}
+
+DIModule *DIModule::getImpl(LLVMContext &Context, Metadata *Scope,
+ MDString *Name, MDString *ConfigurationMacros,
+ MDString *IncludePath, MDString *ISysRoot,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIModule,
+ (Scope, getString(Name), getString(ConfigurationMacros),
+ getString(IncludePath), getString(ISysRoot)));
+ Metadata *Ops[] = {Scope, Name, ConfigurationMacros, IncludePath, ISysRoot};
+ DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DIModule, Ops);
+}
+
+DITemplateTypeParameter *DITemplateTypeParameter::getImpl(LLVMContext &Context,
+ MDString *Name,
+ Metadata *Type,
+ StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DITemplateTypeParameter, (getString(Name), Type));
+ Metadata *Ops[] = {Name, Type};
+ DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DITemplateTypeParameter, Ops);
+}
+
+DITemplateValueParameter *DITemplateValueParameter::getImpl(
+ LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *Type,
+ Metadata *Value, StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DITemplateValueParameter,
+ (Tag, getString(Name), Type, Value));
+ Metadata *Ops[] = {Name, Type, Value};
+ DEFINE_GETIMPL_STORE(DITemplateValueParameter, (Tag), Ops);
+}
+
+DIGlobalVariable *
+DIGlobalVariable::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+ MDString *LinkageName, Metadata *File, unsigned Line,
+ Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
+ Metadata *Variable,
+ Metadata *StaticDataMemberDeclaration,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ assert(isCanonical(LinkageName) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIGlobalVariable,
+ (Scope, getString(Name), getString(LinkageName), File,
+ Line, Type, IsLocalToUnit, IsDefinition, Variable,
+ StaticDataMemberDeclaration));
+ Metadata *Ops[] = {Scope, Name, File, Type,
+ Name, LinkageName, Variable, StaticDataMemberDeclaration};
+ DEFINE_GETIMPL_STORE(DIGlobalVariable, (Line, IsLocalToUnit, IsDefinition),
+ Ops);
+}
+
+DILocalVariable *DILocalVariable::getImpl(LLVMContext &Context, Metadata *Scope,
+ MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Type,
+ unsigned Arg, unsigned Flags,
+ StorageType Storage,
+ bool ShouldCreate) {
+ // 64K ought to be enough for any frontend.
+  assert(Arg <= UINT16_MAX && "Expected argument number to fit in 16 bits");
+
+ assert(Scope && "Expected scope");
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DILocalVariable,
+ (Scope, getString(Name), File, Line, Type, Arg, Flags));
+ Metadata *Ops[] = {Scope, Name, File, Type};
+ DEFINE_GETIMPL_STORE(DILocalVariable, (Line, Arg, Flags), Ops);
+}
+
+DIExpression *DIExpression::getImpl(LLVMContext &Context,
+ ArrayRef<uint64_t> Elements,
+ StorageType Storage, bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DIExpression, (Elements));
+ DEFINE_GETIMPL_STORE_NO_OPS(DIExpression, (Elements));
+}
+
+unsigned DIExpression::ExprOperand::getSize() const {
+ switch (getOp()) {
+ case dwarf::DW_OP_bit_piece:
+ return 3;
+ case dwarf::DW_OP_plus:
+ case dwarf::DW_OP_minus:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+bool DIExpression::isValid() const {
+ for (auto I = expr_op_begin(), E = expr_op_end(); I != E; ++I) {
+ // Check that there's space for the operand.
+ if (I->get() + I->getSize() > E->get())
+ return false;
+
+ // Check that the operand is valid.
+ switch (I->getOp()) {
+ default:
+ return false;
+ case dwarf::DW_OP_bit_piece:
+ // Piece expressions must be at the end.
+ return I->get() + I->getSize() == E->get();
+ case dwarf::DW_OP_plus:
+ case dwarf::DW_OP_minus:
+ case dwarf::DW_OP_deref:
+ break;
+ }
+ }
+ return true;
+}
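+
+// For example, {DW_OP_plus, 4, DW_OP_deref} is valid, while
+// {DW_OP_bit_piece, 3, 7, DW_OP_deref} is not: the bit-piece operand and its
+// two arguments must come last.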
+
+bool DIExpression::isBitPiece() const {
+ assert(isValid() && "Expected valid expression");
+  unsigned N = getNumElements();
+  return N >= 3 && getElement(N - 3) == dwarf::DW_OP_bit_piece;
+}
+
+uint64_t DIExpression::getBitPieceOffset() const {
+ assert(isBitPiece() && "Expected bit piece");
+ return getElement(getNumElements() - 2);
+}
+
+uint64_t DIExpression::getBitPieceSize() const {
+ assert(isBitPiece() && "Expected bit piece");
+ return getElement(getNumElements() - 1);
+}
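+
+// The accessors above rely on the trailing {DW_OP_bit_piece, Offset, Size}
+// triple. For example, the 16 bits starting at bit offset 8 of a variable
+// can be described by (a sketch):
+//
+//   DIExpression::get(Ctx, {dwarf::DW_OP_bit_piece, 8, 16});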
+
+DIObjCProperty *DIObjCProperty::getImpl(
+ LLVMContext &Context, MDString *Name, Metadata *File, unsigned Line,
+ MDString *GetterName, MDString *SetterName, unsigned Attributes,
+ Metadata *Type, StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ assert(isCanonical(GetterName) && "Expected canonical MDString");
+ assert(isCanonical(SetterName) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIObjCProperty,
+ (getString(Name), File, Line, getString(GetterName),
+ getString(SetterName), Attributes, Type));
+ Metadata *Ops[] = {Name, File, GetterName, SetterName, Type};
+ DEFINE_GETIMPL_STORE(DIObjCProperty, (Line, Attributes), Ops);
+}
+
+DIImportedEntity *DIImportedEntity::getImpl(LLVMContext &Context, unsigned Tag,
+ Metadata *Scope, Metadata *Entity,
+ unsigned Line, MDString *Name,
+ StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIImportedEntity,
+ (Tag, Scope, Entity, Line, getString(Name)));
+ Metadata *Ops[] = {Scope, Entity, Name};
+ DEFINE_GETIMPL_STORE(DIImportedEntity, (Tag, Line), Ops);
+}
+
+DIMacro *DIMacro::getImpl(LLVMContext &Context, unsigned MIType,
+ unsigned Line, MDString *Name, MDString *Value,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIMacro,
+ (MIType, Line, getString(Name), getString(Value)));
+ Metadata *Ops[] = { Name, Value };
+ DEFINE_GETIMPL_STORE(DIMacro, (MIType, Line), Ops);
+}
+
+DIMacroFile *DIMacroFile::getImpl(LLVMContext &Context, unsigned MIType,
+ unsigned Line, Metadata *File,
+ Metadata *Elements, StorageType Storage,
+ bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DIMacroFile,
+ (MIType, Line, File, Elements));
+ Metadata *Ops[] = { File, Elements };
+ DEFINE_GETIMPL_STORE(DIMacroFile, (MIType, Line), Ops);
+}
diff --git a/contrib/llvm/lib/IR/DebugLoc.cpp b/contrib/llvm/lib/IR/DebugLoc.cpp
new file mode 100644
index 0000000..72d5c0e
--- /dev/null
+++ b/contrib/llvm/lib/IR/DebugLoc.cpp
@@ -0,0 +1,102 @@
+//===-- DebugLoc.cpp - Implement DebugLoc class ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DebugLoc.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/IR/DebugInfo.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// DebugLoc Implementation
+//===----------------------------------------------------------------------===//
+DebugLoc::DebugLoc(const DILocation *L) : Loc(const_cast<DILocation *>(L)) {}
+DebugLoc::DebugLoc(const MDNode *L) : Loc(const_cast<MDNode *>(L)) {}
+
+DILocation *DebugLoc::get() const {
+ return cast_or_null<DILocation>(Loc.get());
+}
+
+unsigned DebugLoc::getLine() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getLine();
+}
+
+unsigned DebugLoc::getCol() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getColumn();
+}
+
+MDNode *DebugLoc::getScope() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getScope();
+}
+
+DILocation *DebugLoc::getInlinedAt() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getInlinedAt();
+}
+
+MDNode *DebugLoc::getInlinedAtScope() const {
+ return cast<DILocation>(Loc)->getInlinedAtScope();
+}
+
+DebugLoc DebugLoc::getFnDebugLoc() const {
+ // FIXME: Add a method on \a DILocation that does this work.
+ const MDNode *Scope = getInlinedAtScope();
+ if (auto *SP = getDISubprogram(Scope))
+ return DebugLoc::get(SP->getScopeLine(), 0, SP);
+
+ return DebugLoc();
+}
+
+DebugLoc DebugLoc::get(unsigned Line, unsigned Col, const MDNode *Scope,
+ const MDNode *InlinedAt) {
+ // If no scope is available, this is an unknown location.
+ if (!Scope)
+ return DebugLoc();
+
+ return DILocation::get(Scope->getContext(), Line, Col,
+ const_cast<MDNode *>(Scope),
+ const_cast<MDNode *>(InlinedAt));
+}
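+
+// For example, a pass synthesizing a new instruction can place it at the
+// enclosing subprogram's scope line (a sketch, assuming SP is a
+// DISubprogram):
+//
+//   NewInst->setDebugLoc(DebugLoc::get(SP->getScopeLine(), 0, SP));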
+
+void DebugLoc::dump() const {
+#ifndef NDEBUG
+ if (!Loc)
+ return;
+
+ dbgs() << getLine();
+ if (getCol() != 0)
+ dbgs() << ',' << getCol();
+ if (DebugLoc InlinedAtDL = DebugLoc(getInlinedAt())) {
+ dbgs() << " @ ";
+ InlinedAtDL.dump();
+ } else
+ dbgs() << "\n";
+#endif
+}
+
+void DebugLoc::print(raw_ostream &OS) const {
+ if (!Loc)
+ return;
+
+ // Print source line info.
+ auto *Scope = cast<DIScope>(getScope());
+ OS << Scope->getFilename();
+ OS << ':' << getLine();
+ if (getCol() != 0)
+ OS << ':' << getCol();
+
+ if (DebugLoc InlinedAtDL = getInlinedAt()) {
+ OS << " @[ ";
+ InlinedAtDL.print(OS);
+ OS << " ]";
+ }
+}
diff --git a/contrib/llvm/lib/IR/DiagnosticInfo.cpp b/contrib/llvm/lib/IR/DiagnosticInfo.cpp
new file mode 100644
index 0000000..6426f76
--- /dev/null
+++ b/contrib/llvm/lib/IR/DiagnosticInfo.cpp
@@ -0,0 +1,243 @@
+//===- llvm/IR/DiagnosticInfo.cpp - Diagnostic Definitions ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the different classes involved in low level diagnostics.
+//
+// Diagnostics reporting is still done as part of the LLVMContext.
+//===----------------------------------------------------------------------===//
+
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Regex.h"
+#include <atomic>
+#include <string>
+
+using namespace llvm;
+
+namespace {
+
+/// \brief Regular expression corresponding to the value given in one of the
+/// -pass-remarks* command line flags. Passes whose name matches this regexp
+/// will emit a diagnostic when calling the associated diagnostic function
+/// (emitOptimizationRemark, emitOptimizationRemarkMissed or
+/// emitOptimizationRemarkAnalysis).
+struct PassRemarksOpt {
+ std::shared_ptr<Regex> Pattern;
+
+ void operator=(const std::string &Val) {
+ // Create a regexp object to match pass names for emitOptimizationRemark.
+ if (!Val.empty()) {
+ Pattern = std::make_shared<Regex>(Val);
+ std::string RegexError;
+ if (!Pattern->isValid(RegexError))
+ report_fatal_error("Invalid regular expression '" + Val +
+ "' in -pass-remarks: " + RegexError,
+ false);
+ }
+ }
+};
+
+static PassRemarksOpt PassRemarksOptLoc;
+static PassRemarksOpt PassRemarksMissedOptLoc;
+static PassRemarksOpt PassRemarksAnalysisOptLoc;
+
+// -pass-remarks
+// Command line flag to enable emitOptimizationRemark()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>>
+PassRemarks("pass-remarks", cl::value_desc("pattern"),
+            cl::desc("Enable optimization remarks from passes whose name "
+                "matches the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksOptLoc), cl::ValueRequired,
+ cl::ZeroOrMore);
+
+// -pass-remarks-missed
+// Command line flag to enable emitOptimizationRemarkMissed()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>> PassRemarksMissed(
+ "pass-remarks-missed", cl::value_desc("pattern"),
+    cl::desc("Enable missed optimization remarks from passes whose name "
+             "matches the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksMissedOptLoc), cl::ValueRequired,
+ cl::ZeroOrMore);
+
+// -pass-remarks-analysis
+// Command line flag to enable emitOptimizationRemarkAnalysis()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>>
+PassRemarksAnalysis(
+ "pass-remarks-analysis", cl::value_desc("pattern"),
+ cl::desc(
+        "Enable optimization analysis remarks from passes whose name "
+        "matches the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksAnalysisOptLoc), cl::ValueRequired,
+ cl::ZeroOrMore);
+}
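+
+// Each flag takes a regular expression matched against pass names. For
+// example, remarks from the inliner can be requested with:
+//
+//   opt -pass-remarks=inline -O2 input.ll -o output.ll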
+
+int llvm::getNextAvailablePluginDiagnosticKind() {
+ static std::atomic<int> PluginKindID(DK_FirstPluginKind);
+ return ++PluginKindID;
+}
+
+const char *DiagnosticInfo::AlwaysPrint = "";
+
+DiagnosticInfoInlineAsm::DiagnosticInfoInlineAsm(const Instruction &I,
+ const Twine &MsgStr,
+ DiagnosticSeverity Severity)
+ : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(0), MsgStr(MsgStr),
+ Instr(&I) {
+ if (const MDNode *SrcLoc = I.getMetadata("srcloc")) {
+ if (SrcLoc->getNumOperands() != 0)
+ if (const auto *CI =
+ mdconst::dyn_extract<ConstantInt>(SrcLoc->getOperand(0)))
+ LocCookie = CI->getZExtValue();
+ }
+}
+
+void DiagnosticInfoInlineAsm::print(DiagnosticPrinter &DP) const {
+ DP << getMsgStr();
+ if (getLocCookie())
+ DP << " at line " << getLocCookie();
+}
+
+void DiagnosticInfoStackSize::print(DiagnosticPrinter &DP) const {
+ DP << "stack size limit exceeded (" << getStackSize() << ") in "
+ << getFunction();
+}
+
+void DiagnosticInfoDebugMetadataVersion::print(DiagnosticPrinter &DP) const {
+ DP << "ignoring debug info with an invalid version (" << getMetadataVersion()
+ << ") in " << getModule();
+}
+
+void DiagnosticInfoSampleProfile::print(DiagnosticPrinter &DP) const {
+ if (!FileName.empty()) {
+ DP << getFileName();
+ if (LineNum > 0)
+ DP << ":" << getLineNum();
+ DP << ": ";
+ }
+ DP << getMsg();
+}
+
+void DiagnosticInfoPGOProfile::print(DiagnosticPrinter &DP) const {
+ if (getFileName())
+ DP << getFileName() << ": ";
+ DP << getMsg();
+}
+
+bool DiagnosticInfoOptimizationBase::isLocationAvailable() const {
+ return getDebugLoc();
+}
+
+void DiagnosticInfoOptimizationBase::getLocation(StringRef *Filename,
+ unsigned *Line,
+ unsigned *Column) const {
+ DILocation *L = getDebugLoc();
+ assert(L != nullptr && "debug location is invalid");
+ *Filename = L->getFilename();
+ *Line = L->getLine();
+ *Column = L->getColumn();
+}
+
+const std::string DiagnosticInfoOptimizationBase::getLocationStr() const {
+ StringRef Filename("<unknown>");
+ unsigned Line = 0;
+ unsigned Column = 0;
+ if (isLocationAvailable())
+ getLocation(&Filename, &Line, &Column);
+ return (Filename + ":" + Twine(Line) + ":" + Twine(Column)).str();
+}
+
+void DiagnosticInfoOptimizationBase::print(DiagnosticPrinter &DP) const {
+ DP << getLocationStr() << ": " << getMsg();
+}
+
+bool DiagnosticInfoOptimizationRemark::isEnabled() const {
+ return PassRemarksOptLoc.Pattern &&
+ PassRemarksOptLoc.Pattern->match(getPassName());
+}
+
+bool DiagnosticInfoOptimizationRemarkMissed::isEnabled() const {
+ return PassRemarksMissedOptLoc.Pattern &&
+ PassRemarksMissedOptLoc.Pattern->match(getPassName());
+}
+
+bool DiagnosticInfoOptimizationRemarkAnalysis::isEnabled() const {
+ return getPassName() == DiagnosticInfo::AlwaysPrint ||
+ (PassRemarksAnalysisOptLoc.Pattern &&
+ PassRemarksAnalysisOptLoc.Pattern->match(getPassName()));
+}
+
+void DiagnosticInfoMIRParser::print(DiagnosticPrinter &DP) const {
+ DP << Diagnostic;
+}
+
+void llvm::emitOptimizationRemark(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationRemark(PassName, Fn, DLoc, Msg));
+}
+
+void llvm::emitOptimizationRemarkMissed(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationRemarkMissed(PassName, Fn, DLoc, Msg));
+}
+
+void llvm::emitOptimizationRemarkAnalysis(LLVMContext &Ctx,
+ const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(
+ DiagnosticInfoOptimizationRemarkAnalysis(PassName, Fn, DLoc, Msg));
+}
+
+void llvm::emitOptimizationRemarkAnalysisFPCommute(LLVMContext &Ctx,
+ const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationRemarkAnalysisFPCommute(PassName, Fn,
+ DLoc, Msg));
+}
+
+void llvm::emitOptimizationRemarkAnalysisAliasing(LLVMContext &Ctx,
+ const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationRemarkAnalysisAliasing(PassName, Fn,
+ DLoc, Msg));
+}
+
+bool DiagnosticInfoOptimizationFailure::isEnabled() const {
+ // Only print warnings.
+ return getSeverity() == DS_Warning;
+}
+
+void llvm::emitLoopVectorizeWarning(LLVMContext &Ctx, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationFailure(
+ Fn, DLoc, Twine("loop not vectorized: " + Msg)));
+}
+
+void llvm::emitLoopInterleaveWarning(LLVMContext &Ctx, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg) {
+ Ctx.diagnose(DiagnosticInfoOptimizationFailure(
+ Fn, DLoc, Twine("loop not interleaved: " + Msg)));
+}
diff --git a/contrib/llvm/lib/IR/DiagnosticPrinter.cpp b/contrib/llvm/lib/IR/DiagnosticPrinter.cpp
new file mode 100644
index 0000000..659ff49
--- /dev/null
+++ b/contrib/llvm/lib/IR/DiagnosticPrinter.cpp
@@ -0,0 +1,117 @@
+//===- llvm/IR/DiagnosticPrinter.cpp - Diagnostic Printer -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a diagnostic printer relying on raw_ostream.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/SourceMgr.h"
+
+using namespace llvm;
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(signed char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(StringRef Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const char *Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(
+ const std::string &Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(
+ unsigned long long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const void *P) {
+ Stream << P;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned int N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(int N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(double N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Twine &Str) {
+ Str.print(Stream);
+ return *this;
+}
+
+// IR related types.
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Value &V) {
+ Stream << V.getName();
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Module &M) {
+ Stream << M.getModuleIdentifier();
+ return *this;
+}
+
+// Other types.
+DiagnosticPrinter &DiagnosticPrinterRawOStream::
+operator<<(const SMDiagnostic &Diag) {
+ // We don't have to print the SMDiagnostic kind, as the diagnostic severity
+ // is printed by the diagnostic handler.
+ Diag.print("", Stream, /*ShowColors=*/true, /*ShowKindLabel=*/false);
+ return *this;
+}
diff --git a/contrib/llvm/lib/IR/Dominators.cpp b/contrib/llvm/lib/IR/Dominators.cpp
new file mode 100644
index 0000000..b9d4fb7
--- /dev/null
+++ b/contrib/llvm/lib/IR/Dominators.cpp
@@ -0,0 +1,356 @@
+//===- Dominators.cpp - Dominator Calculation -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements simple dominator construction algorithms for finding
+// forward dominators. Postdominators are available in libanalysis but are not
+// included in libvmcore, because they are not needed there. Forward
+// dominators are needed to support the Verifier pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Dominators.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/GenericDomTreeConstruction.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace llvm;
+
+// Always verify dominfo if expensive checking is enabled.
+#ifdef XDEBUG
+static bool VerifyDomInfo = true;
+#else
+static bool VerifyDomInfo = false;
+#endif
+static cl::opt<bool,true>
+VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo),
+ cl::desc("Verify dominator info (time consuming)"));
+
+bool BasicBlockEdge::isSingleEdge() const {
+ const TerminatorInst *TI = Start->getTerminator();
+ unsigned NumEdgesToEnd = 0;
+ for (unsigned int i = 0, n = TI->getNumSuccessors(); i < n; ++i) {
+ if (TI->getSuccessor(i) == End)
+ ++NumEdgesToEnd;
+ if (NumEdgesToEnd >= 2)
+ return false;
+ }
+ assert(NumEdgesToEnd == 1);
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// DominatorTree Implementation
+//===----------------------------------------------------------------------===//
+//
+// Provide public access to DominatorTree information. Implementation details
+// can be found in Dominators.h, GenericDomTree.h, and
+// GenericDomTreeConstruction.h.
+//
+//===----------------------------------------------------------------------===//
+
+template class llvm::DomTreeNodeBase<BasicBlock>;
+template class llvm::DominatorTreeBase<BasicBlock>;
+
+template void llvm::Calculate<Function, BasicBlock *>(
+ DominatorTreeBase<GraphTraits<BasicBlock *>::NodeType> &DT, Function &F);
+template void llvm::Calculate<Function, Inverse<BasicBlock *>>(
+ DominatorTreeBase<GraphTraits<Inverse<BasicBlock *>>::NodeType> &DT,
+ Function &F);
+
+// dominates - Return true if Def dominates a use in User. This performs
+// the special checks necessary if Def and User are in the same basic block.
+// Note that Def doesn't dominate a use in Def itself!
+bool DominatorTree::dominates(const Instruction *Def,
+ const Instruction *User) const {
+ const BasicBlock *UseBB = User->getParent();
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Any unreachable use is dominated, even if Def == User.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ // An instruction doesn't dominate a use in itself.
+ if (Def == User)
+ return false;
+
+ // The value defined by an invoke dominates an instruction only if it
+ // dominates every instruction in UseBB.
+ // A PHI is dominated only if the instruction dominates every possible use in
+ // the UseBB.
+ if (isa<InvokeInst>(Def) || isa<PHINode>(User))
+ return dominates(Def, UseBB);
+
+ if (DefBB != UseBB)
+ return dominates(DefBB, UseBB);
+
+ // Loop through the basic block until we find Def or User.
+ BasicBlock::const_iterator I = DefBB->begin();
+ for (; &*I != Def && &*I != User; ++I)
+ /*empty*/;
+
+ return &*I == Def;
+}
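+
+// For instance, given a single block containing
+//
+//   %a = add i32 %x, 1
+//   %b = mul i32 %a, 2
+//
+// the scan above reaches %a first, so dominates(%a, %b) is true, while both
+// dominates(%b, %a) and dominates(%a, %a) are false.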
+
+// Return true if Def would dominate a use in any instruction in UseBB.
+// Note that dominates(Def, Def->getParent()) is false.
+bool DominatorTree::dominates(const Instruction *Def,
+ const BasicBlock *UseBB) const {
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Any unreachable use is dominated, even if DefBB == UseBB.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ if (DefBB == UseBB)
+ return false;
+
+ // Invoke results are only usable in the normal destination, not in the
+ // exceptional destination.
+ if (const auto *II = dyn_cast<InvokeInst>(Def)) {
+ BasicBlock *NormalDest = II->getNormalDest();
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, UseBB);
+ }
+
+ return dominates(DefBB, UseBB);
+}
+
+bool DominatorTree::dominates(const BasicBlockEdge &BBE,
+ const BasicBlock *UseBB) const {
+ // Assert that we have a single edge. We could handle them by simply
+ // returning false, but since isSingleEdge is linear on the number of
+ // edges, the callers can normally handle them more efficiently.
+ assert(BBE.isSingleEdge() &&
+ "This function is not efficient in handling multiple edges");
+
+ // If the BB the edge ends in doesn't dominate the use BB, then the
+ // edge also doesn't.
+ const BasicBlock *Start = BBE.getStart();
+ const BasicBlock *End = BBE.getEnd();
+ if (!dominates(End, UseBB))
+ return false;
+
+ // Simple case: if the end BB has a single predecessor, the fact that it
+ // dominates the use block implies that the edge also does.
+ if (End->getSinglePredecessor())
+ return true;
+
+ // The normal edge from the invoke is critical. Conceptually, what we would
+ // like to do is split it and check if the new block dominates the use.
+ // With X being the new block, the graph would look like:
+ //
+ // DefBB
+ // /\ . .
+ // / \ . .
+ // / \ . .
+ // / \ | |
+ // A X B C
+ // | \ | /
+ // . \|/
+ // . NormalDest
+ // .
+ //
+ // Given the definition of dominance, NormalDest is dominated by X iff X
+ // dominates all of NormalDest's predecessors (X, B, C in the example). X
+ // trivially dominates itself, so we only have to find if it dominates the
+ // other predecessors. Since the only way out of X is via NormalDest, X can
+ // only properly dominate a node if NormalDest dominates that node too.
+ for (const_pred_iterator PI = pred_begin(End), E = pred_end(End);
+ PI != E; ++PI) {
+ const BasicBlock *BB = *PI;
+ if (BB == Start)
+ continue;
+
+ if (!dominates(End, BB))
+ return false;
+ }
+ return true;
+}
+
+bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const {
+ // Assert that we have a single edge. We could handle them by simply
+ // returning false, but since isSingleEdge is linear on the number of
+ // edges, the callers can normally handle them more efficiently.
+ assert(BBE.isSingleEdge() &&
+ "This function is not efficient in handling multiple edges");
+
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ // A PHI in the end of the edge is dominated by it.
+ PHINode *PN = dyn_cast<PHINode>(UserInst);
+ if (PN && PN->getParent() == BBE.getEnd() &&
+ PN->getIncomingBlock(U) == BBE.getStart())
+ return true;
+
+ // Otherwise use the edge-dominates-block query, which
+ // handles the crazy critical edge cases properly.
+ const BasicBlock *UseBB;
+ if (PN)
+ UseBB = PN->getIncomingBlock(U);
+ else
+ UseBB = UserInst->getParent();
+ return dominates(BBE, UseBB);
+}
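+
+// For a PHI operand, the "use" happens on the incoming edge. For example, in
+//
+//   %p = phi i32 [ %v, %bb1 ], [ 0, %bb2 ]
+//
+// the use of %v is dominated by the edge (%bb1, %p's block) even though %p's
+// block has other predecessors.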
+
+bool DominatorTree::dominates(const Instruction *Def, const Use &U) const {
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Determine the block in which the use happens. PHI nodes use
+ // their operands on edges; simulate this by thinking of the use
+ // happening at the end of the predecessor block.
+ const BasicBlock *UseBB;
+ if (PHINode *PN = dyn_cast<PHINode>(UserInst))
+ UseBB = PN->getIncomingBlock(U);
+ else
+ UseBB = UserInst->getParent();
+
+ // Any unreachable use is dominated, even if Def == User.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ // Invoke instructions define their return values on the edges to their normal
+ // successors, so we have to handle them specially.
+ // Among other things, this means they don't dominate anything in
+ // their own block, except possibly a phi, so we don't need to
+ // walk the block in any case.
+ if (const InvokeInst *II = dyn_cast<InvokeInst>(Def)) {
+ BasicBlock *NormalDest = II->getNormalDest();
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, U);
+ }
+
+ // If the def and use are in different blocks, do a simple CFG dominator
+ // tree query.
+ if (DefBB != UseBB)
+ return dominates(DefBB, UseBB);
+
+ // Ok, def and use are in the same block. If the def is an invoke, it
+ // doesn't dominate anything in the block. If it's a PHI, it dominates
+ // everything in the block.
+ if (isa<PHINode>(UserInst))
+ return true;
+
+ // Otherwise, just loop through the basic block until we find Def or User.
+ BasicBlock::const_iterator I = DefBB->begin();
+ for (; &*I != Def && &*I != UserInst; ++I)
+ /*empty*/;
+
+ return &*I != UserInst;
+}
+
+bool DominatorTree::isReachableFromEntry(const Use &U) const {
+ Instruction *I = dyn_cast<Instruction>(U.getUser());
+
+ // ConstantExprs aren't really reachable from the entry block, but they
+ // don't need to be treated like unreachable code either.
+ if (!I) return true;
+
+ // PHI nodes use their operands on their incoming edges.
+ if (PHINode *PN = dyn_cast<PHINode>(I))
+ return isReachableFromEntry(PN->getIncomingBlock(U));
+
+  // Everything else uses its operands in its own block.
+ return isReachableFromEntry(I->getParent());
+}
+
+void DominatorTree::verifyDomTree() const {
+ Function &F = *getRoot()->getParent();
+
+ DominatorTree OtherDT;
+ OtherDT.recalculate(F);
+ if (compare(OtherDT)) {
+ errs() << "DominatorTree is not up to date!\nComputed:\n";
+ print(errs());
+ errs() << "\nActual:\n";
+ OtherDT.print(errs());
+ abort();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// DominatorTreeAnalysis and related pass implementations
+//===----------------------------------------------------------------------===//
+//
+// This implements the DominatorTreeAnalysis which is used with the new pass
+// manager. It also implements some methods from utility passes.
+//
+//===----------------------------------------------------------------------===//
+
+DominatorTree DominatorTreeAnalysis::run(Function &F) {
+ DominatorTree DT;
+ DT.recalculate(F);
+ return DT;
+}
+
+char DominatorTreeAnalysis::PassID;
+
+DominatorTreePrinterPass::DominatorTreePrinterPass(raw_ostream &OS) : OS(OS) {}
+
+PreservedAnalyses DominatorTreePrinterPass::run(Function &F,
+ FunctionAnalysisManager *AM) {
+ OS << "DominatorTree for function: " << F.getName() << "\n";
+ AM->getResult<DominatorTreeAnalysis>(F).print(OS);
+
+ return PreservedAnalyses::all();
+}
+
+PreservedAnalyses DominatorTreeVerifierPass::run(Function &F,
+ FunctionAnalysisManager *AM) {
+ AM->getResult<DominatorTreeAnalysis>(F).verifyDomTree();
+
+ return PreservedAnalyses::all();
+}
+
+//===----------------------------------------------------------------------===//
+// DominatorTreeWrapperPass Implementation
+//===----------------------------------------------------------------------===//
+//
+// The implementation details of the wrapper pass that holds a DominatorTree
+// suitable for use with the legacy pass manager.
+//
+//===----------------------------------------------------------------------===//
+
+char DominatorTreeWrapperPass::ID = 0;
+INITIALIZE_PASS(DominatorTreeWrapperPass, "domtree",
+ "Dominator Tree Construction", true, true)
+
+bool DominatorTreeWrapperPass::runOnFunction(Function &F) {
+ DT.recalculate(F);
+ return false;
+}
+
+void DominatorTreeWrapperPass::verifyAnalysis() const {
+ if (VerifyDomInfo)
+ DT.verifyDomTree();
+}
+
+void DominatorTreeWrapperPass::print(raw_ostream &OS, const Module *) const {
+ DT.print(OS);
+}
diff --git a/contrib/llvm/lib/IR/Function.cpp b/contrib/llvm/lib/IR/Function.cpp
new file mode 100644
index 0000000..cfdfc40
--- /dev/null
+++ b/contrib/llvm/lib/IR/Function.cpp
@@ -0,0 +1,991 @@
+//===-- Function.cpp - Implement the Function class -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Function class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Function.h"
+#include "LLVMContextImpl.h"
+#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/RWMutex.h"
+#include "llvm/Support/StringPool.h"
+#include "llvm/Support/Threading.h"
+using namespace llvm;
+
+// Explicit instantiations of SymbolTableListTraits since some of the methods
+// are not in the public header file...
+template class llvm::SymbolTableListTraits<Argument>;
+template class llvm::SymbolTableListTraits<BasicBlock>;
+
+//===----------------------------------------------------------------------===//
+// Argument Implementation
+//===----------------------------------------------------------------------===//
+
+void Argument::anchor() { }
+
+Argument::Argument(Type *Ty, const Twine &Name, Function *Par)
+ : Value(Ty, Value::ArgumentVal) {
+ Parent = nullptr;
+
+ if (Par)
+ Par->getArgumentList().push_back(this);
+ setName(Name);
+}
+
+void Argument::setParent(Function *parent) {
+ Parent = parent;
+}
+
+/// getArgNo - Return the index of this formal argument in its containing
+/// function. For example in "void foo(int a, float b)" a is 0 and b is 1.
+unsigned Argument::getArgNo() const {
+ const Function *F = getParent();
+ assert(F && "Argument is not in a function");
+
+ Function::const_arg_iterator AI = F->arg_begin();
+ unsigned ArgIdx = 0;
+ for (; &*AI != this; ++AI)
+ ++ArgIdx;
+
+ return ArgIdx;
+}
+
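+// Note that attribute lists index parameters starting at 1 (index 0 refers
+// to the return value), which is why the queries below pass getArgNo()+1.
+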
+/// hasNonNullAttr - Return true if this argument has the nonnull attribute on
+/// it in its containing function. Also returns true if at least one byte is
+/// known to be dereferenceable and the pointer is in addrspace(0).
+bool Argument::hasNonNullAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ if (getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::NonNull))
+ return true;
+  return getDereferenceableBytes() > 0 &&
+         getType()->getPointerAddressSpace() == 0;
+}
+
+/// hasByValAttr - Return true if this argument has the byval attribute on it
+/// in its containing function.
+bool Argument::hasByValAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::ByVal);
+}
+
+/// \brief Return true if this argument has the inalloca attribute on it in
+/// its containing function.
+bool Argument::hasInAllocaAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::InAlloca);
+}
+
+bool Argument::hasByValOrInAllocaAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ AttributeSet Attrs = getParent()->getAttributes();
+ return Attrs.hasAttribute(getArgNo() + 1, Attribute::ByVal) ||
+ Attrs.hasAttribute(getArgNo() + 1, Attribute::InAlloca);
+}
+
+unsigned Argument::getParamAlignment() const {
+  assert(getType()->isPointerTy() && "Only pointers have alignments");
+  return getParent()->getParamAlignment(getArgNo()+1);
+}
+
+uint64_t Argument::getDereferenceableBytes() const {
+ assert(getType()->isPointerTy() &&
+ "Only pointers have dereferenceable bytes");
+ return getParent()->getDereferenceableBytes(getArgNo()+1);
+}
+
+uint64_t Argument::getDereferenceableOrNullBytes() const {
+ assert(getType()->isPointerTy() &&
+ "Only pointers have dereferenceable bytes");
+ return getParent()->getDereferenceableOrNullBytes(getArgNo()+1);
+}
+
+/// hasNestAttr - Return true if this argument has the nest attribute on
+/// it in its containing function.
+bool Argument::hasNestAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::Nest);
+}
+
+/// hasNoAliasAttr - Return true if this argument has the noalias attribute on
+/// it in its containing function.
+bool Argument::hasNoAliasAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::NoAlias);
+}
+
+/// hasNoCaptureAttr - Return true if this argument has the nocapture attribute
+/// on it in its containing function.
+bool Argument::hasNoCaptureAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::NoCapture);
+}
+
+/// hasStructRetAttr - Return true if this argument has the sret attribute on
+/// it in its containing function.
+bool Argument::hasStructRetAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::StructRet);
+}
+
+/// hasReturnedAttr - Return true if this argument has the returned attribute on
+/// it in its containing function.
+bool Argument::hasReturnedAttr() const {
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::Returned);
+}
+
+/// hasZExtAttr - Return true if this argument has the zext attribute on it in
+/// its containing function.
+bool Argument::hasZExtAttr() const {
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::ZExt);
+}
+
+/// hasSExtAttr - Return true if this argument has the sext attribute on it in
+/// its containing function.
+bool Argument::hasSExtAttr() const {
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::SExt);
+}
+
+/// Return true if this argument has the readonly or readnone attribute on it
+/// in its containing function.
+bool Argument::onlyReadsMemory() const {
+ return getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::ReadOnly) ||
+ getParent()->getAttributes().
+ hasAttribute(getArgNo()+1, Attribute::ReadNone);
+}
+
+/// addAttr - Add attributes to an argument.
+void Argument::addAttr(AttributeSet AS) {
+ assert(AS.getNumSlots() <= 1 &&
+ "Trying to add more than one attribute set to an argument!");
+ AttrBuilder B(AS, AS.getSlotIndex(0));
+ getParent()->addAttributes(getArgNo() + 1,
+ AttributeSet::get(Parent->getContext(),
+ getArgNo() + 1, B));
+}
+
+/// removeAttr - Remove attributes from an argument.
+void Argument::removeAttr(AttributeSet AS) {
+ assert(AS.getNumSlots() <= 1 &&
+ "Trying to remove more than one attribute set from an argument!");
+ AttrBuilder B(AS, AS.getSlotIndex(0));
+ getParent()->removeAttributes(getArgNo() + 1,
+ AttributeSet::get(Parent->getContext(),
+ getArgNo() + 1, B));
+}
+
+//===----------------------------------------------------------------------===//
+// Helper Methods in Function
+//===----------------------------------------------------------------------===//
+
+bool Function::isMaterializable() const {
+ return getGlobalObjectSubClassData() & IsMaterializableBit;
+}
+
+void Function::setIsMaterializable(bool V) {
+ setGlobalObjectBit(IsMaterializableBit, V);
+}
+
+LLVMContext &Function::getContext() const {
+ return getType()->getContext();
+}
+
+FunctionType *Function::getFunctionType() const { return Ty; }
+
+bool Function::isVarArg() const {
+ return getFunctionType()->isVarArg();
+}
+
+Type *Function::getReturnType() const {
+ return getFunctionType()->getReturnType();
+}
+
+void Function::removeFromParent() {
+ getParent()->getFunctionList().remove(getIterator());
+}
+
+void Function::eraseFromParent() {
+ getParent()->getFunctionList().erase(getIterator());
+}
+
+//===----------------------------------------------------------------------===//
+// Function Implementation
+//===----------------------------------------------------------------------===//
+
+Function::Function(FunctionType *Ty, LinkageTypes Linkage, const Twine &name,
+ Module *ParentModule)
+ : GlobalObject(Ty, Value::FunctionVal,
+ OperandTraits<Function>::op_begin(this), 0, Linkage, name),
+ Ty(Ty) {
+ assert(FunctionType::isValidReturnType(getReturnType()) &&
+ "invalid return type");
+ setGlobalObjectSubClassData(0);
+ SymTab = new ValueSymbolTable();
+
+ // If the function has arguments, mark them as lazily built.
+ if (Ty->getNumParams())
+ setValueSubclassData(1); // Set the "has lazy arguments" bit.
+
+ if (ParentModule)
+ ParentModule->getFunctionList().push_back(this);
+
+ // Ensure intrinsics have the right parameter attributes.
+  // Note that the IntID field will have been set in Value::setName if this
+  // function name is a valid intrinsic ID.
+ if (IntID)
+ setAttributes(Intrinsic::getAttributes(getContext(), IntID));
+}
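+
+// A minimal usage sketch (illustrative, not part of this file): clients
+// normally reach this constructor through the Function::Create factory,
+// e.g. with an existing LLVMContext Ctx and Module *M:
+//   FunctionType *FT =
+//       FunctionType::get(Type::getInt32Ty(Ctx), /*isVarArg=*/false);
+//   Function *F =
+//       Function::Create(FT, GlobalValue::ExternalLinkage, "foo", M);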
+
+Function::~Function() {
+ dropAllReferences(); // After this it is safe to delete instructions.
+
+ // Delete all of the method arguments and unlink from symbol table...
+ ArgumentList.clear();
+ delete SymTab;
+
+ // Remove the function from the on-the-side GC table.
+ clearGC();
+}
+
+void Function::BuildLazyArguments() const {
+ // Create the arguments vector, all arguments start out unnamed.
+ FunctionType *FT = getFunctionType();
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ assert(!FT->getParamType(i)->isVoidTy() &&
+ "Cannot have void typed arguments!");
+ ArgumentList.push_back(new Argument(FT->getParamType(i)));
+ }
+
+  // Clear the lazy arguments bit.
+  unsigned SDC = getSubclassDataFromValue();
+  const_cast<Function*>(this)->setValueSubclassData(SDC & ~(1 << 0));
+}
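+
+// Building arguments lazily keeps declarations cheap: a Function that is
+// only ever called (or merely declared) may never need Argument objects at
+// all, so they are materialized on first access to the argument list.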
+
+size_t Function::arg_size() const {
+ return getFunctionType()->getNumParams();
+}
+bool Function::arg_empty() const {
+ return getFunctionType()->getNumParams() == 0;
+}
+
+void Function::setParent(Module *parent) {
+ Parent = parent;
+}
+
+// dropAllReferences() - This function causes all the subinstructions to "let
+// go" of all references that they are maintaining. This allows one to
+// 'delete' a whole function at a time, even though there may be circular
+// references... first all references are dropped, and all use counts go to
+// zero. Then everything is deleted for real. Note that no operations are
+// valid on an object that has "dropped all references", except operator
+// delete.
+//
+void Function::dropAllReferences() {
+ setIsMaterializable(false);
+
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ I->dropAllReferences();
+
+ // Delete all basic blocks. They are now unused, except possibly by
+ // blockaddresses, but BasicBlock's destructor takes care of those.
+ while (!BasicBlocks.empty())
+ BasicBlocks.begin()->eraseFromParent();
+
+ // Drop uses of any optional data (real or placeholder).
+ if (getNumOperands()) {
+ User::dropAllReferences();
+ setNumHungOffUseOperands(0);
+ setValueSubclassData(getSubclassDataFromValue() & ~0xe);
+ }
+
+ // Metadata is stored in a side-table.
+ clearMetadata();
+}
+
+void Function::addAttribute(unsigned i, Attribute::AttrKind attr) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addAttribute(getContext(), i, attr);
+ setAttributes(PAL);
+}
+
+void Function::addAttributes(unsigned i, AttributeSet attrs) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addAttributes(getContext(), i, attrs);
+ setAttributes(PAL);
+}
+
+void Function::removeAttributes(unsigned i, AttributeSet attrs) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.removeAttributes(getContext(), i, attrs);
+ setAttributes(PAL);
+}
+
+void Function::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+}
+
+void Function::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+}
+
+const std::string &Function::getGC() const {
+ assert(hasGC() && "Function has no collector");
+ return getContext().getGC(*this);
+}
+
+void Function::setGC(std::string Str) {
+  // Take the string by (non-const) value so the move below actually moves.
+  setValueSubclassDataBit(14, !Str.empty());
+  getContext().setGC(*this, std::move(Str));
+}
+
+void Function::clearGC() {
+ if (!hasGC())
+ return;
+ getContext().deleteGC(*this);
+ setValueSubclassDataBit(14, false);
+}
+
+/// Copy all additional attributes (those not needed to create a Function) from
+/// the Function Src to this one.
+void Function::copyAttributesFrom(const GlobalValue *Src) {
+ GlobalObject::copyAttributesFrom(Src);
+ const Function *SrcF = dyn_cast<Function>(Src);
+ if (!SrcF)
+ return;
+
+ setCallingConv(SrcF->getCallingConv());
+ setAttributes(SrcF->getAttributes());
+ if (SrcF->hasGC())
+ setGC(SrcF->getGC());
+ else
+ clearGC();
+ if (SrcF->hasPersonalityFn())
+ setPersonalityFn(SrcF->getPersonalityFn());
+ if (SrcF->hasPrefixData())
+ setPrefixData(SrcF->getPrefixData());
+ if (SrcF->hasPrologueData())
+ setPrologueData(SrcF->getPrologueData());
+}
+
+/// \brief This does the actual lookup of an intrinsic ID which
+/// matches the given function name.
+static Intrinsic::ID lookupIntrinsicID(const ValueName *ValName) {
+ unsigned Len = ValName->getKeyLength();
+ const char *Name = ValName->getKeyData();
+
+#define GET_FUNCTION_RECOGNIZER
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_FUNCTION_RECOGNIZER
+
+ return Intrinsic::not_intrinsic;
+}
+
+void Function::recalculateIntrinsicID() {
+ const ValueName *ValName = this->getValueName();
+ if (!ValName || !isIntrinsic()) {
+ IntID = Intrinsic::not_intrinsic;
+ return;
+ }
+ IntID = lookupIntrinsicID(ValName);
+}
+
+/// Returns a stable mangling for the type specified for use in the name
+/// mangling scheme used by 'any' types in intrinsic signatures. The mangling
+/// of named types is simply their name. Manglings for unnamed types consist
+/// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions)
+/// combined with the mangling of their component types. A vararg function
+/// type will have a suffix of 'vararg'. Since function types can contain
+/// other function types, we close a function type mangling with suffix 'f'
+/// which can't be confused with its prefix. This ensures we don't have
+/// collisions between two unrelated function types. Otherwise, you might
+/// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.)
+/// Manglings of integers, floats, and vectors ('i', 'f', and 'v' prefix in most
+/// cases) fall back to the MVT codepath, where they could be mangled to
+/// 'x86mmx', for example; matching on derived types is not sufficient to mangle
+/// everything.
+static std::string getMangledTypeStr(Type* Ty) {
+ std::string Result;
+ if (PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
+ Result += "p" + llvm::utostr(PTyp->getAddressSpace()) +
+ getMangledTypeStr(PTyp->getElementType());
+ } else if (ArrayType* ATyp = dyn_cast<ArrayType>(Ty)) {
+ Result += "a" + llvm::utostr(ATyp->getNumElements()) +
+ getMangledTypeStr(ATyp->getElementType());
+ } else if (StructType* STyp = dyn_cast<StructType>(Ty)) {
+ assert(!STyp->isLiteral() && "TODO: implement literal types");
+ Result += STyp->getName();
+ } else if (FunctionType* FT = dyn_cast<FunctionType>(Ty)) {
+ Result += "f_" + getMangledTypeStr(FT->getReturnType());
+ for (size_t i = 0; i < FT->getNumParams(); i++)
+ Result += getMangledTypeStr(FT->getParamType(i));
+ if (FT->isVarArg())
+ Result += "vararg";
+ // Ensure nested function types are distinguishable.
+ Result += "f";
+ } else if (isa<VectorType>(Ty))
+ Result += "v" + utostr(Ty->getVectorNumElements()) +
+ getMangledTypeStr(Ty->getVectorElementType());
+ else if (Ty)
+ Result += EVT::getEVT(Ty).getEVTString();
+ return Result;
+}
+
+std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) {
+ assert(id < num_intrinsics && "Invalid intrinsic ID!");
+ static const char * const Table[] = {
+ "not_intrinsic",
+#define GET_INTRINSIC_NAME_TABLE
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_INTRINSIC_NAME_TABLE
+ };
+ if (Tys.empty())
+ return Table[id];
+ std::string Result(Table[id]);
+ for (unsigned i = 0; i < Tys.size(); ++i) {
+ Result += "." + getMangledTypeStr(Tys[i]);
+ }
+ return Result;
+}
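+
+// For example (an illustrative instantiation): an overloaded intrinsic such
+// as llvm.ctlz specialized at <4 x i32> yields
+//   getName(Intrinsic::ctlz, {VectorType::get(Int32Ty, 4)})
+//     == "llvm.ctlz.v4i32"
+// where the ".v4i32" suffix comes from getMangledTypeStr above.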
+
+
+/// IIT_Info - These are enumerators that describe the entries returned by the
+/// getIntrinsicInfoTableEntries function.
+///
+/// NOTE: This must be kept in synch with the copy in TblGen/IntrinsicEmitter!
+enum IIT_Info {
+ // Common values should be encoded with 0-15.
+ IIT_Done = 0,
+ IIT_I1 = 1,
+ IIT_I8 = 2,
+ IIT_I16 = 3,
+ IIT_I32 = 4,
+ IIT_I64 = 5,
+ IIT_F16 = 6,
+ IIT_F32 = 7,
+ IIT_F64 = 8,
+ IIT_V2 = 9,
+ IIT_V4 = 10,
+ IIT_V8 = 11,
+ IIT_V16 = 12,
+ IIT_V32 = 13,
+ IIT_PTR = 14,
+ IIT_ARG = 15,
+
+ // Values from 16+ are only encodable with the inefficient encoding.
+ IIT_V64 = 16,
+ IIT_MMX = 17,
+ IIT_TOKEN = 18,
+ IIT_METADATA = 19,
+ IIT_EMPTYSTRUCT = 20,
+ IIT_STRUCT2 = 21,
+ IIT_STRUCT3 = 22,
+ IIT_STRUCT4 = 23,
+ IIT_STRUCT5 = 24,
+ IIT_EXTEND_ARG = 25,
+ IIT_TRUNC_ARG = 26,
+ IIT_ANYPTR = 27,
+ IIT_V1 = 28,
+ IIT_VARARG = 29,
+ IIT_HALF_VEC_ARG = 30,
+ IIT_SAME_VEC_WIDTH_ARG = 31,
+ IIT_PTR_TO_ARG = 32,
+ IIT_VEC_OF_PTRS_TO_ELT = 33,
+ IIT_I128 = 34,
+ IIT_V512 = 35,
+ IIT_V1024 = 36
+};
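+
+// The common values pack into a 32-bit IIT_Table entry four bits at a time,
+// least-significant nibble first. As a sketch: a signature like "i32 (i32)"
+// is the sequence [IIT_I32, IIT_I32] = [4, 4], which packs into the single
+// word 0x44 and is unpacked again by the do/while loop in
+// getIntrinsicInfoTableEntries() below.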
+
+
+static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
+ SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
+ IIT_Info Info = IIT_Info(Infos[NextElt++]);
+ unsigned StructElts = 2;
+ using namespace Intrinsic;
+
+ switch (Info) {
+ case IIT_Done:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Void, 0));
+ return;
+ case IIT_VARARG:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::VarArg, 0));
+ return;
+ case IIT_MMX:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::MMX, 0));
+ return;
+ case IIT_TOKEN:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Token, 0));
+ return;
+ case IIT_METADATA:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Metadata, 0));
+ return;
+ case IIT_F16:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Half, 0));
+ return;
+ case IIT_F32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Float, 0));
+ return;
+ case IIT_F64:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Double, 0));
+ return;
+ case IIT_I1:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 1));
+ return;
+ case IIT_I8:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
+ return;
+ case IIT_I16:
+    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 16));
+ return;
+ case IIT_I32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 32));
+ return;
+ case IIT_I64:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 64));
+ return;
+ case IIT_I128:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 128));
+ return;
+ case IIT_V1:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 1));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V2:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 2));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V4:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 4));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V8:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 8));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V16:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 16));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 32));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V64:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 64));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V512:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 512));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_V1024:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 1024));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_PTR:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ case IIT_ANYPTR: { // [ANYPTR addrspace, subtype]
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
+ Infos[NextElt++]));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ }
+ case IIT_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
+ return;
+ }
+ case IIT_EXTEND_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_TRUNC_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_HALF_VEC_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::HalfVecArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_SAME_VEC_WIDTH_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::SameVecWidthArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_PTR_TO_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_VEC_OF_PTRS_TO_ELT: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecOfPtrsToElt,
+ ArgInfo));
+ return;
+ }
+ case IIT_EMPTYSTRUCT:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
+ return;
+ case IIT_STRUCT5: ++StructElts; // FALL THROUGH.
+ case IIT_STRUCT4: ++StructElts; // FALL THROUGH.
+ case IIT_STRUCT3: ++StructElts; // FALL THROUGH.
+ case IIT_STRUCT2: {
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct,StructElts));
+
+ for (unsigned i = 0; i != StructElts; ++i)
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ }
+ }
+ llvm_unreachable("unhandled");
+}
+
+
+#define GET_INTRINSIC_GENERATOR_GLOBAL
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_INTRINSIC_GENERATOR_GLOBAL
+
+void Intrinsic::getIntrinsicInfoTableEntries(ID id,
+ SmallVectorImpl<IITDescriptor> &T){
+ // Check to see if the intrinsic's type was expressible by the table.
+ unsigned TableVal = IIT_Table[id-1];
+
+ // Decode the TableVal into an array of IITValues.
+ SmallVector<unsigned char, 8> IITValues;
+ ArrayRef<unsigned char> IITEntries;
+ unsigned NextElt = 0;
+ if ((TableVal >> 31) != 0) {
+ // This is an offset into the IIT_LongEncodingTable.
+ IITEntries = IIT_LongEncodingTable;
+
+ // Strip sentinel bit.
+ NextElt = (TableVal << 1) >> 1;
+ } else {
+ // Decode the TableVal into an array of IITValues. If the entry was encoded
+ // into a single word in the table itself, decode it now.
+ do {
+ IITValues.push_back(TableVal & 0xF);
+ TableVal >>= 4;
+ } while (TableVal);
+
+ IITEntries = IITValues;
+ NextElt = 0;
+ }
+
+ // Okay, decode the table into the output vector of IITDescriptors.
+ DecodeIITType(NextElt, IITEntries, T);
+ while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0)
+ DecodeIITType(NextElt, IITEntries, T);
+}
+
+
+static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ ArrayRef<Type*> Tys, LLVMContext &Context) {
+ using namespace Intrinsic;
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+
+ switch (D.Kind) {
+ case IITDescriptor::Void: return Type::getVoidTy(Context);
+ case IITDescriptor::VarArg: return Type::getVoidTy(Context);
+ case IITDescriptor::MMX: return Type::getX86_MMXTy(Context);
+ case IITDescriptor::Token: return Type::getTokenTy(Context);
+ case IITDescriptor::Metadata: return Type::getMetadataTy(Context);
+ case IITDescriptor::Half: return Type::getHalfTy(Context);
+ case IITDescriptor::Float: return Type::getFloatTy(Context);
+ case IITDescriptor::Double: return Type::getDoubleTy(Context);
+
+ case IITDescriptor::Integer:
+ return IntegerType::get(Context, D.Integer_Width);
+ case IITDescriptor::Vector:
+ return VectorType::get(DecodeFixedType(Infos, Tys, Context),D.Vector_Width);
+ case IITDescriptor::Pointer:
+ return PointerType::get(DecodeFixedType(Infos, Tys, Context),
+ D.Pointer_AddressSpace);
+ case IITDescriptor::Struct: {
+ Type *Elts[5];
+ assert(D.Struct_NumElements <= 5 && "Can't handle this yet");
+ for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
+ Elts[i] = DecodeFixedType(Infos, Tys, Context);
+ return StructType::get(Context, makeArrayRef(Elts,D.Struct_NumElements));
+ }
+
+ case IITDescriptor::Argument:
+ return Tys[D.getArgumentNumber()];
+ case IITDescriptor::ExtendArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return VectorType::getExtendedElementVectorType(VTy);
+
+ return IntegerType::get(Context, 2 * cast<IntegerType>(Ty)->getBitWidth());
+ }
+ case IITDescriptor::TruncArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return VectorType::getTruncatedElementVectorType(VTy);
+
+ IntegerType *ITy = cast<IntegerType>(Ty);
+ assert(ITy->getBitWidth() % 2 == 0);
+ return IntegerType::get(Context, ITy->getBitWidth() / 2);
+ }
+ case IITDescriptor::HalfVecArgument:
+ return VectorType::getHalfElementsVectorType(cast<VectorType>(
+ Tys[D.getArgumentNumber()]));
+ case IITDescriptor::SameVecWidthArgument: {
+ Type *EltTy = DecodeFixedType(Infos, Tys, Context);
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ return VectorType::get(EltTy, VTy->getNumElements());
+ }
+ llvm_unreachable("unhandled");
+ }
+ case IITDescriptor::PtrToArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ return PointerType::getUnqual(Ty);
+ }
+ case IITDescriptor::VecOfPtrsToElt: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ if (!VTy)
+ llvm_unreachable("Expected an argument of Vector Type");
+ Type *EltTy = VTy->getVectorElementType();
+ return VectorType::get(PointerType::getUnqual(EltTy),
+ VTy->getNumElements());
+ }
+ }
+ llvm_unreachable("unhandled");
+}
+
+
+
+FunctionType *Intrinsic::getType(LLVMContext &Context,
+ ID id, ArrayRef<Type*> Tys) {
+ SmallVector<IITDescriptor, 8> Table;
+ getIntrinsicInfoTableEntries(id, Table);
+
+ ArrayRef<IITDescriptor> TableRef = Table;
+ Type *ResultTy = DecodeFixedType(TableRef, Tys, Context);
+
+ SmallVector<Type*, 8> ArgTys;
+ while (!TableRef.empty())
+ ArgTys.push_back(DecodeFixedType(TableRef, Tys, Context));
+
+  // DecodeFixedType returns Void for IITDescriptor::Void and
+  // IITDescriptor::VarArg. If we see a void type as the type of the last
+  // argument, this is a vararg intrinsic.
+ if (!ArgTys.empty() && ArgTys.back()->isVoidTy()) {
+ ArgTys.pop_back();
+ return FunctionType::get(ResultTy, ArgTys, true);
+ }
+ return FunctionType::get(ResultTy, ArgTys, false);
+}
+
+bool Intrinsic::isOverloaded(ID id) {
+#define GET_INTRINSIC_OVERLOAD_TABLE
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_INTRINSIC_OVERLOAD_TABLE
+}
+
+bool Intrinsic::isLeaf(ID id) {
+ switch (id) {
+ default:
+ return true;
+
+ case Intrinsic::experimental_gc_statepoint:
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+ return false;
+ }
+}
+
+/// This defines the "Intrinsic::getAttributes(ID id)" method.
+#define GET_INTRINSIC_ATTRIBUTES
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_INTRINSIC_ATTRIBUTES
+
+Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
+ // There can never be multiple globals with the same name of different types,
+ // because intrinsics must be a specific type.
+ return
+ cast<Function>(M->getOrInsertFunction(getName(id, Tys),
+ getType(M->getContext(), id, Tys)));
+}
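+
+// Typical use (an illustrative sketch): fetch the declaration once per
+// module and call it through an IRBuilder, e.g.
+//   Function *Ctlz =
+//       Intrinsic::getDeclaration(M, Intrinsic::ctlz, {Builder.getInt32Ty()});
+//   Builder.CreateCall(Ctlz, {X, Builder.getInt1(false)});
+// (llvm.ctlz takes the value plus an i1 is_zero_undef flag.)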
+
+// This defines the "Intrinsic::getIntrinsicForGCCBuiltin()" method.
+#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
+
+// This defines the "Intrinsic::getIntrinsicForMSBuiltin()" method.
+#define GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
+
+/// hasAddressTaken - returns true if there are any uses of this function
+/// other than direct calls or invokes to it.
+bool Function::hasAddressTaken(const User **PutOffender) const {
+ for (const Use &U : uses()) {
+ const User *FU = U.getUser();
+ if (isa<BlockAddress>(FU))
+ continue;
+ if (!isa<CallInst>(FU) && !isa<InvokeInst>(FU))
+ return PutOffender ? (*PutOffender = FU, true) : true;
+ ImmutableCallSite CS(cast<Instruction>(FU));
+ if (!CS.isCallee(&U))
+ return PutOffender ? (*PutOffender = FU, true) : true;
+ }
+ return false;
+}
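+
+// Illustrative distinction: "call void @f()" uses @f only as the callee and
+// does not count, whereas "call void @g(void ()* @f)" passes @f as an
+// argument and does -- the CS.isCallee(&U) check above catches exactly the
+// latter case.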
+
+bool Function::isDefTriviallyDead() const {
+ // Check the linkage
+ if (!hasLinkOnceLinkage() && !hasLocalLinkage() &&
+ !hasAvailableExternallyLinkage())
+ return false;
+
+ // Check if the function is used by anything other than a blockaddress.
+ for (const User *U : users())
+ if (!isa<BlockAddress>(U))
+ return false;
+
+ return true;
+}
+
+/// callsFunctionThatReturnsTwice - Return true if the function has a call to
+/// setjmp or other function that gcc recognizes as "returning twice".
+bool Function::callsFunctionThatReturnsTwice() const {
+ for (const_inst_iterator
+ I = inst_begin(this), E = inst_end(this); I != E; ++I) {
+ ImmutableCallSite CS(&*I);
+ if (CS && CS.hasFnAttr(Attribute::ReturnsTwice))
+ return true;
+ }
+
+ return false;
+}
+
+Constant *Function::getPersonalityFn() const {
+ assert(hasPersonalityFn() && getNumOperands());
+ return cast<Constant>(Op<0>());
+}
+
+void Function::setPersonalityFn(Constant *Fn) {
+ setHungoffOperand<0>(Fn);
+ setValueSubclassDataBit(3, Fn != nullptr);
+}
+
+Constant *Function::getPrefixData() const {
+ assert(hasPrefixData() && getNumOperands());
+ return cast<Constant>(Op<1>());
+}
+
+void Function::setPrefixData(Constant *PrefixData) {
+ setHungoffOperand<1>(PrefixData);
+ setValueSubclassDataBit(1, PrefixData != nullptr);
+}
+
+Constant *Function::getPrologueData() const {
+ assert(hasPrologueData() && getNumOperands());
+ return cast<Constant>(Op<2>());
+}
+
+void Function::setPrologueData(Constant *PrologueData) {
+ setHungoffOperand<2>(PrologueData);
+ setValueSubclassDataBit(2, PrologueData != nullptr);
+}
+
+void Function::allocHungoffUselist() {
+ // If we've already allocated a uselist, stop here.
+ if (getNumOperands())
+ return;
+
+ allocHungoffUses(3, /*IsPhi=*/ false);
+ setNumHungOffUseOperands(3);
+
+ // Initialize the uselist with placeholder operands to allow traversal.
+ auto *CPN = ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0));
+ Op<0>().set(CPN);
+ Op<1>().set(CPN);
+ Op<2>().set(CPN);
+}
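+
+// The three hung-off operand slots allocated above are shared, in a fixed
+// order, by the optional function data:
+//   Op<0> -- personality function  (subclass-data bit 3)
+//   Op<1> -- prefix data           (subclass-data bit 1)
+//   Op<2> -- prologue data         (subclass-data bit 2)
+// as wired up by the accessors above and setHungoffOperand below.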
+
+template <int Idx>
+void Function::setHungoffOperand(Constant *C) {
+ if (C) {
+ allocHungoffUselist();
+ Op<Idx>().set(C);
+ } else if (getNumOperands()) {
+ Op<Idx>().set(
+ ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0)));
+ }
+}
+
+void Function::setValueSubclassDataBit(unsigned Bit, bool On) {
+ assert(Bit < 16 && "SubclassData contains only 16 bits");
+ if (On)
+ setValueSubclassData(getSubclassDataFromValue() | (1 << Bit));
+ else
+ setValueSubclassData(getSubclassDataFromValue() & ~(1 << Bit));
+}
+
+void Function::setEntryCount(uint64_t Count) {
+ MDBuilder MDB(getContext());
+ setMetadata(LLVMContext::MD_prof, MDB.createFunctionEntryCount(Count));
+}
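+
+// The count round-trips through MD_prof metadata; createFunctionEntryCount
+// emits a node of the shape (illustrative):
+//   !{!"function_entry_count", i64 <Count>}
+// which is exactly what getEntryCount() below pattern-matches.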
+
+Optional<uint64_t> Function::getEntryCount() const {
+ MDNode *MD = getMetadata(LLVMContext::MD_prof);
+ if (MD && MD->getOperand(0))
+ if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0)))
+ if (MDS->getString().equals("function_entry_count")) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(1));
+ return CI->getValue().getZExtValue();
+ }
+ return None;
+}
diff --git a/contrib/llvm/lib/IR/FunctionInfo.cpp b/contrib/llvm/lib/IR/FunctionInfo.cpp
new file mode 100644
index 0000000..17a67bc
--- /dev/null
+++ b/contrib/llvm/lib/IR/FunctionInfo.cpp
@@ -0,0 +1,67 @@
+//===-- FunctionInfo.cpp - Function Info Index ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the function info index and summary classes for the
+// IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/FunctionInfo.h"
+#include "llvm/ADT/StringMap.h"
+using namespace llvm;
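+
+// A sketch of the merge below (names illustrative): given per-module
+// indexes that each map "foo" to a single FunctionInfo, mergeFrom() folds
+// them into the combined index. A module-local (static) "foo" from module 1
+// is re-keyed under a disambiguated name of the form produced by
+// getGlobalNameForLocal() (roughly "foo.llvm.1"), while external "foo"s may
+// legitimately collide (COMDAT) and are simply appended to the same list.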
+
+// Create the combined function index/summary from multiple
+// per-module instances.
+void FunctionInfoIndex::mergeFrom(std::unique_ptr<FunctionInfoIndex> Other,
+ uint64_t NextModuleId) {
+ StringRef ModPath;
+ for (auto &OtherFuncInfoLists : *Other) {
+ std::string FuncName = OtherFuncInfoLists.getKey();
+ FunctionInfoList &List = OtherFuncInfoLists.second;
+
+ // Assert that the func info list only has one entry, since we shouldn't
+ // have duplicate names within a single per-module index.
+ assert(List.size() == 1);
+ std::unique_ptr<FunctionInfo> Info = std::move(List.front());
+
+ // Skip if there was no function summary section.
+ if (!Info->functionSummary())
+ continue;
+
+ // Add the module path string ref for this module if we haven't already
+ // saved a reference to it.
+ if (ModPath.empty())
+ ModPath =
+ addModulePath(Info->functionSummary()->modulePath(), NextModuleId);
+ else
+ assert(ModPath == Info->functionSummary()->modulePath() &&
+ "Each module in the combined map should have a unique ID");
+
+ // Note the module path string ref was copied above and is still owned by
+ // the original per-module index. Reset it to the new module path
+ // string reference owned by the combined index.
+ Info->functionSummary()->setModulePath(ModPath);
+
+ // If it is a local function, rename it.
+ if (Info->functionSummary()->isLocalFunction()) {
+ // Any local functions are virtually renamed when being added to the
+ // combined index map, to disambiguate from other functions with
+ // the same name. The symbol table created for the combined index
+ // file should contain the renamed symbols.
+ FuncName =
+ FunctionInfoIndex::getGlobalNameForLocal(FuncName, NextModuleId);
+ }
+
+ // Add new function info to existing list. There may be duplicates when
+ // combining FunctionMap entries, due to COMDAT functions. Any local
+ // functions were virtually renamed above.
+ addFunctionInfo(FuncName, std::move(Info));
+ }
+}
diff --git a/contrib/llvm/lib/IR/GCOV.cpp b/contrib/llvm/lib/IR/GCOV.cpp
new file mode 100644
index 0000000..35b8157
--- /dev/null
+++ b/contrib/llvm/lib/IR/GCOV.cpp
@@ -0,0 +1,796 @@
+//===- GCOV.cpp - LLVM coverage tool --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GCOV implements the interface to read and write coverage files that use
+// 'gcov' format.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/GCOV.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <system_error>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// GCOVFile implementation.
+
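+// Rough stream layout handled below (a sketch of the gcov format, not a
+// full specification): both files open with a four-byte magic, a version,
+// and a checksum/stamp word. The .gcno file then carries one record per
+// function (ident, checksums, name, filename, starting line) with its
+// blocks, arcs, and line tables; the matching .gcda file carries the
+// runtime arc counters for those same functions plus object and program
+// summary records.
+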
+/// readGCNO - Read GCNO buffer.
+bool GCOVFile::readGCNO(GCOVBuffer &Buffer) {
+ if (!Buffer.readGCNOFormat())
+ return false;
+ if (!Buffer.readGCOVVersion(Version))
+ return false;
+
+ if (!Buffer.readInt(Checksum))
+ return false;
+ while (true) {
+ if (!Buffer.readFunctionTag())
+ break;
+ auto GFun = make_unique<GCOVFunction>(*this);
+ if (!GFun->readGCNO(Buffer, Version))
+ return false;
+ Functions.push_back(std::move(GFun));
+ }
+
+ GCNOInitialized = true;
+ return true;
+}
+
+/// readGCDA - Read GCDA buffer. readGCDA() may only be called after
+/// readGCNO() has succeeded.
+bool GCOVFile::readGCDA(GCOVBuffer &Buffer) {
+ assert(GCNOInitialized && "readGCDA() can only be called after readGCNO()");
+ if (!Buffer.readGCDAFormat())
+ return false;
+ GCOV::GCOVVersion GCDAVersion;
+ if (!Buffer.readGCOVVersion(GCDAVersion))
+ return false;
+ if (Version != GCDAVersion) {
+ errs() << "GCOV versions do not match.\n";
+ return false;
+ }
+
+ uint32_t GCDAChecksum;
+ if (!Buffer.readInt(GCDAChecksum))
+ return false;
+ if (Checksum != GCDAChecksum) {
+ errs() << "File checksums do not match: " << Checksum
+ << " != " << GCDAChecksum << ".\n";
+ return false;
+ }
+ for (size_t i = 0, e = Functions.size(); i < e; ++i) {
+ if (!Buffer.readFunctionTag()) {
+ errs() << "Unexpected number of functions.\n";
+ return false;
+ }
+ if (!Functions[i]->readGCDA(Buffer, Version))
+ return false;
+ }
+ if (Buffer.readObjectTag()) {
+ uint32_t Length;
+ uint32_t Dummy;
+ if (!Buffer.readInt(Length))
+ return false;
+ if (!Buffer.readInt(Dummy))
+ return false; // checksum
+ if (!Buffer.readInt(Dummy))
+ return false; // num
+ if (!Buffer.readInt(RunCount))
+ return false;
+ Buffer.advanceCursor(Length - 3);
+ }
+ while (Buffer.readProgramTag()) {
+ uint32_t Length;
+ if (!Buffer.readInt(Length))
+ return false;
+ Buffer.advanceCursor(Length);
+ ++ProgramCount;
+ }
+
+ return true;
+}
+
+/// dump - Dump GCOVFile content to dbgs() for debugging purposes.
+void GCOVFile::dump() const {
+ for (const auto &FPtr : Functions)
+ FPtr->dump();
+}
+
+/// collectLineCounts - Collect line counts. This must be used after
+/// reading .gcno and .gcda files.
+void GCOVFile::collectLineCounts(FileInfo &FI) {
+ for (const auto &FPtr : Functions)
+ FPtr->collectLineCounts(FI);
+ FI.setRunCount(RunCount);
+ FI.setProgramCount(ProgramCount);
+}
+
+//===----------------------------------------------------------------------===//
+// GCOVFunction implementation.
+
+/// readGCNO - Read a function from the GCNO buffer. Return false if an error
+/// occurs.
+bool GCOVFunction::readGCNO(GCOVBuffer &Buff, GCOV::GCOVVersion Version) {
+ uint32_t Dummy;
+ if (!Buff.readInt(Dummy))
+ return false; // Function header length
+ if (!Buff.readInt(Ident))
+ return false;
+ if (!Buff.readInt(Checksum))
+ return false;
+ if (Version != GCOV::V402) {
+ uint32_t CfgChecksum;
+ if (!Buff.readInt(CfgChecksum))
+ return false;
+ if (Parent.getChecksum() != CfgChecksum) {
+ errs() << "File checksums do not match: " << Parent.getChecksum()
+ << " != " << CfgChecksum << " in (" << Name << ").\n";
+ return false;
+ }
+ }
+ if (!Buff.readString(Name))
+ return false;
+ if (!Buff.readString(Filename))
+ return false;
+ if (!Buff.readInt(LineNumber))
+ return false;
+
+ // read blocks.
+ if (!Buff.readBlockTag()) {
+ errs() << "Block tag not found.\n";
+ return false;
+ }
+ uint32_t BlockCount;
+ if (!Buff.readInt(BlockCount))
+ return false;
+ for (uint32_t i = 0, e = BlockCount; i != e; ++i) {
+ if (!Buff.readInt(Dummy))
+ return false; // Block flags;
+ Blocks.push_back(make_unique<GCOVBlock>(*this, i));
+ }
+
+ // read edges.
+ while (Buff.readEdgeTag()) {
+ uint32_t EdgeCount;
+ if (!Buff.readInt(EdgeCount))
+ return false;
+ EdgeCount = (EdgeCount - 1) / 2;
+ uint32_t BlockNo;
+ if (!Buff.readInt(BlockNo))
+ return false;
+ if (BlockNo >= BlockCount) {
+ errs() << "Unexpected block number: " << BlockNo << " (in " << Name
+ << ").\n";
+ return false;
+ }
+ for (uint32_t i = 0, e = EdgeCount; i != e; ++i) {
+ uint32_t Dst;
+ if (!Buff.readInt(Dst))
+ return false;
+ Edges.push_back(make_unique<GCOVEdge>(*Blocks[BlockNo], *Blocks[Dst]));
+ GCOVEdge *Edge = Edges.back().get();
+ Blocks[BlockNo]->addDstEdge(Edge);
+ Blocks[Dst]->addSrcEdge(Edge);
+ if (!Buff.readInt(Dummy))
+ return false; // Edge flag
+ }
+ }
+
+ // read line table.
+ while (Buff.readLineTag()) {
+ uint32_t LineTableLength;
+ // Read the length of this line table.
+ if (!Buff.readInt(LineTableLength))
+ return false;
+ uint32_t EndPos = Buff.getCursor() + LineTableLength * 4;
+ uint32_t BlockNo;
+ // Read the block number this table is associated with.
+ if (!Buff.readInt(BlockNo))
+ return false;
+ if (BlockNo >= BlockCount) {
+ errs() << "Unexpected block number: " << BlockNo << " (in " << Name
+ << ").\n";
+ return false;
+ }
+ GCOVBlock &Block = *Blocks[BlockNo];
+ // Read the word that pads the beginning of the line table. This may be a
+ // flag of some sort, but seems to always be zero.
+ if (!Buff.readInt(Dummy))
+ return false;
+
+ // Line information starts here and continues up until the last word.
+ if (Buff.getCursor() != (EndPos - sizeof(uint32_t))) {
+ StringRef F;
+ // Read the source file name.
+ if (!Buff.readString(F))
+ return false;
+ if (Filename != F) {
+ errs() << "Multiple sources for a single basic block: " << Filename
+ << " != " << F << " (in " << Name << ").\n";
+ return false;
+ }
+ // Read lines up to, but not including, the null terminator.
+ while (Buff.getCursor() < (EndPos - 2 * sizeof(uint32_t))) {
+ uint32_t Line;
+ if (!Buff.readInt(Line))
+ return false;
+ // Line 0 means this instruction was injected by the compiler. Skip it.
+ if (!Line)
+ continue;
+ Block.addLine(Line);
+ }
+ // Read the null terminator.
+ if (!Buff.readInt(Dummy))
+ return false;
+ }
+ // The last word is either a flag or padding, it isn't clear which. Skip
+ // over it.
+ if (!Buff.readInt(Dummy))
+ return false;
+ }
+ return true;
+}
+
+/// readGCDA - Read a function from the GCDA buffer. Return false if an error
+/// occurs.
+bool GCOVFunction::readGCDA(GCOVBuffer &Buff, GCOV::GCOVVersion Version) {
+ uint32_t Dummy;
+ if (!Buff.readInt(Dummy))
+ return false; // Function header length
+
+ uint32_t GCDAIdent;
+ if (!Buff.readInt(GCDAIdent))
+ return false;
+ if (Ident != GCDAIdent) {
+ errs() << "Function identifiers do not match: " << Ident
+ << " != " << GCDAIdent << " (in " << Name << ").\n";
+ return false;
+ }
+
+ uint32_t GCDAChecksum;
+ if (!Buff.readInt(GCDAChecksum))
+ return false;
+ if (Checksum != GCDAChecksum) {
+ errs() << "Function checksums do not match: " << Checksum
+ << " != " << GCDAChecksum << " (in " << Name << ").\n";
+ return false;
+ }
+
+ uint32_t CfgChecksum;
+ if (Version != GCOV::V402) {
+ if (!Buff.readInt(CfgChecksum))
+ return false;
+ if (Parent.getChecksum() != CfgChecksum) {
+ errs() << "File checksums do not match: " << Parent.getChecksum()
+ << " != " << CfgChecksum << " (in " << Name << ").\n";
+ return false;
+ }
+ }
+
+ StringRef GCDAName;
+ if (!Buff.readString(GCDAName))
+ return false;
+ if (Name != GCDAName) {
+ errs() << "Function names do not match: " << Name << " != " << GCDAName
+ << ".\n";
+ return false;
+ }
+
+ if (!Buff.readArcTag()) {
+ errs() << "Arc tag not found (in " << Name << ").\n";
+ return false;
+ }
+
+ uint32_t Count;
+ if (!Buff.readInt(Count))
+ return false;
+ Count /= 2;
+
+ // This for loop adds the counts for each block. A second nested loop is
+ // required to combine the edge counts that are contained in the GCDA file.
+ for (uint32_t BlockNo = 0; Count > 0; ++BlockNo) {
+    // The last block is always reserved for the exit block.
+ if (BlockNo >= Blocks.size()) {
+ errs() << "Unexpected number of edges (in " << Name << ").\n";
+ return false;
+ }
+ if (BlockNo == Blocks.size() - 1)
+ errs() << "(" << Name << ") has arcs from exit block.\n";
+ GCOVBlock &Block = *Blocks[BlockNo];
+ for (size_t EdgeNo = 0, End = Block.getNumDstEdges(); EdgeNo < End;
+ ++EdgeNo) {
+ if (Count == 0) {
+ errs() << "Unexpected number of edges (in " << Name << ").\n";
+ return false;
+ }
+ uint64_t ArcCount;
+ if (!Buff.readInt64(ArcCount))
+ return false;
+ Block.addCount(EdgeNo, ArcCount);
+ --Count;
+ }
+ Block.sortDstEdges();
+ }
+ return true;
+}
+
+/// getEntryCount - Get the number of times the function was called by
+/// retrieving the entry block's count.
+uint64_t GCOVFunction::getEntryCount() const {
+ return Blocks.front()->getCount();
+}
+
+/// getExitCount - Get the number of times the function returned by retrieving
+/// the exit block's count.
+uint64_t GCOVFunction::getExitCount() const {
+ return Blocks.back()->getCount();
+}
+
+/// dump - Dump GCOVFunction content to dbgs() for debugging purposes.
+void GCOVFunction::dump() const {
+ dbgs() << "===== " << Name << " (" << Ident << ") @ " << Filename << ":"
+ << LineNumber << "\n";
+ for (const auto &Block : Blocks)
+ Block->dump();
+}
+
+/// collectLineCounts - Collect line counts. This must be used after
+/// reading .gcno and .gcda files.
+void GCOVFunction::collectLineCounts(FileInfo &FI) {
+ // If the line number is zero, this is a function that doesn't actually appear
+ // in the source file, so there isn't anything we can do with it.
+ if (LineNumber == 0)
+ return;
+
+ for (const auto &Block : Blocks)
+ Block->collectLineCounts(FI);
+ FI.addFunctionLine(Filename, LineNumber, this);
+}
+
+//===----------------------------------------------------------------------===//
+// GCOVBlock implementation.
+
+/// ~GCOVBlock - Delete GCOVBlock and its content.
+GCOVBlock::~GCOVBlock() {
+ SrcEdges.clear();
+ DstEdges.clear();
+ Lines.clear();
+}
+
+/// addCount - Add to the block counter while storing the edge count. If the
+/// destination has no outgoing edges, also update that block's count.
+void GCOVBlock::addCount(size_t DstEdgeNo, uint64_t N) {
+ assert(DstEdgeNo < DstEdges.size()); // up to caller to ensure EdgeNo is valid
+ DstEdges[DstEdgeNo]->Count = N;
+ Counter += N;
+ if (!DstEdges[DstEdgeNo]->Dst.getNumDstEdges())
+ DstEdges[DstEdgeNo]->Dst.Counter += N;
+}
+
+/// sortDstEdges - Sort destination edges by block number; a no-op if already
+/// sorted. This is required for printing branch info in the correct order.
+void GCOVBlock::sortDstEdges() {
+ if (!DstEdgesAreSorted) {
+ SortDstEdgesFunctor SortEdges;
+ std::stable_sort(DstEdges.begin(), DstEdges.end(), SortEdges);
+ }
+}
+
+/// collectLineCounts - Collect line counts. This must be used after
+/// reading .gcno and .gcda files.
+void GCOVBlock::collectLineCounts(FileInfo &FI) {
+ for (uint32_t N : Lines)
+ FI.addBlockLine(Parent.getFilename(), N, this);
+}
+
+/// dump - Dump GCOVBlock content to dbgs() for debugging purposes.
+void GCOVBlock::dump() const {
+ dbgs() << "Block : " << Number << " Counter : " << Counter << "\n";
+ if (!SrcEdges.empty()) {
+ dbgs() << "\tSource Edges : ";
+ for (const GCOVEdge *Edge : SrcEdges)
+ dbgs() << Edge->Src.Number << " (" << Edge->Count << "), ";
+ dbgs() << "\n";
+ }
+ if (!DstEdges.empty()) {
+ dbgs() << "\tDestination Edges : ";
+ for (const GCOVEdge *Edge : DstEdges)
+ dbgs() << Edge->Dst.Number << " (" << Edge->Count << "), ";
+ dbgs() << "\n";
+ }
+ if (!Lines.empty()) {
+ dbgs() << "\tLines : ";
+ for (uint32_t N : Lines)
+ dbgs() << (N) << ",";
+ dbgs() << "\n";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// FileInfo implementation.
+
+// Safe integer division, returns 0 if numerator is 0.
+static uint32_t safeDiv(uint64_t Numerator, uint64_t Divisor) {
+ if (!Numerator)
+ return 0;
+ return Numerator / Divisor;
+}
+
+// This custom division function mimics gcov's branch output:
+// - Round to the closest whole number
+// - Only print 0% or 100% if the ratio is exactly that value
+static uint32_t branchDiv(uint64_t Numerator, uint64_t Divisor) {
+ if (!Numerator)
+ return 0;
+ if (Numerator == Divisor)
+ return 100;
+
+ uint8_t Res = (Numerator * 100 + Divisor / 2) / Divisor;
+ if (Res == 0)
+ return 1;
+ if (Res == 100)
+ return 99;
+ return Res;
+}
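+
+// Worked examples of the rules above: branchDiv(1, 1000) rounds to 0 but is
+// reported as 1%, branchDiv(999, 1000) rounds to 100 but is reported as
+// 99%, and only branchDiv(1000, 1000) prints as 100%.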
+
+namespace {
+struct formatBranchInfo {
+ formatBranchInfo(const GCOV::Options &Options, uint64_t Count, uint64_t Total)
+ : Options(Options), Count(Count), Total(Total) {}
+
+ void print(raw_ostream &OS) const {
+ if (!Total)
+ OS << "never executed";
+ else if (Options.BranchCount)
+ OS << "taken " << Count;
+ else
+ OS << "taken " << branchDiv(Count, Total) << "%";
+ }
+
+ const GCOV::Options &Options;
+ uint64_t Count;
+ uint64_t Total;
+};
+
+static raw_ostream &operator<<(raw_ostream &OS, const formatBranchInfo &FBI) {
+ FBI.print(OS);
+ return OS;
+}
+
+class LineConsumer {
+ std::unique_ptr<MemoryBuffer> Buffer;
+ StringRef Remaining;
+
+public:
+ LineConsumer(StringRef Filename) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFileOrSTDIN(Filename);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ errs() << Filename << ": " << EC.message() << "\n";
+ Remaining = "";
+ } else {
+ Buffer = std::move(BufferOrErr.get());
+ Remaining = Buffer->getBuffer();
+ }
+ }
+ bool empty() { return Remaining.empty(); }
+ void printNext(raw_ostream &OS, uint32_t LineNum) {
+ StringRef Line;
+ if (empty())
+ Line = "/*EOF*/";
+ else
+ std::tie(Line, Remaining) = Remaining.split("\n");
+ OS << format("%5u:", LineNum) << Line << "\n";
+ }
+};
+}
+
+/// Convert a path to a gcov filename. If PreservePaths is true, this
+/// translates "/" to "#", ".." to "^", and drops ".", to match gcov.
+static std::string mangleCoveragePath(StringRef Filename, bool PreservePaths) {
+ if (!PreservePaths)
+ return sys::path::filename(Filename).str();
+
+ // This behaviour is defined by gcov in terms of text replacements, so it's
+ // not likely to do anything useful on filesystems with different textual
+ // conventions.
+ llvm::SmallString<256> Result("");
+ StringRef::iterator I, S, E;
+ for (I = S = Filename.begin(), E = Filename.end(); I != E; ++I) {
+ if (*I != '/')
+ continue;
+
+ if (I - S == 1 && *S == '.') {
+ // ".", the current directory, is skipped.
+    } else if (I - S == 2 && *S == '.' && *(S + 1) == '.') {
+      // "..", the parent directory, becomes "^" followed by the "#"
+      // separator.
+      Result.append("^#");
+ } else {
+ if (S < I)
+ // Leave other components intact,
+ Result.append(S, I);
+ // And separate with "#".
+ Result.push_back('#');
+ }
+ S = I + 1;
+ }
+
+ if (S < I)
+ Result.append(S, I);
+ return Result.str();
+}
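+
+// Worked example (illustrative path): with PreservePaths set,
+//   mangleCoveragePath("../a/./b/c.cc", true) == "^#a#b#c.cc"
+// -- ".." becomes "^", "." is dropped, and "/" separators become "#".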
+
+std::string FileInfo::getCoveragePath(StringRef Filename,
+ StringRef MainFilename) {
+ if (Options.NoOutput)
+ // This is probably a bug in gcov, but when -n is specified, paths aren't
+ // mangled at all, and the -l and -p options are ignored. Here, we do the
+ // same.
+ return Filename;
+
+ std::string CoveragePath;
+ if (Options.LongFileNames && !Filename.equals(MainFilename))
+ CoveragePath =
+ mangleCoveragePath(MainFilename, Options.PreservePaths) + "##";
+ CoveragePath += mangleCoveragePath(Filename, Options.PreservePaths) + ".gcov";
+ return CoveragePath;
+}
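+
+// For instance (illustrative names): with -l (LongFileNames) and main file
+// "a.cc", an included header "b.h" is written to "a.cc##b.h.gcov", while
+// "a.cc" itself becomes plain "a.cc.gcov".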
+
+std::unique_ptr<raw_ostream>
+FileInfo::openCoveragePath(StringRef CoveragePath) {
+ if (Options.NoOutput)
+ return llvm::make_unique<raw_null_ostream>();
+
+ std::error_code EC;
+ auto OS = llvm::make_unique<raw_fd_ostream>(CoveragePath, EC,
+ sys::fs::F_Text);
+ if (EC) {
+ errs() << EC.message() << "\n";
+ return llvm::make_unique<raw_null_ostream>();
+ }
+ return std::move(OS);
+}
+
+/// print - Print source files with collected line count information.
+void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
+ StringRef GCNOFile, StringRef GCDAFile) {
+ for (const auto &LI : LineInfo) {
+ StringRef Filename = LI.first();
+ auto AllLines = LineConsumer(Filename);
+
+ std::string CoveragePath = getCoveragePath(Filename, MainFilename);
+ std::unique_ptr<raw_ostream> CovStream = openCoveragePath(CoveragePath);
+ raw_ostream &CovOS = *CovStream;
+
+ CovOS << " -: 0:Source:" << Filename << "\n";
+ CovOS << " -: 0:Graph:" << GCNOFile << "\n";
+ CovOS << " -: 0:Data:" << GCDAFile << "\n";
+ CovOS << " -: 0:Runs:" << RunCount << "\n";
+ CovOS << " -: 0:Programs:" << ProgramCount << "\n";
+
+ const LineData &Line = LI.second;
+ GCOVCoverage FileCoverage(Filename);
+ for (uint32_t LineIndex = 0; LineIndex < Line.LastLine || !AllLines.empty();
+ ++LineIndex) {
+ if (Options.BranchInfo) {
+ FunctionLines::const_iterator FuncsIt = Line.Functions.find(LineIndex);
+ if (FuncsIt != Line.Functions.end())
+ printFunctionSummary(CovOS, FuncsIt->second);
+ }
+
+ BlockLines::const_iterator BlocksIt = Line.Blocks.find(LineIndex);
+ if (BlocksIt == Line.Blocks.end()) {
+ // No basic blocks are on this line. Not an executable line of code.
+ CovOS << " -:";
+ AllLines.printNext(CovOS, LineIndex + 1);
+ } else {
+ const BlockVector &Blocks = BlocksIt->second;
+
+ // Add up the block counts to form line counts.
+ DenseMap<const GCOVFunction *, bool> LineExecs;
+ uint64_t LineCount = 0;
+ for (const GCOVBlock *Block : Blocks) {
+ if (Options.AllBlocks) {
+ // Only take the highest block count for that line.
+ uint64_t BlockCount = Block->getCount();
+ LineCount = LineCount > BlockCount ? LineCount : BlockCount;
+ } else {
+ // Sum up all of the block counts.
+ LineCount += Block->getCount();
+ }
+
+ if (Options.FuncCoverage) {
+ // This is a slightly convoluted way to most accurately gather line
+ // statistics for functions. Basically what is happening is that we
+ // don't want to count a single line with multiple blocks more than
+ // once. However, we also don't simply want to give the total line
+ // count to every function that starts on the line. Thus, what is
+ // happening here are two things:
+ // 1) Ensure that the number of logical lines is only incremented
+ // once per function.
+ // 2) If there are multiple blocks on the same line, ensure that the
+ // number of lines executed is incremented as long as at least
+          //    one of the blocks is executed.
+ const GCOVFunction *Function = &Block->getParent();
+ if (FuncCoverages.find(Function) == FuncCoverages.end()) {
+ std::pair<const GCOVFunction *, GCOVCoverage> KeyValue(
+ Function, GCOVCoverage(Function->getName()));
+ FuncCoverages.insert(KeyValue);
+ }
+ GCOVCoverage &FuncCoverage = FuncCoverages.find(Function)->second;
+
+ if (LineExecs.find(Function) == LineExecs.end()) {
+ if (Block->getCount()) {
+ ++FuncCoverage.LinesExec;
+ LineExecs[Function] = true;
+ } else {
+ LineExecs[Function] = false;
+ }
+ ++FuncCoverage.LogicalLines;
+ } else if (!LineExecs[Function] && Block->getCount()) {
+ ++FuncCoverage.LinesExec;
+ LineExecs[Function] = true;
+ }
+ }
+ }
+
+ if (LineCount == 0)
+ CovOS << " #####:";
+ else {
+ CovOS << format("%9" PRIu64 ":", LineCount);
+ ++FileCoverage.LinesExec;
+ }
+ ++FileCoverage.LogicalLines;
+
+ AllLines.printNext(CovOS, LineIndex + 1);
+
+ uint32_t BlockNo = 0;
+ uint32_t EdgeNo = 0;
+ for (const GCOVBlock *Block : Blocks) {
+ // Only print block and branch information at the end of the block.
+ if (Block->getLastLine() != LineIndex + 1)
+ continue;
+ if (Options.AllBlocks)
+ printBlockInfo(CovOS, *Block, LineIndex, BlockNo);
+ if (Options.BranchInfo) {
+ size_t NumEdges = Block->getNumDstEdges();
+ if (NumEdges > 1)
+ printBranchInfo(CovOS, *Block, FileCoverage, EdgeNo);
+ else if (Options.UncondBranch && NumEdges == 1)
+ printUncondBranchInfo(CovOS, EdgeNo,
+ (*Block->dst_begin())->Count);
+ }
+ }
+ }
+ }
+ FileCoverages.push_back(std::make_pair(CoveragePath, FileCoverage));
+ }
+
+ // FIXME: There is no way to detect calls given current instrumentation.
+ if (Options.FuncCoverage)
+ printFuncCoverage(InfoOS);
+ printFileCoverage(InfoOS);
+ return;
+}
+
+/// printFunctionSummary - Print function and block summary.
+void FileInfo::printFunctionSummary(raw_ostream &OS,
+ const FunctionVector &Funcs) const {
+ for (const GCOVFunction *Func : Funcs) {
+ uint64_t EntryCount = Func->getEntryCount();
+ uint32_t BlocksExec = 0;
+ for (const GCOVBlock &Block : Func->blocks())
+ if (Block.getNumDstEdges() && Block.getCount())
+ ++BlocksExec;
+
+ OS << "function " << Func->getName() << " called " << EntryCount
+ << " returned " << safeDiv(Func->getExitCount() * 100, EntryCount)
+ << "% blocks executed "
+ << safeDiv(BlocksExec * 100, Func->getNumBlocks() - 1) << "%\n";
+ }
+}
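+
+// A sample line as emitted above (illustrative values):
+//   function main called 1 returned 100% blocks executed 75%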
+
+/// printBlockInfo - Output counts for each block.
+void FileInfo::printBlockInfo(raw_ostream &OS, const GCOVBlock &Block,
+ uint32_t LineIndex, uint32_t &BlockNo) const {
+ if (Block.getCount() == 0)
+ OS << " $$$$$:";
+ else
+ OS << format("%9" PRIu64 ":", Block.getCount());
+ OS << format("%5u-block %2u\n", LineIndex + 1, BlockNo++);
+}
+
+/// printBranchInfo - Print conditional branch probabilities.
+void FileInfo::printBranchInfo(raw_ostream &OS, const GCOVBlock &Block,
+ GCOVCoverage &Coverage, uint32_t &EdgeNo) {
+ SmallVector<uint64_t, 16> BranchCounts;
+ uint64_t TotalCounts = 0;
+ for (const GCOVEdge *Edge : Block.dsts()) {
+ BranchCounts.push_back(Edge->Count);
+ TotalCounts += Edge->Count;
+ if (Block.getCount())
+ ++Coverage.BranchesExec;
+ if (Edge->Count)
+ ++Coverage.BranchesTaken;
+ ++Coverage.Branches;
+
+ if (Options.FuncCoverage) {
+ const GCOVFunction *Function = &Block.getParent();
+ GCOVCoverage &FuncCoverage = FuncCoverages.find(Function)->second;
+ if (Block.getCount())
+ ++FuncCoverage.BranchesExec;
+ if (Edge->Count)
+ ++FuncCoverage.BranchesTaken;
+ ++FuncCoverage.Branches;
+ }
+ }
+
+ for (uint64_t N : BranchCounts)
+ OS << format("branch %2u ", EdgeNo++)
+ << formatBranchInfo(Options, N, TotalCounts) << "\n";
+}
+
+/// printUncondBranchInfo - Print unconditional branch probabilities.
+void FileInfo::printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo,
+ uint64_t Count) const {
+ OS << format("unconditional %2u ", EdgeNo++)
+ << formatBranchInfo(Options, Count, Count) << "\n";
+}
+
+// printCoverage - Print generic coverage info used by both printFuncCoverage
+// and printFileCoverage.
+void FileInfo::printCoverage(raw_ostream &OS,
+ const GCOVCoverage &Coverage) const {
+ OS << format("Lines executed:%.2f%% of %u\n",
+ double(Coverage.LinesExec) * 100 / Coverage.LogicalLines,
+ Coverage.LogicalLines);
+ if (Options.BranchInfo) {
+ if (Coverage.Branches) {
+ OS << format("Branches executed:%.2f%% of %u\n",
+ double(Coverage.BranchesExec) * 100 / Coverage.Branches,
+ Coverage.Branches);
+ OS << format("Taken at least once:%.2f%% of %u\n",
+ double(Coverage.BranchesTaken) * 100 / Coverage.Branches,
+ Coverage.Branches);
+ } else {
+ OS << "No branches\n";
+ }
+ OS << "No calls\n"; // to be consistent with gcov
+ }
+}
+
+// printFuncCoverage - Print per-function coverage info.
+void FileInfo::printFuncCoverage(raw_ostream &OS) const {
+ for (const auto &FC : FuncCoverages) {
+ const GCOVCoverage &Coverage = FC.second;
+ OS << "Function '" << Coverage.Name << "'\n";
+ printCoverage(OS, Coverage);
+ OS << "\n";
+ }
+}
+
+// printFileCoverage - Print per-file coverage info.
+void FileInfo::printFileCoverage(raw_ostream &OS) const {
+ for (const auto &FC : FileCoverages) {
+ const std::string &Filename = FC.first;
+ const GCOVCoverage &Coverage = FC.second;
+ OS << "File '" << Coverage.Name << "'\n";
+ printCoverage(OS, Coverage);
+ if (!Options.NoOutput)
+ OS << Coverage.Name << ":creating '" << Filename << "'\n";
+ OS << "\n";
+ }
+}
diff --git a/contrib/llvm/lib/IR/GVMaterializer.cpp b/contrib/llvm/lib/IR/GVMaterializer.cpp
new file mode 100644
index 0000000..706926d
--- /dev/null
+++ b/contrib/llvm/lib/IR/GVMaterializer.cpp
@@ -0,0 +1,18 @@
+//===-- GVMaterializer.cpp - Base implementation for GV materializers -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Minimal implementation of the abstract interface for materializing
+// GlobalValues.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/GVMaterializer.h"
+using namespace llvm;
+
+GVMaterializer::~GVMaterializer() {}
diff --git a/contrib/llvm/lib/IR/Globals.cpp b/contrib/llvm/lib/IR/Globals.cpp
new file mode 100644
index 0000000..6159f93
--- /dev/null
+++ b/contrib/llvm/lib/IR/Globals.cpp
@@ -0,0 +1,285 @@
+//===-- Globals.cpp - Implement the GlobalValue & GlobalVariable class ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the GlobalValue & GlobalVariable classes for the IR
+// library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// GlobalValue Class
+//===----------------------------------------------------------------------===//
+
+bool GlobalValue::isMaterializable() const {
+ if (const Function *F = dyn_cast<Function>(this))
+ return F->isMaterializable();
+ return false;
+}
+std::error_code GlobalValue::materialize() {
+ return getParent()->materialize(this);
+}
+
+/// Override destroyConstantImpl to make sure it doesn't get called on
+/// GlobalValue's because they shouldn't be treated like other constants.
+void GlobalValue::destroyConstantImpl() {
+ llvm_unreachable("You can't GV->destroyConstantImpl()!");
+}
+
+Value *GlobalValue::handleOperandChangeImpl(Value *From, Value *To, Use *U) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+/// copyAttributesFrom - copy all additional attributes (those not needed to
+/// create a GlobalValue) from the GlobalValue Src to this one.
+void GlobalValue::copyAttributesFrom(const GlobalValue *Src) {
+ setVisibility(Src->getVisibility());
+ setUnnamedAddr(Src->hasUnnamedAddr());
+ setDLLStorageClass(Src->getDLLStorageClass());
+}
+
+unsigned GlobalValue::getAlignment() const {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getBaseObject())
+ return GO->getAlignment();
+
+ // FIXME: we should also be able to handle:
+ // Alias = Global + Offset
+ // Alias = Absolute
+ return 0;
+ }
+ return cast<GlobalObject>(this)->getAlignment();
+}
+
+void GlobalObject::setAlignment(unsigned Align) {
+ assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
+ unsigned AlignmentData = Log2_32(Align) + 1;
+ unsigned OldData = getGlobalValueSubClassData();
+ setGlobalValueSubClassData((OldData & ~AlignmentMask) | AlignmentData);
+ assert(getAlignment() == Align && "Alignment representation error!");
+}
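+// Worked example of the encoding above: setAlignment(16) stores
+// Log2_32(16) + 1 == 5 in the low AlignmentBits of the subclass data, and
+// decoding recovers (1u << 5) >> 1 == 16; a stored 0 means "no alignment".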
+
+unsigned GlobalObject::getGlobalObjectSubClassData() const {
+ unsigned ValueData = getGlobalValueSubClassData();
+ return ValueData >> AlignmentBits;
+}
+
+void GlobalObject::setGlobalObjectSubClassData(unsigned Val) {
+ unsigned OldData = getGlobalValueSubClassData();
+ setGlobalValueSubClassData((OldData & AlignmentMask) |
+ (Val << AlignmentBits));
+ assert(getGlobalObjectSubClassData() == Val && "representation error");
+}
+
+void GlobalObject::copyAttributesFrom(const GlobalValue *Src) {
+ GlobalValue::copyAttributesFrom(Src);
+ if (const auto *GV = dyn_cast<GlobalObject>(Src)) {
+ setAlignment(GV->getAlignment());
+ setSection(GV->getSection());
+ }
+}
+
+const char *GlobalValue::getSection() const {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getBaseObject())
+ return GO->getSection();
+ return "";
+ }
+ return cast<GlobalObject>(this)->getSection();
+}
+
+Comdat *GlobalValue::getComdat() {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getBaseObject())
+ return const_cast<GlobalObject *>(GO)->getComdat();
+ return nullptr;
+ }
+ return cast<GlobalObject>(this)->getComdat();
+}
+
+void GlobalObject::setSection(StringRef S) { Section = S; }
+
+bool GlobalValue::isDeclaration() const {
+ // Globals are definitions if they have an initializer.
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this))
+ return GV->getNumOperands() == 0;
+
+ // Functions are definitions if they have a body.
+ if (const Function *F = dyn_cast<Function>(this))
+ return F->empty() && !F->isMaterializable();
+
+ // Aliases are always definitions.
+ assert(isa<GlobalAlias>(this));
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// GlobalVariable Implementation
+//===----------------------------------------------------------------------===//
+
+GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
+ Constant *InitVal, const Twine &Name,
+ ThreadLocalMode TLMode, unsigned AddressSpace,
+ bool isExternallyInitialized)
+ : GlobalObject(Ty, Value::GlobalVariableVal,
+ OperandTraits<GlobalVariable>::op_begin(this),
+ InitVal != nullptr, Link, Name, AddressSpace),
+ isConstantGlobal(constant),
+ isExternallyInitializedConstant(isExternallyInitialized) {
+ setThreadLocalMode(TLMode);
+ if (InitVal) {
+ assert(InitVal->getType() == Ty &&
+ "Initializer should be the same type as the GlobalVariable!");
+ Op<0>() = InitVal;
+ }
+}
+
+GlobalVariable::GlobalVariable(Module &M, Type *Ty, bool constant,
+ LinkageTypes Link, Constant *InitVal,
+ const Twine &Name, GlobalVariable *Before,
+ ThreadLocalMode TLMode, unsigned AddressSpace,
+ bool isExternallyInitialized)
+ : GlobalObject(Ty, Value::GlobalVariableVal,
+ OperandTraits<GlobalVariable>::op_begin(this),
+ InitVal != nullptr, Link, Name, AddressSpace),
+ isConstantGlobal(constant),
+ isExternallyInitializedConstant(isExternallyInitialized) {
+ setThreadLocalMode(TLMode);
+ if (InitVal) {
+ assert(InitVal->getType() == Ty &&
+ "Initializer should be the same type as the GlobalVariable!");
+ Op<0>() = InitVal;
+ }
+
+ if (Before)
+ Before->getParent()->getGlobalList().insert(Before->getIterator(), this);
+ else
+ M.getGlobalList().push_back(this);
+}
+
+void GlobalVariable::setParent(Module *parent) {
+ Parent = parent;
+}
+
+void GlobalVariable::removeFromParent() {
+ getParent()->getGlobalList().remove(getIterator());
+}
+
+void GlobalVariable::eraseFromParent() {
+ getParent()->getGlobalList().erase(getIterator());
+}
+
+void GlobalVariable::setInitializer(Constant *InitVal) {
+ if (!InitVal) {
+ if (hasInitializer()) {
+ // Note that the number of operands is used to compute the offset of the
+ // operand, so the order here matters: clearing the operand before clearing
+ // the operand count keeps the offset to the operand correct.
+ Op<0>().set(nullptr);
+ setGlobalVariableNumOperands(0);
+ }
+ } else {
+ assert(InitVal->getType() == getType()->getElementType() &&
+ "Initializer type must match GlobalVariable type");
+ // Note that the number of operands is used to compute the offset of the
+ // operand, so the order here matters. We need to set the operand count to
+ // 1 first so that we get the correct offset to the first operand.
+ if (!hasInitializer())
+ setGlobalVariableNumOperands(1);
+ Op<0>().set(InitVal);
+ }
+}
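+// Usage sketch (hypothetical; assumes GV's value type is i32, matching the
+// assertion above, and an Int32Ty in scope):
+//   GV.setInitializer(ConstantInt::get(Int32Ty, 42)); // now a definition
+//   GV.setInitializer(nullptr); // back to a declaration (zero operands)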
+
+/// Copy all additional attributes (those not needed to create a GlobalVariable)
+/// from the GlobalVariable Src to this one.
+void GlobalVariable::copyAttributesFrom(const GlobalValue *Src) {
+ GlobalObject::copyAttributesFrom(Src);
+ if (const GlobalVariable *SrcVar = dyn_cast<GlobalVariable>(Src)) {
+ setThreadLocalMode(SrcVar->getThreadLocalMode());
+ setExternallyInitialized(SrcVar->isExternallyInitialized());
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// GlobalAlias Implementation
+//===----------------------------------------------------------------------===//
+
+GlobalAlias::GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Link,
+ const Twine &Name, Constant *Aliasee,
+ Module *ParentModule)
+ : GlobalValue(Ty, Value::GlobalAliasVal, &Op<0>(), 1, Link, Name,
+ AddressSpace) {
+ Op<0>() = Aliasee;
+
+ if (ParentModule)
+ ParentModule->getAliasList().push_back(this);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Link, const Twine &Name,
+ Constant *Aliasee, Module *ParentModule) {
+ return new GlobalAlias(Ty, AddressSpace, Link, Name, Aliasee, ParentModule);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ Module *Parent) {
+ return create(Ty, AddressSpace, Linkage, Name, nullptr, Parent);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ GlobalValue *Aliasee) {
+ return create(Ty, AddressSpace, Linkage, Name, Aliasee, Aliasee->getParent());
+}
+
+GlobalAlias *GlobalAlias::create(LinkageTypes Link, const Twine &Name,
+ GlobalValue *Aliasee) {
+ PointerType *PTy = Aliasee->getType();
+ return create(PTy->getElementType(), PTy->getAddressSpace(), Link, Name,
+ Aliasee);
+}
+
+GlobalAlias *GlobalAlias::create(const Twine &Name, GlobalValue *Aliasee) {
+ return create(Aliasee->getLinkage(), Name, Aliasee);
+}
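+// Usage sketch: the one-argument overload above copies the aliasee's
+// linkage, pointee type and address space, so for an assumed GlobalValue *F
+// these two calls are equivalent:
+//   GlobalAlias::create("f_alias", F);
+//   GlobalAlias::create(F->getType()->getElementType(),
+//                       F->getType()->getAddressSpace(),
+//                       F->getLinkage(), "f_alias", F);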
+
+void GlobalAlias::setParent(Module *parent) {
+ Parent = parent;
+}
+
+void GlobalAlias::removeFromParent() {
+ getParent()->getAliasList().remove(getIterator());
+}
+
+void GlobalAlias::eraseFromParent() {
+ getParent()->getAliasList().erase(getIterator());
+}
+
+void GlobalAlias::setAliasee(Constant *Aliasee) {
+ assert((!Aliasee || Aliasee->getType() == getType()) &&
+ "Alias and aliasee types should match!");
+ setOperand(0, Aliasee);
+}
diff --git a/contrib/llvm/lib/IR/IRBuilder.cpp b/contrib/llvm/lib/IR/IRBuilder.cpp
new file mode 100644
index 0000000..4474129
--- /dev/null
+++ b/contrib/llvm/lib/IR/IRBuilder.cpp
@@ -0,0 +1,403 @@
+//===---- IRBuilder.cpp - Builder for LLVM Instrs -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IRBuilder class, which is used as a convenient way
+// to create LLVM instructions with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Statepoint.h"
+using namespace llvm;
+
+/// CreateGlobalString - Make a new global variable with an initializer that
+/// has an array of i8 type filled in with the nul-terminated string value
+/// specified. If Name is specified, it is the name of the global variable
+/// created.
+GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
+ const Twine &Name,
+ unsigned AddressSpace) {
+ Constant *StrConstant = ConstantDataArray::getString(Context, Str);
+ Module &M = *BB->getParent()->getParent();
+ GlobalVariable *GV = new GlobalVariable(M, StrConstant->getType(),
+ true, GlobalValue::PrivateLinkage,
+ StrConstant, Name, nullptr,
+ GlobalVariable::NotThreadLocal,
+ AddressSpace);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
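+// Usage sketch (hypothetical helper): CreateGlobalStringPtr, declared in
+// IRBuilder.h, wraps the function above and GEPs the array down to an i8*.
+static Value *emitGreeting(IRBuilder<> &Builder) {
+ return Builder.CreateGlobalStringPtr("hello", "greeting");
+}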
+
+Type *IRBuilderBase::getCurrentFunctionReturnType() const {
+ assert(BB && BB->getParent() && "No current function!");
+ return BB->getParent()->getReturnType();
+}
+
+Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
+ PointerType *PT = cast<PointerType>(Ptr->getType());
+ if (PT->getElementType()->isIntegerTy(8))
+ return Ptr;
+
+ // Otherwise, we need to insert a bitcast.
+ PT = getInt8PtrTy(PT->getAddressSpace());
+ BitCastInst *BCI = new BitCastInst(Ptr, PT, "");
+ BB->getInstList().insert(InsertPt, BCI);
+ SetInstDebugLocation(BCI);
+ return BCI;
+}
+
+static CallInst *createCallHelper(Value *Callee, ArrayRef<Value *> Ops,
+ IRBuilderBase *Builder,
+ const Twine& Name="") {
+ CallInst *CI = CallInst::Create(Callee, Ops, Name);
+ Builder->GetInsertBlock()->getInstList().insert(Builder->GetInsertPoint(),CI);
+ Builder->SetInstDebugLocation(CI);
+ return CI;
+}
+
+static InvokeInst *createInvokeHelper(Value *Invokee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest,
+ ArrayRef<Value *> Ops,
+ IRBuilderBase *Builder,
+ const Twine &Name = "") {
+ InvokeInst *II =
+ InvokeInst::Create(Invokee, NormalDest, UnwindDest, Ops, Name);
+ Builder->GetInsertBlock()->getInstList().insert(Builder->GetInsertPoint(),
+ II);
+ Builder->SetInstDebugLocation(II);
+ return II;
+}
+
+CallInst *IRBuilderBase::
+CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
+ bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
+ MDNode *NoAliasTag) {
+ Ptr = getCastedInt8PtrValue(Ptr);
+ Value *Ops[] = { Ptr, Val, Size, getInt32(Align), getInt1(isVolatile) };
+ Type *Tys[] = { Ptr->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
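+// Usage sketch: zero N bytes at Dst (both assumed in scope; the pointer is
+// bitcast to i8* by getCastedInt8PtrValue above):
+//   Builder.CreateMemSet(Dst, Builder.getInt8(0), Builder.getInt64(N),
+//                        /*Align=*/1, /*isVolatile=*/false);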
+
+CallInst *IRBuilderBase::
+CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
+ MDNode *ScopeTag, MDNode *NoAliasTag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) };
+ Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ // Set the TBAA Struct info if present.
+ if (TBAAStructTag)
+ CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::
+CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
+ MDNode *NoAliasTag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) };
+ Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "lifetime.start only applies to pointers.");
+ Ptr = getCastedInt8PtrValue(Ptr);
+ if (!Size)
+ Size = getInt64(-1);
+ else
+ assert(Size->getType() == getInt64Ty() &&
+ "lifetime.start requires the size to be an i64");
+ Value *Ops[] = { Size, Ptr };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_start);
+ return createCallHelper(TheFn, Ops, this);
+}
+
+CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "lifetime.end only applies to pointers.");
+ Ptr = getCastedInt8PtrValue(Ptr);
+ if (!Size)
+ Size = getInt64(-1);
+ else
+ assert(Size->getType() == getInt64Ty() &&
+ "lifetime.end requires the size to be an i64");
+ Value *Ops[] = { Size, Ptr };
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_end);
+ return createCallHelper(TheFn, Ops, this);
+}
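+// Usage sketch: bracketing a stack slot's live range (Slot is an assumed
+// AllocaInst *; a null size is treated as "unknown" and becomes -1 above):
+//   Builder.CreateLifetimeStart(Slot, Builder.getInt64(32));
+//   // ... uses of Slot ...
+//   Builder.CreateLifetimeEnd(Slot, Builder.getInt64(32));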
+
+CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
+ assert(Cond->getType() == getInt1Ty() &&
+ "an assumption condition must be of type i1");
+
+ Value *Ops[] = { Cond };
+ Module *M = BB->getParent()->getParent();
+ Value *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
+ return createCallHelper(FnAssume, Ops, this);
+}
+
+/// Create a call to a Masked Load intrinsic.
+/// Ptr - the base pointer for the load
+/// Align - alignment of the source location
+/// Mask - a vector of booleans which indicates which vector lanes should
+/// be accessed in memory
+/// PassThru - a pass-through value that is used to fill the masked-off lanes
+/// of the result
+/// Name - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
+ Value *Mask, Value *PassThru,
+ const Twine &Name) {
+ assert(Ptr->getType()->isPointerTy() && "Ptr must be of pointer type");
+ // DataTy is the overloaded type
+ Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
+ assert(DataTy->isVectorTy() && "Ptr should point to a vector");
+ if (!PassThru)
+ PassThru = UndefValue::get(DataTy);
+ Value *Ops[] = { Ptr, getInt32(Align), Mask, PassThru};
+ return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, DataTy, Name);
+}
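+// Usage sketch: Ptr is assumed to point at <4 x i32> and Mask to be a
+// <4 x i1> value; masked-off lanes yield the PassThru lane (undef here):
+//   Value *V = Builder.CreateMaskedLoad(Ptr, /*Align=*/4, Mask,
+//                                       /*PassThru=*/nullptr, "mload");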
+
+/// Create a call to a Masked Store intrinsic.
+/// Val - the data to be stored,
+/// Ptr - the base pointer for the store
+/// Align - alignment of the destination location
+/// Mask - a vector of booleans which indicates which vector lanes should
+/// be accessed in memory
+CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
+ unsigned Align, Value *Mask) {
+ Value *Ops[] = { Val, Ptr, getInt32(Align), Mask };
+ // Type of the data to be stored - the only one overloaded type
+ return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, Val->getType());
+}
+
+/// Create a call to a Masked intrinsic, with given intrinsic Id,
+/// an array of operands - Ops, and one overloaded type - DataTy
+CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
+ ArrayRef<Value *> Ops,
+ Type *DataTy,
+ const Twine &Name) {
+ Module *M = BB->getParent()->getParent();
+ Type *OverloadedTypes[] = { DataTy };
+ Value *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
+ return createCallHelper(TheFn, Ops, this, Name);
+}
+
+template <typename T0, typename T1, typename T2, typename T3>
+static std::vector<Value *>
+getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
+ ArrayRef<T1> TransitionArgs, ArrayRef<T2> DeoptArgs,
+ ArrayRef<T3> GCArgs) {
+ std::vector<Value *> Args;
+ Args.push_back(B.getInt64(ID));
+ Args.push_back(B.getInt32(NumPatchBytes));
+ Args.push_back(ActualCallee);
+ Args.push_back(B.getInt32(CallArgs.size()));
+ Args.push_back(B.getInt32(Flags));
+ Args.insert(Args.end(), CallArgs.begin(), CallArgs.end());
+ Args.push_back(B.getInt32(TransitionArgs.size()));
+ Args.insert(Args.end(), TransitionArgs.begin(), TransitionArgs.end());
+ Args.push_back(B.getInt32(DeoptArgs.size()));
+ Args.insert(Args.end(), DeoptArgs.begin(), DeoptArgs.end());
+ Args.insert(Args.end(), GCArgs.begin(), GCArgs.end());
+
+ return Args;
+}
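+// The resulting statepoint argument layout, in order:
+//   ID, NumPatchBytes, ActualCallee,
+//   |CallArgs|, Flags, CallArgs...,
+//   |TransitionArgs|, TransitionArgs...,
+//   |DeoptArgs|, DeoptArgs...,
+//   GCArgs... (appended without a leading count)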
+
+template <typename T0, typename T1, typename T2, typename T3>
+static CallInst *CreateGCStatepointCallCommon(
+ IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
+ ArrayRef<T1> TransitionArgs, ArrayRef<T2> DeoptArgs, ArrayRef<T3> GCArgs,
+ const Twine &Name) {
+ // Extract out the type of the callee.
+ PointerType *FuncPtrType = cast<PointerType>(ActualCallee->getType());
+ assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
+ "actual callee must be a callable value");
+
+ Module *M = Builder->GetInsertBlock()->getParent()->getParent();
+ // Fill in the one generically typed argument (the function is also vararg)
+ Type *ArgTypes[] = { FuncPtrType };
+ Function *FnStatepoint =
+ Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
+ ArgTypes);
+
+ std::vector<llvm::Value *> Args =
+ getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
+ CallArgs, TransitionArgs, DeoptArgs, GCArgs);
+ return createCallHelper(FnStatepoint, Args, Builder, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCStatepointCall(
+ uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
+ ArrayRef<Value *> CallArgs, ArrayRef<Value *> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
+ CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCStatepointCall(
+ uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
+ ArrayRef<Use> CallArgs, ArrayRef<Use> TransitionArgs,
+ ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointCallCommon<Use, Use, Use, Value *>(
+ this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
+ DeoptArgs, GCArgs, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCStatepointCall(
+ uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
+ ArrayRef<Use> CallArgs, ArrayRef<Value *> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
+ CallArgs, None, DeoptArgs, GCArgs, Name);
+}
+
+template <typename T0, typename T1, typename T2, typename T3>
+static InvokeInst *CreateGCStatepointInvokeCommon(
+ IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
+ uint32_t Flags, ArrayRef<T0> InvokeArgs, ArrayRef<T1> TransitionArgs,
+ ArrayRef<T2> DeoptArgs, ArrayRef<T3> GCArgs, const Twine &Name) {
+ // Extract out the type of the callee.
+ PointerType *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
+ assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
+ "actual callee must be a callable value");
+
+ Module *M = Builder->GetInsertBlock()->getParent()->getParent();
+ // Fill in the one generically typed argument (the function is also vararg)
+ Function *FnStatepoint = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});
+
+ std::vector<llvm::Value *> Args =
+ getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
+ InvokeArgs, TransitionArgs, DeoptArgs, GCArgs);
+ return createInvokeHelper(FnStatepoint, NormalDest, UnwindDest, Args, Builder,
+ Name);
+}
+
+InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
+ uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest,
+ ArrayRef<Value *> InvokeArgs, ArrayRef<Value *> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
+ uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
+ DeoptArgs, GCArgs, Name);
+}
+
+InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
+ uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
+ ArrayRef<Use> InvokeArgs, ArrayRef<Use> TransitionArgs,
+ ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointInvokeCommon<Use, Use, Use, Value *>(
+ this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
+ InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
+}
+
+InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
+ uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
+ ArrayRef<Value *> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
+ uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
+ Name);
+}
+
+CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
+ Type *ResultType,
+ const Twine &Name) {
+ Intrinsic::ID ID = Intrinsic::experimental_gc_result;
+ Module *M = BB->getParent()->getParent();
+ Type *Types[] = {ResultType};
+ Value *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
+
+ Value *Args[] = {Statepoint};
+ return createCallHelper(FnGCResult, Args, this, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
+ int BaseOffset,
+ int DerivedOffset,
+ Type *ResultType,
+ const Twine &Name) {
+ Module *M = BB->getParent()->getParent();
+ Type *Types[] = {ResultType};
+ Value *FnGCRelocate =
+ Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
+
+ Value *Args[] = {Statepoint,
+ getInt32(BaseOffset),
+ getInt32(DerivedOffset)};
+ return createCallHelper(FnGCRelocate, Args, this, Name);
+}
diff --git a/contrib/llvm/lib/IR/IRPrintingPasses.cpp b/contrib/llvm/lib/IR/IRPrintingPasses.cpp
new file mode 100644
index 0000000..822dbeb
--- /dev/null
+++ b/contrib/llvm/lib/IR/IRPrintingPasses.cpp
@@ -0,0 +1,139 @@
+//===--- IRPrintingPasses.cpp - Module and Function printing passes -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PrintModulePass and PrintFunctionPass implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+PrintModulePass::PrintModulePass() : OS(dbgs()) {}
+PrintModulePass::PrintModulePass(raw_ostream &OS, const std::string &Banner,
+ bool ShouldPreserveUseListOrder)
+ : OS(OS), Banner(Banner),
+ ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {}
+
+PreservedAnalyses PrintModulePass::run(Module &M) {
+ OS << Banner;
+ if (llvm::isFunctionInPrintList("*"))
+ M.print(OS, nullptr, ShouldPreserveUseListOrder);
+ else {
+ for(const auto &F : M.functions())
+ if (llvm::isFunctionInPrintList(F.getName()))
+ F.print(OS);
+ }
+ return PreservedAnalyses::all();
+}
+
+PrintFunctionPass::PrintFunctionPass() : OS(dbgs()) {}
+PrintFunctionPass::PrintFunctionPass(raw_ostream &OS, const std::string &Banner)
+ : OS(OS), Banner(Banner) {}
+
+PreservedAnalyses PrintFunctionPass::run(Function &F) {
+ if (isFunctionInPrintList(F.getName()))
+ OS << Banner << static_cast<Value &>(F);
+ return PreservedAnalyses::all();
+}
+
+namespace {
+
+class PrintModulePassWrapper : public ModulePass {
+ PrintModulePass P;
+
+public:
+ static char ID;
+ PrintModulePassWrapper() : ModulePass(ID) {}
+ PrintModulePassWrapper(raw_ostream &OS, const std::string &Banner,
+ bool ShouldPreserveUseListOrder)
+ : ModulePass(ID), P(OS, Banner, ShouldPreserveUseListOrder) {}
+
+ bool runOnModule(Module &M) override {
+ P.run(M);
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+class PrintFunctionPassWrapper : public FunctionPass {
+ PrintFunctionPass P;
+
+public:
+ static char ID;
+ PrintFunctionPassWrapper() : FunctionPass(ID) {}
+ PrintFunctionPassWrapper(raw_ostream &OS, const std::string &Banner)
+ : FunctionPass(ID), P(OS, Banner) {}
+
+ // This pass just prints a banner followed by the function as it's processed.
+ bool runOnFunction(Function &F) override {
+ P.run(F);
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+class PrintBasicBlockPass : public BasicBlockPass {
+ raw_ostream &Out;
+ std::string Banner;
+
+public:
+ static char ID;
+ PrintBasicBlockPass() : BasicBlockPass(ID), Out(dbgs()) {}
+ PrintBasicBlockPass(raw_ostream &Out, const std::string &Banner)
+ : BasicBlockPass(ID), Out(Out), Banner(Banner) {}
+
+ bool runOnBasicBlock(BasicBlock &BB) override {
+ Out << Banner << BB;
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+}
+
+char PrintModulePassWrapper::ID = 0;
+INITIALIZE_PASS(PrintModulePassWrapper, "print-module",
+ "Print module to stderr", false, false)
+char PrintFunctionPassWrapper::ID = 0;
+INITIALIZE_PASS(PrintFunctionPassWrapper, "print-function",
+ "Print function to stderr", false, false)
+char PrintBasicBlockPass::ID = 0;
+INITIALIZE_PASS(PrintBasicBlockPass, "print-bb", "Print BB to stderr", false,
+ false)
+
+ModulePass *llvm::createPrintModulePass(llvm::raw_ostream &OS,
+ const std::string &Banner,
+ bool ShouldPreserveUseListOrder) {
+ return new PrintModulePassWrapper(OS, Banner, ShouldPreserveUseListOrder);
+}
+
+FunctionPass *llvm::createPrintFunctionPass(llvm::raw_ostream &OS,
+ const std::string &Banner) {
+ return new PrintFunctionPassWrapper(OS, Banner);
+}
+
+BasicBlockPass *llvm::createPrintBasicBlockPass(llvm::raw_ostream &OS,
+ const std::string &Banner) {
+ return new PrintBasicBlockPass(OS, Banner);
+}
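+// Usage sketch: wiring the wrappers into a legacy pipeline (M is an assumed
+// Module; headers as included above plus llvm/IR/LegacyPassManager.h):
+//   legacy::PassManager PM;
+//   PM.add(createPrintModulePass(errs(), "; module dump\n"));
+//   PM.add(createPrintFunctionPass(errs(), "; function dump\n"));
+//   PM.run(M);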
diff --git a/contrib/llvm/lib/IR/InlineAsm.cpp b/contrib/llvm/lib/IR/InlineAsm.cpp
new file mode 100644
index 0000000..15d3b83
--- /dev/null
+++ b/contrib/llvm/lib/IR/InlineAsm.cpp
@@ -0,0 +1,295 @@
+//===-- InlineAsm.cpp - Implement the InlineAsm class ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the InlineAsm class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/InlineAsm.h"
+#include "ConstantsContext.h"
+#include "LLVMContextImpl.h"
+#include "llvm/IR/DerivedTypes.h"
+#include <algorithm>
+#include <cctype>
+using namespace llvm;
+
+// Implement the first virtual method in this class in this file so the
+// InlineAsm vtable is emitted here.
+InlineAsm::~InlineAsm() {
+}
+
+InlineAsm *InlineAsm::get(FunctionType *FTy, StringRef AsmString,
+ StringRef Constraints, bool hasSideEffects,
+ bool isAlignStack, AsmDialect asmDialect) {
+ InlineAsmKeyType Key(AsmString, Constraints, FTy, hasSideEffects,
+ isAlignStack, asmDialect);
+ LLVMContextImpl *pImpl = FTy->getContext().pImpl;
+ return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(FTy), Key);
+}
+
+InlineAsm::InlineAsm(FunctionType *FTy, const std::string &asmString,
+ const std::string &constraints, bool hasSideEffects,
+ bool isAlignStack, AsmDialect asmDialect)
+ : Value(PointerType::getUnqual(FTy), Value::InlineAsmVal),
+ AsmString(asmString), Constraints(constraints), FTy(FTy),
+ HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack),
+ Dialect(asmDialect) {
+
+ // Do various checks on the constraint string and type.
+ assert(Verify(getFunctionType(), constraints) &&
+ "Function type not legal for constraints!");
+}
+
+void InlineAsm::destroyConstant() {
+ getType()->getContext().pImpl->InlineAsms.remove(this);
+ delete this;
+}
+
+FunctionType *InlineAsm::getFunctionType() const {
+ return FTy;
+}
+
+/// Default constructor.
+InlineAsm::ConstraintInfo::ConstraintInfo() :
+ Type(isInput), isEarlyClobber(false),
+ MatchingInput(-1), isCommutative(false),
+ isIndirect(false), isMultipleAlternative(false),
+ currentAlternativeIndex(0) {
+}
+
+/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
+/// fields in this structure. If the constraint string is not understood,
+/// return true, otherwise return false.
+bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
+ InlineAsm::ConstraintInfoVector &ConstraintsSoFar) {
+ StringRef::iterator I = Str.begin(), E = Str.end();
+ unsigned multipleAlternativeCount = Str.count('|') + 1;
+ unsigned multipleAlternativeIndex = 0;
+ ConstraintCodeVector *pCodes = &Codes;
+
+ // Initialize
+ isMultipleAlternative = multipleAlternativeCount > 1;
+ if (isMultipleAlternative) {
+ multipleAlternatives.resize(multipleAlternativeCount);
+ pCodes = &multipleAlternatives[0].Codes;
+ }
+ Type = isInput;
+ isEarlyClobber = false;
+ MatchingInput = -1;
+ isCommutative = false;
+ isIndirect = false;
+ currentAlternativeIndex = 0;
+
+ // Parse prefixes.
+ if (*I == '~') {
+ Type = isClobber;
+ ++I;
+
+ // '{' must immediately follow '~'.
+ if (I != E && *I != '{')
+ return true;
+ } else if (*I == '=') {
+ ++I;
+ Type = isOutput;
+ }
+
+ if (*I == '*') {
+ isIndirect = true;
+ ++I;
+ }
+
+ if (I == E) return true; // Just a prefix, like "==" or "~".
+
+ // Parse the modifiers.
+ bool DoneWithModifiers = false;
+ while (!DoneWithModifiers) {
+ switch (*I) {
+ default:
+ DoneWithModifiers = true;
+ break;
+ case '&': // Early clobber.
+ if (Type != isOutput || // Cannot early clobber anything but output.
+ isEarlyClobber) // Reject &&&&&&
+ return true;
+ isEarlyClobber = true;
+ break;
+ case '%': // Commutative.
+ if (Type == isClobber || // Cannot commute clobbers.
+ isCommutative) // Reject %%%%%
+ return true;
+ isCommutative = true;
+ break;
+ case '#': // Comment.
+ case '*': // Register preferencing.
+ return true; // Not supported.
+ }
+
+ if (!DoneWithModifiers) {
+ ++I;
+ if (I == E) return true; // Just prefixes and modifiers!
+ }
+ }
+
+ // Parse the various constraints.
+ while (I != E) {
+ if (*I == '{') { // Physical register reference.
+ // Find the end of the register name.
+ StringRef::iterator ConstraintEnd = std::find(I+1, E, '}');
+ if (ConstraintEnd == E) return true; // "{foo"
+ pCodes->push_back(std::string(I, ConstraintEnd+1));
+ I = ConstraintEnd+1;
+ } else if (isdigit(static_cast<unsigned char>(*I))) { // Matching Constraint
+ // Maximal munch numbers.
+ StringRef::iterator NumStart = I;
+ while (I != E && isdigit(static_cast<unsigned char>(*I)))
+ ++I;
+ pCodes->push_back(std::string(NumStart, I));
+ unsigned N = atoi(pCodes->back().c_str());
+ // Check that this is a valid matching constraint!
+ if (N >= ConstraintsSoFar.size() || ConstraintsSoFar[N].Type != isOutput||
+ Type != isInput)
+ return true; // Invalid constraint number.
+
+ // If Operand N already has a matching input, reject this. An output
+ // can't be constrained to the same value as multiple inputs.
+ if (isMultipleAlternative) {
+ if (multipleAlternativeIndex >=
+ ConstraintsSoFar[N].multipleAlternatives.size())
+ return true;
+ InlineAsm::SubConstraintInfo &scInfo =
+ ConstraintsSoFar[N].multipleAlternatives[multipleAlternativeIndex];
+ if (scInfo.MatchingInput != -1)
+ return true;
+ // Note that operand #n has a matching input.
+ scInfo.MatchingInput = ConstraintsSoFar.size();
+ } else {
+ if (ConstraintsSoFar[N].hasMatchingInput() &&
+ (size_t)ConstraintsSoFar[N].MatchingInput !=
+ ConstraintsSoFar.size())
+ return true;
+ // Note that operand #n has a matching input.
+ ConstraintsSoFar[N].MatchingInput = ConstraintsSoFar.size();
+ }
+ } else if (*I == '|') {
+ multipleAlternativeIndex++;
+ pCodes = &multipleAlternatives[multipleAlternativeIndex].Codes;
+ ++I;
+ } else if (*I == '^') {
+ // Multi-letter constraint
+ // FIXME: For now assuming these are 2-character constraints.
+ pCodes->push_back(std::string(I+1, I+3));
+ I += 3;
+ } else {
+ // Single letter constraint.
+ pCodes->push_back(std::string(I, I+1));
+ ++I;
+ }
+ }
+
+ return false;
+}
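+// Examples this grammar accepts (one constraint per string):
+//   "=r"        output in any general register
+//   "=&{eax}"   early-clobber output pinned to register EAX
+//   "0"         input tied to output operand #0 (a matching constraint)
+//   "~{memory}" clobber
+// A bare prefix such as "=" or "~", and the '#' (comment) and '*' (register
+// preferencing) modifiers, are rejected.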
+
+/// selectAlternative - Point this constraint to the alternative constraint
+/// indicated by the index.
+void InlineAsm::ConstraintInfo::selectAlternative(unsigned index) {
+ if (index < multipleAlternatives.size()) {
+ currentAlternativeIndex = index;
+ InlineAsm::SubConstraintInfo &scInfo =
+ multipleAlternatives[currentAlternativeIndex];
+ MatchingInput = scInfo.MatchingInput;
+ Codes = scInfo.Codes;
+ }
+}
+
+InlineAsm::ConstraintInfoVector
+InlineAsm::ParseConstraints(StringRef Constraints) {
+ ConstraintInfoVector Result;
+
+ // Scan the constraints string.
+ for (StringRef::iterator I = Constraints.begin(),
+ E = Constraints.end(); I != E; ) {
+ ConstraintInfo Info;
+
+ // Find the end of this constraint.
+ StringRef::iterator ConstraintEnd = std::find(I, E, ',');
+
+ if (ConstraintEnd == I || // Empty constraint like ",,"
+ Info.Parse(StringRef(I, ConstraintEnd-I), Result)) {
+ Result.clear(); // Erroneous constraint?
+ break;
+ }
+
+ Result.push_back(Info);
+
+ // ConstraintEnd may be either the next comma or the end of the string. In
+ // the former case, we skip the comma.
+ I = ConstraintEnd;
+ if (I != E) {
+ ++I;
+ if (I == E) {
+ Result.clear();
+ break;
+ } // don't allow "xyz,"
+ }
+ }
+
+ return Result;
+}
+
+/// Verify - Verify that the specified constraint string is reasonable for the
+/// specified function type, and otherwise validate the constraint string.
+bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
+ if (Ty->isVarArg()) return false;
+
+ ConstraintInfoVector Constraints = ParseConstraints(ConstStr);
+
+ // Error parsing constraints.
+ if (Constraints.empty() && !ConstStr.empty()) return false;
+
+ unsigned NumOutputs = 0, NumInputs = 0, NumClobbers = 0;
+ unsigned NumIndirect = 0;
+
+ for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
+ switch (Constraints[i].Type) {
+ case InlineAsm::isOutput:
+ if ((NumInputs-NumIndirect) != 0 || NumClobbers != 0)
+ return false; // outputs before inputs and clobbers.
+ if (!Constraints[i].isIndirect) {
+ ++NumOutputs;
+ break;
+ }
+ ++NumIndirect;
+ // FALLTHROUGH for Indirect Outputs.
+ case InlineAsm::isInput:
+ if (NumClobbers) return false; // inputs before clobbers.
+ ++NumInputs;
+ break;
+ case InlineAsm::isClobber:
+ ++NumClobbers;
+ break;
+ }
+ }
+
+ switch (NumOutputs) {
+ case 0:
+ if (!Ty->getReturnType()->isVoidTy()) return false;
+ break;
+ case 1:
+ if (Ty->getReturnType()->isStructTy()) return false;
+ break;
+ default:
+ StructType *STy = dyn_cast<StructType>(Ty->getReturnType());
+ if (!STy || STy->getNumElements() != NumOutputs)
+ return false;
+ break;
+ }
+
+ if (Ty->getNumParams() != NumInputs) return false;
+ return true;
+}
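+// Usage sketch (hypothetical helper, assuming llvm/IR/IRBuilder.h and x86
+// AT&T asm): "=r,r" declares one register output and one register input,
+// which Verify accepts for an i32(i32) signature.
+static Value *emitByteSwap(IRBuilder<> &Builder, Value *X) {
+ FunctionType *FTy =
+ FunctionType::get(X->getType(), {X->getType()}, /*isVarArg=*/false);
+ InlineAsm *IA = InlineAsm::get(FTy, "bswap $0", "=r,r",
+ /*hasSideEffects=*/false);
+ return Builder.CreateCall(IA, {X});
+}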
diff --git a/contrib/llvm/lib/IR/Instruction.cpp b/contrib/llvm/lib/IR/Instruction.cpp
new file mode 100644
index 0000000..4b33d2e
--- /dev/null
+++ b/contrib/llvm/lib/IR/Instruction.cpp
@@ -0,0 +1,586 @@
+//===-- Instruction.cpp - Implement the Instruction class -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Instruction class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+using namespace llvm;
+
+Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
+ Instruction *InsertBefore)
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
+
+ // If requested, insert this instruction into a basic block...
+ if (InsertBefore) {
+ BasicBlock *BB = InsertBefore->getParent();
+ assert(BB && "Instruction to insert before is not in a basic block!");
+ BB->getInstList().insert(InsertBefore->getIterator(), this);
+ }
+}
+
+Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
+ BasicBlock *InsertAtEnd)
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
+
+ // Append this instruction to the end of the basic block.
+ assert(InsertAtEnd && "Basic block to append to may not be NULL!");
+ InsertAtEnd->getInstList().push_back(this);
+}
+
+
+// Out of line virtual method, so the vtable, etc has a home.
+Instruction::~Instruction() {
+ assert(!Parent && "Instruction still linked in the program!");
+ if (hasMetadataHashEntry())
+ clearMetadataHashEntries();
+}
+
+
+void Instruction::setParent(BasicBlock *P) {
+ Parent = P;
+}
+
+const Module *Instruction::getModule() const {
+ return getParent()->getModule();
+}
+
+Module *Instruction::getModule() {
+ return getParent()->getModule();
+}
+
+Function *Instruction::getFunction() { return getParent()->getParent(); }
+
+const Function *Instruction::getFunction() const {
+ return getParent()->getParent();
+}
+
+void Instruction::removeFromParent() {
+ getParent()->getInstList().remove(getIterator());
+}
+
+iplist<Instruction>::iterator Instruction::eraseFromParent() {
+ return getParent()->getInstList().erase(getIterator());
+}
+
+/// Insert an unlinked instruction into a basic block immediately before the
+/// specified instruction.
+void Instruction::insertBefore(Instruction *InsertPos) {
+ InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
+}
+
+/// Insert an unlinked instruction into a basic block immediately after the
+/// specified instruction.
+void Instruction::insertAfter(Instruction *InsertPos) {
+ InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
+ this);
+}
+
+/// Unlink this instruction from its current basic block and insert it into the
+/// basic block that MovePos lives in, right before MovePos.
+void Instruction::moveBefore(Instruction *MovePos) {
+ MovePos->getParent()->getInstList().splice(
+ MovePos->getIterator(), getParent()->getInstList(), getIterator());
+}
+
+/// Set or clear the unsafe-algebra flag on this instruction, which must be an
+/// operator which supports this flag. See LangRef.html for the meaning of this
+/// flag.
+void Instruction::setHasUnsafeAlgebra(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasUnsafeAlgebra(B);
+}
+
+/// Set or clear the NoNaNs flag on this instruction, which must be an operator
+/// which supports this flag. See LangRef.html for the meaning of this flag.
+void Instruction::setHasNoNaNs(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasNoNaNs(B);
+}
+
+/// Set or clear the no-infs flag on this instruction, which must be an operator
+/// which supports this flag. See LangRef.html for the meaning of this flag.
+void Instruction::setHasNoInfs(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasNoInfs(B);
+}
+
+/// Set or clear the no-signed-zeros flag on this instruction, which must be an
+/// operator which supports this flag. See LangRef.html for the meaning of this
+/// flag.
+void Instruction::setHasNoSignedZeros(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
+}
+
+/// Set or clear the allow-reciprocal flag on this instruction, which must be an
+/// operator which supports this flag. See LangRef.html for the meaning of this
+/// flag.
+void Instruction::setHasAllowReciprocal(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
+}
+
+/// Convenience function for setting all the fast-math flags on this
+/// instruction, which must be an operator that supports these flags. See
+/// LangRef.html for the meaning of these flags.
+void Instruction::setFastMathFlags(FastMathFlags FMF) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setFastMathFlags(FMF);
+}
+
+void Instruction::copyFastMathFlags(FastMathFlags FMF) {
+ assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
+}
+
+/// Determine whether the unsafe-algebra flag is set.
+bool Instruction::hasUnsafeAlgebra() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
+}
+
+/// Determine whether the no-NaNs flag is set.
+bool Instruction::hasNoNaNs() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasNoNaNs();
+}
+
+/// Determine whether the no-infs flag is set.
+bool Instruction::hasNoInfs() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasNoInfs();
+}
+
+/// Determine whether the no-signed-zeros flag is set.
+bool Instruction::hasNoSignedZeros() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasNoSignedZeros();
+}
+
+/// Determine whether the allow-reciprocal flag is set.
+bool Instruction::hasAllowReciprocal() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasAllowReciprocal();
+}
+
+/// Convenience function for getting all the fast-math flags on this
+/// instruction, which must be an operator that supports these flags. See
+/// LangRef.html for the meaning of these flags.
+FastMathFlags Instruction::getFastMathFlags() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->getFastMathFlags();
+}
+
+/// Copy I's fast-math flags.
+void Instruction::copyFastMathFlags(const Instruction *I) {
+ copyFastMathFlags(I->getFastMathFlags());
+}
+
+
+const char *Instruction::getOpcodeName(unsigned OpCode) {
+ switch (OpCode) {
+ // Terminators
+ case Ret: return "ret";
+ case Br: return "br";
+ case Switch: return "switch";
+ case IndirectBr: return "indirectbr";
+ case Invoke: return "invoke";
+ case Resume: return "resume";
+ case Unreachable: return "unreachable";
+ case CleanupRet: return "cleanupret";
+ case CatchRet: return "catchret";
+ case CatchPad: return "catchpad";
+ case CatchSwitch: return "catchswitch";
+
+ // Standard binary operators...
+ case Add: return "add";
+ case FAdd: return "fadd";
+ case Sub: return "sub";
+ case FSub: return "fsub";
+ case Mul: return "mul";
+ case FMul: return "fmul";
+ case UDiv: return "udiv";
+ case SDiv: return "sdiv";
+ case FDiv: return "fdiv";
+ case URem: return "urem";
+ case SRem: return "srem";
+ case FRem: return "frem";
+
+ // Logical operators...
+ case And: return "and";
+ case Or : return "or";
+ case Xor: return "xor";
+
+ // Memory instructions...
+ case Alloca: return "alloca";
+ case Load: return "load";
+ case Store: return "store";
+ case AtomicCmpXchg: return "cmpxchg";
+ case AtomicRMW: return "atomicrmw";
+ case Fence: return "fence";
+ case GetElementPtr: return "getelementptr";
+
+ // Convert instructions...
+ case Trunc: return "trunc";
+ case ZExt: return "zext";
+ case SExt: return "sext";
+ case FPTrunc: return "fptrunc";
+ case FPExt: return "fpext";
+ case FPToUI: return "fptoui";
+ case FPToSI: return "fptosi";
+ case UIToFP: return "uitofp";
+ case SIToFP: return "sitofp";
+ case IntToPtr: return "inttoptr";
+ case PtrToInt: return "ptrtoint";
+ case BitCast: return "bitcast";
+ case AddrSpaceCast: return "addrspacecast";
+
+ // Other instructions...
+ case ICmp: return "icmp";
+ case FCmp: return "fcmp";
+ case PHI: return "phi";
+ case Select: return "select";
+ case Call: return "call";
+ case Shl: return "shl";
+ case LShr: return "lshr";
+ case AShr: return "ashr";
+ case VAArg: return "va_arg";
+ case ExtractElement: return "extractelement";
+ case InsertElement: return "insertelement";
+ case ShuffleVector: return "shufflevector";
+ case ExtractValue: return "extractvalue";
+ case InsertValue: return "insertvalue";
+ case LandingPad: return "landingpad";
+ case CleanupPad: return "cleanuppad";
+
+ default: return "<Invalid operator> ";
+ }
+}
+
+/// Return true if both instructions have the same special state.
+/// This must be kept in sync with lib/Transforms/IPO/MergeFunctions.cpp.
+static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
+ bool IgnoreAlignment = false) {
+ assert(I1->getOpcode() == I2->getOpcode() &&
+ "Can not compare special state of different instructions");
+
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
+ return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
+ (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
+ IgnoreAlignment) &&
+ LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
+ LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
+ if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
+ return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
+ (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
+ IgnoreAlignment) &&
+ SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
+ SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
+ if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
+ return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
+ if (const CallInst *CI = dyn_cast<CallInst>(I1))
+ return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
+ CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
+ CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
+ if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
+ return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
+ CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
+ if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
+ return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
+ if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
+ return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
+ if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
+ return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
+ FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
+ if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
+ return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+ CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
+ CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
+ return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
+ RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+ RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
+ RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();
+
+ return true;
+}
+
+/// isIdenticalTo - Return true if the specified instruction is exactly
+/// identical to the current one. This means that all operands match and any
+/// extra information (e.g. load is volatile) agree.
+bool Instruction::isIdenticalTo(const Instruction *I) const {
+ return isIdenticalToWhenDefined(I) &&
+ SubclassOptionalData == I->SubclassOptionalData;
+}
+
+/// isIdenticalToWhenDefined - This is like isIdenticalTo, except that it
+/// ignores the SubclassOptionalData flags, which specify conditions
+/// under which the instruction's result is undefined.
+bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
+ if (getOpcode() != I->getOpcode() ||
+ getNumOperands() != I->getNumOperands() ||
+ getType() != I->getType())
+ return false;
+
+ // If both instructions have no operands, they are identical.
+ if (getNumOperands() == 0 && I->getNumOperands() == 0)
+ return haveSameSpecialState(this, I);
+
+ // We have two instructions of identical opcode and #operands. Check to see
+ // if all operands are the same.
+ if (!std::equal(op_begin(), op_end(), I->op_begin()))
+ return false;
+
+ if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
+ const PHINode *otherPHI = cast<PHINode>(I);
+ return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
+ otherPHI->block_begin());
+ }
+
+ return haveSameSpecialState(this, I);
+}
+
+// isSameOperationAs
+// This should be kept in sync with isEquivalentOperation in
+// lib/Transforms/IPO/MergeFunctions.cpp.
+bool Instruction::isSameOperationAs(const Instruction *I,
+ unsigned flags) const {
+ bool IgnoreAlignment = flags & CompareIgnoringAlignment;
+ bool UseScalarTypes = flags & CompareUsingScalarTypes;
+
+ if (getOpcode() != I->getOpcode() ||
+ getNumOperands() != I->getNumOperands() ||
+ (UseScalarTypes ?
+ getType()->getScalarType() != I->getType()->getScalarType() :
+ getType() != I->getType()))
+ return false;
+
+ // We have two instructions of identical opcode and #operands. Check to see
+ // if all operands are the same type
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ if (UseScalarTypes ?
+ getOperand(i)->getType()->getScalarType() !=
+ I->getOperand(i)->getType()->getScalarType() :
+ getOperand(i)->getType() != I->getOperand(i)->getType())
+ return false;
+
+ return haveSameSpecialState(this, I, IgnoreAlignment);
+}
+
+/// isUsedOutsideOfBlock - Return true if there are any uses of I outside of the
+/// specified block. Note that PHI nodes are considered to evaluate their
+/// operands in the corresponding predecessor block.
+bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
+ for (const Use &U : uses()) {
+ // PHI nodes use values in the corresponding predecessor block. For other
+ // instructions, just check to see whether the parent of the use matches up.
+ const Instruction *I = cast<Instruction>(U.getUser());
+ const PHINode *PN = dyn_cast<PHINode>(I);
+ if (!PN) {
+ if (I->getParent() != BB)
+ return true;
+ continue;
+ }
+
+ if (PN->getIncomingBlock(U) != BB)
+ return true;
+ }
+ return false;
+}
+
+/// mayReadFromMemory - Return true if this instruction may read memory.
+///
+bool Instruction::mayReadFromMemory() const {
+ switch (getOpcode()) {
+ default: return false;
+ case Instruction::VAArg:
+ case Instruction::Load:
+ case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::CatchPad:
+ case Instruction::CatchRet:
+ return true;
+ case Instruction::Call:
+ return !cast<CallInst>(this)->doesNotAccessMemory();
+ case Instruction::Invoke:
+ return !cast<InvokeInst>(this)->doesNotAccessMemory();
+ case Instruction::Store:
+ return !cast<StoreInst>(this)->isUnordered();
+ }
+}
+
+/// mayWriteToMemory - Return true if this instruction may modify memory.
+///
+bool Instruction::mayWriteToMemory() const {
+ switch (getOpcode()) {
+ default: return false;
+ case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
+ case Instruction::Store:
+ case Instruction::VAArg:
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::CatchPad:
+ case Instruction::CatchRet:
+ return true;
+ case Instruction::Call:
+ return !cast<CallInst>(this)->onlyReadsMemory();
+ case Instruction::Invoke:
+ return !cast<InvokeInst>(this)->onlyReadsMemory();
+ case Instruction::Load:
+ return !cast<LoadInst>(this)->isUnordered();
+ }
+}
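+
+// Usage sketch (illustrative only): hoisting and speculation passes usually
+// combine the two queries above with mayThrow() below; a hypothetical
+// helper:
+//
+//   static bool isSideEffectFree(const Instruction *I) {
+//     return !I->mayReadFromMemory() && !I->mayWriteToMemory() &&
+//            !I->mayThrow();
+//   }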
+
+bool Instruction::isAtomic() const {
+ switch (getOpcode()) {
+ default:
+ return false;
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::Fence:
+ return true;
+ case Instruction::Load:
+ return cast<LoadInst>(this)->getOrdering() != NotAtomic;
+ case Instruction::Store:
+ return cast<StoreInst>(this)->getOrdering() != NotAtomic;
+ }
+}
+
+bool Instruction::mayThrow() const {
+ if (const CallInst *CI = dyn_cast<CallInst>(this))
+ return !CI->doesNotThrow();
+ if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
+ return CRI->unwindsToCaller();
+ if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
+ return CatchSwitch->unwindsToCaller();
+ return isa<ResumeInst>(this);
+}
+
+bool Instruction::mayReturn() const {
+ if (const CallInst *CI = dyn_cast<CallInst>(this))
+ return !CI->doesNotReturn();
+ return true;
+}
+
+/// isAssociative - Return true if the instruction is associative:
+///
+/// Associative operators satisfy: x op (y op z) === (x op y) op z
+///
+/// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
+///
+bool Instruction::isAssociative(unsigned Opcode) {
+ return Opcode == And || Opcode == Or || Opcode == Xor ||
+ Opcode == Add || Opcode == Mul;
+}
+
+bool Instruction::isAssociative() const {
+ unsigned Opcode = getOpcode();
+ if (isAssociative(Opcode))
+ return true;
+
+ switch (Opcode) {
+ case FMul:
+ case FAdd:
+ return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
+ default:
+ return false;
+ }
+}
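+
+// Usage sketch (illustrative only; 'BO' is a hypothetical BinaryOperator*):
+// reassociation gates on this predicate, which for FAdd/FMul holds only
+// under unsafe-algebra fast-math flags:
+//
+//   bool CanReassociate = BO->isAssociative(); // x op (y op z) == (x op y) op z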
+
+/// isCommutative - Return true if the instruction is commutative:
+///
+/// Commutative operators satisfy: (x op y) === (y op x)
+///
+/// In LLVM, these are the associative operators plus FAdd and FMul. (Equality
+/// comparisons are commutative too, but are handled by CmpInst::isCommutative.)
+///
+bool Instruction::isCommutative(unsigned op) {
+ switch (op) {
+ case Add:
+ case FAdd:
+ case Mul:
+ case FMul:
+ case And:
+ case Or:
+ case Xor:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// isIdempotent - Return true if the instruction is idempotent:
+///
+/// Idempotent operators satisfy: x op x === x
+///
+/// In LLVM, the And and Or operators are idempotent.
+///
+bool Instruction::isIdempotent(unsigned Opcode) {
+ return Opcode == And || Opcode == Or;
+}
+
+/// isNilpotent - Return true if the instruction is nilpotent:
+///
+/// Nilpotent operators satisfy: x op x === Id,
+///
+/// where Id is the identity for the operator, i.e. a constant such that
+/// x op Id === x and Id op x === x for all x.
+///
+/// In LLVM, the Xor operator is nilpotent.
+///
+bool Instruction::isNilpotent(unsigned Opcode) {
+ return Opcode == Xor;
+}
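+
+// Sketch of the identities these predicates license (illustrative only):
+//
+//   and i32 %x, %x  -->  %x    ; idempotent
+//   or  i32 %x, %x  -->  %x    ; idempotent
+//   xor i32 %x, %x  -->  0     ; nilpotent: the identity of xor is 0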
+
+Instruction *Instruction::cloneImpl() const {
+ llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
+}
+
+Instruction *Instruction::clone() const {
+ Instruction *New = nullptr;
+ switch (getOpcode()) {
+ default:
+ llvm_unreachable("Unhandled Opcode.");
+#define HANDLE_INST(num, opc, clas) \
+ case Instruction::opc: \
+ New = cast<clas>(this)->cloneImpl(); \
+ break;
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_INST
+ }
+
+ New->SubclassOptionalData = SubclassOptionalData;
+ if (!hasMetadata())
+ return New;
+
+ // Otherwise, enumerate and copy over metadata from the old instruction to the
+ // new one.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
+ getAllMetadataOtherThanDebugLoc(TheMDs);
+ for (const auto &MD : TheMDs)
+ New->setMetadata(MD.first, MD.second);
+
+ New->setDebugLoc(getDebugLoc());
+ return New;
+}
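+
+// Usage sketch (illustrative only; 'I' is a hypothetical instruction):
+// clone() returns a detached copy, so the caller must insert it and,
+// typically, give it a fresh name:
+//
+//   Instruction *NewI = I->clone();          // flags + metadata copied above
+//   NewI->setName(I->getName() + ".clone");
+//   NewI->insertBefore(I);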
diff --git a/contrib/llvm/lib/IR/Instructions.cpp b/contrib/llvm/lib/IR/Instructions.cpp
new file mode 100644
index 0000000..7c64ca7
--- /dev/null
+++ b/contrib/llvm/lib/IR/Instructions.cpp
@@ -0,0 +1,3977 @@
+//===-- Instructions.cpp - Implement the LLVM instructions ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements all of the non-inline methods for the LLVM instruction
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Instructions.h"
+#include "LLVMContextImpl.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// CallSite Class
+//===----------------------------------------------------------------------===//
+
+User::op_iterator CallSite::getCallee() const {
+ Instruction *II(getInstruction());
+ return isCall()
+ ? cast<CallInst>(II)->op_end() - 1 // Skip Callee
+ : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
+}
+
+//===----------------------------------------------------------------------===//
+// TerminatorInst Class
+//===----------------------------------------------------------------------===//
+
+// Out of line virtual method, so the vtable, etc has a home.
+TerminatorInst::~TerminatorInst() {
+}
+
+//===----------------------------------------------------------------------===//
+// UnaryInstruction Class
+//===----------------------------------------------------------------------===//
+
+// Out of line virtual method, so the vtable, etc has a home.
+UnaryInstruction::~UnaryInstruction() {
+}
+
+//===----------------------------------------------------------------------===//
+// SelectInst Class
+//===----------------------------------------------------------------------===//
+
+/// areInvalidOperands - Return a string if the specified operands are invalid
+/// for a select operation, otherwise return null.
+const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
+ if (Op1->getType() != Op2->getType())
+ return "both values to select must have same type";
+
+ if (Op1->getType()->isTokenTy())
+ return "select values cannot have token type";
+
+ if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
+ // Vector select.
+ if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
+ return "vector select condition element type must be i1";
+ VectorType *ET = dyn_cast<VectorType>(Op1->getType());
+ if (!ET)
+ return "selected values for vector select must be vectors";
+ if (ET->getNumElements() != VT->getNumElements())
+ return "vector select requires selected vectors to have "
+ "the same vector length as select condition";
+ } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
+ return "select condition must be i1 or <n x i1>";
+ }
+ return nullptr;
+}
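+
+// Usage sketch (illustrative only; 'Cond', 'X', 'Y', and 'IP' are
+// hypothetical): builders can validate operands up front instead of
+// tripping the verifier later:
+//
+//   if (const char *Err = SelectInst::areInvalidOperands(Cond, X, Y))
+//     report_fatal_error(Err);
+//   Value *Sel = SelectInst::Create(Cond, X, Y, "sel", IP);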
+
+
+//===----------------------------------------------------------------------===//
+// PHINode Class
+//===----------------------------------------------------------------------===//
+
+void PHINode::anchor() {}
+
+PHINode::PHINode(const PHINode &PN)
+ : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
+ ReservedSpace(PN.getNumOperands()) {
+ allocHungoffUses(PN.getNumOperands());
+ std::copy(PN.op_begin(), PN.op_end(), op_begin());
+ std::copy(PN.block_begin(), PN.block_end(), block_begin());
+ SubclassOptionalData = PN.SubclassOptionalData;
+}
+
+// removeIncomingValue - Remove an incoming value. This is useful if a
+// predecessor basic block is deleted.
+Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
+ Value *Removed = getIncomingValue(Idx);
+
+ // Move everything after this operand down.
+ //
+ // FIXME: we could just swap with the end of the list, then erase. However,
+ // clients might not expect this to happen. The code as it is thrashes the
+ // use/def lists more than necessary.
+ std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
+ std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
+
+ // Nuke the last value.
+ Op<-1>().set(nullptr);
+ setNumHungOffUseOperands(getNumOperands() - 1);
+
+ // If the PHI node is dead, because it has zero entries, nuke it now.
+ if (getNumOperands() == 0 && DeletePHIIfEmpty) {
+ // If anyone is using this PHI, make them use a dummy value instead...
+ replaceAllUsesWith(UndefValue::get(getType()));
+ eraseFromParent();
+ }
+ return Removed;
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, increasing the number of operands by 1.5x.
+///
+void PHINode::growOperands() {
+ unsigned e = getNumOperands();
+ unsigned NumOps = e + e / 2;
+ if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
+
+ ReservedSpace = NumOps;
+ growHungoffUses(ReservedSpace, /* IsPhi */ true);
+}
+
+/// hasConstantValue - If the specified PHI node always merges together the same
+/// value, return the value, otherwise return null.
+Value *PHINode::hasConstantValue() const {
+ // Exploit the fact that phi nodes always have at least one entry.
+ Value *ConstantValue = getIncomingValue(0);
+ for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
+ if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
+ if (ConstantValue != this)
+ return nullptr; // Incoming values not all the same.
+ // ConstantValue so far was this PHI itself; adopt the new incoming value.
+ ConstantValue = getIncomingValue(i);
+ }
+ if (ConstantValue == this)
+ return UndefValue::get(getType());
+ return ConstantValue;
+}
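+
+// Usage sketch (illustrative only; 'PN' is a hypothetical PHINode*): the
+// classic client removes trivially redundant PHIs:
+//
+//   if (Value *V = PN->hasConstantValue()) {
+//     PN->replaceAllUsesWith(V);
+//     PN->eraseFromParent();
+//   }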
+
+//===----------------------------------------------------------------------===//
+// LandingPadInst Implementation
+//===----------------------------------------------------------------------===//
+
+LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, Instruction *InsertBefore)
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
+ init(NumReservedValues, NameStr);
+}
+
+LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
+ init(NumReservedValues, NameStr);
+}
+
+LandingPadInst::LandingPadInst(const LandingPadInst &LP)
+ : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
+ LP.getNumOperands()),
+ ReservedSpace(LP.getNumOperands()) {
+ allocHungoffUses(LP.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = LP.getOperandList();
+ for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
+ OL[I] = InOL[I];
+
+ setCleanup(LP.isCleanup());
+}
+
+LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
+ const Twine &NameStr,
+ Instruction *InsertBefore) {
+ return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
+}
+
+LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
+}
+
+void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
+ ReservedSpace = NumReservedValues;
+ setNumHungOffUseOperands(0);
+ allocHungoffUses(ReservedSpace);
+ setName(NameStr);
+ setCleanup(false);
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, doubling the number of operands.
+void LandingPadInst::growOperands(unsigned Size) {
+ unsigned e = getNumOperands();
+ if (ReservedSpace >= e + Size) return;
+ ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
+ growHungoffUses(ReservedSpace);
+}
+
+void LandingPadInst::addClause(Constant *Val) {
+ unsigned OpNo = getNumOperands();
+ growOperands(1);
+ assert(OpNo < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(getNumOperands() + 1);
+ getOperandList()[OpNo] = Val;
+}
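+
+// Usage sketch (illustrative only; 'ExnTy', 'Int8PtrTy', and 'IP' are
+// hypothetical, and the enclosing function needs a personality): a
+// catch-all landingpad is built clause by clause:
+//
+//   LandingPadInst *LP =
+//       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad", IP);
+//   LP->addClause(ConstantPointerNull::get(Int8PtrTy)); // catch i8* null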
+
+//===----------------------------------------------------------------------===//
+// CallInst Implementation
+//===----------------------------------------------------------------------===//
+
+CallInst::~CallInst() {
+}
+
+void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
+ this->FTy = FTy;
+ assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
+ "NumOperands not set up?");
+ Op<-1>() = Func;
+
+#ifndef NDEBUG
+ assert((Args.size() == FTy->getNumParams() ||
+ (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
+ "Calling a function with bad signature!");
+
+ for (unsigned i = 0; i != Args.size(); ++i)
+ assert((i >= FTy->getNumParams() ||
+ FTy->getParamType(i) == Args[i]->getType()) &&
+ "Calling a function with a bad signature!");
+#endif
+
+ std::copy(Args.begin(), Args.end(), op_begin());
+
+ auto It = populateBundleOperandInfos(Bundles, Args.size());
+ (void)It;
+ assert(It + 1 == op_end() && "Should add up!");
+
+ setName(NameStr);
+}
+
+void CallInst::init(Value *Func, const Twine &NameStr) {
+ FTy =
+ cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
+ assert(getNumOperands() == 1 && "NumOperands not set up?");
+ Op<-1>() = Func;
+
+ assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
+
+ setName(NameStr);
+}
+
+CallInst::CallInst(Value *Func, const Twine &Name,
+ Instruction *InsertBefore)
+ : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
+ ->getElementType())->getReturnType(),
+ Instruction::Call,
+ OperandTraits<CallInst>::op_end(this) - 1,
+ 1, InsertBefore) {
+ init(Func, Name);
+}
+
+CallInst::CallInst(Value *Func, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
+ ->getElementType())->getReturnType(),
+ Instruction::Call,
+ OperandTraits<CallInst>::op_end(this) - 1,
+ 1, InsertAtEnd) {
+ init(Func, Name);
+}
+
+CallInst::CallInst(const CallInst &CI)
+ : Instruction(CI.getType(), Instruction::Call,
+ OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(),
+ CI.getNumOperands()),
+ AttributeList(CI.AttributeList), FTy(CI.FTy) {
+ setTailCallKind(CI.getTailCallKind());
+ setCallingConv(CI.getCallingConv());
+
+ std::copy(CI.op_begin(), CI.op_end(), op_begin());
+ std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
+ bundle_op_info_begin());
+ SubclassOptionalData = CI.SubclassOptionalData;
+}
+
+CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
+ Instruction *InsertPt) {
+ std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
+
+ auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
+ InsertPt);
+ NewCI->setTailCallKind(CI->getTailCallKind());
+ NewCI->setCallingConv(CI->getCallingConv());
+ NewCI->SubclassOptionalData = CI->SubclassOptionalData;
+ NewCI->setAttributes(CI->getAttributes());
+ return NewCI;
+}
+
+void CallInst::addAttribute(unsigned i, Attribute::AttrKind attr) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addAttribute(getContext(), i, attr);
+ setAttributes(PAL);
+}
+
+void CallInst::addAttribute(unsigned i, StringRef Kind, StringRef Value) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addAttribute(getContext(), i, Kind, Value);
+ setAttributes(PAL);
+}
+
+void CallInst::removeAttribute(unsigned i, Attribute attr) {
+ AttributeSet PAL = getAttributes();
+ AttrBuilder B(attr);
+ LLVMContext &Context = getContext();
+ PAL = PAL.removeAttributes(Context, i,
+ AttributeSet::get(Context, i, B));
+ setAttributes(PAL);
+}
+
+void CallInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+}
+
+void CallInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+}
+
+bool CallInst::paramHasAttr(unsigned i, Attribute::AttrKind A) const {
+ assert(i < (getNumArgOperands() + 1) && "Param index out of bounds!");
+
+ if (AttributeList.hasAttribute(i, A))
+ return true;
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(i, A);
+ return false;
+}
+
+bool CallInst::dataOperandHasImpliedAttr(unsigned i,
+ Attribute::AttrKind A) const {
+
+ // There are getNumOperands() - 1 data operands. The last operand is the
+ // callee.
+ assert(i < getNumOperands() && "Data operand index out of bounds!");
+
+ // The attribute A can either be directly specified, if the operand in
+ // question is a call argument; or be indirectly implied by the kind of its
+ // containing operand bundle, if the operand is a bundle operand.
+
+ if (i < (getNumArgOperands() + 1))
+ return paramHasAttr(i, A);
+
+ assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
+ "Must be either a call argument or an operand bundle!");
+ return bundleOperandHasAttr(i - 1, A);
+}
+
+/// IsConstantOne - Return true only if val is the constant integer 1.
+static bool IsConstantOne(Value *val) {
+ assert(val && "IsConstantOne does not work with nullptr val");
+ const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
+ return CVal && CVal->isOne();
+}
+
+static Instruction *createMalloc(Instruction *InsertBefore,
+ BasicBlock *InsertAtEnd, Type *IntPtrTy,
+ Type *AllocTy, Value *AllocSize,
+ Value *ArraySize, Function *MallocF,
+ const Twine &Name) {
+ assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
+ "createMalloc needs either InsertBefore or InsertAtEnd");
+
+ // malloc(type) becomes:
+ // bitcast (i8* malloc(typeSize)) to type*
+ // malloc(type, arraySize) becomes:
+ // bitcast (i8 *malloc(typeSize*arraySize)) to type*
+ if (!ArraySize)
+ ArraySize = ConstantInt::get(IntPtrTy, 1);
+ else if (ArraySize->getType() != IntPtrTy) {
+ if (InsertBefore)
+ ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
+ "", InsertBefore);
+ else
+ ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
+ "", InsertAtEnd);
+ }
+
+ if (!IsConstantOne(ArraySize)) {
+ if (IsConstantOne(AllocSize)) {
+ AllocSize = ArraySize; // Operand * 1 = Operand
+ } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
+ Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
+ false /*ZExt*/);
+ // Malloc arg is constant product of type size and array size
+ AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
+ } else {
+ // Multiply type size by the array size...
+ if (InsertBefore)
+ AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
+ "mallocsize", InsertBefore);
+ else
+ AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
+ "mallocsize", InsertAtEnd);
+ }
+ }
+
+ assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
+ // Create the call to Malloc.
+ BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
+ Module* M = BB->getParent()->getParent();
+ Type *BPTy = Type::getInt8PtrTy(BB->getContext());
+ Value *MallocFunc = MallocF;
+ if (!MallocFunc)
+ // prototype malloc as "void *malloc(size_t)"
+ MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, nullptr);
+ PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
+ CallInst *MCall = nullptr;
+ Instruction *Result = nullptr;
+ if (InsertBefore) {
+ MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall", InsertBefore);
+ Result = MCall;
+ if (Result->getType() != AllocPtrType)
+ // Create a cast instruction to convert to the right type...
+ Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
+ } else {
+ MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall");
+ Result = MCall;
+ if (Result->getType() != AllocPtrType) {
+ InsertAtEnd->getInstList().push_back(MCall);
+ // Create a cast instruction to convert to the right type...
+ Result = new BitCastInst(MCall, AllocPtrType, Name);
+ }
+ }
+ MCall->setTailCall();
+ if (Function *F = dyn_cast<Function>(MallocFunc)) {
+ MCall->setCallingConv(F->getCallingConv());
+ if (!F->doesNotAlias(0)) F->setDoesNotAlias(0);
+ }
+ assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
+
+ return Result;
+}
+
+/// CreateMalloc - Generate the IR for a call to malloc:
+/// 1. Compute the malloc call's argument as the specified type's size,
+/// possibly multiplied by the array size if the array size is not
+/// constant 1.
+/// 2. Call malloc with that argument.
+/// 3. Bitcast the result of the malloc call to the specified type.
+Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize,
+ Function * MallocF,
+ const Twine &Name) {
+ return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
+ ArraySize, MallocF, Name);
+}
+
+/// CreateMalloc - Generate the IR for a call to malloc:
+/// 1. Compute the malloc call's argument as the specified type's size,
+/// possibly multiplied by the array size if the array size is not
+/// constant 1.
+/// 2. Call malloc with that argument.
+/// 3. Bitcast the result of the malloc call to the specified type.
+/// Note: This function does not add the bitcast to the basic block, that is the
+/// responsibility of the caller.
+Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize,
+ Function *MallocF, const Twine &Name) {
+ return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
+ ArraySize, MallocF, Name);
+}
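+
+// Usage sketch (illustrative only; 'DL', 'Ty', 'Ctx', and 'IP' are
+// hypothetical): callers usually derive AllocSize from the allocated type
+// via ConstantExpr:
+//
+//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
+//   Constant *Size = ConstantExpr::getTruncOrBitCast(
+//       ConstantExpr::getSizeOf(Ty), IntPtrTy);
+//   Instruction *Obj = CallInst::CreateMalloc(
+//       IP, IntPtrTy, Ty, Size, /*ArraySize=*/nullptr,
+//       /*MallocF=*/nullptr, "obj");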
+
+static Instruction* createFree(Value* Source, Instruction *InsertBefore,
+ BasicBlock *InsertAtEnd) {
+ assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
+ "createFree needs either InsertBefore or InsertAtEnd");
+ assert(Source->getType()->isPointerTy() &&
+ "Can not free something of nonpointer type!");
+
+ BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
+ Module* M = BB->getParent()->getParent();
+
+ Type *VoidTy = Type::getVoidTy(M->getContext());
+ Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
+ // prototype free as "void free(void*)"
+ Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, nullptr);
+ CallInst* Result = nullptr;
+ Value *PtrCast = Source;
+ if (InsertBefore) {
+ if (Source->getType() != IntPtrTy)
+ PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
+ Result = CallInst::Create(FreeFunc, PtrCast, "", InsertBefore);
+ } else {
+ if (Source->getType() != IntPtrTy)
+ PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
+ Result = CallInst::Create(FreeFunc, PtrCast, "");
+ }
+ Result->setTailCall();
+ if (Function *F = dyn_cast<Function>(FreeFunc))
+ Result->setCallingConv(F->getCallingConv());
+
+ return Result;
+}
+
+/// CreateFree - Generate the IR for a call to the builtin free function.
+Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
+ return createFree(Source, InsertBefore, nullptr);
+}
+
+/// CreateFree - Generate the IR for a call to the builtin free function.
+/// Note: This function does not add the call to the basic block, that is the
+/// responsibility of the caller.
+Instruction* CallInst::CreateFree(Value* Source, BasicBlock *InsertAtEnd) {
+ Instruction* FreeCall = createFree(Source, nullptr, InsertAtEnd);
+ assert(FreeCall && "CreateFree did not create a CallInst");
+ return FreeCall;
+}
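+
+// Usage sketch (illustrative only; 'Obj' and 'IP' as in the malloc sketch
+// above): the bitcast to i8* is inserted automatically when needed:
+//
+//   CallInst::CreateFree(Obj, IP);   // emits: call void @free(i8* ...)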
+
+//===----------------------------------------------------------------------===//
+// InvokeInst Implementation
+//===----------------------------------------------------------------------===//
+
+void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr) {
+ this->FTy = FTy;
+
+ assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) &&
+ "NumOperands not set up?");
+ Op<-3>() = Fn;
+ Op<-2>() = IfNormal;
+ Op<-1>() = IfException;
+
+#ifndef NDEBUG
+ assert(((Args.size() == FTy->getNumParams()) ||
+ (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
+ "Invoking a function with bad signature");
+
+ for (unsigned i = 0, e = Args.size(); i != e; i++)
+ assert((i >= FTy->getNumParams() ||
+ FTy->getParamType(i) == Args[i]->getType()) &&
+ "Invoking a function with a bad signature!");
+#endif
+
+ std::copy(Args.begin(), Args.end(), op_begin());
+
+ auto It = populateBundleOperandInfos(Bundles, Args.size());
+ (void)It;
+ assert(It + 3 == op_end() && "Should add up!");
+
+ setName(NameStr);
+}
+
+InvokeInst::InvokeInst(const InvokeInst &II)
+ : TerminatorInst(II.getType(), Instruction::Invoke,
+ OperandTraits<InvokeInst>::op_end(this) -
+ II.getNumOperands(),
+ II.getNumOperands()),
+ AttributeList(II.AttributeList), FTy(II.FTy) {
+ setCallingConv(II.getCallingConv());
+ std::copy(II.op_begin(), II.op_end(), op_begin());
+ std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
+ bundle_op_info_begin());
+ SubclassOptionalData = II.SubclassOptionalData;
+}
+
+InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
+ Instruction *InsertPt) {
+ std::vector<Value *> Args(II->arg_begin(), II->arg_end());
+
+ auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
+ II->getUnwindDest(), Args, OpB,
+ II->getName(), InsertPt);
+ NewII->setCallingConv(II->getCallingConv());
+ NewII->SubclassOptionalData = II->SubclassOptionalData;
+ NewII->setAttributes(II->getAttributes());
+ return NewII;
+}
+
+BasicBlock *InvokeInst::getSuccessorV(unsigned idx) const {
+ return getSuccessor(idx);
+}
+unsigned InvokeInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) {
+ return setSuccessor(idx, B);
+}
+
+bool InvokeInst::paramHasAttr(unsigned i, Attribute::AttrKind A) const {
+ assert(i < (getNumArgOperands() + 1) && "Param index out of bounds!");
+
+ if (AttributeList.hasAttribute(i, A))
+ return true;
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(i, A);
+ return false;
+}
+
+bool InvokeInst::dataOperandHasImpliedAttr(unsigned i,
+ Attribute::AttrKind A) const {
+ // There are getNumOperands() - 3 data operands. The last three operands are
+ // the callee and the two successor basic blocks.
+ assert(i < (getNumOperands() - 2) && "Data operand index out of bounds!");
+
+ // The attribute A can either be directly specified, if the operand in
+ // question is an invoke argument; or be indirectly implied by the kind of its
+ // containing operand bundle, if the operand is a bundle operand.
+
+ if (i < (getNumArgOperands() + 1))
+ return paramHasAttr(i, A);
+
+ assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
+ "Must be either an invoke argument or an operand bundle!");
+ return bundleOperandHasAttr(i - 1, A);
+}
+
+void InvokeInst::addAttribute(unsigned i, Attribute::AttrKind attr) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addAttribute(getContext(), i, attr);
+ setAttributes(PAL);
+}
+
+void InvokeInst::removeAttribute(unsigned i, Attribute attr) {
+ AttributeSet PAL = getAttributes();
+ AttrBuilder B(attr);
+ PAL = PAL.removeAttributes(getContext(), i,
+ AttributeSet::get(getContext(), i, B));
+ setAttributes(PAL);
+}
+
+void InvokeInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+}
+
+void InvokeInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
+ AttributeSet PAL = getAttributes();
+ PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
+ setAttributes(PAL);
+}
+
+LandingPadInst *InvokeInst::getLandingPadInst() const {
+ return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
+}
+
+//===----------------------------------------------------------------------===//
+// ReturnInst Implementation
+//===----------------------------------------------------------------------===//
+
+ReturnInst::ReturnInst(const ReturnInst &RI)
+ : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) -
+ RI.getNumOperands(),
+ RI.getNumOperands()) {
+ if (RI.getNumOperands())
+ Op<0>() = RI.Op<0>();
+ SubclassOptionalData = RI.SubclassOptionalData;
+}
+
+ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
+ InsertBefore) {
+ if (retVal)
+ Op<0>() = retVal;
+}
+ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
+ InsertAtEnd) {
+ if (retVal)
+ Op<0>() = retVal;
+}
+ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(Context), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {
+}
+
+unsigned ReturnInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+
+/// Out-of-line ReturnInst method, put here so the C++ compiler can choose to
+/// emit the vtable for the class in this translation unit.
+void ReturnInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
+ llvm_unreachable("ReturnInst has no successors!");
+}
+
+BasicBlock *ReturnInst::getSuccessorV(unsigned idx) const {
+ llvm_unreachable("ReturnInst has no successors!");
+}
+
+ReturnInst::~ReturnInst() {
+}
+
+//===----------------------------------------------------------------------===//
+// ResumeInst Implementation
+//===----------------------------------------------------------------------===//
+
+ResumeInst::ResumeInst(const ResumeInst &RI)
+ : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1) {
+ Op<0>() = RI.Op<0>();
+}
+
+ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
+ Op<0>() = Exn;
+}
+
+ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
+ Op<0>() = Exn;
+}
+
+unsigned ResumeInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+
+void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
+ llvm_unreachable("ResumeInst has no successors!");
+}
+
+BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
+ llvm_unreachable("ResumeInst has no successors!");
+}
+
+//===----------------------------------------------------------------------===//
+// CleanupReturnInst Implementation
+//===----------------------------------------------------------------------===//
+
+CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
+ : TerminatorInst(CRI.getType(), Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) -
+ CRI.getNumOperands(),
+ CRI.getNumOperands()) {
+ setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
+ Op<0>() = CRI.Op<0>();
+ if (CRI.hasUnwindDest())
+ Op<1>() = CRI.Op<1>();
+}
+
+void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
+ if (UnwindBB)
+ setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
+
+ Op<0>() = CleanupPad;
+ if (UnwindBB)
+ Op<1>() = UnwindBB;
+}
+
+CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
+ unsigned Values, Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
+ Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) - Values,
+ Values, InsertBefore) {
+ init(CleanupPad, UnwindBB);
+}
+
+CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
+ unsigned Values, BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
+ Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) - Values,
+ Values, InsertAtEnd) {
+ init(CleanupPad, UnwindBB);
+}
+
+BasicBlock *CleanupReturnInst::getSuccessorV(unsigned Idx) const {
+ assert(Idx == 0);
+ return getUnwindDest();
+}
+unsigned CleanupReturnInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+void CleanupReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
+ assert(Idx == 0);
+ setUnwindDest(B);
+}
+
+//===----------------------------------------------------------------------===//
+// CatchReturnInst Implementation
+//===----------------------------------------------------------------------===//
+void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
+ Op<0>() = CatchPad;
+ Op<1>() = BB;
+}
+
+CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
+ : TerminatorInst(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2) {
+ Op<0>() = CRI.Op<0>();
+ Op<1>() = CRI.Op<1>();
+}
+
+CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
+ Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2,
+ InsertBefore) {
+ init(CatchPad, BB);
+}
+
+CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
+ BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2,
+ InsertAtEnd) {
+ init(CatchPad, BB);
+}
+
+BasicBlock *CatchReturnInst::getSuccessorV(unsigned Idx) const {
+ assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
+ return getSuccessor();
+}
+unsigned CatchReturnInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+void CatchReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
+ assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
+ setSuccessor(B);
+}
+
+//===----------------------------------------------------------------------===//
+// CatchSwitchInst Implementation
+//===----------------------------------------------------------------------===//
+
+CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues,
+ const Twine &NameStr,
+ Instruction *InsertBefore)
+ : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertBefore) {
+ if (UnwindDest)
+ ++NumReservedValues;
+ init(ParentPad, UnwindDest, NumReservedValues + 1);
+ setName(NameStr);
+}
+
+CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertAtEnd) {
+ if (UnwindDest)
+ ++NumReservedValues;
+ init(ParentPad, UnwindDest, NumReservedValues + 1);
+ setName(NameStr);
+}
+
+CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
+ : TerminatorInst(CSI.getType(), Instruction::CatchSwitch, nullptr,
+ CSI.getNumOperands()) {
+ init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
+ setNumHungOffUseOperands(ReservedSpace);
+ Use *OL = getOperandList();
+ const Use *InOL = CSI.getOperandList();
+ for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
+ OL[I] = InOL[I];
+}
+
+void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues) {
+ assert(ParentPad && NumReservedValues);
+
+ ReservedSpace = NumReservedValues;
+ setNumHungOffUseOperands(UnwindDest ? 2 : 1);
+ allocHungoffUses(ReservedSpace);
+
+ Op<0>() = ParentPad;
+ if (UnwindDest) {
+ setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
+ setUnwindDest(UnwindDest);
+ }
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, doubling the number of operands.
+void CatchSwitchInst::growOperands(unsigned Size) {
+ unsigned NumOperands = getNumOperands();
+ assert(NumOperands >= 1);
+ if (ReservedSpace >= NumOperands + Size)
+ return;
+ ReservedSpace = (NumOperands + Size / 2) * 2;
+ growHungoffUses(ReservedSpace);
+}
+
+void CatchSwitchInst::addHandler(BasicBlock *Handler) {
+ unsigned OpNo = getNumOperands();
+ growOperands(1);
+ assert(OpNo < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(getNumOperands() + 1);
+ getOperandList()[OpNo] = Handler;
+}
+
+void CatchSwitchInst::removeHandler(handler_iterator HI) {
+ // Move all subsequent handlers up one.
+ Use *EndDst = op_end() - 1;
+ for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
+ *CurDst = *(CurDst + 1);
+ // Null out the last handler use.
+ *EndDst = nullptr;
+
+ setNumHungOffUseOperands(getNumOperands() - 1);
+}
+
+BasicBlock *CatchSwitchInst::getSuccessorV(unsigned idx) const {
+ return getSuccessor(idx);
+}
+unsigned CatchSwitchInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+void CatchSwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
+ setSuccessor(idx, B);
+}
+
+//===----------------------------------------------------------------------===//
+// FuncletPadInst Implementation
+//===----------------------------------------------------------------------===//
+void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
+ const Twine &NameStr) {
+ assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
+ std::copy(Args.begin(), Args.end(), op_begin());
+ setParentPad(ParentPad);
+ setName(NameStr);
+}
+
+FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
+ : Instruction(FPI.getType(), FPI.getOpcode(),
+ OperandTraits<FuncletPadInst>::op_end(this) -
+ FPI.getNumOperands(),
+ FPI.getNumOperands()) {
+ std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
+ setParentPad(FPI.getParentPad());
+}
+
+FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, Instruction *InsertBefore)
+ : Instruction(ParentPad->getType(), Op,
+ OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
+ InsertBefore) {
+ init(ParentPad, Args, NameStr);
+}
+
+FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : Instruction(ParentPad->getType(), Op,
+ OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
+ InsertAtEnd) {
+ init(ParentPad, Args, NameStr);
+}
+
+//===----------------------------------------------------------------------===//
+// UnreachableInst Implementation
+//===----------------------------------------------------------------------===//
+
+UnreachableInst::UnreachableInst(LLVMContext &Context,
+ Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
+ nullptr, 0, InsertBefore) {
+}
+UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
+ nullptr, 0, InsertAtEnd) {
+}
+
+unsigned UnreachableInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+
+void UnreachableInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
+ llvm_unreachable("UnreachableInst has no successors!");
+}
+
+BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const {
+ llvm_unreachable("UnreachableInst has no successors!");
+}
+
+//===----------------------------------------------------------------------===//
+// BranchInst Implementation
+//===----------------------------------------------------------------------===//
+
+void BranchInst::AssertOK() {
+ if (isConditional())
+ assert(getCondition()->getType()->isIntegerTy(1) &&
+ "May only branch on boolean predicates!");
+}
+
+BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 1,
+ 1, InsertBefore) {
+ assert(IfTrue && "Branch destination may not be null!");
+ Op<-1>() = IfTrue;
+}
+BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+ Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 3,
+ 3, InsertBefore) {
+ Op<-1>() = IfTrue;
+ Op<-2>() = IfFalse;
+ Op<-3>() = Cond;
+#ifndef NDEBUG
+ AssertOK();
+#endif
+}
+
+BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 1,
+ 1, InsertAtEnd) {
+ assert(IfTrue && "Branch destination may not be null!");
+ Op<-1>() = IfTrue;
+}
+
+BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+ BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 3,
+ 3, InsertAtEnd) {
+ Op<-1>() = IfTrue;
+ Op<-2>() = IfFalse;
+ Op<-3>() = Cond;
+#ifndef NDEBUG
+ AssertOK();
+#endif
+}
+
+
+BranchInst::BranchInst(const BranchInst &BI) :
+ TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
+ BI.getNumOperands()) {
+ Op<-1>() = BI.Op<-1>();
+ if (BI.getNumOperands() != 1) {
+ assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
+ Op<-3>() = BI.Op<-3>();
+ Op<-2>() = BI.Op<-2>();
+ }
+ SubclassOptionalData = BI.SubclassOptionalData;
+}
+
+void BranchInst::swapSuccessors() {
+ assert(isConditional() &&
+ "Cannot swap successors of an unconditional branch");
+ Op<-1>().swap(Op<-2>());
+
+ // Update profile metadata if present and it matches our structural
+ // expectations.
+ MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
+ if (!ProfileData || ProfileData->getNumOperands() != 3)
+ return;
+
+ // The first operand is the metadata name; the other two are the branch
+ // weights. Swap the weights and rebuild the node.
+ Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
+ ProfileData->getOperand(1)};
+ setMetadata(LLVMContext::MD_prof,
+ MDNode::get(ProfileData->getContext(), Ops));
+}
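+
+// Usage sketch (illustrative only; 'BI' is a hypothetical conditional
+// BranchInst*): swapSuccessors() alone changes semantics, so callers pair
+// it with inverting the condition:
+//
+//   if (auto *Cmp = dyn_cast<CmpInst>(BI->getCondition()))
+//     if (Cmp->hasOneUse()) {
+//       Cmp->setPredicate(Cmp->getInversePredicate());
+//       BI->swapSuccessors();  // profile weights are swapped above
+//     }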
+
+BasicBlock *BranchInst::getSuccessorV(unsigned idx) const {
+ return getSuccessor(idx);
+}
+unsigned BranchInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+void BranchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
+ setSuccessor(idx, B);
+}
+
+
+//===----------------------------------------------------------------------===//
+// AllocaInst Implementation
+//===----------------------------------------------------------------------===//
+
+static Value *getAISize(LLVMContext &Context, Value *Amt) {
+ if (!Amt)
+ Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
+ else {
+ assert(!isa<BasicBlock>(Amt) &&
+ "Passed basic block into allocation size parameter! Use other ctor");
+ assert(Amt->getType()->isIntegerTy() &&
+ "Allocation array size is not an integer!");
+ }
+ return Amt;
+}
+
+AllocaInst::AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore)
+ : AllocaInst(Ty, /*ArraySize=*/nullptr, Name, InsertBefore) {}
+
+AllocaInst::AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
+ : AllocaInst(Ty, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
+
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, const Twine &Name,
+ Instruction *InsertBefore)
+ : AllocaInst(Ty, ArraySize, /*Align=*/0, Name, InsertBefore) {}
+
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : AllocaInst(Ty, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}
+
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
+ const Twine &Name, Instruction *InsertBefore)
+ : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
+ getAISize(Ty->getContext(), ArraySize), InsertBefore),
+ AllocatedType(Ty) {
+ setAlignment(Align);
+ assert(!Ty->isVoidTy() && "Cannot allocate void!");
+ setName(Name);
+}
+
+AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
+ const Twine &Name, BasicBlock *InsertAtEnd)
+ : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
+ getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
+ AllocatedType(Ty) {
+ setAlignment(Align);
+ assert(!Ty->isVoidTy() && "Cannot allocate void!");
+ setName(Name);
+}
+
+// Out of line virtual method, so the vtable, etc has a home.
+AllocaInst::~AllocaInst() {
+}
+
+void AllocaInst::setAlignment(unsigned Align) {
+ assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
+ (Log2_32(Align) + 1));
+ assert(getAlignment() == Align && "Alignment representation error!");
+}
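+
+// Worked example of the encoding (for illustration): the alignment lives
+// log2-encoded in the low 5 bits of the subclass data, with 0 meaning
+// "unspecified"; e.g. Align == 16 stores Log2_32(16) + 1 == 5, and
+// getAlignment() decodes it back as (1 << 5) >> 1 == 16.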
+
+bool AllocaInst::isArrayAllocation() const {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
+ return !CI->isOne();
+ return true;
+}
+
+/// isStaticAlloca - Return true if this alloca is in the entry block of the
+/// function and is a constant size. If so, the code generator will fold it
+/// into the prolog/epilog code, so it is basically free.
+bool AllocaInst::isStaticAlloca() const {
+ // Must be constant size.
+ if (!isa<ConstantInt>(getArraySize())) return false;
+
+ // Must be in the entry block.
+ const BasicBlock *Parent = getParent();
+ return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
+}
+
+//===----------------------------------------------------------------------===//
+// LoadInst Implementation
+//===----------------------------------------------------------------------===//
+
+void LoadInst::AssertOK() {
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type.");
+ assert(!(isAtomic() && getAlignment() == 0) &&
+ "Alignment required for atomic load");
+}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
+ : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
+ : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ Instruction *InsertBef)
+ : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ BasicBlock *InsertAE)
+ : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, Instruction *InsertBef)
+ : LoadInst(Ty, Ptr, Name, isVolatile, Align, NotAtomic, CrossThread,
+ InsertBef) {}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, BasicBlock *InsertAE)
+ : LoadInst(Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, InsertAE) {
+}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope, Instruction *InsertBef)
+ : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
+ assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+ setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAE)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertAE) {
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+ setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertBef) {
+ setVolatile(false);
+ setAlignment(0);
+ setAtomic(NotAtomic);
+ AssertOK();
+ if (Name && Name[0]) setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertAE) {
+ setVolatile(false);
+ setAlignment(0);
+ setAtomic(NotAtomic);
+ AssertOK();
+ if (Name && Name[0]) setName(Name);
+}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
+ Instruction *InsertBef)
+ : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
+ assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
+ setVolatile(isVolatile);
+ setAlignment(0);
+ setAtomic(NotAtomic);
+ AssertOK();
+ if (Name && Name[0]) setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
+ BasicBlock *InsertAE)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertAE) {
+ setVolatile(isVolatile);
+ setAlignment(0);
+ setAtomic(NotAtomic);
+ AssertOK();
+ if (Name && Name[0]) setName(Name);
+}
+
+void LoadInst::setAlignment(unsigned Align) {
+ assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
+ ((Log2_32(Align)+1)<<1));
+ assert(getAlignment() == Align && "Alignment representation error!");
+}
+
+//===----------------------------------------------------------------------===//
+// StoreInst Implementation
+//===----------------------------------------------------------------------===//
+
+void StoreInst::AssertOK() {
+ assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
+ assert(getOperand(1)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(getOperand(0)->getType() ==
+ cast<PointerType>(getOperand(1)->getType())->getElementType()
+ && "Ptr must be a pointer to Val type!");
+ assert(!(isAtomic() && getAlignment() == 0) &&
+ "Alignment required for atomic store");
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
+ : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
+ : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ Instruction *InsertBefore)
+ : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ BasicBlock *InsertAtEnd)
+ : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
+ Instruction *InsertBefore)
+ : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
+ InsertBefore) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
+ BasicBlock *InsertAtEnd)
+ : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
+ InsertAtEnd) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this),
+ InsertBefore) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this),
+ InsertAtEnd) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+}
+
+void StoreInst::setAlignment(unsigned Align) {
+ assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
+ ((Log2_32(Align)+1) << 1));
+ assert(getAlignment() == Align && "Alignment representation error!");
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicCmpXchgInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope) {
+ Op<0>() = Ptr;
+ Op<1>() = Cmp;
+ Op<2>() = NewVal;
+ setSuccessOrdering(SuccessOrdering);
+ setFailureOrdering(FailureOrdering);
+ setSynchScope(SynchScope);
+
+ assert(getOperand(0) && getOperand(1) && getOperand(2) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(getOperand(1)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to Cmp type!");
+ assert(getOperand(2)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to NewVal type!");
+ assert(SuccessOrdering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+ assert(FailureOrdering != NotAtomic &&
+ "AtomicCmpXchg instructions must be atomic!");
+ assert(SuccessOrdering >= FailureOrdering &&
+ "AtomicCmpXchg success ordering must be at least as strong as fail");
+ assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
+ "AtomicCmpXchg failure ordering cannot include release semantics");
+}
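+
+// Usage sketch (illustrative only; 'Ptr', 'Expected', 'Desired', and 'IP'
+// are hypothetical): a conservative compare-exchange satisfying the
+// ordering rules asserted above:
+//
+//   auto *CAS = new AtomicCmpXchgInst(Ptr, Expected, Desired,
+//                                     SequentiallyConsistent,
+//                                     SequentiallyConsistent, CrossThread,
+//                                     IP);
+//   // Result type is { T, i1 }: the old value plus a success flag.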
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
+ nullptr),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
+ nullptr),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope) {
+ Op<0>() = Ptr;
+ Op<1>() = Val;
+ setOperation(Operation);
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+
+ assert(getOperand(0) && getOperand(1) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(getOperand(1)->getType() ==
+ cast<PointerType>(getOperand(0)->getType())->getElementType()
+ && "Ptr must be a pointer to Val type!");
+ assert(Ordering != NotAtomic &&
+ "AtomicRMW instructions must be atomic!");
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this),
+ InsertBefore) {
+ Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this),
+ InsertAtEnd) {
+ Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
+// FenceInst Implementation
+//===----------------------------------------------------------------------===//
+
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+}
+
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+}
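+
+// Illustrative sketch only: a fence carries just an ordering and a scope:
+//   fence seq_cst
+//   fence singlethread acquire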
+
+//===----------------------------------------------------------------------===//
+// GetElementPtrInst Implementation
+//===----------------------------------------------------------------------===//
+
+void GetElementPtrInst::anchor() {}
+
+void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &Name) {
+ assert(getNumOperands() == 1 + IdxList.size() &&
+ "NumOperands not initialized?");
+ Op<0>() = Ptr;
+ std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
+ setName(Name);
+}
+
+GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
+ : Instruction(GEPI.getType(), GetElementPtr,
+ OperandTraits<GetElementPtrInst>::op_end(this) -
+ GEPI.getNumOperands(),
+ GEPI.getNumOperands()),
+ SourceElementType(GEPI.SourceElementType),
+ ResultElementType(GEPI.ResultElementType) {
+ std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
+ SubclassOptionalData = GEPI.SubclassOptionalData;
+}
+
+/// getIndexedType - Returns the type of the element that would be accessed with
+/// a gep instruction with the specified parameters.
+///
+/// The Idxs pointer should point to a contiguous piece of memory containing the
+/// indices, either as Value* or uint64_t.
+///
+/// A null type is returned if the indices are invalid for the specified
+/// type.
+///
+template <typename IndexTy>
+static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
+  // Handle the special case of an empty index set, which is always valid.
+ if (IdxList.empty())
+ return Agg;
+
+ // If there is at least one index, the top level type must be sized, otherwise
+ // it cannot be 'stepped over'.
+ if (!Agg->isSized())
+ return nullptr;
+
+ unsigned CurIdx = 1;
+ for (; CurIdx != IdxList.size(); ++CurIdx) {
+ CompositeType *CT = dyn_cast<CompositeType>(Agg);
+ if (!CT || CT->isPointerTy()) return nullptr;
+ IndexTy Index = IdxList[CurIdx];
+ if (!CT->indexValid(Index)) return nullptr;
+ Agg = CT->getTypeAtIndex(Index);
+ }
+ return CurIdx == IdxList.size() ? Agg : nullptr;
+}
+
+Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
+ return getIndexedTypeInternal(Ty, IdxList);
+}
+
+Type *GetElementPtrInst::getIndexedType(Type *Ty,
+ ArrayRef<Constant *> IdxList) {
+ return getIndexedTypeInternal(Ty, IdxList);
+}
+
+Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
+ return getIndexedTypeInternal(Ty, IdxList);
+}
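+
+// Illustrative sketch only: for Ty = { i32, [4 x i8] } and indices (0, 1, 2),
+// the leading index is stepped over, index 1 selects the [4 x i8] field, and
+// index 2 selects its i8 element, so getIndexedType returns i8. Indexing into
+// a non-composite type returns null instead.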
+
+/// hasAllZeroIndices - Return true if all of the indices of this GEP are
+/// zeros. If so, the result pointer and the first operand have the same
+/// value, just potentially different types.
+bool GetElementPtrInst::hasAllZeroIndices() const {
+ for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
+ if (!CI->isZero()) return false;
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+/// hasAllConstantIndices - Return true if all of the indices of this GEP are
+/// constant integers. If so, the result pointer and the first operand have
+/// a constant offset between them.
+bool GetElementPtrInst::hasAllConstantIndices() const {
+ for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+ if (!isa<ConstantInt>(getOperand(i)))
+ return false;
+ }
+ return true;
+}
+
+void GetElementPtrInst::setIsInBounds(bool B) {
+ cast<GEPOperator>(this)->setIsInBounds(B);
+}
+
+bool GetElementPtrInst::isInBounds() const {
+ return cast<GEPOperator>(this)->isInBounds();
+}
+
+bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
+ APInt &Offset) const {
+ // Delegate to the generic GEPOperator implementation.
+ return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
+}
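+
+// Illustrative sketch only: for
+//   getelementptr i32, i32* %p, i64 2
+// with 4-byte i32, accumulateConstantOffset adds 8 to Offset and returns
+// true; it returns false as soon as any index is non-constant.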
+
+//===----------------------------------------------------------------------===//
+// ExtractElementInst Implementation
+//===----------------------------------------------------------------------===//
+
+ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
+ const Twine &Name,
+ Instruction *InsertBef)
+ : Instruction(cast<VectorType>(Val->getType())->getElementType(),
+ ExtractElement,
+ OperandTraits<ExtractElementInst>::op_begin(this),
+ 2, InsertBef) {
+ assert(isValidOperands(Val, Index) &&
+ "Invalid extractelement instruction operands!");
+ Op<0>() = Val;
+ Op<1>() = Index;
+ setName(Name);
+}
+
+ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
+ const Twine &Name,
+ BasicBlock *InsertAE)
+ : Instruction(cast<VectorType>(Val->getType())->getElementType(),
+ ExtractElement,
+ OperandTraits<ExtractElementInst>::op_begin(this),
+ 2, InsertAE) {
+ assert(isValidOperands(Val, Index) &&
+ "Invalid extractelement instruction operands!");
+
+ Op<0>() = Val;
+ Op<1>() = Index;
+ setName(Name);
+}
+
+
+bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
+ if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
+ return false;
+ return true;
+}
+
+
+//===----------------------------------------------------------------------===//
+// InsertElementInst Implementation
+//===----------------------------------------------------------------------===//
+
+InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
+ const Twine &Name,
+ Instruction *InsertBef)
+ : Instruction(Vec->getType(), InsertElement,
+ OperandTraits<InsertElementInst>::op_begin(this),
+ 3, InsertBef) {
+ assert(isValidOperands(Vec, Elt, Index) &&
+ "Invalid insertelement instruction operands!");
+ Op<0>() = Vec;
+ Op<1>() = Elt;
+ Op<2>() = Index;
+ setName(Name);
+}
+
+InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
+ const Twine &Name,
+ BasicBlock *InsertAE)
+ : Instruction(Vec->getType(), InsertElement,
+ OperandTraits<InsertElementInst>::op_begin(this),
+ 3, InsertAE) {
+ assert(isValidOperands(Vec, Elt, Index) &&
+ "Invalid insertelement instruction operands!");
+
+ Op<0>() = Vec;
+ Op<1>() = Elt;
+ Op<2>() = Index;
+ setName(Name);
+}
+
+bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
+ const Value *Index) {
+ if (!Vec->getType()->isVectorTy())
+ return false; // First operand of insertelement must be vector type.
+
+  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
+    return false; // Second operand must match the vector element type.
+
+  if (!Index->getType()->isIntegerTy())
+    return false; // Third operand of insertelement must be an integer type.
+ return true;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ShuffleVectorInst Implementation
+//===----------------------------------------------------------------------===//
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+ const Twine &Name,
+ Instruction *InsertBefore)
+: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
+ cast<VectorType>(Mask->getType())->getNumElements()),
+ ShuffleVector,
+ OperandTraits<ShuffleVectorInst>::op_begin(this),
+ OperandTraits<ShuffleVectorInst>::operands(this),
+ InsertBefore) {
+ assert(isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector instruction operands!");
+ Op<0>() = V1;
+ Op<1>() = V2;
+ Op<2>() = Mask;
+ setName(Name);
+}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd)
+: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
+ cast<VectorType>(Mask->getType())->getNumElements()),
+ ShuffleVector,
+ OperandTraits<ShuffleVectorInst>::op_begin(this),
+ OperandTraits<ShuffleVectorInst>::operands(this),
+ InsertAtEnd) {
+ assert(isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector instruction operands!");
+
+ Op<0>() = V1;
+ Op<1>() = V2;
+ Op<2>() = Mask;
+ setName(Name);
+}
+
+bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
+ const Value *Mask) {
+ // V1 and V2 must be vectors of the same type.
+ if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
+ return false;
+
+ // Mask must be vector of i32.
+ VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
+ if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
+ return false;
+
+ // Check to see if Mask is valid.
+ if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
+ return true;
+
+ if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
+ unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
+ for (Value *Op : MV->operands()) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ if (CI->uge(V1Size*2))
+ return false;
+ } else if (!isa<UndefValue>(Op)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(Mask)) {
+ unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
+ for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
+ if (CDS->getElementAsInteger(i) >= V1Size*2)
+ return false;
+ return true;
+ }
+
+  // The bitcode reader can create a placeholder for a forward reference
+  // used as the shuffle mask. When this occurs, the shuffle mask will
+  // fall into this case and fail. To avoid this error, do this bit of
+  // ugliness to allow such a mask to pass.
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask))
+ if (CE->getOpcode() == Instruction::UserOp1)
+ return true;
+
+ return false;
+}
+
+/// getMaskValue - Return the index from the shuffle mask for the specified
+/// output result. This is either -1 if the element is undef or a number less
+/// than 2*numelements.
+int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) {
+ assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
+ if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask))
+ return CDS->getElementAsInteger(i);
+ Constant *C = Mask->getAggregateElement(i);
+ if (isa<UndefValue>(C))
+ return -1;
+ return cast<ConstantInt>(C)->getZExtValue();
+}
+
+/// getShuffleMask - Return the full mask for this instruction, where each
+/// element is the element number and undef elements are returned as -1.
+void ShuffleVectorInst::getShuffleMask(Constant *Mask,
+ SmallVectorImpl<int> &Result) {
+ unsigned NumElts = Mask->getType()->getVectorNumElements();
+
+ if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) {
+ for (unsigned i = 0; i != NumElts; ++i)
+ Result.push_back(CDS->getElementAsInteger(i));
+ return;
+ }
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C = Mask->getAggregateElement(i);
+ Result.push_back(isa<UndefValue>(C) ? -1 :
+ cast<ConstantInt>(C)->getZExtValue());
+ }
+}
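+
+// Illustrative sketch only: for the shuffle
+//   shufflevector <2 x i32> %a, <2 x i32> %b,
+//                 <4 x i32> <i32 0, i32 2, i32 undef, i32 3>
+// getShuffleMask produces {0, 2, -1, 3}; mask values >= 2 select from %b.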
+
+
+//===----------------------------------------------------------------------===//
+// InsertValueInst Class
+//===----------------------------------------------------------------------===//
+
+void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+ const Twine &Name) {
+ assert(getNumOperands() == 2 && "NumOperands not initialized?");
+
+ // There's no fundamental reason why we require at least one index
+ // (other than weirdness with &*IdxBegin being invalid; see
+ // getelementptr's init routine for example). But there's no
+ // present need to support it.
+ assert(Idxs.size() > 0 && "InsertValueInst must have at least one index");
+
+ assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
+ Val->getType() && "Inserted value must match indexed type!");
+ Op<0>() = Agg;
+ Op<1>() = Val;
+
+ Indices.append(Idxs.begin(), Idxs.end());
+ setName(Name);
+}
+
+InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
+ : Instruction(IVI.getType(), InsertValue,
+ OperandTraits<InsertValueInst>::op_begin(this), 2),
+ Indices(IVI.Indices) {
+ Op<0>() = IVI.getOperand(0);
+ Op<1>() = IVI.getOperand(1);
+ SubclassOptionalData = IVI.SubclassOptionalData;
+}
+
+//===----------------------------------------------------------------------===//
+// ExtractValueInst Class
+//===----------------------------------------------------------------------===//
+
+void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
+ assert(getNumOperands() == 1 && "NumOperands not initialized?");
+
+ // There's no fundamental reason why we require at least one index.
+ // But there's no present need to support it.
+ assert(Idxs.size() > 0 && "ExtractValueInst must have at least one index");
+
+ Indices.append(Idxs.begin(), Idxs.end());
+ setName(Name);
+}
+
+ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
+ : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
+ Indices(EVI.Indices) {
+ SubclassOptionalData = EVI.SubclassOptionalData;
+}
+
+// getIndexedType - Returns the type of the element that would be extracted
+// with an extractvalue instruction with the specified parameters.
+//
+// A null type is returned if the indices are invalid for the specified
+// pointer type.
+//
+Type *ExtractValueInst::getIndexedType(Type *Agg,
+ ArrayRef<unsigned> Idxs) {
+ for (unsigned Index : Idxs) {
+ // We can't use CompositeType::indexValid(Index) here.
+ // indexValid() always returns true for arrays because getelementptr allows
+ // out-of-bounds indices. Since we don't allow those for extractvalue and
+ // insertvalue we need to check array indexing manually.
+ // Since the only other types we can index into are struct types it's just
+ // as easy to check those manually as well.
+ if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
+ if (Index >= AT->getNumElements())
+ return nullptr;
+ } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
+ if (Index >= ST->getNumElements())
+ return nullptr;
+ } else {
+ // Not a valid type to index into.
+ return nullptr;
+ }
+
+ Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
+ }
+ return const_cast<Type*>(Agg);
+}
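+
+// Illustrative sketch only: for
+//   %v = extractvalue { i32, { float, [2 x i8] } } %agg, 1, 1, 0
+// getIndexedType walks 1 -> { float, [2 x i8] }, 1 -> [2 x i8], 0 -> i8,
+// so the result type is i8; an out-of-range struct or array index yields null.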
+
+//===----------------------------------------------------------------------===//
+// BinaryOperator Class
+//===----------------------------------------------------------------------===//
+
+BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
+ Type *Ty, const Twine &Name,
+ Instruction *InsertBefore)
+ : Instruction(Ty, iType,
+ OperandTraits<BinaryOperator>::op_begin(this),
+ OperandTraits<BinaryOperator>::operands(this),
+ InsertBefore) {
+ Op<0>() = S1;
+ Op<1>() = S2;
+ init(iType);
+ setName(Name);
+}
+
+BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
+ Type *Ty, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Ty, iType,
+ OperandTraits<BinaryOperator>::op_begin(this),
+ OperandTraits<BinaryOperator>::operands(this),
+ InsertAtEnd) {
+ Op<0>() = S1;
+ Op<1>() = S2;
+ init(iType);
+ setName(Name);
+}
+
+
+void BinaryOperator::init(BinaryOps iType) {
+ Value *LHS = getOperand(0), *RHS = getOperand(1);
+ (void)LHS; (void)RHS; // Silence warnings.
+ assert(LHS->getType() == RHS->getType() &&
+ "Binary operator operand types must match!");
+#ifndef NDEBUG
+ switch (iType) {
+ case Add: case Sub:
+ case Mul:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isIntOrIntVectorTy() &&
+ "Tried to create an integer operation on a non-integer type!");
+ break;
+ case FAdd: case FSub:
+ case FMul:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ case UDiv:
+ case SDiv:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
+ cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ "Incorrect operand type (not integer) for S/UDIV");
+ break;
+ case FDiv:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Incorrect operand type (not floating point) for FDIV");
+ break;
+ case URem:
+ case SRem:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
+ cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ "Incorrect operand type (not integer) for S/UREM");
+ break;
+ case FRem:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Incorrect operand type (not floating point) for FREM");
+ break;
+ case Shl:
+ case LShr:
+ case AShr:
+ assert(getType() == LHS->getType() &&
+ "Shift operation should return same type as operands!");
+ assert((getType()->isIntegerTy() ||
+ (getType()->isVectorTy() &&
+ cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ "Tried to create a shift operation on a non-integral type!");
+ break;
+ case And: case Or:
+ case Xor:
+ assert(getType() == LHS->getType() &&
+ "Logical operation should return same type as operands!");
+ assert((getType()->isIntegerTy() ||
+ (getType()->isVectorTy() &&
+ cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ "Tried to create a logical operation on a non-integral type!");
+ break;
+ default:
+ break;
+ }
+#endif
+}
+
+BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S1->getType() == S2->getType() &&
+ "Cannot create binary operator with two operands of differing type!");
+ return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ BinaryOperator *Res = Create(Op, S1, S2, Name);
+ InsertAtEnd->getInstList().push_back(Res);
+ return Res;
+}
+
+BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return new BinaryOperator(Instruction::Sub,
+ zero, Op,
+ Op->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return new BinaryOperator(Instruction::Sub,
+ zero, Op,
+ Op->getType(), Name, InsertAtEnd);
+}
+
+BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
+}
+
+BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
+}
+
+BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return new BinaryOperator(Instruction::FSub, zero, Op,
+ Op->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return new BinaryOperator(Instruction::FSub, zero, Op,
+ Op->getType(), Name, InsertAtEnd);
+}
+
+BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Constant *C = Constant::getAllOnesValue(Op->getType());
+ return new BinaryOperator(Instruction::Xor, Op, C,
+ Op->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
+ return new BinaryOperator(Instruction::Xor, Op, AllOnes,
+ Op->getType(), Name, InsertAtEnd);
+}
+
+
+// isConstantAllOnes - Helper function for several functions below
+static inline bool isConstantAllOnes(const Value *V) {
+ if (const Constant *C = dyn_cast<Constant>(V))
+ return C->isAllOnesValue();
+ return false;
+}
+
+bool BinaryOperator::isNeg(const Value *V) {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
+ if (Bop->getOpcode() == Instruction::Sub)
+ if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0)))
+ return C->isNegativeZeroValue();
+ return false;
+}
+
+bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
+ if (Bop->getOpcode() == Instruction::FSub)
+ if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0))) {
+ if (!IgnoreZeroSign)
+ IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros();
+ return !IgnoreZeroSign ? C->isNegativeZeroValue() : C->isZeroValue();
+ }
+ return false;
+}
+
+bool BinaryOperator::isNot(const Value *V) {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
+ return (Bop->getOpcode() == Instruction::Xor &&
+ (isConstantAllOnes(Bop->getOperand(1)) ||
+ isConstantAllOnes(Bop->getOperand(0))));
+ return false;
+}
+
+Value *BinaryOperator::getNegArgument(Value *BinOp) {
+ return cast<BinaryOperator>(BinOp)->getOperand(1);
+}
+
+const Value *BinaryOperator::getNegArgument(const Value *BinOp) {
+ return getNegArgument(const_cast<Value*>(BinOp));
+}
+
+Value *BinaryOperator::getFNegArgument(Value *BinOp) {
+ return cast<BinaryOperator>(BinOp)->getOperand(1);
+}
+
+const Value *BinaryOperator::getFNegArgument(const Value *BinOp) {
+ return getFNegArgument(const_cast<Value*>(BinOp));
+}
+
+Value *BinaryOperator::getNotArgument(Value *BinOp) {
+ assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!");
+ BinaryOperator *BO = cast<BinaryOperator>(BinOp);
+ Value *Op0 = BO->getOperand(0);
+ Value *Op1 = BO->getOperand(1);
+ if (isConstantAllOnes(Op0)) return Op1;
+
+ assert(isConstantAllOnes(Op1));
+ return Op0;
+}
+
+const Value *BinaryOperator::getNotArgument(const Value *BinOp) {
+ return getNotArgument(const_cast<Value*>(BinOp));
+}
+
+
+// swapOperands - Exchange the two operands to this instruction. This is
+// safe to use on any binary instruction and does not modify its semantics.
+// If the instruction cannot be commuted (its operand order matters), the
+// operands are left unchanged and true is returned.
+//
+bool BinaryOperator::swapOperands() {
+ if (!isCommutative())
+ return true; // Can't commute operands
+ Op<0>().swap(Op<1>());
+ return false;
+}
+
+void BinaryOperator::setHasNoUnsignedWrap(bool b) {
+ cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
+}
+
+void BinaryOperator::setHasNoSignedWrap(bool b) {
+ cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
+}
+
+void BinaryOperator::setIsExact(bool b) {
+ cast<PossiblyExactOperator>(this)->setIsExact(b);
+}
+
+bool BinaryOperator::hasNoUnsignedWrap() const {
+ return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
+}
+
+bool BinaryOperator::hasNoSignedWrap() const {
+ return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
+}
+
+bool BinaryOperator::isExact() const {
+ return cast<PossiblyExactOperator>(this)->isExact();
+}
+
+void BinaryOperator::copyIRFlags(const Value *V) {
+ // Copy the wrapping flags.
+ if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
+ setHasNoSignedWrap(OB->hasNoSignedWrap());
+ setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
+ }
+
+ // Copy the exact flag.
+ if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
+ setIsExact(PE->isExact());
+
+ // Copy the fast-math flags.
+ if (auto *FP = dyn_cast<FPMathOperator>(V))
+ copyFastMathFlags(FP->getFastMathFlags());
+}
+
+void BinaryOperator::andIRFlags(const Value *V) {
+ if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
+ setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap());
+ setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap());
+ }
+
+ if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
+ setIsExact(isExact() & PE->isExact());
+
+ if (auto *FP = dyn_cast<FPMathOperator>(V)) {
+ FastMathFlags FM = getFastMathFlags();
+ FM &= FP->getFastMathFlags();
+ copyFastMathFlags(FM);
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// FPMathOperator Class
+//===----------------------------------------------------------------------===//
+
+/// getFPAccuracy - Get the maximum error permitted by this operation in ULPs.
+/// An accuracy of 0.0 means that the operation should be performed with the
+/// default precision.
+float FPMathOperator::getFPAccuracy() const {
+ const MDNode *MD =
+ cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
+ if (!MD)
+ return 0.0;
+ ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
+ return Accuracy->getValueAPF().convertToFloat();
+}
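+
+// Illustrative sketch only: the !fpmath metadata read above looks like
+//   %r = fdiv float %a, %b, !fpmath !0
+//   !0 = !{float 2.5}
+// for which getFPAccuracy returns 2.5 ULPs.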
+
+
+//===----------------------------------------------------------------------===//
+// CastInst Class
+//===----------------------------------------------------------------------===//
+
+void CastInst::anchor() {}
+
+// Just determine if this cast only deals with integral->integral conversion.
+bool CastInst::isIntegerCast() const {
+ switch (getOpcode()) {
+ default: return false;
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::Trunc:
+ return true;
+ case Instruction::BitCast:
+ return getOperand(0)->getType()->isIntegerTy() &&
+ getType()->isIntegerTy();
+ }
+}
+
+bool CastInst::isLosslessCast() const {
+ // Only BitCast can be lossless, exit fast if we're not BitCast
+ if (getOpcode() != Instruction::BitCast)
+ return false;
+
+ // Identity cast is always lossless
+ Type* SrcTy = getOperand(0)->getType();
+ Type* DstTy = getType();
+ if (SrcTy == DstTy)
+ return true;
+
+ // Pointer to pointer is always lossless.
+ if (SrcTy->isPointerTy())
+ return DstTy->isPointerTy();
+ return false; // Other types have no identity values
+}
+
+/// This function determines if the CastInst does not require any bits to be
+/// changed in order to effect the cast. Essentially, it identifies cases where
+/// no code gen is necessary for the cast, hence the name no-op cast. For
+/// example, the following are all no-op casts:
+/// # bitcast i32* %x to i8*
+/// # bitcast <2 x i32> %x to <4 x i16>
+/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
+/// @brief Determine if the described cast is a no-op.
+bool CastInst::isNoopCast(Instruction::CastOps Opcode,
+ Type *SrcTy,
+ Type *DestTy,
+ Type *IntPtrTy) {
+ switch (Opcode) {
+ default: llvm_unreachable("Invalid CastOp");
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::AddrSpaceCast:
+    // TODO: Target information may give a more accurate answer here.
+ return false;
+ case Instruction::BitCast:
+ return true; // BitCast never modifies bits.
+ case Instruction::PtrToInt:
+ return IntPtrTy->getScalarSizeInBits() ==
+ DestTy->getScalarSizeInBits();
+ case Instruction::IntToPtr:
+ return IntPtrTy->getScalarSizeInBits() ==
+ SrcTy->getScalarSizeInBits();
+ }
+}
+
+/// @brief Determine if a cast is a no-op.
+bool CastInst::isNoopCast(Type *IntPtrTy) const {
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
+}
+
+bool CastInst::isNoopCast(const DataLayout &DL) const {
+ Type *PtrOpTy = nullptr;
+ if (getOpcode() == Instruction::PtrToInt)
+ PtrOpTy = getOperand(0)->getType();
+ else if (getOpcode() == Instruction::IntToPtr)
+ PtrOpTy = getType();
+
+ Type *IntPtrTy =
+ PtrOpTy ? DL.getIntPtrType(PtrOpTy) : DL.getIntPtrType(getContext(), 0);
+
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
+}
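+
+// Illustrative sketch only: with 64-bit pointers in the DataLayout,
+//   ptrtoint i8* %p to i64
+// is a no-op cast (the bit pattern is unchanged), while the same cast to
+// i32 is not.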
+
+/// This function determines if a pair of casts can be eliminated and what
+/// opcode should be used in the elimination. This assumes that there are two
+/// instructions like this:
+/// * %F = firstOpcode SrcTy %x to MidTy
+/// * %S = secondOpcode MidTy %F to DstTy
+/// The function returns a resultOpcode so these two casts can be replaced with:
+/// * %Replacement = resultOpcode %SrcTy %x to DstTy
+/// If no such cast is permitted, the function returns 0.
+unsigned CastInst::isEliminableCastPair(
+ Instruction::CastOps firstOp, Instruction::CastOps secondOp,
+ Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
+ Type *DstIntPtrTy) {
+ // Define the 144 possibilities for these two cast instructions. The values
+ // in this matrix determine what to do in a given situation and select the
+ // case in the switch below. The rows correspond to firstOp, the columns
+ // correspond to secondOp. In looking at the table below, keep in mind
+ // the following cast properties:
+ //
+ // Size Compare Source Destination
+ // Operator Src ? Size Type Sign Type Sign
+ // -------- ------------ ------------------- ---------------------
+ // TRUNC > Integer Any Integral Any
+ // ZEXT < Integral Unsigned Integer Any
+ // SEXT < Integral Signed Integer Any
+ // FPTOUI n/a FloatPt n/a Integral Unsigned
+ // FPTOSI n/a FloatPt n/a Integral Signed
+ // UITOFP n/a Integral Unsigned FloatPt n/a
+ // SITOFP n/a Integral Signed FloatPt n/a
+ // FPTRUNC > FloatPt n/a FloatPt n/a
+ // FPEXT < FloatPt n/a FloatPt n/a
+ // PTRTOINT n/a Pointer n/a Integral Unsigned
+ // INTTOPTR n/a Integral Unsigned Pointer n/a
+ // BITCAST = FirstClass n/a FirstClass n/a
+ // ADDRSPCST n/a Pointer n/a Pointer n/a
+ //
+  // NOTE: some transforms are safe, but we consider them unprofitable.
+  // For example, we could merge "fptoui double to i32" + "zext i32 to i64"
+  // into "fptoui double to i64", but this loses information about the range
+  // of the produced value (we no longer know the top part is all zeros).
+  // Further, this conversion is often much more expensive on typical
+  // hardware, and it causes issues when building libgcc. We disallow
+  // fptosi+sext for the same reason.
+ const unsigned numCastOps =
+ Instruction::CastOpsEnd - Instruction::CastOpsBegin;
+ static const uint8_t CastResults[numCastOps][numCastOps] = {
+ // T F F U S F F P I B A -+
+ // R Z S P P I I T P 2 N T S |
+ // U E E 2 2 2 2 R E I T C C +- secondOp
+ // N X X U S F F N X N 2 V V |
+ // C T T I I P P C T T P T T -+
+ { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
+ { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
+ { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
+ { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
+ { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
+ { 99,99,99, 2, 2,99,99,10, 2,99,99, 4, 0}, // FPExt |
+ { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
+ { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
+ { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
+ };
+
+ // TODO: This logic could be encoded into the table above and handled in the
+ // switch below.
+ // If either of the casts are a bitcast from scalar to vector, disallow the
+ // merging. However, any pair of bitcasts are allowed.
+ bool IsFirstBitcast = (firstOp == Instruction::BitCast);
+ bool IsSecondBitcast = (secondOp == Instruction::BitCast);
+ bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
+
+ // Check if any of the casts convert scalars <-> vectors.
+ if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
+ (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
+ if (!AreBothBitcasts)
+ return 0;
+
+ int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
+ [secondOp-Instruction::CastOpsBegin];
+ switch (ElimCase) {
+ case 0:
+ // Categorically disallowed.
+ return 0;
+ case 1:
+ // Allowed, use first cast's opcode.
+ return firstOp;
+ case 2:
+ // Allowed, use second cast's opcode.
+ return secondOp;
+ case 3:
+ // No-op cast in second op implies firstOp as long as the DestTy
+ // is integer and we are not converting between a vector and a
+ // non-vector type.
+ if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
+ return firstOp;
+ return 0;
+ case 4:
+ // No-op cast in second op implies firstOp as long as the DestTy
+ // is floating point.
+ if (DstTy->isFloatingPointTy())
+ return firstOp;
+ return 0;
+ case 5:
+ // No-op cast in first op implies secondOp as long as the SrcTy
+ // is an integer.
+ if (SrcTy->isIntegerTy())
+ return secondOp;
+ return 0;
+ case 6:
+ // No-op cast in first op implies secondOp as long as the SrcTy
+ // is a floating point.
+ if (SrcTy->isFloatingPointTy())
+ return secondOp;
+ return 0;
+ case 7: {
+ // Cannot simplify if address spaces are different!
+ if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
+ return 0;
+
+ unsigned MidSize = MidTy->getScalarSizeInBits();
+    // We can still fold this without knowing the actual sizes as long as we
+    // know that the intermediate pointer is the largest possible
+    // pointer size.
+ // FIXME: Is this always true?
+ if (MidSize == 64)
+ return Instruction::BitCast;
+
+ // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
+ if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
+ return 0;
+ unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
+ if (MidSize >= PtrSize)
+ return Instruction::BitCast;
+ return 0;
+ }
+ case 8: {
+ // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
+ // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
+ // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
+ unsigned SrcSize = SrcTy->getScalarSizeInBits();
+ unsigned DstSize = DstTy->getScalarSizeInBits();
+ if (SrcSize == DstSize)
+ return Instruction::BitCast;
+ else if (SrcSize < DstSize)
+ return firstOp;
+ return secondOp;
+ }
+ case 9:
+ // zext, sext -> zext, because sext can't sign extend after zext
+ return Instruction::ZExt;
+ case 10:
+    // fpext followed by fptrunc is allowed if we end up with the same bit
+    // size as the original, in which case it's just a bitcast.
+ if (SrcTy == DstTy)
+ return Instruction::BitCast;
+ return 0; // If the types are not the same we can't eliminate it.
+ case 11: {
+ // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
+ if (!MidIntPtrTy)
+ return 0;
+ unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
+ unsigned SrcSize = SrcTy->getScalarSizeInBits();
+ unsigned DstSize = DstTy->getScalarSizeInBits();
+ if (SrcSize <= PtrSize && SrcSize == DstSize)
+ return Instruction::BitCast;
+ return 0;
+ }
+ case 12: {
+ // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
+ // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
+ if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
+ return Instruction::AddrSpaceCast;
+ return Instruction::BitCast;
+ }
+ case 13:
+    // FIXME: this state can be merged with (1), but the following assert
+    // is useful to check the correctness of the sequence due to the
+    // semantic change of bitcast.
+ assert(
+ SrcTy->isPtrOrPtrVectorTy() &&
+ MidTy->isPtrOrPtrVectorTy() &&
+ DstTy->isPtrOrPtrVectorTy() &&
+ SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
+ MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
+ "Illegal addrspacecast, bitcast sequence!");
+ // Allowed, use first cast's opcode
+ return firstOp;
+ case 14:
+ // bitcast, addrspacecast -> addrspacecast if the element type of
+ // bitcast's source is the same as that of addrspacecast's destination.
+ if (SrcTy->getPointerElementType() == DstTy->getPointerElementType())
+ return Instruction::AddrSpaceCast;
+ return 0;
+
+ case 15:
+    // FIXME: this state can be merged with (1), but the following assert
+    // is useful to check the correctness of the sequence due to the
+    // semantic change of bitcast.
+ assert(
+ SrcTy->isIntOrIntVectorTy() &&
+ MidTy->isPtrOrPtrVectorTy() &&
+ DstTy->isPtrOrPtrVectorTy() &&
+ MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
+ "Illegal inttoptr, bitcast sequence!");
+ // Allowed, use first cast's opcode
+ return firstOp;
+ case 16:
+    // FIXME: this state can be merged with (2), but the following assert
+    // is useful to check the correctness of the sequence due to the
+    // semantic change of bitcast.
+ assert(
+ SrcTy->isPtrOrPtrVectorTy() &&
+ MidTy->isPtrOrPtrVectorTy() &&
+ DstTy->isIntOrIntVectorTy() &&
+ SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
+ "Illegal bitcast, ptrtoint sequence!");
+ // Allowed, use second cast's opcode
+ return secondOp;
+ case 17:
+ // (sitofp (zext x)) -> (uitofp x)
+ return Instruction::UIToFP;
+ case 99:
+ // Cast combination can't happen (error in input). This is for all cases
+ // where the MidTy is not the same for the two cast instructions.
+ llvm_unreachable("Invalid Cast Combination");
+ default:
+ llvm_unreachable("Error in CastResults table!!!");
+ }
+}
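+
+// Illustrative sketch only: given the pair
+//   %m = zext i16 %x to i32
+//   %s = trunc i32 %m to i8
+// the table selects case 8 with SrcSize(16) > DstSize(8), so the pair folds
+// to the second opcode: trunc i16 %x to i8.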
+
+CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
+ const Twine &Name, Instruction *InsertBefore) {
+ assert(castIsValid(op, S, Ty) && "Invalid cast!");
+ // Construct and return the appropriate CastInst subclass
+ switch (op) {
+ case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
+ case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
+ case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
+ case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
+ case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
+ case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
+ case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
+ case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
+ case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
+ case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
+ case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
+ case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
+ case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
+ default: llvm_unreachable("Invalid opcode provided");
+ }
+}
+
+CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
+ const Twine &Name, BasicBlock *InsertAtEnd) {
+ assert(castIsValid(op, S, Ty) && "Invalid cast!");
+ // Construct and return the appropriate CastInst subclass
+ switch (op) {
+ case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
+ case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
+ case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
+ case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
+ case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
+ case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
+ case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
+ case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
+ case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
+ case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
+ case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
+ case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
+ case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
+ default: llvm_unreachable("Invalid opcode provided");
+ }
+}
+
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+ return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+ return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+ return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
+ "Invalid cast");
+ assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
+ assert((!Ty->isVectorTy() ||
+ Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
+ "Invalid cast");
+
+ if (Ty->isIntOrIntVectorTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
+
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
+}
+
+/// @brief Create a BitCast or a PtrToInt cast instruction
+CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
+ "Invalid cast");
+ assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
+ assert((!Ty->isVectorTy() ||
+ Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
+ "Invalid cast");
+
+ if (Ty->isIntOrIntVectorTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
+
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->isPointerTy() && Ty->isIntegerTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
+ if (S->getType()->isIntegerTy() && Ty->isPointerTy())
+ return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
+ bool isSigned, const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "Invalid integer cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::Trunc :
+ (isSigned ? Instruction::SExt : Instruction::ZExt)));
+ return Create(opcode, C, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
+ bool isSigned, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::Trunc :
+ (isSigned ? Instruction::SExt : Instruction::ZExt)));
+ return Create(opcode, C, Ty, Name, InsertAtEnd);
+}
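+
+// Illustrative sketch only: CreateIntegerCast picks the opcode from the
+// widths: i8 -> i32 yields sext when isSigned (zext otherwise), i64 -> i32
+// yields trunc, and equal widths yield a bitcast.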
+
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
+ return Create(opcode, C, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
+ return Create(opcode, C, Ty, Name, InsertAtEnd);
+}
+
+// Check whether it is valid to call getCastOpcode for these types.
+// This routine must be kept in sync with getCastOpcode.
+bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
+ if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
+ return false;
+
+ if (SrcTy == DestTy)
+ return true;
+
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
+ if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
+ if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
+ // An element by element cast. Valid if casting the elements is valid.
+ SrcTy = SrcVecTy->getElementType();
+ DestTy = DestVecTy->getElementType();
+ }
+
+ // Get the bit sizes, we'll need these
+ unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
+ unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+
+ // Run through the possibilities ...
+ if (DestTy->isIntegerTy()) { // Casting to integral
+ if (SrcTy->isIntegerTy()) // Casting from integral
+ return true;
+ if (SrcTy->isFloatingPointTy()) // Casting from floating pt
+ return true;
+ if (SrcTy->isVectorTy()) // Casting from vector
+ return DestBits == SrcBits;
+ // Casting from something else
+ return SrcTy->isPointerTy();
+ }
+ if (DestTy->isFloatingPointTy()) { // Casting to floating pt
+ if (SrcTy->isIntegerTy()) // Casting from integral
+ return true;
+ if (SrcTy->isFloatingPointTy()) // Casting from floating pt
+ return true;
+ if (SrcTy->isVectorTy()) // Casting from vector
+ return DestBits == SrcBits;
+ // Casting from something else
+ return false;
+ }
+ if (DestTy->isVectorTy()) // Casting to vector
+ return DestBits == SrcBits;
+ if (DestTy->isPointerTy()) { // Casting to pointer
+ if (SrcTy->isPointerTy()) // Casting from pointer
+ return true;
+ return SrcTy->isIntegerTy(); // Casting from integral
+ }
+ if (DestTy->isX86_MMXTy()) {
+ if (SrcTy->isVectorTy())
+ return DestBits == SrcBits; // 64-bit vector to MMX
+ return false;
+ } // Casting to something else
+ return false;
+}
+
+bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
+ if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
+ return false;
+
+ if (SrcTy == DestTy)
+ return true;
+
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
+ if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
+ // An element by element cast. Valid if casting the elements is valid.
+ SrcTy = SrcVecTy->getElementType();
+ DestTy = DestVecTy->getElementType();
+ }
+ }
+ }
+
+ if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
+ if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
+ return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
+ }
+ }
+
+ unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
+ unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+
+ // Could still have vectors of pointers if the number of elements doesn't
+ // match
+ if (SrcBits == 0 || DestBits == 0)
+ return false;
+
+ if (SrcBits != DestBits)
+ return false;
+
+ if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
+ return false;
+
+ return true;
+}
+
+bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
+ const DataLayout &DL) {
+ if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
+ if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
+ return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy);
+ if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
+ if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
+ return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy);
+
+ return isBitCastable(SrcTy, DestTy);
+}
+
+// Provide a way to get a "cast" where the cast opcode is inferred from the
+// types and size of the operand. This, basically, is a parallel of the
+// logic in the castIsValid function below. This axiom should hold:
+//   castIsValid(getCastOpcode(Val, Ty), Val, Ty)
+// must not assert. In other words, this produces a "correct"
+// casting opcode for the arguments passed to it.
+// This routine must be kept in sync with isCastable.
+Instruction::CastOps
+CastInst::getCastOpcode(
+ const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
+ Type *SrcTy = Src->getType();
+
+ assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
+ "Only first class types are castable!");
+
+ if (SrcTy == DestTy)
+ return BitCast;
+
+ // FIXME: Check address space sizes here
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
+ if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
+ if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
+ // An element by element cast. Find the appropriate opcode based on the
+ // element types.
+ SrcTy = SrcVecTy->getElementType();
+ DestTy = DestVecTy->getElementType();
+ }
+
+ // Get the bit sizes, we'll need these
+ unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
+ unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+
+ // Run through the possibilities ...
+ if (DestTy->isIntegerTy()) { // Casting to integral
+ if (SrcTy->isIntegerTy()) { // Casting from integral
+ if (DestBits < SrcBits)
+ return Trunc; // int -> smaller int
+      else if (DestBits > SrcBits) { // it's an extension
+ if (SrcIsSigned)
+ return SExt; // signed -> SEXT
+ else
+ return ZExt; // unsigned -> ZEXT
+ } else {
+ return BitCast; // Same size, No-op cast
+ }
+ } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
+ if (DestIsSigned)
+ return FPToSI; // FP -> sint
+ else
+ return FPToUI; // FP -> uint
+ } else if (SrcTy->isVectorTy()) {
+ assert(DestBits == SrcBits &&
+ "Casting vector to integer of different width");
+ return BitCast; // Same size, no-op cast
+ } else {
+ assert(SrcTy->isPointerTy() &&
+ "Casting from a value that is not first-class type");
+ return PtrToInt; // ptr -> int
+ }
+ } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
+ if (SrcTy->isIntegerTy()) { // Casting from integral
+ if (SrcIsSigned)
+ return SIToFP; // sint -> FP
+ else
+ return UIToFP; // uint -> FP
+ } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
+ if (DestBits < SrcBits) {
+ return FPTrunc; // FP -> smaller FP
+ } else if (DestBits > SrcBits) {
+ return FPExt; // FP -> larger FP
+ } else {
+ return BitCast; // same size, no-op cast
+ }
+ } else if (SrcTy->isVectorTy()) {
+ assert(DestBits == SrcBits &&
+ "Casting vector to floating point of different width");
+ return BitCast; // same size, no-op cast
+ }
+ llvm_unreachable("Casting pointer or non-first class to float");
+ } else if (DestTy->isVectorTy()) {
+ assert(DestBits == SrcBits &&
+ "Illegal cast to vector (wrong type or size)");
+ return BitCast;
+ } else if (DestTy->isPointerTy()) {
+ if (SrcTy->isPointerTy()) {
+ if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
+ return AddrSpaceCast;
+ return BitCast; // ptr -> ptr
+ } else if (SrcTy->isIntegerTy()) {
+ return IntToPtr; // int -> ptr
+ }
+ llvm_unreachable("Casting pointer to other than pointer or int");
+ } else if (DestTy->isX86_MMXTy()) {
+ if (SrcTy->isVectorTy()) {
+ assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
+ return BitCast; // 64-bit vector to MMX
+ }
+ llvm_unreachable("Illegal cast to X86_MMX");
+ }
+ llvm_unreachable("Casting to type that is not first-class");
+}
+
+//===----------------------------------------------------------------------===//
+// CastInst SubClass Constructors
+//===----------------------------------------------------------------------===//
+
+/// Check that the construction parameters for a CastInst are correct. This
+/// could be broken out into the separate constructors but it is useful to have
+/// it in one place and to eliminate the redundant code for getting the sizes
+/// of the types involved.
+bool
+CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
+
+ // Check for type sanity on the arguments
+ Type *SrcTy = S->getType();
+
+ if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
+ SrcTy->isAggregateType() || DstTy->isAggregateType())
+ return false;
+
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DstBitSize = DstTy->getScalarSizeInBits();
+
+ // If these are vector types, get the lengths of the vectors (using zero for
+ // scalar types means that checking that vector lengths match also checks that
+ // scalars are not being converted to vectors or vectors to scalars).
+ unsigned SrcLength = SrcTy->isVectorTy() ?
+ cast<VectorType>(SrcTy)->getNumElements() : 0;
+ unsigned DstLength = DstTy->isVectorTy() ?
+ cast<VectorType>(DstTy)->getNumElements() : 0;
+
+ // Switch on the opcode provided
+ switch (op) {
+ default: return false; // This is an input error
+ case Instruction::Trunc:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcLength == DstLength && SrcBitSize > DstBitSize;
+ case Instruction::ZExt:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcLength == DstLength && SrcBitSize < DstBitSize;
+ case Instruction::SExt:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcLength == DstLength && SrcBitSize < DstBitSize;
+ case Instruction::FPTrunc:
+ return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
+ SrcLength == DstLength && SrcBitSize > DstBitSize;
+ case Instruction::FPExt:
+ return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
+ SrcLength == DstLength && SrcBitSize < DstBitSize;
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
+ SrcLength == DstLength;
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcLength == DstLength;
+ case Instruction::PtrToInt:
+ if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
+ return false;
+ if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
+ if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
+ return false;
+ return SrcTy->getScalarType()->isPointerTy() &&
+ DstTy->getScalarType()->isIntegerTy();
+ case Instruction::IntToPtr:
+ if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
+ return false;
+ if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
+ if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
+ return false;
+ return SrcTy->getScalarType()->isIntegerTy() &&
+ DstTy->getScalarType()->isPointerTy();
+ case Instruction::BitCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+
+ // BitCast implies a no-op cast of type only. No bits change.
+ // However, you can't cast pointers to anything but pointers.
+ if (!SrcPtrTy != !DstPtrTy)
+ return false;
+
+ // For non-pointer cases, the cast is okay if the source and destination bit
+ // widths are identical.
+ if (!SrcPtrTy)
+ return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
+
+ // If both are pointers then the address spaces must match.
+ if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
+ return false;
+
+ // A vector of pointers must have the same number of elements.
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
+ return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
+
+ return false;
+ }
+
+ return true;
+ }
+ case Instruction::AddrSpaceCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ if (!SrcPtrTy)
+ return false;
+
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+ if (!DstPtrTy)
+ return false;
+
+ if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
+ return false;
+
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
+ return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
+
+ return false;
+ }
+
+ return true;
+ }
+ }
+}
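+
+// Example: validating a cast before constructing it (minimal sketch;
+// assumes an LLVMContext `Ctx' is in scope):
+// \code
+//   Value *Wide = ConstantInt::get(Type::getInt64Ty(Ctx), 42);
+//   Type *Narrow = Type::getInt32Ty(Ctx);
+//   assert(CastInst::castIsValid(Instruction::Trunc, Wide, Narrow) &&
+//          "i64 -> i32 is a strictly narrowing integer cast");
+// \endcode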
+
+TruncInst::TruncInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
+}
+
+TruncInst::TruncInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
+}
+
+ZExtInst::ZExtInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
+}
+
+ZExtInst::ZExtInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
+}
+
+SExtInst::SExtInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, SExt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
+}
+
+SExtInst::SExtInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
+}
+
+FPTruncInst::FPTruncInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
+}
+
+FPTruncInst::FPTruncInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
+}
+
+FPExtInst::FPExtInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
+}
+
+FPExtInst::FPExtInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
+}
+
+UIToFPInst::UIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
+}
+
+UIToFPInst::UIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
+}
+
+SIToFPInst::SIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
+}
+
+SIToFPInst::SIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
+}
+
+FPToUIInst::FPToUIInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
+}
+
+FPToUIInst::FPToUIInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
+}
+
+FPToSIInst::FPToSIInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
+}
+
+FPToSIInst::FPToSIInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
+}
+
+PtrToIntInst::PtrToIntInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
+}
+
+PtrToIntInst::PtrToIntInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
+}
+
+IntToPtrInst::IntToPtrInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
+}
+
+IntToPtrInst::IntToPtrInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
+}
+
+BitCastInst::BitCastInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
+}
+
+BitCastInst::BitCastInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
+}
+
+AddrSpaceCastInst::AddrSpaceCastInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
+}
+
+AddrSpaceCastInst::AddrSpaceCastInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
+}
+
+//===----------------------------------------------------------------------===//
+// CmpInst Classes
+//===----------------------------------------------------------------------===//
+
+void CmpInst::anchor() {}
+
+CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
+ Value *RHS, const Twine &Name, Instruction *InsertBefore)
+ : Instruction(ty, op,
+ OperandTraits<CmpInst>::op_begin(this),
+ OperandTraits<CmpInst>::operands(this),
+ InsertBefore) {
+ Op<0>() = LHS;
+ Op<1>() = RHS;
+ setPredicate((Predicate)predicate);
+ setName(Name);
+}
+
+CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
+ Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
+ : Instruction(ty, op,
+ OperandTraits<CmpInst>::op_begin(this),
+ OperandTraits<CmpInst>::operands(this),
+ InsertAtEnd) {
+ Op<0>() = LHS;
+ Op<1>() = RHS;
+ setPredicate((Predicate)predicate);
+ setName(Name);
+}
+
+CmpInst *
+CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
+ const Twine &Name, Instruction *InsertBefore) {
+ if (Op == Instruction::ICmp) {
+ if (InsertBefore)
+ return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ else
+ return new ICmpInst(CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ }
+
+ if (InsertBefore)
+ return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ else
+ return new FCmpInst(CmpInst::Predicate(predicate),
+ S1, S2, Name);
+}
+
+CmpInst *
+CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
+ const Twine &Name, BasicBlock *InsertAtEnd) {
+ if (Op == Instruction::ICmp) {
+ return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ }
+ return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+}
+
+void CmpInst::swapOperands() {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
+ IC->swapOperands();
+ else
+ cast<FCmpInst>(this)->swapOperands();
+}
+
+bool CmpInst::isCommutative() const {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
+ return IC->isCommutative();
+ return cast<FCmpInst>(this)->isCommutative();
+}
+
+bool CmpInst::isEquality() const {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
+ return IC->isEquality();
+ return cast<FCmpInst>(this)->isEquality();
+}
+
+
+CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown cmp predicate!");
+ case ICMP_EQ: return ICMP_NE;
+ case ICMP_NE: return ICMP_EQ;
+ case ICMP_UGT: return ICMP_ULE;
+ case ICMP_ULT: return ICMP_UGE;
+ case ICMP_UGE: return ICMP_ULT;
+ case ICMP_ULE: return ICMP_UGT;
+ case ICMP_SGT: return ICMP_SLE;
+ case ICMP_SLT: return ICMP_SGE;
+ case ICMP_SGE: return ICMP_SLT;
+ case ICMP_SLE: return ICMP_SGT;
+
+ case FCMP_OEQ: return FCMP_UNE;
+ case FCMP_ONE: return FCMP_UEQ;
+ case FCMP_OGT: return FCMP_ULE;
+ case FCMP_OLT: return FCMP_UGE;
+ case FCMP_OGE: return FCMP_ULT;
+ case FCMP_OLE: return FCMP_UGT;
+ case FCMP_UEQ: return FCMP_ONE;
+ case FCMP_UNE: return FCMP_OEQ;
+ case FCMP_UGT: return FCMP_OLE;
+ case FCMP_ULT: return FCMP_OGE;
+ case FCMP_UGE: return FCMP_OLT;
+ case FCMP_ULE: return FCMP_OGT;
+ case FCMP_ORD: return FCMP_UNO;
+ case FCMP_UNO: return FCMP_ORD;
+ case FCMP_TRUE: return FCMP_FALSE;
+ case FCMP_FALSE: return FCMP_TRUE;
+ }
+}
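+
+// Example: an inverse predicate negates the comparison result, so
+// !(a < b) becomes (a >= b):
+// \code
+//   CmpInst::Predicate P = CmpInst::getInversePredicate(CmpInst::ICMP_SLT);
+//   assert(P == CmpInst::ICMP_SGE);
+// \endcode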
+
+void ICmpInst::anchor() {}
+
+ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown icmp predicate!");
+ case ICMP_EQ: case ICMP_NE:
+ case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
+ return pred;
+ case ICMP_UGT: return ICMP_SGT;
+ case ICMP_ULT: return ICMP_SLT;
+ case ICMP_UGE: return ICMP_SGE;
+ case ICMP_ULE: return ICMP_SLE;
+ }
+}
+
+ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown icmp predicate!");
+ case ICMP_EQ: case ICMP_NE:
+ case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
+ return pred;
+ case ICMP_SGT: return ICMP_UGT;
+ case ICMP_SLT: return ICMP_ULT;
+ case ICMP_SGE: return ICMP_UGE;
+ case ICMP_SLE: return ICMP_ULE;
+ }
+}
+
+/// Construct the range of values X for which (X pred C) is true.
+///
+ConstantRange
+ICmpInst::makeConstantRange(Predicate pred, const APInt &C) {
+ APInt Lower(C);
+ APInt Upper(C);
+ uint32_t BitWidth = C.getBitWidth();
+ switch (pred) {
+ default: llvm_unreachable("Invalid ICmp opcode to ConstantRange ctor!");
+ case ICmpInst::ICMP_EQ: ++Upper; break;
+ case ICmpInst::ICMP_NE: ++Lower; break;
+ case ICmpInst::ICMP_ULT:
+ Lower = APInt::getMinValue(BitWidth);
+ // Check for an empty-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/false);
+ break;
+ case ICmpInst::ICMP_SLT:
+ Lower = APInt::getSignedMinValue(BitWidth);
+ // Check for an empty-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/false);
+ break;
+ case ICmpInst::ICMP_UGT:
+ ++Lower; Upper = APInt::getMinValue(BitWidth); // Min = Next(Max)
+ // Check for an empty-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/false);
+ break;
+ case ICmpInst::ICMP_SGT:
+ ++Lower; Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max)
+ // Check for an empty-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/false);
+ break;
+ case ICmpInst::ICMP_ULE:
+ Lower = APInt::getMinValue(BitWidth); ++Upper;
+ // Check for a full-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/true);
+ break;
+ case ICmpInst::ICMP_SLE:
+ Lower = APInt::getSignedMinValue(BitWidth); ++Upper;
+ // Check for a full-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/true);
+ break;
+ case ICmpInst::ICMP_UGE:
+ Upper = APInt::getMinValue(BitWidth); // Min = Next(Max)
+ // Check for a full-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/true);
+ break;
+ case ICmpInst::ICMP_SGE:
+ Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max)
+ // Check for a full-set condition.
+ if (Lower == Upper)
+ return ConstantRange(BitWidth, /*isFullSet=*/true);
+ break;
+ }
+ return ConstantRange(Lower, Upper);
+}
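+
+// Example: the i8 values X satisfying (X u< 8) form the half-open range
+// [0, 8):
+// \code
+//   ConstantRange R =
+//       ICmpInst::makeConstantRange(ICmpInst::ICMP_ULT, APInt(8, 8));
+//   assert(R.getLower() == 0 && R.getUpper() == 8);
+// \endcode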
+
+CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown cmp predicate!");
+ case ICMP_EQ: case ICMP_NE:
+ return pred;
+ case ICMP_SGT: return ICMP_SLT;
+ case ICMP_SLT: return ICMP_SGT;
+ case ICMP_SGE: return ICMP_SLE;
+ case ICMP_SLE: return ICMP_SGE;
+ case ICMP_UGT: return ICMP_ULT;
+ case ICMP_ULT: return ICMP_UGT;
+ case ICMP_UGE: return ICMP_ULE;
+ case ICMP_ULE: return ICMP_UGE;
+
+ case FCMP_FALSE: case FCMP_TRUE:
+ case FCMP_OEQ: case FCMP_ONE:
+ case FCMP_UEQ: case FCMP_UNE:
+ case FCMP_ORD: case FCMP_UNO:
+ return pred;
+ case FCMP_OGT: return FCMP_OLT;
+ case FCMP_OLT: return FCMP_OGT;
+ case FCMP_OGE: return FCMP_OLE;
+ case FCMP_OLE: return FCMP_OGE;
+ case FCMP_UGT: return FCMP_ULT;
+ case FCMP_ULT: return FCMP_UGT;
+ case FCMP_UGE: return FCMP_ULE;
+ case FCMP_ULE: return FCMP_UGE;
+ }
+}
+
+CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
+  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
+
+ switch (pred) {
+ default:
+ llvm_unreachable("Unknown predicate!");
+ case CmpInst::ICMP_ULT:
+ return CmpInst::ICMP_SLT;
+ case CmpInst::ICMP_ULE:
+ return CmpInst::ICMP_SLE;
+ case CmpInst::ICMP_UGT:
+ return CmpInst::ICMP_SGT;
+ case CmpInst::ICMP_UGE:
+ return CmpInst::ICMP_SGE;
+ }
+}
+
+bool CmpInst::isUnsigned(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE: return true;
+ }
+}
+
+bool CmpInst::isSigned(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SGE: return true;
+ }
+}
+
+bool CmpInst::isOrdered(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
+ case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
+ case FCmpInst::FCMP_ORD: return true;
+ }
+}
+
+bool CmpInst::isUnordered(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
+ case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
+ case FCmpInst::FCMP_UNO: return true;
+ }
+}
+
+bool CmpInst::isTrueWhenEqual(Predicate predicate) {
+ switch(predicate) {
+ default: return false;
+ case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
+ case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
+ }
+}
+
+bool CmpInst::isFalseWhenEqual(Predicate predicate) {
+ switch(predicate) {
+ case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
+ case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
+ default: return false;
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// SwitchInst Implementation
+//===----------------------------------------------------------------------===//
+
+void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
+ assert(Value && Default && NumReserved);
+ ReservedSpace = NumReserved;
+ setNumHungOffUseOperands(2);
+ allocHungoffUses(ReservedSpace);
+
+ Op<0>() = Value;
+ Op<1>() = Default;
+}
+
+/// SwitchInst ctor - Create a new switch instruction, specifying a value to
+/// switch on and a default destination. The number of additional cases can
+/// be specified here to make memory allocation more efficient. This
+/// constructor can also autoinsert before another instruction.
+SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+ Instruction *InsertBefore)
+ : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
+ nullptr, 0, InsertBefore) {
+ init(Value, Default, 2+NumCases*2);
+}
+
+/// SwitchInst ctor - Create a new switch instruction, specifying a value to
+/// switch on and a default destination. The number of additional cases can
+/// be specified here to make memory allocation more efficient. This
+/// constructor also autoinserts at the end of the specified BasicBlock.
+SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+ BasicBlock *InsertAtEnd)
+ : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
+ nullptr, 0, InsertAtEnd) {
+ init(Value, Default, 2+NumCases*2);
+}
+
+SwitchInst::SwitchInst(const SwitchInst &SI)
+ : TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) {
+ init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
+ setNumHungOffUseOperands(SI.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = SI.getOperandList();
+ for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
+ OL[i] = InOL[i];
+ OL[i+1] = InOL[i+1];
+ }
+ SubclassOptionalData = SI.SubclassOptionalData;
+}
+
+
+/// addCase - Add an entry to the switch instruction...
+///
+void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
+ unsigned NewCaseIdx = getNumCases();
+ unsigned OpNo = getNumOperands();
+ if (OpNo+2 > ReservedSpace)
+ growOperands(); // Get more space!
+ // Initialize some new operands.
+ assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(OpNo+2);
+ CaseIt Case(this, NewCaseIdx);
+ Case.setValue(OnVal);
+ Case.setSuccessor(Dest);
+}
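+
+// Example: building a one-case switch (minimal sketch; the condition `X',
+// blocks `Def', `BB1', `AtEnd', and the context `Ctx' are assumed to exist):
+// \code
+//   SwitchInst *SI = SwitchInst::Create(X, Def, /*NumCases=*/1, AtEnd);
+//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 1), BB1);
+//   // Operand layout is now: [X, Def, case-value, case-successor].
+// \endcode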
+
+/// removeCase - This method removes the specified case and its successor
+/// from the switch instruction.
+void SwitchInst::removeCase(CaseIt i) {
+ unsigned idx = i.getCaseIndex();
+
+ assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
+
+ unsigned NumOps = getNumOperands();
+ Use *OL = getOperandList();
+
+ // Overwrite this case with the end of the list.
+ if (2 + (idx + 1) * 2 != NumOps) {
+ OL[2 + idx * 2] = OL[NumOps - 2];
+ OL[2 + idx * 2 + 1] = OL[NumOps - 1];
+ }
+
+ // Nuke the last value.
+ OL[NumOps-2].set(nullptr);
+ OL[NumOps-2+1].set(nullptr);
+ setNumHungOffUseOperands(NumOps-2);
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation. It triples the number of operands.
+///
+void SwitchInst::growOperands() {
+ unsigned e = getNumOperands();
+ unsigned NumOps = e*3;
+
+ ReservedSpace = NumOps;
+ growHungoffUses(ReservedSpace);
+}
+
+
+BasicBlock *SwitchInst::getSuccessorV(unsigned idx) const {
+ return getSuccessor(idx);
+}
+unsigned SwitchInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+void SwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
+ setSuccessor(idx, B);
+}
+
+//===----------------------------------------------------------------------===//
+// IndirectBrInst Implementation
+//===----------------------------------------------------------------------===//
+
+void IndirectBrInst::init(Value *Address, unsigned NumDests) {
+ assert(Address && Address->getType()->isPointerTy() &&
+ "Address of indirectbr must be a pointer");
+ ReservedSpace = 1+NumDests;
+ setNumHungOffUseOperands(1);
+ allocHungoffUses(ReservedSpace);
+
+ Op<0>() = Address;
+}
+
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation. It doubles the number of operands.
+///
+void IndirectBrInst::growOperands() {
+ unsigned e = getNumOperands();
+ unsigned NumOps = e*2;
+
+ ReservedSpace = NumOps;
+ growHungoffUses(ReservedSpace);
+}
+
+IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
+ Instruction *InsertBefore)
+: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
+ nullptr, 0, InsertBefore) {
+ init(Address, NumCases);
+}
+
+IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
+ BasicBlock *InsertAtEnd)
+: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
+ nullptr, 0, InsertAtEnd) {
+ init(Address, NumCases);
+}
+
+IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
+ : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
+ nullptr, IBI.getNumOperands()) {
+ allocHungoffUses(IBI.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = IBI.getOperandList();
+ for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
+ OL[i] = InOL[i];
+ SubclassOptionalData = IBI.SubclassOptionalData;
+}
+
+/// addDestination - Add a destination.
+///
+void IndirectBrInst::addDestination(BasicBlock *DestBB) {
+ unsigned OpNo = getNumOperands();
+ if (OpNo+1 > ReservedSpace)
+ growOperands(); // Get more space!
+ // Initialize some new operands.
+ assert(OpNo < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(OpNo+1);
+ getOperandList()[OpNo] = DestBB;
+}
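+
+// Example: populating an indirectbr (minimal sketch; `Addr' is an assumed
+// pointer-typed value and `BB1', `BB2', `AtEnd' are assumed blocks):
+// \code
+//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2, AtEnd);
+//   IBI->addDestination(BB1);
+//   IBI->addDestination(BB2);
+// \endcode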
+
+/// removeDestination - This method removes the specified successor from the
+/// indirectbr instruction.
+void IndirectBrInst::removeDestination(unsigned idx) {
+ assert(idx < getNumOperands()-1 && "Successor index out of range!");
+
+ unsigned NumOps = getNumOperands();
+ Use *OL = getOperandList();
+
+ // Replace this value with the last one.
+ OL[idx+1] = OL[NumOps-1];
+
+ // Nuke the last value.
+ OL[NumOps-1].set(nullptr);
+ setNumHungOffUseOperands(NumOps-1);
+}
+
+BasicBlock *IndirectBrInst::getSuccessorV(unsigned idx) const {
+ return getSuccessor(idx);
+}
+unsigned IndirectBrInst::getNumSuccessorsV() const {
+ return getNumSuccessors();
+}
+void IndirectBrInst::setSuccessorV(unsigned idx, BasicBlock *B) {
+ setSuccessor(idx, B);
+}
+
+//===----------------------------------------------------------------------===//
+// cloneImpl() implementations
+//===----------------------------------------------------------------------===//
+
+// Define these methods here so vtables don't get emitted into every translation
+// unit that uses these classes.
+
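+// Clients do not call cloneImpl() directly; Instruction::clone() wraps it
+// and also copies metadata. Minimal sketch, for some instruction `I':
+// \code
+//   Instruction *Copy = I->clone();   // dispatches to the cloneImpl() below
+//   Copy->insertBefore(I);            // the clone starts out parentless
+// \endcode
+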
+GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
+ return new (getNumOperands()) GetElementPtrInst(*this);
+}
+
+BinaryOperator *BinaryOperator::cloneImpl() const {
+ return Create(getOpcode(), Op<0>(), Op<1>());
+}
+
+FCmpInst *FCmpInst::cloneImpl() const {
+ return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
+}
+
+ICmpInst *ICmpInst::cloneImpl() const {
+ return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
+}
+
+ExtractValueInst *ExtractValueInst::cloneImpl() const {
+ return new ExtractValueInst(*this);
+}
+
+InsertValueInst *InsertValueInst::cloneImpl() const {
+ return new InsertValueInst(*this);
+}
+
+AllocaInst *AllocaInst::cloneImpl() const {
+ AllocaInst *Result = new AllocaInst(getAllocatedType(),
+ (Value *)getOperand(0), getAlignment());
+ Result->setUsedWithInAlloca(isUsedWithInAlloca());
+ return Result;
+}
+
+LoadInst *LoadInst::cloneImpl() const {
+ return new LoadInst(getOperand(0), Twine(), isVolatile(),
+ getAlignment(), getOrdering(), getSynchScope());
+}
+
+StoreInst *StoreInst::cloneImpl() const {
+  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
+                       getAlignment(), getOrdering(), getSynchScope());
+}
+
+AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
+ AtomicCmpXchgInst *Result =
+ new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
+ getSuccessOrdering(), getFailureOrdering(),
+ getSynchScope());
+ Result->setVolatile(isVolatile());
+ Result->setWeak(isWeak());
+ return Result;
+}
+
+AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
+ AtomicRMWInst *Result =
+ new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
+ getOrdering(), getSynchScope());
+ Result->setVolatile(isVolatile());
+ return Result;
+}
+
+FenceInst *FenceInst::cloneImpl() const {
+ return new FenceInst(getContext(), getOrdering(), getSynchScope());
+}
+
+TruncInst *TruncInst::cloneImpl() const {
+ return new TruncInst(getOperand(0), getType());
+}
+
+ZExtInst *ZExtInst::cloneImpl() const {
+ return new ZExtInst(getOperand(0), getType());
+}
+
+SExtInst *SExtInst::cloneImpl() const {
+ return new SExtInst(getOperand(0), getType());
+}
+
+FPTruncInst *FPTruncInst::cloneImpl() const {
+ return new FPTruncInst(getOperand(0), getType());
+}
+
+FPExtInst *FPExtInst::cloneImpl() const {
+ return new FPExtInst(getOperand(0), getType());
+}
+
+UIToFPInst *UIToFPInst::cloneImpl() const {
+ return new UIToFPInst(getOperand(0), getType());
+}
+
+SIToFPInst *SIToFPInst::cloneImpl() const {
+ return new SIToFPInst(getOperand(0), getType());
+}
+
+FPToUIInst *FPToUIInst::cloneImpl() const {
+ return new FPToUIInst(getOperand(0), getType());
+}
+
+FPToSIInst *FPToSIInst::cloneImpl() const {
+ return new FPToSIInst(getOperand(0), getType());
+}
+
+PtrToIntInst *PtrToIntInst::cloneImpl() const {
+ return new PtrToIntInst(getOperand(0), getType());
+}
+
+IntToPtrInst *IntToPtrInst::cloneImpl() const {
+ return new IntToPtrInst(getOperand(0), getType());
+}
+
+BitCastInst *BitCastInst::cloneImpl() const {
+ return new BitCastInst(getOperand(0), getType());
+}
+
+AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
+ return new AddrSpaceCastInst(getOperand(0), getType());
+}
+
+CallInst *CallInst::cloneImpl() const {
+ if (hasOperandBundles()) {
+ unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
+ return new(getNumOperands(), DescriptorBytes) CallInst(*this);
+ }
+ return new(getNumOperands()) CallInst(*this);
+}
+
+SelectInst *SelectInst::cloneImpl() const {
+ return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
+}
+
+VAArgInst *VAArgInst::cloneImpl() const {
+ return new VAArgInst(getOperand(0), getType());
+}
+
+ExtractElementInst *ExtractElementInst::cloneImpl() const {
+ return ExtractElementInst::Create(getOperand(0), getOperand(1));
+}
+
+InsertElementInst *InsertElementInst::cloneImpl() const {
+ return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
+}
+
+ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
+ return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
+}
+
+PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
+
+LandingPadInst *LandingPadInst::cloneImpl() const {
+ return new LandingPadInst(*this);
+}
+
+ReturnInst *ReturnInst::cloneImpl() const {
+ return new(getNumOperands()) ReturnInst(*this);
+}
+
+BranchInst *BranchInst::cloneImpl() const {
+ return new(getNumOperands()) BranchInst(*this);
+}
+
+SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
+
+IndirectBrInst *IndirectBrInst::cloneImpl() const {
+ return new IndirectBrInst(*this);
+}
+
+InvokeInst *InvokeInst::cloneImpl() const {
+ if (hasOperandBundles()) {
+ unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
+ return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
+ }
+ return new(getNumOperands()) InvokeInst(*this);
+}
+
+ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
+
+CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
+ return new (getNumOperands()) CleanupReturnInst(*this);
+}
+
+CatchReturnInst *CatchReturnInst::cloneImpl() const {
+ return new (getNumOperands()) CatchReturnInst(*this);
+}
+
+CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
+ return new CatchSwitchInst(*this);
+}
+
+FuncletPadInst *FuncletPadInst::cloneImpl() const {
+ return new (getNumOperands()) FuncletPadInst(*this);
+}
+
+UnreachableInst *UnreachableInst::cloneImpl() const {
+ LLVMContext &Context = getContext();
+ return new UnreachableInst(Context);
+}
diff --git a/contrib/llvm/lib/IR/IntrinsicInst.cpp b/contrib/llvm/lib/IR/IntrinsicInst.cpp
new file mode 100644
index 0000000..b9b5a29
--- /dev/null
+++ b/contrib/llvm/lib/IR/IntrinsicInst.cpp
@@ -0,0 +1,81 @@
+//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements methods that make it really easy to deal with intrinsic
+// functions.
+//
+// All intrinsic function calls are instances of the call instruction, so these
+// are all subclasses of the CallInst class. Note that none of these classes
+// has state or virtual methods, which is an important part of this gross/neat
+// hack working.
+//
+// In some cases, arguments to intrinsics need to be generic and are given the
+// type 'pointer to empty struct' ({ }*). To access the real item of interest,
+// the cast instruction needs to be stripped away.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Metadata.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+/// DbgInfoIntrinsic - This is the common base class for debug info intrinsics
+///
+
+static Value *CastOperand(Value *C) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ if (CE->isCast())
+ return CE->getOperand(0);
+ return nullptr;
+}
+
+Value *DbgInfoIntrinsic::StripCast(Value *C) {
+ if (Value *CO = CastOperand(C)) {
+ C = StripCast(CO);
+ } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
+ if (GV->hasInitializer())
+ if (Value *CO = CastOperand(GV->getInitializer()))
+ C = StripCast(CO);
+ }
+ return dyn_cast<GlobalVariable>(C);
+}
+
+static Value *getValueImpl(Value *Op) {
+ auto *MD = cast<MetadataAsValue>(Op)->getMetadata();
+ if (auto *V = dyn_cast<ValueAsMetadata>(MD))
+ return V->getValue();
+
+ // When the value goes to null, it gets replaced by an empty MDNode.
+ assert(!cast<MDNode>(MD)->getNumOperands() && "Expected an empty MDNode");
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+/// DbgDeclareInst - This represents the llvm.dbg.declare instruction.
+///
+
+Value *DbgDeclareInst::getAddress() const {
+ if (!getArgOperand(0))
+ return nullptr;
+
+ return getValueImpl(getArgOperand(0));
+}
+
+//===----------------------------------------------------------------------===//
+/// DbgValueInst - This represents the llvm.dbg.value instruction.
+///
+
+const Value *DbgValueInst::getValue() const {
+ return const_cast<DbgValueInst *>(this)->getValue();
+}
+
+Value *DbgValueInst::getValue() { return getValueImpl(getArgOperand(0)); }
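+
+// Example: scanning a block for llvm.dbg.value intrinsics (minimal sketch;
+// `BB' is an assumed BasicBlock reference):
+// \code
+//   for (Instruction &I : BB)
+//     if (auto *DVI = dyn_cast<DbgValueInst>(&I))
+//       if (Value *V = DVI->getValue())
+//         errs() << "dbg.value tracks " << *V << "\n";
+// \endcode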
diff --git a/contrib/llvm/lib/IR/LLVMContext.cpp b/contrib/llvm/lib/IR/LLVMContext.cpp
new file mode 100644
index 0000000..48b53b0
--- /dev/null
+++ b/contrib/llvm/lib/IR/LLVMContext.cpp
@@ -0,0 +1,322 @@
+//===-- LLVMContext.cpp - Implement LLVMContext ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements LLVMContext, as a wrapper around the opaque
+// class LLVMContextImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/LLVMContext.h"
+#include "LLVMContextImpl.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/SourceMgr.h"
+#include <cctype>
+using namespace llvm;
+
+static ManagedStatic<LLVMContext> GlobalContext;
+
+LLVMContext& llvm::getGlobalContext() {
+ return *GlobalContext;
+}
+
+LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
+ // Create the fixed metadata kinds. This is done in the same order as the
+ // MD_* enum values so that they correspond.
+
+ // Create the 'dbg' metadata kind.
+ unsigned DbgID = getMDKindID("dbg");
+ assert(DbgID == MD_dbg && "dbg kind id drifted"); (void)DbgID;
+
+ // Create the 'tbaa' metadata kind.
+ unsigned TBAAID = getMDKindID("tbaa");
+ assert(TBAAID == MD_tbaa && "tbaa kind id drifted"); (void)TBAAID;
+
+ // Create the 'prof' metadata kind.
+ unsigned ProfID = getMDKindID("prof");
+ assert(ProfID == MD_prof && "prof kind id drifted"); (void)ProfID;
+
+ // Create the 'fpmath' metadata kind.
+ unsigned FPAccuracyID = getMDKindID("fpmath");
+ assert(FPAccuracyID == MD_fpmath && "fpmath kind id drifted");
+ (void)FPAccuracyID;
+
+ // Create the 'range' metadata kind.
+ unsigned RangeID = getMDKindID("range");
+ assert(RangeID == MD_range && "range kind id drifted");
+ (void)RangeID;
+
+ // Create the 'tbaa.struct' metadata kind.
+ unsigned TBAAStructID = getMDKindID("tbaa.struct");
+ assert(TBAAStructID == MD_tbaa_struct && "tbaa.struct kind id drifted");
+ (void)TBAAStructID;
+
+ // Create the 'invariant.load' metadata kind.
+ unsigned InvariantLdId = getMDKindID("invariant.load");
+ assert(InvariantLdId == MD_invariant_load && "invariant.load kind id drifted");
+ (void)InvariantLdId;
+
+ // Create the 'alias.scope' metadata kind.
+ unsigned AliasScopeID = getMDKindID("alias.scope");
+ assert(AliasScopeID == MD_alias_scope && "alias.scope kind id drifted");
+ (void)AliasScopeID;
+
+ // Create the 'noalias' metadata kind.
+ unsigned NoAliasID = getMDKindID("noalias");
+ assert(NoAliasID == MD_noalias && "noalias kind id drifted");
+ (void)NoAliasID;
+
+ // Create the 'nontemporal' metadata kind.
+ unsigned NonTemporalID = getMDKindID("nontemporal");
+ assert(NonTemporalID == MD_nontemporal && "nontemporal kind id drifted");
+ (void)NonTemporalID;
+
+ // Create the 'llvm.mem.parallel_loop_access' metadata kind.
+ unsigned MemParallelLoopAccessID = getMDKindID("llvm.mem.parallel_loop_access");
+ assert(MemParallelLoopAccessID == MD_mem_parallel_loop_access &&
+ "mem_parallel_loop_access kind id drifted");
+ (void)MemParallelLoopAccessID;
+
+ // Create the 'nonnull' metadata kind.
+ unsigned NonNullID = getMDKindID("nonnull");
+ assert(NonNullID == MD_nonnull && "nonnull kind id drifted");
+ (void)NonNullID;
+
+ // Create the 'dereferenceable' metadata kind.
+ unsigned DereferenceableID = getMDKindID("dereferenceable");
+ assert(DereferenceableID == MD_dereferenceable &&
+ "dereferenceable kind id drifted");
+ (void)DereferenceableID;
+
+ // Create the 'dereferenceable_or_null' metadata kind.
+ unsigned DereferenceableOrNullID = getMDKindID("dereferenceable_or_null");
+ assert(DereferenceableOrNullID == MD_dereferenceable_or_null &&
+ "dereferenceable_or_null kind id drifted");
+ (void)DereferenceableOrNullID;
+
+ // Create the 'make.implicit' metadata kind.
+ unsigned MakeImplicitID = getMDKindID("make.implicit");
+ assert(MakeImplicitID == MD_make_implicit &&
+ "make.implicit kind id drifted");
+ (void)MakeImplicitID;
+
+ // Create the 'unpredictable' metadata kind.
+ unsigned UnpredictableID = getMDKindID("unpredictable");
+ assert(UnpredictableID == MD_unpredictable &&
+ "unpredictable kind id drifted");
+ (void)UnpredictableID;
+
+ // Create the 'invariant.group' metadata kind.
+ unsigned InvariantGroupId = getMDKindID("invariant.group");
+ assert(InvariantGroupId == MD_invariant_group &&
+ "invariant.group kind id drifted");
+ (void)InvariantGroupId;
+
+ // Create the 'align' metadata kind.
+ unsigned AlignID = getMDKindID("align");
+ assert(AlignID == MD_align && "align kind id drifted");
+ (void)AlignID;
+
+ auto *DeoptEntry = pImpl->getOrInsertBundleTag("deopt");
+ assert(DeoptEntry->second == LLVMContext::OB_deopt &&
+ "deopt operand bundle id drifted!");
+ (void)DeoptEntry;
+
+ auto *FuncletEntry = pImpl->getOrInsertBundleTag("funclet");
+ assert(FuncletEntry->second == LLVMContext::OB_funclet &&
+ "funclet operand bundle id drifted!");
+ (void)FuncletEntry;
+}
+
+LLVMContext::~LLVMContext() { delete pImpl; }
+
+void LLVMContext::addModule(Module *M) {
+ pImpl->OwnedModules.insert(M);
+}
+
+void LLVMContext::removeModule(Module *M) {
+ pImpl->OwnedModules.erase(M);
+}
+
+//===----------------------------------------------------------------------===//
+// Recoverable Backend Errors
+//===----------------------------------------------------------------------===//
+
+void LLVMContext::
+setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
+ void *DiagContext) {
+ pImpl->InlineAsmDiagHandler = DiagHandler;
+ pImpl->InlineAsmDiagContext = DiagContext;
+}
+
+/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
+/// setInlineAsmDiagnosticHandler.
+LLVMContext::InlineAsmDiagHandlerTy
+LLVMContext::getInlineAsmDiagnosticHandler() const {
+ return pImpl->InlineAsmDiagHandler;
+}
+
+/// getInlineAsmDiagnosticContext - Return the diagnostic context set by
+/// setInlineAsmDiagnosticHandler.
+void *LLVMContext::getInlineAsmDiagnosticContext() const {
+ return pImpl->InlineAsmDiagContext;
+}
+
+void LLVMContext::setDiagnosticHandler(DiagnosticHandlerTy DiagnosticHandler,
+ void *DiagnosticContext,
+ bool RespectFilters) {
+ pImpl->DiagnosticHandler = DiagnosticHandler;
+ pImpl->DiagnosticContext = DiagnosticContext;
+ pImpl->RespectDiagnosticFilters = RespectFilters;
+}
+
+LLVMContext::DiagnosticHandlerTy LLVMContext::getDiagnosticHandler() const {
+ return pImpl->DiagnosticHandler;
+}
+
+void *LLVMContext::getDiagnosticContext() const {
+ return pImpl->DiagnosticContext;
+}
+
+void LLVMContext::setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle)
+{
+ pImpl->YieldCallback = Callback;
+ pImpl->YieldOpaqueHandle = OpaqueHandle;
+}
+
+void LLVMContext::yield() {
+ if (pImpl->YieldCallback)
+ pImpl->YieldCallback(this, pImpl->YieldOpaqueHandle);
+}
+
+void LLVMContext::emitError(const Twine &ErrorStr) {
+ diagnose(DiagnosticInfoInlineAsm(ErrorStr));
+}
+
+void LLVMContext::emitError(const Instruction *I, const Twine &ErrorStr) {
+  assert(I && "Invalid instruction");
+ diagnose(DiagnosticInfoInlineAsm(*I, ErrorStr));
+}
+
+static bool isDiagnosticEnabled(const DiagnosticInfo &DI) {
+ // Optimization remarks are selective. They need to check whether the regexp
+ // pattern, passed via one of the -pass-remarks* flags, matches the name of
+ // the pass that is emitting the diagnostic. If there is no match, ignore the
+ // diagnostic and return.
+ switch (DI.getKind()) {
+ case llvm::DK_OptimizationRemark:
+ if (!cast<DiagnosticInfoOptimizationRemark>(DI).isEnabled())
+ return false;
+ break;
+ case llvm::DK_OptimizationRemarkMissed:
+ if (!cast<DiagnosticInfoOptimizationRemarkMissed>(DI).isEnabled())
+ return false;
+ break;
+ case llvm::DK_OptimizationRemarkAnalysis:
+ if (!cast<DiagnosticInfoOptimizationRemarkAnalysis>(DI).isEnabled())
+ return false;
+ break;
+ case llvm::DK_OptimizationRemarkAnalysisFPCommute:
+ if (!cast<DiagnosticInfoOptimizationRemarkAnalysisFPCommute>(DI)
+ .isEnabled())
+ return false;
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
+static const char *getDiagnosticMessagePrefix(DiagnosticSeverity Severity) {
+ switch (Severity) {
+ case DS_Error:
+ return "error";
+ case DS_Warning:
+ return "warning";
+ case DS_Remark:
+ return "remark";
+ case DS_Note:
+ return "note";
+ }
+ llvm_unreachable("Unknown DiagnosticSeverity");
+}
+
+void LLVMContext::diagnose(const DiagnosticInfo &DI) {
+ // If there is a report handler, use it.
+ if (pImpl->DiagnosticHandler) {
+ if (!pImpl->RespectDiagnosticFilters || isDiagnosticEnabled(DI))
+ pImpl->DiagnosticHandler(DI, pImpl->DiagnosticContext);
+ return;
+ }
+
+ if (!isDiagnosticEnabled(DI))
+ return;
+
+ // Otherwise, print the message with a prefix based on the severity.
+ DiagnosticPrinterRawOStream DP(errs());
+ errs() << getDiagnosticMessagePrefix(DI.getSeverity()) << ": ";
+ DI.print(DP);
+ errs() << "\n";
+ if (DI.getSeverity() == DS_Error)
+ exit(1);
+}
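+
+// Example: installing a handler that reroutes diagnostics instead of letting
+// diagnose() print to errs() and possibly exit(1) (minimal sketch; `Ctx' is
+// an assumed LLVMContext):
+// \code
+//   static void MyHandler(const DiagnosticInfo &DI, void *) {
+//     DiagnosticPrinterRawOStream DP(errs());
+//     errs() << "[mytool] ";
+//     DI.print(DP);
+//     errs() << "\n";
+//   }
+//   ...
+//   Ctx.setDiagnosticHandler(MyHandler, /*DiagnosticContext=*/nullptr);
+// \endcode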
+
+void LLVMContext::emitError(unsigned LocCookie, const Twine &ErrorStr) {
+ diagnose(DiagnosticInfoInlineAsm(LocCookie, ErrorStr));
+}
+
+//===----------------------------------------------------------------------===//
+// Metadata Kind Uniquing
+//===----------------------------------------------------------------------===//
+
+/// Return a unique ID for the specified metadata kind.
+unsigned LLVMContext::getMDKindID(StringRef Name) const {
+ // If this is new, assign it its ID.
+ return pImpl->CustomMDKindNames.insert(
+ std::make_pair(
+ Name, pImpl->CustomMDKindNames.size()))
+ .first->second;
+}
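+
+// Example: fixed kinds resolve to their MD_* enumerators and new names get
+// the next free ID (minimal sketch; `Ctx' is an assumed LLVMContext and `I'
+// an assumed Instruction*):
+// \code
+//   assert(Ctx.getMDKindID("dbg") == LLVMContext::MD_dbg);
+//   unsigned MyKind = Ctx.getMDKindID("my.custom.kind");
+//   I->setMetadata(MyKind, MDNode::get(Ctx, None));
+// \endcode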
+
+/// getMDKindNames - Populate the client-supplied vector with the name
+/// corresponding to each custom metadata kind ID.
+void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const {
+ Names.resize(pImpl->CustomMDKindNames.size());
+ for (StringMap<unsigned>::const_iterator I = pImpl->CustomMDKindNames.begin(),
+ E = pImpl->CustomMDKindNames.end(); I != E; ++I)
+ Names[I->second] = I->first();
+}
+
+void LLVMContext::getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const {
+ pImpl->getOperandBundleTags(Tags);
+}
+
+uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
+ return pImpl->getOperandBundleTagID(Tag);
+}
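+
+// Example: the well-known bundle tags registered in the constructor keep
+// their fixed IDs (minimal sketch; `Ctx' is an assumed LLVMContext):
+// \code
+//   assert(Ctx.getOperandBundleTagID("deopt") == LLVMContext::OB_deopt);
+//   assert(Ctx.getOperandBundleTagID("funclet") == LLVMContext::OB_funclet);
+// \endcode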
+
+void LLVMContext::setGC(const Function &Fn, std::string GCName) {
+ auto It = pImpl->GCNames.find(&Fn);
+
+ if (It == pImpl->GCNames.end()) {
+ pImpl->GCNames.insert(std::make_pair(&Fn, std::move(GCName)));
+ return;
+ }
+ It->second = std::move(GCName);
+}
+
+const std::string &LLVMContext::getGC(const Function &Fn) {
+  return pImpl->GCNames[&Fn];
+}
+
+void LLVMContext::deleteGC(const Function &Fn) {
+  pImpl->GCNames.erase(&Fn);
+}
diff --git a/contrib/llvm/lib/IR/LLVMContextImpl.cpp b/contrib/llvm/lib/IR/LLVMContextImpl.cpp
new file mode 100644
index 0000000..5239b4f
--- /dev/null
+++ b/contrib/llvm/lib/IR/LLVMContextImpl.cpp
@@ -0,0 +1,259 @@
+//===-- LLVMContextImpl.cpp - Implement LLVMContextImpl -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the opaque LLVMContextImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Module.h"
+#include <algorithm>
+using namespace llvm;
+
+LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
+ : TheTrueVal(nullptr), TheFalseVal(nullptr),
+ VoidTy(C, Type::VoidTyID),
+ LabelTy(C, Type::LabelTyID),
+ HalfTy(C, Type::HalfTyID),
+ FloatTy(C, Type::FloatTyID),
+ DoubleTy(C, Type::DoubleTyID),
+ MetadataTy(C, Type::MetadataTyID),
+ TokenTy(C, Type::TokenTyID),
+ X86_FP80Ty(C, Type::X86_FP80TyID),
+ FP128Ty(C, Type::FP128TyID),
+ PPC_FP128Ty(C, Type::PPC_FP128TyID),
+ X86_MMXTy(C, Type::X86_MMXTyID),
+ Int1Ty(C, 1),
+ Int8Ty(C, 8),
+ Int16Ty(C, 16),
+ Int32Ty(C, 32),
+ Int64Ty(C, 64),
+ Int128Ty(C, 128) {
+ InlineAsmDiagHandler = nullptr;
+ InlineAsmDiagContext = nullptr;
+ DiagnosticHandler = nullptr;
+ DiagnosticContext = nullptr;
+ RespectDiagnosticFilters = false;
+ YieldCallback = nullptr;
+ YieldOpaqueHandle = nullptr;
+ NamedStructTypesUniqueID = 0;
+}
+
+namespace {
+struct DropReferences {
+ // Takes the value_type of a ConstantUniqueMap's internal map, whose 'second'
+ // is a Constant*.
+ template <typename PairT> void operator()(const PairT &P) {
+ P.second->dropAllReferences();
+ }
+};
+
+// Temporary - drops pair.first instead of second.
+struct DropFirst {
+ // Takes the value_type of a ConstantUniqueMap's internal map, whose 'second'
+ // is a Constant*.
+ template<typename PairT>
+ void operator()(const PairT &P) {
+ P.first->dropAllReferences();
+ }
+};
+}
+
+LLVMContextImpl::~LLVMContextImpl() {
+ // NOTE: We need to delete the contents of OwnedModules, but Module's dtor
+ // will call LLVMContextImpl::removeModule, thus invalidating iterators into
+ // the container. Avoid iterators during this operation:
+ while (!OwnedModules.empty())
+ delete *OwnedModules.begin();
+
+ // Drop references for MDNodes. Do this before Values get deleted to avoid
+ // unnecessary RAUW when nodes are still unresolved.
+ for (auto *I : DistinctMDNodes)
+ I->dropAllReferences();
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ for (auto *I : CLASS##s) \
+ I->dropAllReferences();
+#include "llvm/IR/Metadata.def"
+
+ // Also drop references that come from the Value bridges.
+ for (auto &Pair : ValuesAsMetadata)
+ Pair.second->dropUsers();
+ for (auto &Pair : MetadataAsValues)
+ Pair.second->dropUse();
+
+ // Destroy MDNodes.
+ for (MDNode *I : DistinctMDNodes)
+ I->deleteAsSubclass();
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ for (CLASS * I : CLASS##s) \
+ delete I;
+#include "llvm/IR/Metadata.def"
+
+ // Free the constants.
+ std::for_each(ExprConstants.map_begin(), ExprConstants.map_end(),
+ DropFirst());
+ std::for_each(ArrayConstants.map_begin(), ArrayConstants.map_end(),
+ DropFirst());
+ std::for_each(StructConstants.map_begin(), StructConstants.map_end(),
+ DropFirst());
+ std::for_each(VectorConstants.map_begin(), VectorConstants.map_end(),
+ DropFirst());
+ ExprConstants.freeConstants();
+ ArrayConstants.freeConstants();
+ StructConstants.freeConstants();
+ VectorConstants.freeConstants();
+ DeleteContainerSeconds(CAZConstants);
+ DeleteContainerSeconds(CPNConstants);
+ DeleteContainerSeconds(UVConstants);
+ InlineAsms.freeConstants();
+ DeleteContainerSeconds(IntConstants);
+ DeleteContainerSeconds(FPConstants);
+
+ for (StringMap<ConstantDataSequential*>::iterator I = CDSConstants.begin(),
+ E = CDSConstants.end(); I != E; ++I)
+ delete I->second;
+ CDSConstants.clear();
+
+ // Destroy attributes.
+ for (FoldingSetIterator<AttributeImpl> I = AttrsSet.begin(),
+ E = AttrsSet.end(); I != E; ) {
+ FoldingSetIterator<AttributeImpl> Elem = I++;
+ delete &*Elem;
+ }
+
+ // Destroy attribute lists.
+ for (FoldingSetIterator<AttributeSetImpl> I = AttrsLists.begin(),
+ E = AttrsLists.end(); I != E; ) {
+ FoldingSetIterator<AttributeSetImpl> Elem = I++;
+ delete &*Elem;
+ }
+
+ // Destroy attribute node lists.
+ for (FoldingSetIterator<AttributeSetNode> I = AttrsSetNodes.begin(),
+ E = AttrsSetNodes.end(); I != E; ) {
+ FoldingSetIterator<AttributeSetNode> Elem = I++;
+ delete &*Elem;
+ }
+
+ // Destroy MetadataAsValues.
+ {
+ SmallVector<MetadataAsValue *, 8> MDVs;
+ MDVs.reserve(MetadataAsValues.size());
+ for (auto &Pair : MetadataAsValues)
+ MDVs.push_back(Pair.second);
+ MetadataAsValues.clear();
+ for (auto *V : MDVs)
+ delete V;
+ }
+
+ // Destroy ValuesAsMetadata.
+ for (auto &Pair : ValuesAsMetadata)
+ delete Pair.second;
+
+ // Destroy MDStrings.
+ MDStringCache.clear();
+}
+
+void LLVMContextImpl::dropTriviallyDeadConstantArrays() {
+ bool Changed;
+ do {
+ Changed = false;
+
+ for (auto I = ArrayConstants.map_begin(), E = ArrayConstants.map_end();
+ I != E; ) {
+ auto *C = I->first;
+ I++;
+ if (C->use_empty()) {
+ Changed = true;
+ C->destroyConstant();
+ }
+ }
+
+ } while (Changed);
+}
+
+void Module::dropTriviallyDeadConstantArrays() {
+ Context.pImpl->dropTriviallyDeadConstantArrays();
+}
+
+namespace llvm {
+/// \brief Make MDOperand transparent for hashing.
+///
+/// This overload of an implementation detail of the hashing library makes
+/// MDOperand hash to the same value as a \a Metadata pointer.
+///
+/// Note that overloading \a hash_value() as follows:
+///
+/// \code
+/// size_t hash_value(const MDOperand &X) { return hash_value(X.get()); }
+/// \endcode
+///
+/// does not cause MDOperand to be transparent. In particular, a bare pointer
+/// doesn't get hashed before it's combined, whereas \a MDOperand would.
+static const Metadata *get_hashable_data(const MDOperand &X) { return X.get(); }
+}
+
+unsigned MDNodeOpsKey::calculateHash(MDNode *N, unsigned Offset) {
+ unsigned Hash = hash_combine_range(N->op_begin() + Offset, N->op_end());
+#ifndef NDEBUG
+ {
+ SmallVector<Metadata *, 8> MDs(N->op_begin() + Offset, N->op_end());
+ unsigned RawHash = calculateHash(MDs);
+ assert(Hash == RawHash &&
+ "Expected hash of MDOperand to equal hash of Metadata*");
+ }
+#endif
+ return Hash;
+}
+
+unsigned MDNodeOpsKey::calculateHash(ArrayRef<Metadata *> Ops) {
+ return hash_combine_range(Ops.begin(), Ops.end());
+}
+
+StringMapEntry<uint32_t> *LLVMContextImpl::getOrInsertBundleTag(StringRef Tag) {
+ uint32_t NewIdx = BundleTagCache.size();
+ return &*(BundleTagCache.insert(std::make_pair(Tag, NewIdx)).first);
+}
+
+void LLVMContextImpl::getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const {
+ Tags.resize(BundleTagCache.size());
+ for (const auto &T : BundleTagCache)
+ Tags[T.second] = T.first();
+}
+
+uint32_t LLVMContextImpl::getOperandBundleTagID(StringRef Tag) const {
+ auto I = BundleTagCache.find(Tag);
+ assert(I != BundleTagCache.end() && "Unknown tag!");
+ return I->second;
+}
+
+// ConstantsContext anchors
+void UnaryConstantExpr::anchor() { }
+
+void BinaryConstantExpr::anchor() { }
+
+void SelectConstantExpr::anchor() { }
+
+void ExtractElementConstantExpr::anchor() { }
+
+void InsertElementConstantExpr::anchor() { }
+
+void ShuffleVectorConstantExpr::anchor() { }
+
+void ExtractValueConstantExpr::anchor() { }
+
+void InsertValueConstantExpr::anchor() { }
+
+void GetElementPtrConstantExpr::anchor() { }
+
+void CompareConstantExpr::anchor() { }
+
diff --git a/contrib/llvm/lib/IR/LLVMContextImpl.h b/contrib/llvm/lib/IR/LLVMContextImpl.h
new file mode 100644
index 0000000..d42047d
--- /dev/null
+++ b/contrib/llvm/lib/IR/LLVMContextImpl.h
@@ -0,0 +1,1046 @@
+//===-- LLVMContextImpl.h - The LLVMContextImpl opaque class ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares LLVMContextImpl, the opaque implementation
+// of LLVMContext.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_LLVMCONTEXTIMPL_H
+#define LLVM_LIB_IR_LLVMCONTEXTIMPL_H
+
+#include "AttributeImpl.h"
+#include "ConstantsContext.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
+#include <vector>
+
+namespace llvm {
+
+class ConstantInt;
+class ConstantFP;
+class DiagnosticInfoOptimizationRemark;
+class DiagnosticInfoOptimizationRemarkMissed;
+class DiagnosticInfoOptimizationRemarkAnalysis;
+class GCStrategy;
+class LLVMContext;
+class Type;
+class Value;
+
+struct DenseMapAPIntKeyInfo {
+ static inline APInt getEmptyKey() {
+ APInt V(nullptr, 0);
+ V.VAL = 0;
+ return V;
+ }
+ static inline APInt getTombstoneKey() {
+ APInt V(nullptr, 0);
+ V.VAL = 1;
+ return V;
+ }
+ static unsigned getHashValue(const APInt &Key) {
+ return static_cast<unsigned>(hash_value(Key));
+ }
+ static bool isEqual(const APInt &LHS, const APInt &RHS) {
+ return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
+ }
+};
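+
+// This key info lets the context unique ConstantInts directly on their APInt
+// value, via a map declared along these lines (sketch):
+// \code
+//   DenseMap<APInt, ConstantInt *, DenseMapAPIntKeyInfo> IntConstants;
+// \endcode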
+
+struct DenseMapAPFloatKeyInfo {
+ static inline APFloat getEmptyKey() { return APFloat(APFloat::Bogus, 1); }
+ static inline APFloat getTombstoneKey() { return APFloat(APFloat::Bogus, 2); }
+ static unsigned getHashValue(const APFloat &Key) {
+ return static_cast<unsigned>(hash_value(Key));
+ }
+ static bool isEqual(const APFloat &LHS, const APFloat &RHS) {
+ return LHS.bitwiseIsEqual(RHS);
+ }
+};
+
+struct AnonStructTypeKeyInfo {
+ struct KeyTy {
+ ArrayRef<Type*> ETypes;
+ bool isPacked;
+ KeyTy(const ArrayRef<Type*>& E, bool P) :
+ ETypes(E), isPacked(P) {}
+ KeyTy(const StructType *ST)
+ : ETypes(ST->elements()), isPacked(ST->isPacked()) {}
+ bool operator==(const KeyTy& that) const {
+ if (isPacked != that.isPacked)
+ return false;
+ if (ETypes != that.ETypes)
+ return false;
+ return true;
+ }
+ bool operator!=(const KeyTy& that) const {
+ return !this->operator==(that);
+ }
+ };
+ static inline StructType* getEmptyKey() {
+ return DenseMapInfo<StructType*>::getEmptyKey();
+ }
+ static inline StructType* getTombstoneKey() {
+ return DenseMapInfo<StructType*>::getTombstoneKey();
+ }
+ static unsigned getHashValue(const KeyTy& Key) {
+ return hash_combine(hash_combine_range(Key.ETypes.begin(),
+ Key.ETypes.end()),
+ Key.isPacked);
+ }
+ static unsigned getHashValue(const StructType *ST) {
+ return getHashValue(KeyTy(ST));
+ }
+ static bool isEqual(const KeyTy& LHS, const StructType *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return LHS == KeyTy(RHS);
+ }
+ static bool isEqual(const StructType *LHS, const StructType *RHS) {
+ return LHS == RHS;
+ }
+};
+
+struct FunctionTypeKeyInfo {
+ struct KeyTy {
+ const Type *ReturnType;
+ ArrayRef<Type*> Params;
+ bool isVarArg;
+ KeyTy(const Type* R, const ArrayRef<Type*>& P, bool V) :
+ ReturnType(R), Params(P), isVarArg(V) {}
+ KeyTy(const FunctionType *FT)
+ : ReturnType(FT->getReturnType()), Params(FT->params()),
+ isVarArg(FT->isVarArg()) {}
+ bool operator==(const KeyTy& that) const {
+ if (ReturnType != that.ReturnType)
+ return false;
+ if (isVarArg != that.isVarArg)
+ return false;
+ if (Params != that.Params)
+ return false;
+ return true;
+ }
+ bool operator!=(const KeyTy& that) const {
+ return !this->operator==(that);
+ }
+ };
+ static inline FunctionType* getEmptyKey() {
+ return DenseMapInfo<FunctionType*>::getEmptyKey();
+ }
+ static inline FunctionType* getTombstoneKey() {
+ return DenseMapInfo<FunctionType*>::getTombstoneKey();
+ }
+ static unsigned getHashValue(const KeyTy& Key) {
+ return hash_combine(Key.ReturnType,
+ hash_combine_range(Key.Params.begin(),
+ Key.Params.end()),
+ Key.isVarArg);
+ }
+ static unsigned getHashValue(const FunctionType *FT) {
+ return getHashValue(KeyTy(FT));
+ }
+ static bool isEqual(const KeyTy& LHS, const FunctionType *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return LHS == KeyTy(RHS);
+ }
+ static bool isEqual(const FunctionType *LHS, const FunctionType *RHS) {
+ return LHS == RHS;
+ }
+};
+
+/// \brief Structure for hashing arbitrary MDNode operands.
+class MDNodeOpsKey {
+ ArrayRef<Metadata *> RawOps;
+ ArrayRef<MDOperand> Ops;
+
+ unsigned Hash;
+
+protected:
+ MDNodeOpsKey(ArrayRef<Metadata *> Ops)
+ : RawOps(Ops), Hash(calculateHash(Ops)) {}
+
+ template <class NodeTy>
+ MDNodeOpsKey(const NodeTy *N, unsigned Offset = 0)
+ : Ops(N->op_begin() + Offset, N->op_end()), Hash(N->getHash()) {}
+
+ template <class NodeTy>
+ bool compareOps(const NodeTy *RHS, unsigned Offset = 0) const {
+ if (getHash() != RHS->getHash())
+ return false;
+
+ assert((RawOps.empty() || Ops.empty()) && "Two sets of operands?");
+ return RawOps.empty() ? compareOps(Ops, RHS, Offset)
+ : compareOps(RawOps, RHS, Offset);
+ }
+
+ static unsigned calculateHash(MDNode *N, unsigned Offset = 0);
+
+private:
+ template <class T>
+ static bool compareOps(ArrayRef<T> Ops, const MDNode *RHS, unsigned Offset) {
+ if (Ops.size() != RHS->getNumOperands() - Offset)
+ return false;
+ return std::equal(Ops.begin(), Ops.end(), RHS->op_begin() + Offset);
+ }
+
+ static unsigned calculateHash(ArrayRef<Metadata *> Ops);
+
+public:
+ unsigned getHash() const { return Hash; }
+};
+
+template <class NodeTy> struct MDNodeKeyImpl;
+template <class NodeTy> struct MDNodeInfo;
+
+/// \brief DenseMapInfo for MDTuple.
+///
+/// Note that we don't need the is-function-local bit, since that's implicit in
+/// the operands.
+template <> struct MDNodeKeyImpl<MDTuple> : MDNodeOpsKey {
+ MDNodeKeyImpl(ArrayRef<Metadata *> Ops) : MDNodeOpsKey(Ops) {}
+ MDNodeKeyImpl(const MDTuple *N) : MDNodeOpsKey(N) {}
+
+ bool isKeyOf(const MDTuple *RHS) const { return compareOps(RHS); }
+
+ unsigned getHashValue() const { return getHash(); }
+
+ static unsigned calculateHash(MDTuple *N) {
+ return MDNodeOpsKey::calculateHash(N);
+ }
+};
+
+/// \brief DenseMapInfo for DILocation.
+template <> struct MDNodeKeyImpl<DILocation> {
+ unsigned Line;
+ unsigned Column;
+ Metadata *Scope;
+ Metadata *InlinedAt;
+
+ MDNodeKeyImpl(unsigned Line, unsigned Column, Metadata *Scope,
+ Metadata *InlinedAt)
+ : Line(Line), Column(Column), Scope(Scope), InlinedAt(InlinedAt) {}
+
+ MDNodeKeyImpl(const DILocation *L)
+ : Line(L->getLine()), Column(L->getColumn()), Scope(L->getRawScope()),
+ InlinedAt(L->getRawInlinedAt()) {}
+
+ bool isKeyOf(const DILocation *RHS) const {
+ return Line == RHS->getLine() && Column == RHS->getColumn() &&
+ Scope == RHS->getRawScope() && InlinedAt == RHS->getRawInlinedAt();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Line, Column, Scope, InlinedAt);
+ }
+};
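// Because DILocations are uniqued through this key, requesting the same
// coordinates twice yields the same node; a sketch, assuming an LLVMContext
// 'Ctx' and a scope 'Scope' are already in hand:
//   DILocation *A = DILocation::get(Ctx, /*Line=*/7, /*Column=*/3, Scope);
//   DILocation *B = DILocation::get(Ctx, 7, 3, Scope);
//   assert(A == B && "uniqued nodes are pointer-identical");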
+
+/// \brief DenseMapInfo for GenericDINode.
+template <> struct MDNodeKeyImpl<GenericDINode> : MDNodeOpsKey {
+ unsigned Tag;
+ StringRef Header;
+ MDNodeKeyImpl(unsigned Tag, StringRef Header, ArrayRef<Metadata *> DwarfOps)
+ : MDNodeOpsKey(DwarfOps), Tag(Tag), Header(Header) {}
+ MDNodeKeyImpl(const GenericDINode *N)
+ : MDNodeOpsKey(N, 1), Tag(N->getTag()), Header(N->getHeader()) {}
+
+ bool isKeyOf(const GenericDINode *RHS) const {
+ return Tag == RHS->getTag() && Header == RHS->getHeader() &&
+ compareOps(RHS, 1);
+ }
+
+ unsigned getHashValue() const { return hash_combine(getHash(), Tag, Header); }
+
+ static unsigned calculateHash(GenericDINode *N) {
+ return MDNodeOpsKey::calculateHash(N, 1);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DISubrange> {
+ int64_t Count;
+ int64_t LowerBound;
+
+ MDNodeKeyImpl(int64_t Count, int64_t LowerBound)
+ : Count(Count), LowerBound(LowerBound) {}
+ MDNodeKeyImpl(const DISubrange *N)
+ : Count(N->getCount()), LowerBound(N->getLowerBound()) {}
+
+ bool isKeyOf(const DISubrange *RHS) const {
+ return Count == RHS->getCount() && LowerBound == RHS->getLowerBound();
+ }
+ unsigned getHashValue() const { return hash_combine(Count, LowerBound); }
+};
+
+template <> struct MDNodeKeyImpl<DIEnumerator> {
+ int64_t Value;
+ StringRef Name;
+
+ MDNodeKeyImpl(int64_t Value, StringRef Name) : Value(Value), Name(Name) {}
+ MDNodeKeyImpl(const DIEnumerator *N)
+ : Value(N->getValue()), Name(N->getName()) {}
+
+ bool isKeyOf(const DIEnumerator *RHS) const {
+ return Value == RHS->getValue() && Name == RHS->getName();
+ }
+ unsigned getHashValue() const { return hash_combine(Value, Name); }
+};
+
+template <> struct MDNodeKeyImpl<DIBasicType> {
+ unsigned Tag;
+ StringRef Name;
+ uint64_t SizeInBits;
+ uint64_t AlignInBits;
+ unsigned Encoding;
+
+ MDNodeKeyImpl(unsigned Tag, StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding)
+ : Tag(Tag), Name(Name), SizeInBits(SizeInBits), AlignInBits(AlignInBits),
+ Encoding(Encoding) {}
+ MDNodeKeyImpl(const DIBasicType *N)
+ : Tag(N->getTag()), Name(N->getName()), SizeInBits(N->getSizeInBits()),
+ AlignInBits(N->getAlignInBits()), Encoding(N->getEncoding()) {}
+
+ bool isKeyOf(const DIBasicType *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getName() &&
+ SizeInBits == RHS->getSizeInBits() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ Encoding == RHS->getEncoding();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Tag, Name, SizeInBits, AlignInBits, Encoding);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIDerivedType> {
+ unsigned Tag;
+ StringRef Name;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Scope;
+ Metadata *BaseType;
+ uint64_t SizeInBits;
+ uint64_t AlignInBits;
+ uint64_t OffsetInBits;
+ unsigned Flags;
+ Metadata *ExtraData;
+
+ MDNodeKeyImpl(unsigned Tag, StringRef Name, Metadata *File, unsigned Line,
+ Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ Metadata *ExtraData)
+ : Tag(Tag), Name(Name), File(File), Line(Line), Scope(Scope),
+ BaseType(BaseType), SizeInBits(SizeInBits), AlignInBits(AlignInBits),
+ OffsetInBits(OffsetInBits), Flags(Flags), ExtraData(ExtraData) {}
+ MDNodeKeyImpl(const DIDerivedType *N)
+ : Tag(N->getTag()), Name(N->getName()), File(N->getRawFile()),
+ Line(N->getLine()), Scope(N->getRawScope()),
+ BaseType(N->getRawBaseType()), SizeInBits(N->getSizeInBits()),
+ AlignInBits(N->getAlignInBits()), OffsetInBits(N->getOffsetInBits()),
+ Flags(N->getFlags()), ExtraData(N->getRawExtraData()) {}
+
+ bool isKeyOf(const DIDerivedType *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Scope == RHS->getRawScope() && BaseType == RHS->getRawBaseType() &&
+ SizeInBits == RHS->getSizeInBits() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ OffsetInBits == RHS->getOffsetInBits() && Flags == RHS->getFlags() &&
+ ExtraData == RHS->getRawExtraData();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, ExtraData);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DICompositeType> {
+ unsigned Tag;
+ StringRef Name;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Scope;
+ Metadata *BaseType;
+ uint64_t SizeInBits;
+ uint64_t AlignInBits;
+ uint64_t OffsetInBits;
+ unsigned Flags;
+ Metadata *Elements;
+ unsigned RuntimeLang;
+ Metadata *VTableHolder;
+ Metadata *TemplateParams;
+ StringRef Identifier;
+
+ MDNodeKeyImpl(unsigned Tag, StringRef Name, Metadata *File, unsigned Line,
+ Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ Metadata *Elements, unsigned RuntimeLang,
+ Metadata *VTableHolder, Metadata *TemplateParams,
+ StringRef Identifier)
+ : Tag(Tag), Name(Name), File(File), Line(Line), Scope(Scope),
+ BaseType(BaseType), SizeInBits(SizeInBits), AlignInBits(AlignInBits),
+ OffsetInBits(OffsetInBits), Flags(Flags), Elements(Elements),
+ RuntimeLang(RuntimeLang), VTableHolder(VTableHolder),
+ TemplateParams(TemplateParams), Identifier(Identifier) {}
+ MDNodeKeyImpl(const DICompositeType *N)
+ : Tag(N->getTag()), Name(N->getName()), File(N->getRawFile()),
+ Line(N->getLine()), Scope(N->getRawScope()),
+ BaseType(N->getRawBaseType()), SizeInBits(N->getSizeInBits()),
+ AlignInBits(N->getAlignInBits()), OffsetInBits(N->getOffsetInBits()),
+ Flags(N->getFlags()), Elements(N->getRawElements()),
+ RuntimeLang(N->getRuntimeLang()), VTableHolder(N->getRawVTableHolder()),
+ TemplateParams(N->getRawTemplateParams()),
+ Identifier(N->getIdentifier()) {}
+
+ bool isKeyOf(const DICompositeType *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Scope == RHS->getRawScope() && BaseType == RHS->getRawBaseType() &&
+ SizeInBits == RHS->getSizeInBits() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ OffsetInBits == RHS->getOffsetInBits() && Flags == RHS->getFlags() &&
+ Elements == RHS->getRawElements() &&
+ RuntimeLang == RHS->getRuntimeLang() &&
+ VTableHolder == RHS->getRawVTableHolder() &&
+ TemplateParams == RHS->getRawTemplateParams() &&
+ Identifier == RHS->getIdentifier();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
+ VTableHolder, TemplateParams, Identifier);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DISubroutineType> {
+ unsigned Flags;
+ Metadata *TypeArray;
+
+ MDNodeKeyImpl(int64_t Flags, Metadata *TypeArray)
+ : Flags(Flags), TypeArray(TypeArray) {}
+ MDNodeKeyImpl(const DISubroutineType *N)
+ : Flags(N->getFlags()), TypeArray(N->getRawTypeArray()) {}
+
+ bool isKeyOf(const DISubroutineType *RHS) const {
+ return Flags == RHS->getFlags() && TypeArray == RHS->getRawTypeArray();
+ }
+ unsigned getHashValue() const { return hash_combine(Flags, TypeArray); }
+};
+
+template <> struct MDNodeKeyImpl<DIFile> {
+ StringRef Filename;
+ StringRef Directory;
+
+ MDNodeKeyImpl(StringRef Filename, StringRef Directory)
+ : Filename(Filename), Directory(Directory) {}
+ MDNodeKeyImpl(const DIFile *N)
+ : Filename(N->getFilename()), Directory(N->getDirectory()) {}
+
+ bool isKeyOf(const DIFile *RHS) const {
+ return Filename == RHS->getFilename() && Directory == RHS->getDirectory();
+ }
+ unsigned getHashValue() const { return hash_combine(Filename, Directory); }
+};
+
+template <> struct MDNodeKeyImpl<DISubprogram> {
+ Metadata *Scope;
+ StringRef Name;
+ StringRef LinkageName;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Type;
+ bool IsLocalToUnit;
+ bool IsDefinition;
+ unsigned ScopeLine;
+ Metadata *ContainingType;
+ unsigned Virtuality;
+ unsigned VirtualIndex;
+ unsigned Flags;
+ bool IsOptimized;
+ Metadata *TemplateParams;
+ Metadata *Declaration;
+ Metadata *Variables;
+
+ MDNodeKeyImpl(Metadata *Scope, StringRef Name, StringRef LinkageName,
+ Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
+ Metadata *ContainingType, unsigned Virtuality,
+ unsigned VirtualIndex, unsigned Flags, bool IsOptimized,
+ Metadata *TemplateParams, Metadata *Declaration,
+ Metadata *Variables)
+ : Scope(Scope), Name(Name), LinkageName(LinkageName), File(File),
+ Line(Line), Type(Type), IsLocalToUnit(IsLocalToUnit),
+ IsDefinition(IsDefinition), ScopeLine(ScopeLine),
+ ContainingType(ContainingType), Virtuality(Virtuality),
+ VirtualIndex(VirtualIndex), Flags(Flags), IsOptimized(IsOptimized),
+ TemplateParams(TemplateParams), Declaration(Declaration),
+ Variables(Variables) {}
+ MDNodeKeyImpl(const DISubprogram *N)
+ : Scope(N->getRawScope()), Name(N->getName()),
+ LinkageName(N->getLinkageName()), File(N->getRawFile()),
+ Line(N->getLine()), Type(N->getRawType()),
+ IsLocalToUnit(N->isLocalToUnit()), IsDefinition(N->isDefinition()),
+ ScopeLine(N->getScopeLine()), ContainingType(N->getRawContainingType()),
+ Virtuality(N->getVirtuality()), VirtualIndex(N->getVirtualIndex()),
+ Flags(N->getFlags()), IsOptimized(N->isOptimized()),
+ TemplateParams(N->getRawTemplateParams()),
+ Declaration(N->getRawDeclaration()), Variables(N->getRawVariables()) {}
+
+ bool isKeyOf(const DISubprogram *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getName() &&
+ LinkageName == RHS->getLinkageName() && File == RHS->getRawFile() &&
+ Line == RHS->getLine() && Type == RHS->getRawType() &&
+ IsLocalToUnit == RHS->isLocalToUnit() &&
+ IsDefinition == RHS->isDefinition() &&
+ ScopeLine == RHS->getScopeLine() &&
+ ContainingType == RHS->getRawContainingType() &&
+ Virtuality == RHS->getVirtuality() &&
+ VirtualIndex == RHS->getVirtualIndex() && Flags == RHS->getFlags() &&
+ IsOptimized == RHS->isOptimized() &&
+ TemplateParams == RHS->getRawTemplateParams() &&
+ Declaration == RHS->getRawDeclaration() &&
+ Variables == RHS->getRawVariables();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Scope, Name, LinkageName, File, Line, Type,
+ IsLocalToUnit, IsDefinition, ScopeLine, ContainingType,
+ Virtuality, VirtualIndex, Flags, IsOptimized,
+ TemplateParams, Declaration, Variables);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DILexicalBlock> {
+ Metadata *Scope;
+ Metadata *File;
+ unsigned Line;
+ unsigned Column;
+
+ MDNodeKeyImpl(Metadata *Scope, Metadata *File, unsigned Line, unsigned Column)
+ : Scope(Scope), File(File), Line(Line), Column(Column) {}
+ MDNodeKeyImpl(const DILexicalBlock *N)
+ : Scope(N->getRawScope()), File(N->getRawFile()), Line(N->getLine()),
+ Column(N->getColumn()) {}
+
+ bool isKeyOf(const DILexicalBlock *RHS) const {
+ return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
+ Line == RHS->getLine() && Column == RHS->getColumn();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Scope, File, Line, Column);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DILexicalBlockFile> {
+ Metadata *Scope;
+ Metadata *File;
+ unsigned Discriminator;
+
+ MDNodeKeyImpl(Metadata *Scope, Metadata *File, unsigned Discriminator)
+ : Scope(Scope), File(File), Discriminator(Discriminator) {}
+ MDNodeKeyImpl(const DILexicalBlockFile *N)
+ : Scope(N->getRawScope()), File(N->getRawFile()),
+ Discriminator(N->getDiscriminator()) {}
+
+ bool isKeyOf(const DILexicalBlockFile *RHS) const {
+ return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
+ Discriminator == RHS->getDiscriminator();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Scope, File, Discriminator);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DINamespace> {
+ Metadata *Scope;
+ Metadata *File;
+ StringRef Name;
+ unsigned Line;
+
+ MDNodeKeyImpl(Metadata *Scope, Metadata *File, StringRef Name, unsigned Line)
+ : Scope(Scope), File(File), Name(Name), Line(Line) {}
+ MDNodeKeyImpl(const DINamespace *N)
+ : Scope(N->getRawScope()), File(N->getRawFile()), Name(N->getName()),
+ Line(N->getLine()) {}
+
+ bool isKeyOf(const DINamespace *RHS) const {
+ return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
+ Name == RHS->getName() && Line == RHS->getLine();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Scope, File, Name, Line);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIModule> {
+ Metadata *Scope;
+ StringRef Name;
+ StringRef ConfigurationMacros;
+ StringRef IncludePath;
+ StringRef ISysRoot;
+ MDNodeKeyImpl(Metadata *Scope, StringRef Name,
+ StringRef ConfigurationMacros,
+ StringRef IncludePath,
+ StringRef ISysRoot)
+ : Scope(Scope), Name(Name), ConfigurationMacros(ConfigurationMacros),
+ IncludePath(IncludePath), ISysRoot(ISysRoot) {}
+ MDNodeKeyImpl(const DIModule *N)
+ : Scope(N->getRawScope()), Name(N->getName()),
+ ConfigurationMacros(N->getConfigurationMacros()),
+ IncludePath(N->getIncludePath()), ISysRoot(N->getISysRoot()) {}
+
+ bool isKeyOf(const DIModule *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getName() &&
+ ConfigurationMacros == RHS->getConfigurationMacros() &&
+ IncludePath == RHS->getIncludePath() &&
+ ISysRoot == RHS->getISysRoot();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Scope, Name,
+ ConfigurationMacros, IncludePath, ISysRoot);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DITemplateTypeParameter> {
+ StringRef Name;
+ Metadata *Type;
+
+ MDNodeKeyImpl(StringRef Name, Metadata *Type) : Name(Name), Type(Type) {}
+ MDNodeKeyImpl(const DITemplateTypeParameter *N)
+ : Name(N->getName()), Type(N->getRawType()) {}
+
+ bool isKeyOf(const DITemplateTypeParameter *RHS) const {
+ return Name == RHS->getName() && Type == RHS->getRawType();
+ }
+ unsigned getHashValue() const { return hash_combine(Name, Type); }
+};
+
+template <> struct MDNodeKeyImpl<DITemplateValueParameter> {
+ unsigned Tag;
+ StringRef Name;
+ Metadata *Type;
+ Metadata *Value;
+
+ MDNodeKeyImpl(unsigned Tag, StringRef Name, Metadata *Type, Metadata *Value)
+ : Tag(Tag), Name(Name), Type(Type), Value(Value) {}
+ MDNodeKeyImpl(const DITemplateValueParameter *N)
+ : Tag(N->getTag()), Name(N->getName()), Type(N->getRawType()),
+ Value(N->getValue()) {}
+
+ bool isKeyOf(const DITemplateValueParameter *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getName() &&
+ Type == RHS->getRawType() && Value == RHS->getValue();
+ }
+ unsigned getHashValue() const { return hash_combine(Tag, Name, Type, Value); }
+};
+
+template <> struct MDNodeKeyImpl<DIGlobalVariable> {
+ Metadata *Scope;
+ StringRef Name;
+ StringRef LinkageName;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Type;
+ bool IsLocalToUnit;
+ bool IsDefinition;
+ Metadata *Variable;
+ Metadata *StaticDataMemberDeclaration;
+
+ MDNodeKeyImpl(Metadata *Scope, StringRef Name, StringRef LinkageName,
+ Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition, Metadata *Variable,
+ Metadata *StaticDataMemberDeclaration)
+ : Scope(Scope), Name(Name), LinkageName(LinkageName), File(File),
+ Line(Line), Type(Type), IsLocalToUnit(IsLocalToUnit),
+ IsDefinition(IsDefinition), Variable(Variable),
+ StaticDataMemberDeclaration(StaticDataMemberDeclaration) {}
+ MDNodeKeyImpl(const DIGlobalVariable *N)
+ : Scope(N->getRawScope()), Name(N->getName()),
+ LinkageName(N->getLinkageName()), File(N->getRawFile()),
+ Line(N->getLine()), Type(N->getRawType()),
+ IsLocalToUnit(N->isLocalToUnit()), IsDefinition(N->isDefinition()),
+ Variable(N->getRawVariable()),
+ StaticDataMemberDeclaration(N->getRawStaticDataMemberDeclaration()) {}
+
+ bool isKeyOf(const DIGlobalVariable *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getName() &&
+ LinkageName == RHS->getLinkageName() && File == RHS->getRawFile() &&
+ Line == RHS->getLine() && Type == RHS->getRawType() &&
+ IsLocalToUnit == RHS->isLocalToUnit() &&
+ IsDefinition == RHS->isDefinition() &&
+ Variable == RHS->getRawVariable() &&
+ StaticDataMemberDeclaration ==
+ RHS->getRawStaticDataMemberDeclaration();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Scope, Name, LinkageName, File, Line, Type,
+ IsLocalToUnit, IsDefinition, Variable,
+ StaticDataMemberDeclaration);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DILocalVariable> {
+ Metadata *Scope;
+ StringRef Name;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Type;
+ unsigned Arg;
+ unsigned Flags;
+
+ MDNodeKeyImpl(Metadata *Scope, StringRef Name, Metadata *File, unsigned Line,
+ Metadata *Type, unsigned Arg, unsigned Flags)
+ : Scope(Scope), Name(Name), File(File), Line(Line), Type(Type), Arg(Arg),
+ Flags(Flags) {}
+ MDNodeKeyImpl(const DILocalVariable *N)
+ : Scope(N->getRawScope()), Name(N->getName()), File(N->getRawFile()),
+ Line(N->getLine()), Type(N->getRawType()), Arg(N->getArg()),
+ Flags(N->getFlags()) {}
+
+ bool isKeyOf(const DILocalVariable *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Type == RHS->getRawType() && Arg == RHS->getArg() &&
+ Flags == RHS->getFlags();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Scope, Name, File, Line, Type, Arg, Flags);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIExpression> {
+ ArrayRef<uint64_t> Elements;
+
+ MDNodeKeyImpl(ArrayRef<uint64_t> Elements) : Elements(Elements) {}
+ MDNodeKeyImpl(const DIExpression *N) : Elements(N->getElements()) {}
+
+ bool isKeyOf(const DIExpression *RHS) const {
+ return Elements == RHS->getElements();
+ }
+ unsigned getHashValue() const {
+ return hash_combine_range(Elements.begin(), Elements.end());
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIObjCProperty> {
+ StringRef Name;
+ Metadata *File;
+ unsigned Line;
+ StringRef GetterName;
+ StringRef SetterName;
+ unsigned Attributes;
+ Metadata *Type;
+
+ MDNodeKeyImpl(StringRef Name, Metadata *File, unsigned Line,
+ StringRef GetterName, StringRef SetterName, unsigned Attributes,
+ Metadata *Type)
+ : Name(Name), File(File), Line(Line), GetterName(GetterName),
+ SetterName(SetterName), Attributes(Attributes), Type(Type) {}
+ MDNodeKeyImpl(const DIObjCProperty *N)
+ : Name(N->getName()), File(N->getRawFile()), Line(N->getLine()),
+ GetterName(N->getGetterName()), SetterName(N->getSetterName()),
+ Attributes(N->getAttributes()), Type(N->getRawType()) {}
+
+ bool isKeyOf(const DIObjCProperty *RHS) const {
+ return Name == RHS->getName() && File == RHS->getRawFile() &&
+ Line == RHS->getLine() && GetterName == RHS->getGetterName() &&
+ SetterName == RHS->getSetterName() &&
+ Attributes == RHS->getAttributes() && Type == RHS->getRawType();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Name, File, Line, GetterName, SetterName, Attributes,
+ Type);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIImportedEntity> {
+ unsigned Tag;
+ Metadata *Scope;
+ Metadata *Entity;
+ unsigned Line;
+ StringRef Name;
+
+ MDNodeKeyImpl(unsigned Tag, Metadata *Scope, Metadata *Entity, unsigned Line,
+ StringRef Name)
+ : Tag(Tag), Scope(Scope), Entity(Entity), Line(Line), Name(Name) {}
+ MDNodeKeyImpl(const DIImportedEntity *N)
+ : Tag(N->getTag()), Scope(N->getRawScope()), Entity(N->getRawEntity()),
+ Line(N->getLine()), Name(N->getName()) {}
+
+ bool isKeyOf(const DIImportedEntity *RHS) const {
+ return Tag == RHS->getTag() && Scope == RHS->getRawScope() &&
+ Entity == RHS->getRawEntity() && Line == RHS->getLine() &&
+ Name == RHS->getName();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(Tag, Scope, Entity, Line, Name);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIMacro> {
+ unsigned MIType;
+ unsigned Line;
+ StringRef Name;
+ StringRef Value;
+
+ MDNodeKeyImpl(unsigned MIType, unsigned Line, StringRef Name, StringRef Value)
+ : MIType(MIType), Line(Line), Name(Name), Value(Value) {}
+ MDNodeKeyImpl(const DIMacro *N)
+ : MIType(N->getMacinfoType()), Line(N->getLine()), Name(N->getName()),
+ Value(N->getValue()) {}
+
+ bool isKeyOf(const DIMacro *RHS) const {
+ return MIType == RHS->getMacinfoType() && Line == RHS->getLine() &&
+ Name == RHS->getName() && Value == RHS->getValue();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(MIType, Line, Name, Value);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIMacroFile> {
+ unsigned MIType;
+ unsigned Line;
+ Metadata *File;
+ Metadata *Elements;
+
+ MDNodeKeyImpl(unsigned MIType, unsigned Line, Metadata *File,
+ Metadata *Elements)
+ : MIType(MIType), Line(Line), File(File), Elements(Elements) {}
+ MDNodeKeyImpl(const DIMacroFile *N)
+ : MIType(N->getMacinfoType()), Line(N->getLine()), File(N->getRawFile()),
+ Elements(N->getRawElements()) {}
+
+ bool isKeyOf(const DIMacroFile *RHS) const {
+ return MIType == RHS->getMacinfoType() && Line == RHS->getLine() &&
+ File == RHS->getRawFile() && Elements == RHS->getRawElements();
+ }
+ unsigned getHashValue() const {
+ return hash_combine(MIType, Line, File, Elements);
+ }
+};
+
+/// \brief DenseMapInfo for MDNode subclasses.
+template <class NodeTy> struct MDNodeInfo {
+ typedef MDNodeKeyImpl<NodeTy> KeyTy;
+ static inline NodeTy *getEmptyKey() {
+ return DenseMapInfo<NodeTy *>::getEmptyKey();
+ }
+ static inline NodeTy *getTombstoneKey() {
+ return DenseMapInfo<NodeTy *>::getTombstoneKey();
+ }
+ static unsigned getHashValue(const KeyTy &Key) { return Key.getHashValue(); }
+ static unsigned getHashValue(const NodeTy *N) {
+ return KeyTy(N).getHashValue();
+ }
+ static bool isEqual(const KeyTy &LHS, const NodeTy *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return LHS.isKeyOf(RHS);
+ }
+ static bool isEqual(const NodeTy *LHS, const NodeTy *RHS) {
+ return LHS == RHS;
+ }
+};
+
+#define HANDLE_MDNODE_LEAF(CLASS) typedef MDNodeInfo<CLASS> CLASS##Info;
+#include "llvm/IR/Metadata.def"
+
+/// \brief Map-like storage for metadata attachments.
+class MDAttachmentMap {
+ SmallVector<std::pair<unsigned, TrackingMDNodeRef>, 2> Attachments;
+
+public:
+ bool empty() const { return Attachments.empty(); }
+ size_t size() const { return Attachments.size(); }
+
+ /// \brief Get a particular attachment (if any).
+ MDNode *lookup(unsigned ID) const;
+
+ /// \brief Set an attachment to a particular node.
+ ///
+ /// Set the \c ID attachment to \c MD, replacing the current attachment at \c
+ /// ID (if any).
+ void set(unsigned ID, MDNode &MD);
+
+ /// \brief Remove an attachment.
+ ///
+ /// Remove the attachment at \c ID, if any.
+ void erase(unsigned ID);
+
+ /// \brief Copy out all the attachments.
+ ///
+ /// Copies all the current attachments into \c Result, sorting by attachment
+ /// ID. This function does \em not clear \c Result.
+ void getAll(SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const;
+
+ /// \brief Erase matching attachments.
+ ///
+ /// Erases all attachments matching the \c shouldRemove predicate.
+ template <class PredTy> void remove_if(PredTy shouldRemove) {
+ Attachments.erase(
+ std::remove_if(Attachments.begin(), Attachments.end(), shouldRemove),
+ Attachments.end());
+ }
+};
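// Usage sketch for MDAttachmentMap; kind IDs come from LLVMContext, while
// 'DbgNode' and 'consume' are hypothetical stand-ins:
//   MDAttachmentMap MDs;
//   MDs.set(LLVMContext::MD_dbg, *DbgNode);   // attach !dbg
//   if (MDNode *Dbg = MDs.lookup(LLVMContext::MD_dbg))
//     consume(Dbg);
//   MDs.remove_if([](const std::pair<unsigned, TrackingMDNodeRef> &A) {
//     return A.first == LLVMContext::MD_tbaa; // strip !tbaa
//   });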
+
+class LLVMContextImpl {
+public:
+ /// OwnedModules - The set of modules instantiated in this context, and which
+ /// will be automatically deleted if this context is deleted.
+ SmallPtrSet<Module*, 4> OwnedModules;
+
+ LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler;
+ void *InlineAsmDiagContext;
+
+ LLVMContext::DiagnosticHandlerTy DiagnosticHandler;
+ void *DiagnosticContext;
+ bool RespectDiagnosticFilters;
+
+ LLVMContext::YieldCallbackTy YieldCallback;
+ void *YieldOpaqueHandle;
+
+ typedef DenseMap<APInt, ConstantInt *, DenseMapAPIntKeyInfo> IntMapTy;
+ IntMapTy IntConstants;
+
+ typedef DenseMap<APFloat, ConstantFP *, DenseMapAPFloatKeyInfo> FPMapTy;
+ FPMapTy FPConstants;
+
+ FoldingSet<AttributeImpl> AttrsSet;
+ FoldingSet<AttributeSetImpl> AttrsLists;
+ FoldingSet<AttributeSetNode> AttrsSetNodes;
+
+ StringMap<MDString> MDStringCache;
+ DenseMap<Value *, ValueAsMetadata *> ValuesAsMetadata;
+ DenseMap<Metadata *, MetadataAsValue *> MetadataAsValues;
+
+ DenseMap<const Value*, ValueName*> ValueNames;
+
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ DenseSet<CLASS *, CLASS##Info> CLASS##s;
+#include "llvm/IR/Metadata.def"
+
+ // MDNodes may be uniqued or not uniqued. When they're not uniqued, they
+ // aren't in the MDNodeSet, but they're still shared between objects, so no
+ // one object can destroy them. This set allows us to at least destroy them
+ // on Context destruction.
+ SmallPtrSet<MDNode *, 1> DistinctMDNodes;
+
+ DenseMap<Type*, ConstantAggregateZero*> CAZConstants;
+
+ typedef ConstantUniqueMap<ConstantArray> ArrayConstantsTy;
+ ArrayConstantsTy ArrayConstants;
+
+ typedef ConstantUniqueMap<ConstantStruct> StructConstantsTy;
+ StructConstantsTy StructConstants;
+
+ typedef ConstantUniqueMap<ConstantVector> VectorConstantsTy;
+ VectorConstantsTy VectorConstants;
+
+ DenseMap<PointerType*, ConstantPointerNull*> CPNConstants;
+
+ DenseMap<Type*, UndefValue*> UVConstants;
+
+ StringMap<ConstantDataSequential*> CDSConstants;
+
+ DenseMap<std::pair<const Function *, const BasicBlock *>, BlockAddress *>
+ BlockAddresses;
+ ConstantUniqueMap<ConstantExpr> ExprConstants;
+
+ ConstantUniqueMap<InlineAsm> InlineAsms;
+
+ ConstantInt *TheTrueVal;
+ ConstantInt *TheFalseVal;
+
+ std::unique_ptr<ConstantTokenNone> TheNoneToken;
+
+ // Basic type instances.
+ Type VoidTy, LabelTy, HalfTy, FloatTy, DoubleTy, MetadataTy, TokenTy;
+ Type X86_FP80Ty, FP128Ty, PPC_FP128Ty, X86_MMXTy;
+ IntegerType Int1Ty, Int8Ty, Int16Ty, Int32Ty, Int64Ty, Int128Ty;
+
+
+ /// TypeAllocator - All dynamically allocated types are allocated from this.
+ /// They live forever until the context is torn down.
+ BumpPtrAllocator TypeAllocator;
+
+ DenseMap<unsigned, IntegerType*> IntegerTypes;
+
+ typedef DenseSet<FunctionType *, FunctionTypeKeyInfo> FunctionTypeSet;
+ FunctionTypeSet FunctionTypes;
+ typedef DenseSet<StructType *, AnonStructTypeKeyInfo> StructTypeSet;
+ StructTypeSet AnonStructTypes;
+ StringMap<StructType*> NamedStructTypes;
+ unsigned NamedStructTypesUniqueID;
+
+ DenseMap<std::pair<Type *, uint64_t>, ArrayType*> ArrayTypes;
+ DenseMap<std::pair<Type *, unsigned>, VectorType*> VectorTypes;
+ DenseMap<Type*, PointerType*> PointerTypes; // Pointers in AddrSpace = 0
+ DenseMap<std::pair<Type*, unsigned>, PointerType*> ASPointerTypes;
+
+
+ /// ValueHandles - This map keeps track of all of the value handles that are
+ /// watching a Value*. The Value::HasValueHandle bit is used to know
+ /// whether or not a value has an entry in this map.
+ typedef DenseMap<Value*, ValueHandleBase*> ValueHandlesTy;
+ ValueHandlesTy ValueHandles;
+
+ /// CustomMDKindNames - Map to hold the metadata string to ID mapping.
+ StringMap<unsigned> CustomMDKindNames;
+
+ /// Collection of per-instruction metadata used in this context.
+ DenseMap<const Instruction *, MDAttachmentMap> InstructionMetadata;
+
+ /// Collection of per-function metadata used in this context.
+ DenseMap<const Function *, MDAttachmentMap> FunctionMetadata;
+
+ /// DiscriminatorTable - This table maps file:line locations to an
+ /// integer representing the next DWARF path discriminator to assign to
+ /// instructions in different blocks at the same location.
+ DenseMap<std::pair<const char *, unsigned>, unsigned> DiscriminatorTable;
+
+ int getOrAddScopeRecordIdxEntry(MDNode *N, int ExistingIdx);
+ int getOrAddScopeInlinedAtIdxEntry(MDNode *Scope, MDNode *IA,int ExistingIdx);
+
+ /// \brief A set of interned tags for operand bundles. The StringMap maps
+ /// bundle tags to their IDs.
+ ///
+ /// \see LLVMContext::getOperandBundleTagID
+ StringMap<uint32_t> BundleTagCache;
+
+ StringMapEntry<uint32_t> *getOrInsertBundleTag(StringRef Tag);
+ void getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const;
+ uint32_t getOperandBundleTagID(StringRef Tag) const;
+
+ /// Maintain the GC name for each function.
+ ///
+ /// This saves allocating an additional word in Function for programs which
+ /// do not use GC (i.e., most programs) at the cost of increased overhead for
+ /// clients which do use GC.
+ DenseMap<const Function*, std::string> GCNames;
+
+ LLVMContextImpl(LLVMContext &C);
+ ~LLVMContextImpl();
+
+ /// Destroy the ConstantArrays if they are not used.
+ void dropTriviallyDeadConstantArrays();
+};
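// Observable effect of the uniquing maps above (a sketch): requesting the
// same constant twice in one context returns the same object.
//   LLVMContext Ctx;
//   ConstantInt *A = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
//   ConstantInt *B = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
//   assert(A == B); // both resolved through IntConstants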
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/IR/LegacyPassManager.cpp b/contrib/llvm/lib/IR/LegacyPassManager.cpp
new file mode 100644
index 0000000..63d89f2
--- /dev/null
+++ b/contrib/llvm/lib/IR/LegacyPassManager.cpp
@@ -0,0 +1,1934 @@
+//===- LegacyPassManager.cpp - LLVM Pass Infrastructure Implementation ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the legacy LLVM Pass Manager infrastructure.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/IR/LegacyPassNameParser.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/TimeValue.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <map>
+#include <unordered_set>
+using namespace llvm;
+using namespace llvm::legacy;
+
+// See PassManagers.h for Pass Manager infrastructure overview.
+
+//===----------------------------------------------------------------------===//
+// Pass debugging information. Often it is useful to find out what pass is
+// running when a crash occurs in a utility. When this library is compiled with
+// debugging on, a command line option (--debug-pass) is enabled that causes the
+// pass name to be printed before it executes.
+//
+
+namespace {
+// Different debug levels that can be enabled...
+enum PassDebugLevel {
+ Disabled, Arguments, Structure, Executions, Details
+};
+}
+
+static cl::opt<enum PassDebugLevel>
+PassDebugging("debug-pass", cl::Hidden,
+ cl::desc("Print PassManager debugging information"),
+ cl::values(
+ clEnumVal(Disabled , "disable debug output"),
+ clEnumVal(Arguments , "print pass arguments to pass to 'opt'"),
+ clEnumVal(Structure , "print pass structure before run()"),
+ clEnumVal(Executions, "print pass name before it is executed"),
+ clEnumVal(Details , "print pass details when it is executed"),
+ clEnumValEnd));
+
+namespace {
+typedef llvm::cl::list<const llvm::PassInfo *, bool, PassNameParser>
+PassOptionList;
+}
+
+// Print IR out before/after specified passes.
+static PassOptionList
+PrintBefore("print-before",
+ llvm::cl::desc("Print IR before specified passes"),
+ cl::Hidden);
+
+static PassOptionList
+PrintAfter("print-after",
+ llvm::cl::desc("Print IR after specified passes"),
+ cl::Hidden);
+
+static cl::opt<bool>
+PrintBeforeAll("print-before-all",
+ llvm::cl::desc("Print IR before each pass"),
+ cl::init(false));
+static cl::opt<bool>
+PrintAfterAll("print-after-all",
+ llvm::cl::desc("Print IR after each pass"),
+ cl::init(false));
+
+static cl::list<std::string>
+ PrintFuncsList("filter-print-funcs", cl::value_desc("function names"),
+ cl::desc("Only print IR for functions whose name "
+ "match this for all print-[before|after][-all] "
+ "options"),
+ cl::CommaSeparated);
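// Usage note: these options combine on the opt tool, e.g.
//   opt -print-after=instcombine -filter-print-funcs=main in.ll -o out.ll
// dumps IR after instcombine only for the function named 'main'.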
+
+/// This is a helper to determine whether to print IR before or
+/// after a pass.
+static bool ShouldPrintBeforeOrAfterPass(const PassInfo *PI,
+ PassOptionList &PassesToPrint) {
+ for (auto *PassInf : PassesToPrint) {
+ if (PassInf)
+ if (PassInf->getPassArgument() == PI->getPassArgument()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// This is a utility to check whether a pass should have IR dumped
+/// before it.
+static bool ShouldPrintBeforePass(const PassInfo *PI) {
+ return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PI, PrintBefore);
+}
+
+/// This is a utility to check whether a pass should have IR dumped
+/// after it.
+static bool ShouldPrintAfterPass(const PassInfo *PI) {
+ return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PI, PrintAfter);
+}
+
+bool llvm::isFunctionInPrintList(StringRef FunctionName) {
+ static std::unordered_set<std::string> PrintFuncNames(PrintFuncsList.begin(),
+ PrintFuncsList.end());
+ return PrintFuncNames.empty() || PrintFuncNames.count(FunctionName);
+}
+/// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
+/// or higher is specified.
+bool PMDataManager::isPassDebuggingExecutionsOrMore() const {
+ return PassDebugging >= Executions;
+}
+
+
+void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
+ if (!V && !M)
+ OS << "Releasing pass '";
+ else
+ OS << "Running pass '";
+
+ OS << P->getPassName() << "'";
+
+ if (M) {
+ OS << " on module '" << M->getModuleIdentifier() << "'.\n";
+ return;
+ }
+ if (!V) {
+ OS << '\n';
+ return;
+ }
+
+ OS << " on ";
+ if (isa<Function>(V))
+ OS << "function";
+ else if (isa<BasicBlock>(V))
+ OS << "basic block";
+ else
+ OS << "value";
+
+ OS << " '";
+ V->printAsOperand(OS, /*PrintTy=*/false, M);
+ OS << "'\n";
+}
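// On a crash, this entry surfaces context in the pretty stack trace,
// printing a line such as (illustrative output):
//   Running pass 'Combine redundant instructions' on function '@main'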
+
+
+namespace {
+//===----------------------------------------------------------------------===//
+// BBPassManager
+//
+/// BBPassManager manages BasicBlockPasses. It batches all the passes
+/// together and sequences them so that each basic block is fully processed
+/// before moving on to the next one.
+class BBPassManager : public PMDataManager, public FunctionPass {
+
+public:
+ static char ID;
+ explicit BBPassManager()
+ : PMDataManager(), FunctionPass(ID) {}
+
+ /// Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the function, and if so, return true.
+ bool runOnFunction(Function &F) override;
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ bool doInitialization(Module &M) override;
+ bool doInitialization(Function &F);
+ bool doFinalization(Module &M) override;
+ bool doFinalization(Function &F);
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+
+ const char *getPassName() const override {
+ return "BasicBlock Pass Manager";
+ }
+
+ // Print passes managed by this manager
+ void dumpPassStructure(unsigned Offset) override {
+ dbgs().indent(Offset*2) << "BasicBlockPass Manager\n";
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ BasicBlockPass *BP = getContainedPass(Index);
+ BP->dumpPassStructure(Offset + 1);
+ dumpLastUses(BP, Offset+1);
+ }
+ }
+
+ BasicBlockPass *getContainedPass(unsigned N) {
+ assert(N < PassVector.size() && "Pass number out of range!");
+ BasicBlockPass *BP = static_cast<BasicBlockPass *>(PassVector[N]);
+ return BP;
+ }
+
+ PassManagerType getPassManagerType() const override {
+ return PMT_BasicBlockPassManager;
+ }
+};
+
+char BBPassManager::ID = 0;
+} // End anonymous namespace
+
+namespace llvm {
+namespace legacy {
+//===----------------------------------------------------------------------===//
+// FunctionPassManagerImpl
+//
+/// FunctionPassManagerImpl manages FPPassManagers
+class FunctionPassManagerImpl : public Pass,
+ public PMDataManager,
+ public PMTopLevelManager {
+ virtual void anchor();
+private:
+ bool wasRun;
+public:
+ static char ID;
+ explicit FunctionPassManagerImpl() :
+ Pass(PT_PassManager, ID), PMDataManager(),
+ PMTopLevelManager(new FPPassManager()), wasRun(false) {}
+
+ /// \copydoc FunctionPassManager::add()
+ void add(Pass *P) {
+ schedulePass(P);
+ }
+
+ /// createPrinterPass - Get a function printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintFunctionPass(O, Banner);
+ }
+
+ // Prepare for running an on-the-fly pass, freeing memory if needed
+ // from a previous run.
+ void releaseMemoryOnTheFly();
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the function, and if so, return true.
+ bool run(Function &F);
+
+ /// doInitialization - Run all of the initializers for the function passes.
+ ///
+ bool doInitialization(Module &M) override;
+
+ /// doFinalization - Run all of the finalizers for the function passes.
+ ///
+ bool doFinalization(Module &M) override;
+
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+ PassManagerType getTopLevelPassManagerType() override {
+ return PMT_FunctionPassManager;
+ }
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ FPPassManager *getContainedManager(unsigned N) {
+ assert(N < PassManagers.size() && "Pass number out of range!");
+ FPPassManager *FP = static_cast<FPPassManager *>(PassManagers[N]);
+ return FP;
+ }
+};
+
+void FunctionPassManagerImpl::anchor() {}
+
+char FunctionPassManagerImpl::ID = 0;
+} // End of legacy namespace
+} // End of llvm namespace
+
+namespace {
+//===----------------------------------------------------------------------===//
+// MPPassManager
+//
+/// MPPassManager manages ModulePasses and function pass managers.
+/// It batches all Module passes and function pass managers together and
+/// sequences them to process one module.
+class MPPassManager : public Pass, public PMDataManager {
+public:
+ static char ID;
+ explicit MPPassManager() :
+ Pass(PT_PassManager, ID), PMDataManager() { }
+
+ // Delete on the fly managers.
+ ~MPPassManager() override {
+ for (auto &OnTheFlyManager : OnTheFlyManagers) {
+ FunctionPassManagerImpl *FPP = OnTheFlyManager.second;
+ delete FPP;
+ }
+ }
+
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintModulePass(O, Banner);
+ }
+
+ /// runOnModule - Execute all of the passes scheduled for execution. Keep
+ /// track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool runOnModule(Module &M);
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ /// doInitialization - Run all of the initializers for the module passes.
+ ///
+ bool doInitialization();
+
+ /// doFinalization - Run all of the finalizers for the module passes.
+ ///
+ bool doFinalization();
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ /// Add RequiredPass into list of lower level passes required by pass P.
+ /// RequiredPass is run on the fly by Pass Manager when P requests it
+ /// through getAnalysis interface.
+ void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) override;
+
+ /// Return function pass corresponding to PassInfo PI, that is
+ /// required by module pass MP. Instantiate analysis pass, by using
+ /// its runOnFunction() for function F.
+ Pass* getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F) override;
+
+ const char *getPassName() const override {
+ return "Module Pass Manager";
+ }
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+
+ // Print passes managed by this manager
+ void dumpPassStructure(unsigned Offset) override {
+ dbgs().indent(Offset*2) << "ModulePass Manager\n";
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ ModulePass *MP = getContainedPass(Index);
+ MP->dumpPassStructure(Offset + 1);
+ std::map<Pass *, FunctionPassManagerImpl *>::const_iterator I =
+ OnTheFlyManagers.find(MP);
+ if (I != OnTheFlyManagers.end())
+ I->second->dumpPassStructure(Offset + 2);
+ dumpLastUses(MP, Offset+1);
+ }
+ }
+
+ ModulePass *getContainedPass(unsigned N) {
+ assert(N < PassVector.size() && "Pass number out of range!");
+ return static_cast<ModulePass *>(PassVector[N]);
+ }
+
+ PassManagerType getPassManagerType() const override {
+ return PMT_ModulePassManager;
+ }
+
+ private:
+ /// Collection of on the fly FPPassManagers. These managers manage
+ /// function passes that are required by module passes.
+ std::map<Pass *, FunctionPassManagerImpl *> OnTheFlyManagers;
+};
+
+char MPPassManager::ID = 0;
+} // End anonymous namespace
+
+namespace llvm {
+namespace legacy {
+//===----------------------------------------------------------------------===//
+// PassManagerImpl
+//
+
+/// PassManagerImpl manages MPPassManagers
+class PassManagerImpl : public Pass,
+ public PMDataManager,
+ public PMTopLevelManager {
+ virtual void anchor();
+
+public:
+ static char ID;
+ explicit PassManagerImpl() :
+ Pass(PT_PassManager, ID), PMDataManager(),
+ PMTopLevelManager(new MPPassManager()) {}
+
+ /// \copydoc PassManager::add()
+ void add(Pass *P) {
+ schedulePass(P);
+ }
+
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintModulePass(O, Banner);
+ }
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool run(Module &M);
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ /// doInitialization - Run all of the initializers for the module passes.
+ ///
+ bool doInitialization();
+
+ /// doFinalization - Run all of the finalizers for the module passes.
+ ///
+ bool doFinalization();
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+ PassManagerType getTopLevelPassManagerType() override {
+ return PMT_ModulePassManager;
+ }
+
+ MPPassManager *getContainedManager(unsigned N) {
+ assert(N < PassManagers.size() && "Pass number out of range!");
+ MPPassManager *MP = static_cast<MPPassManager *>(PassManagers[N]);
+ return MP;
+ }
+};
+
+void PassManagerImpl::anchor() {}
+
+char PassManagerImpl::ID = 0;
+} // End of legacy namespace
+} // End of llvm namespace
+
+namespace {
+
+//===----------------------------------------------------------------------===//
+/// TimingInfo Class - This class is used to calculate information about the
+/// amount of time each pass takes to execute. This only happens when
+/// -time-passes is enabled on the command line.
+///
+
+static ManagedStatic<sys::SmartMutex<true> > TimingInfoMutex;
+
+class TimingInfo {
+ DenseMap<Pass*, Timer*> TimingData;
+ TimerGroup TG;
+public:
+ // Use 'create' member to get this.
+ TimingInfo() : TG("... Pass execution timing report ...") {}
+
+ // TimingDtor - Print out the collected timing information.
+ ~TimingInfo() {
+ // Delete all of the timers, which accumulate their info into the
+ // TimerGroup.
+ for (auto &I : TimingData)
+ delete I.second;
+ // TimerGroup is deleted next, printing the report.
+ }
+
+ // createTheTimeInfo - This method either initializes the TheTimeInfo pointer
+ // to a non-null value (if the -time-passes option is enabled) or it leaves it
+ // null. It may be called multiple times.
+ static void createTheTimeInfo();
+
+ /// getPassTimer - Return the timer for the specified pass if it exists.
+ Timer *getPassTimer(Pass *P) {
+ if (P->getAsPMDataManager())
+ return nullptr;
+
+ sys::SmartScopedLock<true> Lock(*TimingInfoMutex);
+ Timer *&T = TimingData[P];
+ if (!T)
+ T = new Timer(P->getPassName(), TG);
+ return T;
+ }
+};
+
+} // End of anon namespace
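// TheTimeInfo below is populated only when -time-passes is given (see
// createTheTimeInfo); the report prints when the TimerGroup is destroyed at
// shutdown, via the destructor above.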
+
+static TimingInfo *TheTimeInfo;
+
+//===----------------------------------------------------------------------===//
+// PMTopLevelManager implementation
+
+/// Initialize top level manager. Create first pass manager.
+PMTopLevelManager::PMTopLevelManager(PMDataManager *PMDM) {
+ PMDM->setTopLevelManager(this);
+ addPassManager(PMDM);
+ activeStack.push(PMDM);
+}
+
+/// Set pass P as the last user of the given analysis passes.
+void
+PMTopLevelManager::setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P) {
+ unsigned PDepth = 0;
+ if (P->getResolver())
+ PDepth = P->getResolver()->getPMDataManager().getDepth();
+
+ for (Pass *AP : AnalysisPasses) {
+ LastUser[AP] = P;
+
+ if (P == AP)
+ continue;
+
+ // Update the last users of passes that are required transitively by AP.
+ AnalysisUsage *AnUsage = findAnalysisUsage(AP);
+ const AnalysisUsage::VectorType &IDs = AnUsage->getRequiredTransitiveSet();
+ SmallVector<Pass *, 12> LastUses;
+ SmallVector<Pass *, 12> LastPMUses;
+ for (AnalysisUsage::VectorType::const_iterator I = IDs.begin(),
+ E = IDs.end(); I != E; ++I) {
+ Pass *AnalysisPass = findAnalysisPass(*I);
+ assert(AnalysisPass && "Expected analysis pass to exist.");
+ AnalysisResolver *AR = AnalysisPass->getResolver();
+ assert(AR && "Expected analysis resolver to exist.");
+ unsigned APDepth = AR->getPMDataManager().getDepth();
+
+ if (PDepth == APDepth)
+ LastUses.push_back(AnalysisPass);
+ else if (PDepth > APDepth)
+ LastPMUses.push_back(AnalysisPass);
+ }
+
+ setLastUser(LastUses, P);
+
+ // If this pass has a corresponding pass manager, push higher level
+ // analysis to this pass manager.
+ if (P->getResolver())
+ setLastUser(LastPMUses, P->getResolver()->getPMDataManager().getAsPass());
+
+
+ // If AP is the last user of other passes then make P last user of
+ // such passes.
+ for (DenseMap<Pass *, Pass *>::iterator LUI = LastUser.begin(),
+ LUE = LastUser.end(); LUI != LUE; ++LUI) {
+ if (LUI->second == AP)
+ // DenseMap iterator is not invalidated here because
+ // this is just updating existing entries.
+ LastUser[LUI->first] = P;
+ }
+ }
+}
+
+/// Collect passes whose last user is P
+void PMTopLevelManager::collectLastUses(SmallVectorImpl<Pass *> &LastUses,
+ Pass *P) {
+ DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator DMI =
+ InversedLastUser.find(P);
+ if (DMI == InversedLastUser.end())
+ return;
+
+ SmallPtrSet<Pass *, 8> &LU = DMI->second;
+ for (Pass *LUP : LU) {
+ LastUses.push_back(LUP);
+ }
+}
+
+AnalysisUsage *PMTopLevelManager::findAnalysisUsage(Pass *P) {
+ AnalysisUsage *AnUsage = nullptr;
+ auto DMI = AnUsageMap.find(P);
+ if (DMI != AnUsageMap.end())
+ AnUsage = DMI->second;
+ else {
+ // Look up the analysis usage from the pass instance (different instances
+ // of the same pass can produce different results), but unique the
+ // resulting object to reduce memory usage. This helps to greatly reduce
+ // memory usage when we have many instances of only a few pass types
+ // (e.g. instcombine, simplifycfg, etc...) which tend to share a fixed set
+ // of dependencies.
+ AnalysisUsage AU;
+ P->getAnalysisUsage(AU);
+
+ AUFoldingSetNode* Node = nullptr;
+ FoldingSetNodeID ID;
+ AUFoldingSetNode::Profile(ID, AU);
+ void *IP = nullptr;
+ if (auto *N = UniqueAnalysisUsages.FindNodeOrInsertPos(ID, IP))
+ Node = N;
+ else {
+ Node = new (AUFoldingSetNodeAllocator.Allocate()) AUFoldingSetNode(AU);
+ UniqueAnalysisUsages.InsertNode(Node, IP);
+ }
+ assert(Node && "cached analysis usage must be non null");
+
+ AnUsageMap[P] = &Node->AU;
+ AnUsage = &Node->AU;
+ }
+ return AnUsage;
+}
+
+/// Schedule pass P for execution. Make sure that passes required by
+/// P are run before P is run. Update analysis info maintained by
+/// the manager. Remove dead passes. This is a recursive function.
+void PMTopLevelManager::schedulePass(Pass *P) {
+
+ // TODO: Allocate a function manager for this pass; otherwise the required
+ // set may be inserted into the previous function manager.
+
+ // Give pass a chance to prepare the stage.
+ P->preparePassManager(activeStack);
+
+ // If P is an analysis pass and it is available then do not
+ // generate the analysis again. Stale analysis info should not be
+ // available at this point.
+ const PassInfo *PI = findAnalysisPassInfo(P->getPassID());
+ if (PI && PI->isAnalysis() && findAnalysisPass(P->getPassID())) {
+ delete P;
+ return;
+ }
+
+ AnalysisUsage *AnUsage = findAnalysisUsage(P);
+
+ bool checkAnalysis = true;
+ while (checkAnalysis) {
+ checkAnalysis = false;
+
+ const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
+ for (AnalysisUsage::VectorType::const_iterator I = RequiredSet.begin(),
+ E = RequiredSet.end(); I != E; ++I) {
+
+ Pass *AnalysisPass = findAnalysisPass(*I);
+ if (!AnalysisPass) {
+ const PassInfo *PI = findAnalysisPassInfo(*I);
+
+ if (!PI) {
+ // Pass P is not in the global PassRegistry
+ dbgs() << "Pass '" << P->getPassName() << "' is not initialized." << "\n";
+ dbgs() << "Verify if there is a pass dependency cycle." << "\n";
+ dbgs() << "Required Passes:" << "\n";
+ for (AnalysisUsage::VectorType::const_iterator I2 = RequiredSet.begin(),
+ E = RequiredSet.end(); I2 != E && I2 != I; ++I2) {
+ Pass *AnalysisPass2 = findAnalysisPass(*I2);
+ if (AnalysisPass2) {
+ dbgs() << "\t" << AnalysisPass2->getPassName() << "\n";
+ } else {
+ dbgs() << "\t" << "Error: Required pass not found! Possible causes:" << "\n";
+ dbgs() << "\t\t" << "- Pass misconfiguration (e.g.: missing macros)" << "\n";
+ dbgs() << "\t\t" << "- Corruption of the global PassRegistry" << "\n";
+ }
+ }
+ }
+
+ assert(PI && "Expected required passes to be initialized");
+ AnalysisPass = PI->createPass();
+ if (P->getPotentialPassManagerType() ==
+ AnalysisPass->getPotentialPassManagerType())
+ // Schedule analysis pass that is managed by the same pass manager.
+ schedulePass(AnalysisPass);
+ else if (P->getPotentialPassManagerType() >
+ AnalysisPass->getPotentialPassManagerType()) {
+ // Schedule analysis pass that is managed by a new manager.
+ schedulePass(AnalysisPass);
+ // Recheck analysis passes to ensure that required analyses that
+ // are already checked are still available.
+ checkAnalysis = true;
+ } else
+ // Do not schedule this analysis. Lower level analysis
+ // passes are run on the fly.
+ delete AnalysisPass;
+ }
+ }
+ }
+
+ // Now all required passes are available.
+ if (ImmutablePass *IP = P->getAsImmutablePass()) {
+ // P is an immutable pass and it will be managed by this
+ // top level manager. Set up analysis resolver to connect them.
+ PMDataManager *DM = getAsPMDataManager();
+ AnalysisResolver *AR = new AnalysisResolver(*DM);
+ P->setResolver(AR);
+ DM->initializeAnalysisImpl(P);
+ addImmutablePass(IP);
+ DM->recordAvailableAnalysis(IP);
+ return;
+ }
+
+ if (PI && !PI->isAnalysis() && ShouldPrintBeforePass(PI)) {
+ Pass *PP = P->createPrinterPass(
+ dbgs(), std::string("*** IR Dump Before ") + P->getPassName() + " ***");
+ PP->assignPassManager(activeStack, getTopLevelPassManagerType());
+ }
+
+ // Add the requested pass to the best available pass manager.
+ P->assignPassManager(activeStack, getTopLevelPassManagerType());
+
+ if (PI && !PI->isAnalysis() && ShouldPrintAfterPass(PI)) {
+ Pass *PP = P->createPrinterPass(
+ dbgs(), std::string("*** IR Dump After ") + P->getPassName() + " ***");
+ PP->assignPassManager(activeStack, getTopLevelPassManagerType());
+ }
+}
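// Sketch of how scheduling plays out from client code, given a Module 'M'
// (assumes LLVM 3.8 headers, where createPromoteMemoryToRegisterPass is
// declared in llvm/Transforms/Scalar.h):
//   legacy::PassManager PM;
//   PM.add(createPromoteMemoryToRegisterPass()); // schedulePass() pulls in
//   PM.run(M);                                   // the required dominator
//                                                // tree analysis first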
+
+/// Find the pass that implements Analysis AID. Search immutable
+/// passes and all pass managers. If desired pass is not found
+/// then return NULL.
+Pass *PMTopLevelManager::findAnalysisPass(AnalysisID AID) {
+ // For immutable passes we have a direct mapping from ID to pass, so check
+ // that first.
+ if (Pass *P = ImmutablePassMap.lookup(AID))
+ return P;
+
+ // Check pass managers
+ for (PMDataManager *PassManager : PassManagers)
+ if (Pass *P = PassManager->findAnalysisPass(AID, false))
+ return P;
+
+ // Check other pass managers
+ for (PMDataManager *IndirectPassManager : IndirectPassManagers)
+ if (Pass *P = IndirectPassManager->findAnalysisPass(AID, false))
+ return P;
+
+ return nullptr;
+}
+
+const PassInfo *PMTopLevelManager::findAnalysisPassInfo(AnalysisID AID) const {
+ const PassInfo *&PI = AnalysisPassInfos[AID];
+ if (!PI)
+ PI = PassRegistry::getPassRegistry()->getPassInfo(AID);
+ else
+ assert(PI == PassRegistry::getPassRegistry()->getPassInfo(AID) &&
+ "The pass info pointer changed for an analysis ID!");
+
+ return PI;
+}
+
+void PMTopLevelManager::addImmutablePass(ImmutablePass *P) {
+ P->initializePass();
+ ImmutablePasses.push_back(P);
+
+ // Add this pass to the map from its analysis ID. We clobber any prior runs
+ // of the pass in the map so that the last one added is the one found when
+ // doing lookups.
+ AnalysisID AID = P->getPassID();
+ ImmutablePassMap[AID] = P;
+
+ // Also add any interfaces implemented by the immutable pass to the map for
+ // fast lookup.
+ const PassInfo *PassInf = findAnalysisPassInfo(AID);
+ assert(PassInf && "Expected all immutable passes to be initialized");
+ for (const PassInfo *ImmPI : PassInf->getInterfacesImplemented())
+ ImmutablePassMap[ImmPI->getTypeInfo()] = P;
+}
+
+// Print passes managed by this top level manager.
+void PMTopLevelManager::dumpPasses() const {
+
+ if (PassDebugging < Structure)
+ return;
+
+ // Print out the immutable passes
+ for (unsigned i = 0, e = ImmutablePasses.size(); i != e; ++i) {
+ ImmutablePasses[i]->dumpPassStructure(0);
+ }
+
+ // Every class that derives from PMDataManager also derives from Pass
+ // (sometimes indirectly), but there's no inheritance relationship
+ // between PMDataManager and Pass, so we have to getAsPass to get
+ // from a PMDataManager* to a Pass*.
+ for (PMDataManager *Manager : PassManagers)
+ Manager->getAsPass()->dumpPassStructure(1);
+}
+
+void PMTopLevelManager::dumpArguments() const {
+
+ if (PassDebugging < Arguments)
+ return;
+
+ dbgs() << "Pass Arguments: ";
+ for (SmallVectorImpl<ImmutablePass *>::const_iterator I =
+ ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I)
+ if (const PassInfo *PI = findAnalysisPassInfo((*I)->getPassID())) {
+ assert(PI && "Expected all immutable passes to be initialized");
+ if (!PI->isAnalysisGroup())
+ dbgs() << " -" << PI->getPassArgument();
+ }
+ for (SmallVectorImpl<PMDataManager *>::const_iterator I =
+ PassManagers.begin(), E = PassManagers.end(); I != E; ++I)
+ (*I)->dumpPassArguments();
+ dbgs() << "\n";
+}
+
+void PMTopLevelManager::initializeAllAnalysisInfo() {
+ for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
+ E = PassManagers.end(); I != E; ++I)
+ (*I)->initializeAnalysisInfo();
+
+ // Initialize other pass managers
+ for (SmallVectorImpl<PMDataManager *>::iterator
+ I = IndirectPassManagers.begin(), E = IndirectPassManagers.end();
+ I != E; ++I)
+ (*I)->initializeAnalysisInfo();
+
+ for (DenseMap<Pass *, Pass *>::iterator DMI = LastUser.begin(),
+ DME = LastUser.end(); DMI != DME; ++DMI) {
+ SmallPtrSet<Pass *, 8> &L = InversedLastUser[DMI->second];
+ L.insert(DMI->first);
+ }
+}
+
+/// Destructor
+PMTopLevelManager::~PMTopLevelManager() {
+ for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
+ E = PassManagers.end(); I != E; ++I)
+ delete *I;
+
+ for (SmallVectorImpl<ImmutablePass *>::iterator
+ I = ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I)
+ delete *I;
+}
+
+//===----------------------------------------------------------------------===//
+// PMDataManager implementation
+
+/// Augment AvailableAnalysis by adding analyses made available by pass P.
+void PMDataManager::recordAvailableAnalysis(Pass *P) {
+ AnalysisID PI = P->getPassID();
+
+ AvailableAnalysis[PI] = P;
+
+ assert(!AvailableAnalysis.empty());
+
+ // This pass is the current implementation of all of the interfaces it
+ // implements as well.
+ const PassInfo *PInf = TPM->findAnalysisPassInfo(PI);
+ if (!PInf) return;
+ const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented();
+ for (unsigned i = 0, e = II.size(); i != e; ++i)
+ AvailableAnalysis[II[i]->getTypeInfo()] = P;
+}
+
+// Return true if P preserves high level analysis used by other
+// passes managed by this manager
+bool PMDataManager::preserveHigherLevelAnalysis(Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+ if (AnUsage->getPreservesAll())
+ return true;
+
+ const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
+ for (SmallVectorImpl<Pass *>::iterator I = HigherLevelAnalysis.begin(),
+ E = HigherLevelAnalysis.end(); I != E; ++I) {
+ Pass *P1 = *I;
+ if (P1->getAsImmutablePass() == nullptr &&
+ std::find(PreservedSet.begin(), PreservedSet.end(),
+ P1->getPassID()) ==
+ PreservedSet.end())
+ return false;
+ }
+
+ return true;
+}
+
+/// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
+void PMDataManager::verifyPreservedAnalysis(Pass *P) {
+ // Don't do this unless assertions are enabled.
+#ifdef NDEBUG
+ return;
+#endif
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+ const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
+
+ // Verify preserved analysis
+ for (AnalysisUsage::VectorType::const_iterator I = PreservedSet.begin(),
+ E = PreservedSet.end(); I != E; ++I) {
+ AnalysisID AID = *I;
+ if (Pass *AP = findAnalysisPass(AID, true)) {
+ TimeRegion PassTimer(getPassTimer(AP));
+ AP->verifyAnalysis();
+ }
+ }
+}
+
+/// Remove Analysis not preserved by Pass P
+void PMDataManager::removeNotPreservedAnalysis(Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+ if (AnUsage->getPreservesAll())
+ return;
+
+ const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
+ for (DenseMap<AnalysisID, Pass*>::iterator I = AvailableAnalysis.begin(),
+ E = AvailableAnalysis.end(); I != E; ) {
+ DenseMap<AnalysisID, Pass*>::iterator Info = I++;
+ if (Info->second->getAsImmutablePass() == nullptr &&
+ std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
+ PreservedSet.end()) {
+ // Remove this analysis
+ if (PassDebugging >= Details) {
+ Pass *S = Info->second;
+ dbgs() << " -- '" << P->getPassName() << "' is not preserving '";
+ dbgs() << S->getPassName() << "'\n";
+ }
+ AvailableAnalysis.erase(Info);
+ }
+ }
+
+ // Also check inherited analyses. If P does not preserve an analysis
+ // provided by a parent manager, remove it here as well.
+ for (unsigned Index = 0; Index < PMT_Last; ++Index) {
+
+ if (!InheritedAnalysis[Index])
+ continue;
+
+ for (DenseMap<AnalysisID, Pass*>::iterator
+ I = InheritedAnalysis[Index]->begin(),
+ E = InheritedAnalysis[Index]->end(); I != E; ) {
+ DenseMap<AnalysisID, Pass *>::iterator Info = I++;
+ if (Info->second->getAsImmutablePass() == nullptr &&
+ std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
+ PreservedSet.end()) {
+ // Remove this analysis
+ if (PassDebugging >= Details) {
+ Pass *S = Info->second;
+ dbgs() << " -- '" << P->getPassName() << "' is not preserving '";
+ dbgs() << S->getPassName() << "'\n";
+ }
+ InheritedAnalysis[Index]->erase(Info);
+ }
+ }
+ }
+}
+
+/// Remove analysis passes that are not used any longer
+void PMDataManager::removeDeadPasses(Pass *P, StringRef Msg,
+ enum PassDebuggingString DBG_STR) {
+
+ SmallVector<Pass *, 12> DeadPasses;
+
+ // If this is an on-the-fly manager then it does not have a TPM.
+ if (!TPM)
+ return;
+
+ TPM->collectLastUses(DeadPasses, P);
+
+ if (PassDebugging >= Details && !DeadPasses.empty()) {
+ dbgs() << " -*- '" << P->getPassName();
+ dbgs() << "' is the last user of following pass instances.";
+ dbgs() << " Free these instances\n";
+ }
+
+ for (SmallVectorImpl<Pass *>::iterator I = DeadPasses.begin(),
+ E = DeadPasses.end(); I != E; ++I)
+ freePass(*I, Msg, DBG_STR);
+}
+
+void PMDataManager::freePass(Pass *P, StringRef Msg,
+ enum PassDebuggingString DBG_STR) {
+ dumpPassInfo(P, FREEING_MSG, DBG_STR, Msg);
+
+ {
+ // If the pass crashes releasing memory, remember this.
+ PassManagerPrettyStackEntry X(P);
+ TimeRegion PassTimer(getPassTimer(P));
+
+ P->releaseMemory();
+ }
+
+ AnalysisID PI = P->getPassID();
+ if (const PassInfo *PInf = TPM->findAnalysisPassInfo(PI)) {
+ // Remove the pass itself (if it is not already removed).
+ AvailableAnalysis.erase(PI);
+
+ // Remove all interfaces this pass implements, for which it is also
+ // listed as the available implementation.
+ const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented();
+ for (unsigned i = 0, e = II.size(); i != e; ++i) {
+ DenseMap<AnalysisID, Pass*>::iterator Pos =
+ AvailableAnalysis.find(II[i]->getTypeInfo());
+ if (Pos != AvailableAnalysis.end() && Pos->second == P)
+ AvailableAnalysis.erase(Pos);
+ }
+ }
+}
+
+/// Add pass P into the PassVector. Update
+/// AvailableAnalysis appropriately if ProcessAnalysis is true.
+void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
+ // This manager is going to manage pass P. Set up analysis resolver
+ // to connect them.
+ AnalysisResolver *AR = new AnalysisResolver(*this);
+ P->setResolver(AR);
+
+ // If a FunctionPass F is the last user of ModulePass info M
+ // then F's manager, not F, records itself as the last user of M.
+ SmallVector<Pass *, 12> TransferLastUses;
+
+ if (!ProcessAnalysis) {
+ // Add pass
+ PassVector.push_back(P);
+ return;
+ }
+
+ // At the moment, this pass is the last user of all required passes.
+ SmallVector<Pass *, 12> LastUses;
+ SmallVector<Pass *, 8> UsedPasses;
+ SmallVector<AnalysisID, 8> ReqAnalysisNotAvailable;
+
+ unsigned PDepth = this->getDepth();
+
+ collectRequiredAndUsedAnalyses(UsedPasses, ReqAnalysisNotAvailable, P);
+ for (Pass *PUsed : UsedPasses) {
+ unsigned RDepth = 0;
+
+ assert(PUsed->getResolver() && "Analysis Resolver is not set");
+ PMDataManager &DM = PUsed->getResolver()->getPMDataManager();
+ RDepth = DM.getDepth();
+
+ if (PDepth == RDepth)
+ LastUses.push_back(PUsed);
+ else if (PDepth > RDepth) {
+ // Let the parent claim responsibility for the last use.
+ TransferLastUses.push_back(PUsed);
+ // Keep track of higher level analysis used by this manager.
+ HigherLevelAnalysis.push_back(PUsed);
+ } else
+ llvm_unreachable("Unable to accommodate Used Pass");
+ }
+
+ // Set P as P's last user until someone starts using P.
+ // However, if P is a Pass Manager then it does not need
+ // to record its last user.
+ if (!P->getAsPMDataManager())
+ LastUses.push_back(P);
+ TPM->setLastUser(LastUses, P);
+
+ if (!TransferLastUses.empty()) {
+ Pass *My_PM = getAsPass();
+ TPM->setLastUser(TransferLastUses, My_PM);
+ TransferLastUses.clear();
+ }
+
+ // Now, take care of required analyses that are not available.
+ for (AnalysisID ID : ReqAnalysisNotAvailable) {
+ const PassInfo *PI = TPM->findAnalysisPassInfo(ID);
+ Pass *AnalysisPass = PI->createPass();
+ this->addLowerLevelRequiredPass(P, AnalysisPass);
+ }
+
+ // Take note of the analyses required and made available by this pass.
+ // Remove the analyses not preserved by this pass.
+ removeNotPreservedAnalysis(P);
+ recordAvailableAnalysis(P);
+
+ // Add pass
+ PassVector.push_back(P);
+}
+
+
+/// Populate UP with the analysis passes that are used or required by
+/// pass P and are available. Populate RP_NotAvail with the analysis
+/// passes that are required by pass P but are not available.
+void PMDataManager::collectRequiredAndUsedAnalyses(
+ SmallVectorImpl<Pass *> &UP, SmallVectorImpl<AnalysisID> &RP_NotAvail,
+ Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+
+ for (const auto &UsedID : AnUsage->getUsedSet())
+ if (Pass *AnalysisPass = findAnalysisPass(UsedID, true))
+ UP.push_back(AnalysisPass);
+
+ for (const auto &RequiredID : AnUsage->getRequiredSet())
+ if (Pass *AnalysisPass = findAnalysisPass(RequiredID, true))
+ UP.push_back(AnalysisPass);
+ else
+ RP_NotAvail.push_back(RequiredID);
+
+ for (const auto &RequiredID : AnUsage->getRequiredTransitiveSet())
+ if (Pass *AnalysisPass = findAnalysisPass(RequiredID, true))
+ UP.push_back(AnalysisPass);
+ else
+ RP_NotAvail.push_back(RequiredID);
+}
+
+// All Required analyses should be available to the pass as it runs! Here
+// we fill in the AnalysisImpls member of the pass so that it can
+// successfully use the getAnalysis() method to retrieve the
+// implementations it needs.
+//
+void PMDataManager::initializeAnalysisImpl(Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+
+ for (AnalysisUsage::VectorType::const_iterator
+ I = AnUsage->getRequiredSet().begin(),
+ E = AnUsage->getRequiredSet().end(); I != E; ++I) {
+ Pass *Impl = findAnalysisPass(*I, true);
+ if (!Impl)
+ // This may be an analysis pass that is initialized on the fly.
+ // If that is not the case, it will raise an assert when it is used.
+ continue;
+ AnalysisResolver *AR = P->getResolver();
+ assert(AR && "Analysis Resolver is not set");
+ AR->addAnalysisImplsPair(*I, Impl);
+ }
+}
+
+/// Find the pass that implements Analysis AID. If the desired pass is not
+/// found then return NULL.
+Pass *PMDataManager::findAnalysisPass(AnalysisID AID, bool SearchParent) {
+
+ // Check if the AvailableAnalysis map has an entry for this ID.
+ DenseMap<AnalysisID, Pass*>::const_iterator I = AvailableAnalysis.find(AID);
+
+ if (I != AvailableAnalysis.end())
+ return I->second;
+
+ // Search parents through the top level manager.
+ if (SearchParent)
+ return TPM->findAnalysisPass(AID);
+
+ return nullptr;
+}
+
+// Print the list of passes for which P is the last user.
+void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{
+
+ SmallVector<Pass *, 12> LUses;
+
+ // If this is an on-the-fly manager then it does not have a TPM.
+ if (!TPM)
+ return;
+
+ TPM->collectLastUses(LUses, P);
+
+ for (SmallVectorImpl<Pass *>::iterator I = LUses.begin(),
+ E = LUses.end(); I != E; ++I) {
+ dbgs() << "--" << std::string(Offset*2, ' ');
+ (*I)->dumpPassStructure(0);
+ }
+}
+
+void PMDataManager::dumpPassArguments() const {
+ for (SmallVectorImpl<Pass *>::const_iterator I = PassVector.begin(),
+ E = PassVector.end(); I != E; ++I) {
+ if (PMDataManager *PMD = (*I)->getAsPMDataManager())
+ PMD->dumpPassArguments();
+ else
+ if (const PassInfo *PI =
+ TPM->findAnalysisPassInfo((*I)->getPassID()))
+ if (!PI->isAnalysisGroup())
+ dbgs() << " -" << PI->getPassArgument();
+ }
+}
+
+void PMDataManager::dumpPassInfo(Pass *P, enum PassDebuggingString S1,
+ enum PassDebuggingString S2,
+ StringRef Msg) {
+ if (PassDebugging < Executions)
+ return;
+ dbgs() << "[" << sys::TimeValue::now().str() << "] " << (void *)this
+ << std::string(getDepth() * 2 + 1, ' ');
+ switch (S1) {
+ case EXECUTION_MSG:
+ dbgs() << "Executing Pass '" << P->getPassName();
+ break;
+ case MODIFICATION_MSG:
+ dbgs() << "Made Modification '" << P->getPassName();
+ break;
+ case FREEING_MSG:
+ dbgs() << " Freeing Pass '" << P->getPassName();
+ break;
+ default:
+ break;
+ }
+ switch (S2) {
+ case ON_BASICBLOCK_MSG:
+ dbgs() << "' on BasicBlock '" << Msg << "'...\n";
+ break;
+ case ON_FUNCTION_MSG:
+ dbgs() << "' on Function '" << Msg << "'...\n";
+ break;
+ case ON_MODULE_MSG:
+ dbgs() << "' on Module '" << Msg << "'...\n";
+ break;
+ case ON_REGION_MSG:
+ dbgs() << "' on Region '" << Msg << "'...\n";
+ break;
+ case ON_LOOP_MSG:
+ dbgs() << "' on Loop '" << Msg << "'...\n";
+ break;
+ case ON_CG_MSG:
+ dbgs() << "' on Call Graph Nodes '" << Msg << "'...\n";
+ break;
+ default:
+ break;
+ }
+}
+
+void PMDataManager::dumpRequiredSet(const Pass *P) const {
+ if (PassDebugging < Details)
+ return;
+
+ AnalysisUsage analysisUsage;
+ P->getAnalysisUsage(analysisUsage);
+ dumpAnalysisUsage("Required", P, analysisUsage.getRequiredSet());
+}
+
+void PMDataManager::dumpPreservedSet(const Pass *P) const {
+ if (PassDebugging < Details)
+ return;
+
+ AnalysisUsage analysisUsage;
+ P->getAnalysisUsage(analysisUsage);
+ dumpAnalysisUsage("Preserved", P, analysisUsage.getPreservedSet());
+}
+
+void PMDataManager::dumpUsedSet(const Pass *P) const {
+ if (PassDebugging < Details)
+ return;
+
+ AnalysisUsage analysisUsage;
+ P->getAnalysisUsage(analysisUsage);
+ dumpAnalysisUsage("Used", P, analysisUsage.getUsedSet());
+}
+
+void PMDataManager::dumpAnalysisUsage(StringRef Msg, const Pass *P,
+ const AnalysisUsage::VectorType &Set) const {
+ assert(PassDebugging >= Details);
+ if (Set.empty())
+ return;
+ dbgs() << (const void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:";
+ for (unsigned i = 0; i != Set.size(); ++i) {
+ if (i) dbgs() << ',';
+ const PassInfo *PInf = TPM->findAnalysisPassInfo(Set[i]);
+ if (!PInf) {
+ // Some preserved passes, such as AliasAnalysis, may not be initialized by
+ // all drivers.
+ dbgs() << " Uninitialized Pass";
+ continue;
+ }
+ dbgs() << ' ' << PInf->getPassName();
+ }
+ dbgs() << '\n';
+}
+
+/// Add RequiredPass into the list of lower level passes required by pass P.
+/// RequiredPass is run on the fly by the pass manager when P requests it
+/// through the getAnalysis interface.
+/// This should be handled by a specific pass manager.
+void PMDataManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
+ if (TPM) {
+ TPM->dumpArguments();
+ TPM->dumpPasses();
+ }
+
+ // A module level pass may require function level analysis info
+ // (e.g. dominator info). The pass manager uses an on-the-fly function pass
+ // manager to provide this on demand. In that case, in pass manager
+ // terminology, the module level pass requires lower level analysis info
+ // managed by a lower level pass manager.
+
+ // When the pass manager is not able to order the required analysis info,
+ // it checks whether any lower level manager will be able to provide the
+ // analysis info on demand.
+#ifndef NDEBUG
+ dbgs() << "Unable to schedule '" << RequiredPass->getPassName();
+ dbgs() << "' required by '" << P->getPassName() << "'\n";
+#endif
+ llvm_unreachable("Unable to schedule pass");
+}
+
+Pass *PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F) {
+ llvm_unreachable("Unable to find on the fly pass");
+}
+
+// Destructor
+PMDataManager::~PMDataManager() {
+ for (SmallVectorImpl<Pass *>::iterator I = PassVector.begin(),
+ E = PassVector.end(); I != E; ++I)
+ delete *I;
+}
+
+//===----------------------------------------------------------------------===//
+// NOTE: Is this the right place to define this method?
+// getAnalysisIfAvailable - Return analysis result or null if it doesn't exist.
+Pass *AnalysisResolver::getAnalysisIfAvailable(AnalysisID ID, bool dir) const {
+ return PM.findAnalysisPass(ID, dir);
+}
+
+Pass *AnalysisResolver::findImplPass(Pass *P, AnalysisID AnalysisPI,
+ Function &F) {
+ return PM.getOnTheFlyPass(P, AnalysisPI, F);
+}
+
+//===----------------------------------------------------------------------===//
+// BBPassManager implementation
+
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnBasicBlock method. Keep track of whether any of the passes modifies
+/// the function, and if so, return true.
+bool BBPassManager::runOnFunction(Function &F) {
+ if (F.isDeclaration())
+ return false;
+
+ bool Changed = doInitialization(F);
+
+ for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ BasicBlockPass *BP = getContainedPass(Index);
+ bool LocalChanged = false;
+
+ dumpPassInfo(BP, EXECUTION_MSG, ON_BASICBLOCK_MSG, I->getName());
+ dumpRequiredSet(BP);
+
+ initializeAnalysisImpl(BP);
+
+ {
+ // If the pass crashes, remember this.
+ PassManagerPrettyStackEntry X(BP, *I);
+ TimeRegion PassTimer(getPassTimer(BP));
+
+ LocalChanged |= BP->runOnBasicBlock(*I);
+ }
+
+ Changed |= LocalChanged;
+ if (LocalChanged)
+ dumpPassInfo(BP, MODIFICATION_MSG, ON_BASICBLOCK_MSG,
+ I->getName());
+ dumpPreservedSet(BP);
+ dumpUsedSet(BP);
+
+ verifyPreservedAnalysis(BP);
+ removeNotPreservedAnalysis(BP);
+ recordAvailableAnalysis(BP);
+ removeDeadPasses(BP, I->getName(), ON_BASICBLOCK_MSG);
+ }
+
+ return doFinalization(F) || Changed;
+}
+
+// Implement doInitialization and doFinalization
+bool BBPassManager::doInitialization(Module &M) {
+ bool Changed = false;
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index)
+ Changed |= getContainedPass(Index)->doInitialization(M);
+
+ return Changed;
+}
+
+bool BBPassManager::doFinalization(Module &M) {
+ bool Changed = false;
+
+ for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index)
+ Changed |= getContainedPass(Index)->doFinalization(M);
+
+ return Changed;
+}
+
+bool BBPassManager::doInitialization(Function &F) {
+ bool Changed = false;
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ BasicBlockPass *BP = getContainedPass(Index);
+ Changed |= BP->doInitialization(F);
+ }
+
+ return Changed;
+}
+
+bool BBPassManager::doFinalization(Function &F) {
+ bool Changed = false;
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ BasicBlockPass *BP = getContainedPass(Index);
+ Changed |= BP->doFinalization(F);
+ }
+
+ return Changed;
+}
+
+
+//===----------------------------------------------------------------------===//
+// FunctionPassManager implementation
+
+/// Create new Function pass manager
+FunctionPassManager::FunctionPassManager(Module *m) : M(m) {
+ FPM = new FunctionPassManagerImpl();
+ // FPM is the top level manager.
+ FPM->setTopLevelManager(FPM);
+
+ AnalysisResolver *AR = new AnalysisResolver(*FPM);
+ FPM->setResolver(AR);
+}
+
+FunctionPassManager::~FunctionPassManager() {
+ delete FPM;
+}
+
+void FunctionPassManager::add(Pass *P) {
+ FPM->add(P);
+}
+
+/// run - Execute all of the passes scheduled for execution. Keep
+/// track of whether any of the passes modifies the function, and if
+/// so, return true.
+///
+bool FunctionPassManager::run(Function &F) {
+ if (std::error_code EC = F.materialize())
+ report_fatal_error("Error reading bitcode file: " + EC.message());
+ return FPM->run(F);
+}
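+
+// Illustrative usage sketch (hypothetical driver code, not part of this
+// file); assumes a Module M and a pass constructor such as
+// createPromoteMemoryToRegisterPass() are in scope:
+//
+//   FunctionPassManager FPM(&M);
+//   FPM.add(createPromoteMemoryToRegisterPass());
+//   FPM.doInitialization();
+//   for (Function &F : M)
+//     FPM.run(F);
+//   FPM.doFinalization();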
+
+
+/// doInitialization - Run all of the initializers for the function passes.
+///
+bool FunctionPassManager::doInitialization() {
+ return FPM->doInitialization(*M);
+}
+
+/// doFinalization - Run all of the finalizers for the function passes.
+///
+bool FunctionPassManager::doFinalization() {
+ return FPM->doFinalization(*M);
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionPassManagerImpl implementation
+//
+bool FunctionPassManagerImpl::doInitialization(Module &M) {
+ bool Changed = false;
+
+ dumpArguments();
+ dumpPasses();
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doInitialization(M);
+
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
+ Changed |= getContainedManager(Index)->doInitialization(M);
+
+ return Changed;
+}
+
+bool FunctionPassManagerImpl::doFinalization(Module &M) {
+ bool Changed = false;
+
+ for (int Index = getNumContainedManagers() - 1; Index >= 0; --Index)
+ Changed |= getContainedManager(Index)->doFinalization(M);
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doFinalization(M);
+
+ return Changed;
+}
+
+/// cleanup - After running all passes, clean up pass manager cache.
+void FPPassManager::cleanup() {
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ FunctionPass *FP = getContainedPass(Index);
+ AnalysisResolver *AR = FP->getResolver();
+ assert(AR && "Analysis Resolver is not set");
+ AR->clearAnalysisImpls();
+ }
+}
+
+void FunctionPassManagerImpl::releaseMemoryOnTheFly() {
+ if (!wasRun)
+ return;
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
+ FPPassManager *FPPM = getContainedManager(Index);
+ for (unsigned Index = 0; Index < FPPM->getNumContainedPasses(); ++Index) {
+ FPPM->getContainedPass(Index)->releaseMemory();
+ }
+ }
+ wasRun = false;
+}
+
+// Execute all the passes managed by this top level manager.
+// Return true if the function is modified by any pass.
+bool FunctionPassManagerImpl::run(Function &F) {
+ bool Changed = false;
+ TimingInfo::createTheTimeInfo();
+
+ initializeAllAnalysisInfo();
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
+ Changed |= getContainedManager(Index)->runOnFunction(F);
+ F.getContext().yield();
+ }
+
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
+ getContainedManager(Index)->cleanup();
+
+ wasRun = true;
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// FPPassManager implementation
+
+char FPPassManager::ID = 0;
+/// Print passes managed by this manager
+void FPPassManager::dumpPassStructure(unsigned Offset) {
+ dbgs().indent(Offset*2) << "FunctionPass Manager\n";
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ FunctionPass *FP = getContainedPass(Index);
+ FP->dumpPassStructure(Offset + 1);
+ dumpLastUses(FP, Offset+1);
+ }
+}
+
+
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnFunction method. Keep track of whether any of the passes modifies
+/// the function, and if so, return true.
+bool FPPassManager::runOnFunction(Function &F) {
+ if (F.isDeclaration())
+ return false;
+
+ bool Changed = false;
+
+ // Collect inherited analysis from Module level pass manager.
+ populateInheritedAnalysis(TPM->activeStack);
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ FunctionPass *FP = getContainedPass(Index);
+ bool LocalChanged = false;
+
+ dumpPassInfo(FP, EXECUTION_MSG, ON_FUNCTION_MSG, F.getName());
+ dumpRequiredSet(FP);
+
+ initializeAnalysisImpl(FP);
+
+ {
+ PassManagerPrettyStackEntry X(FP, F);
+ TimeRegion PassTimer(getPassTimer(FP));
+
+ LocalChanged |= FP->runOnFunction(F);
+ }
+
+ Changed |= LocalChanged;
+ if (LocalChanged)
+ dumpPassInfo(FP, MODIFICATION_MSG, ON_FUNCTION_MSG, F.getName());
+ dumpPreservedSet(FP);
+ dumpUsedSet(FP);
+
+ verifyPreservedAnalysis(FP);
+ removeNotPreservedAnalysis(FP);
+ recordAvailableAnalysis(FP);
+ removeDeadPasses(FP, F.getName(), ON_FUNCTION_MSG);
+ }
+ return Changed;
+}
+
+bool FPPassManager::runOnModule(Module &M) {
+ bool Changed = false;
+
+ for (Function &F : M)
+ Changed |= runOnFunction(F);
+
+ return Changed;
+}
+
+bool FPPassManager::doInitialization(Module &M) {
+ bool Changed = false;
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index)
+ Changed |= getContainedPass(Index)->doInitialization(M);
+
+ return Changed;
+}
+
+bool FPPassManager::doFinalization(Module &M) {
+ bool Changed = false;
+
+ for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index)
+ Changed |= getContainedPass(Index)->doFinalization(M);
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// MPPassManager implementation
+
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnModule method. Keep track of whether any of the passes modifies
+/// the module, and if so, return true.
+bool
+MPPassManager::runOnModule(Module &M) {
+ bool Changed = false;
+
+ // Initialize on-the-fly passes
+ for (auto &OnTheFlyManager : OnTheFlyManagers) {
+ FunctionPassManagerImpl *FPP = OnTheFlyManager.second;
+ Changed |= FPP->doInitialization(M);
+ }
+
+ // Initialize module passes
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index)
+ Changed |= getContainedPass(Index)->doInitialization(M);
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ ModulePass *MP = getContainedPass(Index);
+ bool LocalChanged = false;
+
+ dumpPassInfo(MP, EXECUTION_MSG, ON_MODULE_MSG, M.getModuleIdentifier());
+ dumpRequiredSet(MP);
+
+ initializeAnalysisImpl(MP);
+
+ {
+ PassManagerPrettyStackEntry X(MP, M);
+ TimeRegion PassTimer(getPassTimer(MP));
+
+ LocalChanged |= MP->runOnModule(M);
+ }
+
+ Changed |= LocalChanged;
+ if (LocalChanged)
+ dumpPassInfo(MP, MODIFICATION_MSG, ON_MODULE_MSG,
+ M.getModuleIdentifier());
+ dumpPreservedSet(MP);
+ dumpUsedSet(MP);
+
+ verifyPreservedAnalysis(MP);
+ removeNotPreservedAnalysis(MP);
+ recordAvailableAnalysis(MP);
+ removeDeadPasses(MP, M.getModuleIdentifier(), ON_MODULE_MSG);
+ }
+
+ // Finalize module passes
+ for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index)
+ Changed |= getContainedPass(Index)->doFinalization(M);
+
+ // Finalize on-the-fly passes
+ for (auto &OnTheFlyManager : OnTheFlyManagers) {
+ FunctionPassManagerImpl *FPP = OnTheFlyManager.second;
+ // We don't know when an on-the-fly pass will be run for the last time,
+ // so we need to releaseMemory / finalize here.
+ FPP->releaseMemoryOnTheFly();
+ Changed |= FPP->doFinalization(M);
+ }
+
+ return Changed;
+}
+
+/// Add RequiredPass into the list of lower level passes required by pass P.
+/// RequiredPass is run on the fly by the pass manager when P requests it
+/// through the getAnalysis interface.
+void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
+ assert(P->getPotentialPassManagerType() == PMT_ModulePassManager &&
+ "Unable to handle Pass that requires lower level Analysis pass");
+ assert((P->getPotentialPassManagerType() <
+ RequiredPass->getPotentialPassManagerType()) &&
+ "Unable to handle Pass that requires lower level Analysis pass");
+ if (!RequiredPass)
+ return;
+
+ FunctionPassManagerImpl *FPP = OnTheFlyManagers[P];
+ if (!FPP) {
+ FPP = new FunctionPassManagerImpl();
+ // FPP is the top level manager.
+ FPP->setTopLevelManager(FPP);
+
+ OnTheFlyManagers[P] = FPP;
+ }
+ const PassInfo *RequiredPassPI =
+ TPM->findAnalysisPassInfo(RequiredPass->getPassID());
+
+ Pass *FoundPass = nullptr;
+ if (RequiredPassPI && RequiredPassPI->isAnalysis()) {
+ FoundPass =
+ ((PMTopLevelManager*)FPP)->findAnalysisPass(RequiredPass->getPassID());
+ }
+ if (!FoundPass) {
+ FoundPass = RequiredPass;
+ // This should be guaranteed to add RequiredPass to the pass manager given
+ // that we checked for an available analysis above.
+ FPP->add(RequiredPass);
+ }
+ // Register P as the last user of FoundPass or RequiredPass.
+ SmallVector<Pass *, 1> LU;
+ LU.push_back(FoundPass);
+ FPP->setLastUser(LU, P);
+}
+
+/// Return the function pass corresponding to PassInfo PI that is required
+/// by module pass MP. Instantiate the analysis pass by running its
+/// runOnFunction() on function F.
+Pass* MPPassManager::getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F){
+ FunctionPassManagerImpl *FPP = OnTheFlyManagers[MP];
+ assert(FPP && "Unable to find on the fly pass");
+
+ FPP->releaseMemoryOnTheFly();
+ FPP->run(F);
+ return ((PMTopLevelManager*)FPP)->findAnalysisPass(PI);
+}
+
+
+//===----------------------------------------------------------------------===//
+// PassManagerImpl implementation
+
+//
+/// run - Execute all of the passes scheduled for execution. Keep track of
+/// whether any of the passes modifies the module, and if so, return true.
+bool PassManagerImpl::run(Module &M) {
+ bool Changed = false;
+ TimingInfo::createTheTimeInfo();
+
+ dumpArguments();
+ dumpPasses();
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doInitialization(M);
+
+ initializeAllAnalysisInfo();
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
+ Changed |= getContainedManager(Index)->runOnModule(M);
+ M.getContext().yield();
+ }
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doFinalization(M);
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// PassManager implementation
+
+/// Create new pass manager
+PassManager::PassManager() {
+ PM = new PassManagerImpl();
+ // PM is the top level manager
+ PM->setTopLevelManager(PM);
+}
+
+PassManager::~PassManager() {
+ delete PM;
+}
+
+void PassManager::add(Pass *P) {
+ PM->add(P);
+}
+
+/// run - Execute all of the passes scheduled for execution. Keep track of
+/// whether any of the passes modifies the module, and if so, return true.
+bool PassManager::run(Module &M) {
+ return PM->run(M);
+}
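+
+// Illustrative usage sketch (hypothetical driver code, not part of this
+// file); assumes a Module M is in scope and the verifier pass is linked in:
+//
+//   PassManager PM;
+//   PM.add(createVerifierPass());
+//   bool Modified = PM.run(M);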
+
+//===----------------------------------------------------------------------===//
+// TimingInfo implementation
+
+bool llvm::TimePassesIsEnabled = false;
+static cl::opt<bool,true>
+EnableTiming("time-passes", cl::location(TimePassesIsEnabled),
+ cl::desc("Time each pass, printing elapsed time for each on exit"));
+
+// createTheTimeInfo - This method either initializes the TheTimeInfo pointer to
+// a non-null value (if the -time-passes option is enabled) or it leaves it
+// null. It may be called multiple times.
+void TimingInfo::createTheTimeInfo() {
+ if (!TimePassesIsEnabled || TheTimeInfo) return;
+
+ // Constructed the first time this is called, iff -time-passes is enabled.
+ // This guarantees that the object will be constructed before static globals,
+ // thus it will be destroyed before them.
+ static ManagedStatic<TimingInfo> TTI;
+ TheTimeInfo = &*TTI;
+}
+
+/// If TimingInfo is enabled then start pass timer.
+Timer *llvm::getPassTimer(Pass *P) {
+ if (TheTimeInfo)
+ return TheTimeInfo->getPassTimer(P);
+ return nullptr;
+}
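+
+// For example, running a tool built on this pass manager with -time-passes
+// (e.g. `opt -time-passes -O2 in.ll`) prints a per-pass elapsed-time report
+// on exit; without the flag, getPassTimer returns null and timing is a no-op.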
+
+//===----------------------------------------------------------------------===//
+// PMStack implementation
+//
+
+// Pop Pass Manager from the stack and clear its analysis info.
+void PMStack::pop() {
+
+ PMDataManager *Top = this->top();
+ Top->initializeAnalysisInfo();
+
+ S.pop_back();
+}
+
+// Push PM on the stack and set its top level manager.
+void PMStack::push(PMDataManager *PM) {
+ assert(PM && "Unable to push. Pass Manager expected");
+ assert(PM->getDepth()==0 && "Pass Manager depth set too early");
+
+ if (!this->empty()) {
+ assert(PM->getPassManagerType() > this->top()->getPassManagerType()
+ && "pushing bad pass manager to PMStack");
+ PMTopLevelManager *TPM = this->top()->getTopLevelManager();
+
+ assert(TPM && "Unable to find top level manager");
+ TPM->addIndirectPassManager(PM);
+ PM->setTopLevelManager(TPM);
+ PM->setDepth(this->top()->getDepth()+1);
+ } else {
+ assert((PM->getPassManagerType() == PMT_ModulePassManager
+ || PM->getPassManagerType() == PMT_FunctionPassManager)
+ && "pushing bad pass manager to PMStack");
+ PM->setDepth(1);
+ }
+
+ S.push_back(PM);
+}
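+
+// Illustrative invariant: because each pushed manager must have a strictly
+// greater PassManagerType than the one below it, the stack can only grow in
+// the direction Module -> CallGraph -> Function -> Loop/Region -> BasicBlock,
+// never the reverse.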
+
+// Dump content of the pass manager stack.
+void PMStack::dump() const {
+ for (PMDataManager *Manager : S)
+ dbgs() << Manager->getAsPass()->getPassName() << ' ';
+
+ if (!S.empty())
+ dbgs() << '\n';
+}
+
+/// Find an appropriate Module Pass Manager in the PM stack and
+/// add this pass into that manager.
+void ModulePass::assignPassManager(PMStack &PMS,
+ PassManagerType PreferredType) {
+ // Find Module Pass Manager
+ while (!PMS.empty()) {
+ PassManagerType TopPMType = PMS.top()->getPassManagerType();
+ if (TopPMType == PreferredType)
+ break; // We found desired pass manager
+ else if (TopPMType > PMT_ModulePassManager)
+ PMS.pop(); // Pop children pass managers
+ else
+ break;
+ }
+ assert(!PMS.empty() && "Unable to find appropriate Pass Manager");
+ PMS.top()->add(this);
+}
+
+/// Find an appropriate Function Pass Manager or Call Graph Pass Manager
+/// in the PM stack and add this pass into that manager.
+void FunctionPass::assignPassManager(PMStack &PMS,
+ PassManagerType PreferredType) {
+
+ // Find Function Pass Manager
+ while (!PMS.empty()) {
+ if (PMS.top()->getPassManagerType() > PMT_FunctionPassManager)
+ PMS.pop();
+ else
+ break;
+ }
+
+ // Create new Function Pass Manager if needed.
+ FPPassManager *FPP;
+ if (PMS.top()->getPassManagerType() == PMT_FunctionPassManager) {
+ FPP = (FPPassManager *)PMS.top();
+ } else {
+ assert(!PMS.empty() && "Unable to create Function Pass Manager");
+ PMDataManager *PMD = PMS.top();
+
+ // [1] Create new Function Pass Manager
+ FPP = new FPPassManager();
+ FPP->populateInheritedAnalysis(PMS);
+
+ // [2] Set up new manager's top level manager
+ PMTopLevelManager *TPM = PMD->getTopLevelManager();
+ TPM->addIndirectPassManager(FPP);
+
+ // [3] Assign manager to manage this new manager. This may create
+ // and push new managers into PMS
+ FPP->assignPassManager(PMS, PMD->getPassManagerType());
+
+ // [4] Push new manager into PMS
+ PMS.push(FPP);
+ }
+
+ // Assign FPP as the manager of this pass.
+ FPP->add(this);
+}
+
+/// Find an appropriate Basic Block Pass Manager in the PM stack and
+/// add this pass into that manager.
+void BasicBlockPass::assignPassManager(PMStack &PMS,
+ PassManagerType PreferredType) {
+ BBPassManager *BBP;
+
+ // The Basic Block Pass Manager is a leaf pass manager. It does not handle
+ // any other pass manager.
+ if (!PMS.empty() &&
+ PMS.top()->getPassManagerType() == PMT_BasicBlockPassManager) {
+ BBP = (BBPassManager *)PMS.top();
+ } else {
+ // If the leaf manager is not a Basic Block Pass manager then create a new
+ // Basic Block Pass manager.
+ assert(!PMS.empty() && "Unable to create BasicBlock Pass Manager");
+ PMDataManager *PMD = PMS.top();
+
+ // [1] Create new Basic Block Manager
+ BBP = new BBPassManager();
+
+ // [2] Set up new manager's top level manager
+ // Basic Block Pass Manager does not live by itself
+ PMTopLevelManager *TPM = PMD->getTopLevelManager();
+ TPM->addIndirectPassManager(BBP);
+
+ // [3] Assign manager to manage this new manager. This may create
+ // and push new managers into PMS
+ BBP->assignPassManager(PMS, PreferredType);
+
+ // [4] Push new manager into PMS
+ PMS.push(BBP);
+ }
+
+ // Assign BBP as the manager of this pass.
+ BBP->add(this);
+}
+
+PassManagerBase::~PassManagerBase() {}
diff --git a/contrib/llvm/lib/IR/MDBuilder.cpp b/contrib/llvm/lib/IR/MDBuilder.cpp
new file mode 100644
index 0000000..4ce3ea2
--- /dev/null
+++ b/contrib/llvm/lib/IR/MDBuilder.cpp
@@ -0,0 +1,175 @@
+//===---- llvm/MDBuilder.cpp - Builder for LLVM metadata ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MDBuilder class, which is used as a convenient way to
+// create LLVM metadata with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Metadata.h"
+using namespace llvm;
+
+MDString *MDBuilder::createString(StringRef Str) {
+ return MDString::get(Context, Str);
+}
+
+ConstantAsMetadata *MDBuilder::createConstant(Constant *C) {
+ return ConstantAsMetadata::get(C);
+}
+
+MDNode *MDBuilder::createFPMath(float Accuracy) {
+ if (Accuracy == 0.0)
+ return nullptr;
+ assert(Accuracy > 0.0 && "Invalid fpmath accuracy!");
+ auto *Op =
+ createConstant(ConstantFP::get(Type::getFloatTy(Context), Accuracy));
+ return MDNode::get(Context, Op);
+}
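+
+// Illustrative use (hypothetical snippet; assumes an Instruction *FDiv and
+// an LLVMContext &Ctx are in scope):
+//   MDBuilder MDB(Ctx);
+//   if (MDNode *FPMath = MDB.createFPMath(2.5f))
+//     FDiv->setMetadata(LLVMContext::MD_fpmath, FPMath);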
+
+MDNode *MDBuilder::createBranchWeights(uint32_t TrueWeight,
+ uint32_t FalseWeight) {
+ return createBranchWeights({TrueWeight, FalseWeight});
+}
+
+MDNode *MDBuilder::createBranchWeights(ArrayRef<uint32_t> Weights) {
+ assert(Weights.size() >= 2 && "Need at least two branch weights!");
+
+ SmallVector<Metadata *, 4> Vals(Weights.size() + 1);
+ Vals[0] = createString("branch_weights");
+
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ for (unsigned i = 0, e = Weights.size(); i != e; ++i)
+ Vals[i + 1] = createConstant(ConstantInt::get(Int32Ty, Weights[i]));
+
+ return MDNode::get(Context, Vals);
+}
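+
+// Illustrative use (hypothetical snippet; assumes a BranchInst *Br and an
+// MDBuilder MDB are in scope): bias a branch 95:5 toward the true edge:
+//   Br->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(95, 5));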
+
+MDNode *MDBuilder::createUnpredictable() {
+ return MDNode::get(Context, None);
+}
+
+MDNode *MDBuilder::createFunctionEntryCount(uint64_t Count) {
+ Type *Int64Ty = Type::getInt64Ty(Context);
+ return MDNode::get(Context,
+ {createString("function_entry_count"),
+ createConstant(ConstantInt::get(Int64Ty, Count))});
+}
+
+MDNode *MDBuilder::createRange(const APInt &Lo, const APInt &Hi) {
+ assert(Lo.getBitWidth() == Hi.getBitWidth() && "Mismatched bitwidths!");
+
+ Type *Ty = IntegerType::get(Context, Lo.getBitWidth());
+ return createRange(ConstantInt::get(Ty, Lo), ConstantInt::get(Ty, Hi));
+}
+
+MDNode *MDBuilder::createRange(Constant *Lo, Constant *Hi) {
+ // If the range is everything then it is useless.
+ if (Hi == Lo)
+ return nullptr;
+
+ // Return the range [Lo, Hi).
+ return MDNode::get(Context, {createConstant(Lo), createConstant(Hi)});
+}
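+
+// Illustrative use (hypothetical snippet; assumes a LoadInst *Load and an
+// MDBuilder MDB are in scope): assert the loaded value lies in [0, 256):
+//   Load->setMetadata(LLVMContext::MD_range,
+//                     MDB.createRange(APInt(32, 0), APInt(32, 256)));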
+
+MDNode *MDBuilder::createAnonymousAARoot(StringRef Name, MDNode *Extra) {
+ // To ensure uniqueness the root node is self-referential.
+ auto Dummy = MDNode::getTemporary(Context, None);
+
+ SmallVector<Metadata *, 3> Args(1, Dummy.get());
+ if (Extra)
+ Args.push_back(Extra);
+ if (!Name.empty())
+ Args.push_back(createString(Name));
+ MDNode *Root = MDNode::get(Context, Args);
+
+ // At this point we have
+ // !0 = metadata !{} <- dummy
+ // !1 = metadata !{metadata !0} <- root
+ // Replace the dummy operand with the root node itself and delete the dummy.
+ Root->replaceOperandWith(0, Root);
+
+ // We now have
+ // !1 = metadata !{metadata !1} <- self-referential root
+ return Root;
+}
+
+MDNode *MDBuilder::createTBAARoot(StringRef Name) {
+ return MDNode::get(Context, createString(Name));
+}
+
+/// \brief Return metadata for a non-root TBAA node with the given name,
+/// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
+MDNode *MDBuilder::createTBAANode(StringRef Name, MDNode *Parent,
+ bool isConstant) {
+ if (isConstant) {
+ Constant *Flags = ConstantInt::get(Type::getInt64Ty(Context), 1);
+ return MDNode::get(Context,
+ {createString(Name), Parent, createConstant(Flags)});
+ }
+ return MDNode::get(Context, {createString(Name), Parent});
+}
+
+MDNode *MDBuilder::createAliasScopeDomain(StringRef Name) {
+ return MDNode::get(Context, createString(Name));
+}
+
+MDNode *MDBuilder::createAliasScope(StringRef Name, MDNode *Domain) {
+ return MDNode::get(Context, {createString(Name), Domain});
+}
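+
+// Illustrative use (hypothetical snippet; the names are placeholders): build
+// a scope and attach it as an !alias.scope list on a memory instruction:
+//   MDNode *Domain = MDB.createAliasScopeDomain("my.domain");
+//   MDNode *Scope = MDB.createAliasScope("my.scope", Domain);
+//   Inst->setMetadata(LLVMContext::MD_alias_scope, MDNode::get(Ctx, Scope));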
+
+/// \brief Return metadata for a tbaa.struct node with the given
+/// struct field descriptions.
+MDNode *MDBuilder::createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
+ SmallVector<Metadata *, 4> Vals(Fields.size() * 3);
+ Type *Int64 = Type::getInt64Ty(Context);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Vals[i * 3 + 0] = createConstant(ConstantInt::get(Int64, Fields[i].Offset));
+ Vals[i * 3 + 1] = createConstant(ConstantInt::get(Int64, Fields[i].Size));
+ Vals[i * 3 + 2] = Fields[i].TBAA;
+ }
+ return MDNode::get(Context, Vals);
+}
+
+/// \brief Return metadata for a TBAA struct node in the type DAG
+/// with the given name, a list of pairs (offset, field type in the type DAG).
+MDNode *MDBuilder::createTBAAStructTypeNode(
+ StringRef Name, ArrayRef<std::pair<MDNode *, uint64_t>> Fields) {
+ SmallVector<Metadata *, 4> Ops(Fields.size() * 2 + 1);
+ Type *Int64 = Type::getInt64Ty(Context);
+ Ops[0] = createString(Name);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Ops[i * 2 + 1] = Fields[i].first;
+ Ops[i * 2 + 2] = createConstant(ConstantInt::get(Int64, Fields[i].second));
+ }
+ return MDNode::get(Context, Ops);
+}
+
+/// \brief Return metadata for a TBAA scalar type node with the
+/// given name, an offset and a parent in the TBAA type DAG.
+MDNode *MDBuilder::createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
+ uint64_t Offset) {
+ ConstantInt *Off = ConstantInt::get(Type::getInt64Ty(Context), Offset);
+ return MDNode::get(Context,
+ {createString(Name), Parent, createConstant(Off)});
+}
+
+/// \brief Return metadata for a TBAA tag node with the given
+/// base type, access type and offset relative to the base type.
+MDNode *MDBuilder::createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
+ uint64_t Offset, bool IsConstant) {
+ IntegerType *Int64 = Type::getInt64Ty(Context);
+ ConstantInt *Off = ConstantInt::get(Int64, Offset);
+ if (IsConstant) {
+ return MDNode::get(Context, {BaseType, AccessType, createConstant(Off),
+ createConstant(ConstantInt::get(Int64, 1))});
+ }
+ return MDNode::get(Context, {BaseType, AccessType, createConstant(Off)});
+}
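+
+// Illustrative use (hypothetical snippet; the names are placeholders): build
+// a scalar "int" access tag under a TBAA root and attach it to a store:
+//   MDNode *Root = MDB.createTBAARoot("Simple C/C++ TBAA");
+//   MDNode *IntTy = MDB.createTBAAScalarTypeNode("int", Root);
+//   Store->setMetadata(LLVMContext::MD_tbaa,
+//                      MDB.createTBAAStructTagNode(IntTy, IntTy, 0));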
diff --git a/contrib/llvm/lib/IR/Mangler.cpp b/contrib/llvm/lib/IR/Mangler.cpp
new file mode 100644
index 0000000..016cb9e
--- /dev/null
+++ b/contrib/llvm/lib/IR/Mangler.cpp
@@ -0,0 +1,174 @@
+//===-- Mangler.cpp - Self-contained c/asm llvm name mangler --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Unified name mangler for assembly backends.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Mangler.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+enum ManglerPrefixTy {
+ Default, ///< Emit default string before each symbol.
+ Private, ///< Emit "private" prefix before each symbol.
+ LinkerPrivate ///< Emit "linker private" prefix before each symbol.
+};
+}
+
+static void getNameWithPrefixImpl(raw_ostream &OS, const Twine &GVName,
+ ManglerPrefixTy PrefixTy,
+ const DataLayout &DL, char Prefix) {
+ SmallString<256> TmpData;
+ StringRef Name = GVName.toStringRef(TmpData);
+ assert(!Name.empty() && "getNameWithPrefix requires non-empty name");
+
+ // No need to do anything special if the global has the special "do not
+ // mangle" flag in the name.
+ if (Name[0] == '\1') {
+ OS << Name.substr(1);
+ return;
+ }
+
+ if (PrefixTy == Private)
+ OS << DL.getPrivateGlobalPrefix();
+ else if (PrefixTy == LinkerPrivate)
+ OS << DL.getLinkerPrivateGlobalPrefix();
+
+ if (Prefix != '\0')
+ OS << Prefix;
+
+ // If this is a simple string that doesn't need escaping, just append it.
+ OS << Name;
+}
+
+static void getNameWithPrefixImpl(raw_ostream &OS, const Twine &GVName,
+ const DataLayout &DL,
+ ManglerPrefixTy PrefixTy) {
+ char Prefix = DL.getGlobalPrefix();
+ return getNameWithPrefixImpl(OS, GVName, PrefixTy, DL, Prefix);
+}
+
+void Mangler::getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
+ const DataLayout &DL) {
+ return getNameWithPrefixImpl(OS, GVName, DL, Default);
+}
+
+void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const Twine &GVName, const DataLayout &DL) {
+ raw_svector_ostream OS(OutName);
+ char Prefix = DL.getGlobalPrefix();
+ return getNameWithPrefixImpl(OS, GVName, Default, DL, Prefix);
+}
+
+static bool hasByteCountSuffix(CallingConv::ID CC) {
+ switch (CC) {
+ case CallingConv::X86_FastCall:
+ case CallingConv::X86_StdCall:
+ case CallingConv::X86_VectorCall:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Microsoft fastcall and stdcall functions require a suffix on their name
+/// indicating the cumulative byte size of their arguments, with each
+/// argument's size rounded up to a pointer-sized word.
+static void addByteCountSuffix(raw_ostream &OS, const Function *F,
+ const DataLayout &DL) {
+ // Calculate the total size of the arguments.
+ unsigned ArgWords = 0;
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ Type *Ty = AI->getType();
+ // 'Dereference' type in case of byval or inalloca parameter attribute.
+ if (AI->hasByValOrInAllocaAttr())
+ Ty = cast<PointerType>(Ty)->getElementType();
+ // Size should be aligned to pointer size.
+ unsigned PtrSize = DL.getPointerSize();
+ ArgWords += RoundUpToAlignment(DL.getTypeAllocSize(Ty), PtrSize);
+ }
+
+ OS << '@' << ArgWords;
+}
+
+void Mangler::getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const {
+ ManglerPrefixTy PrefixTy = Default;
+ if (GV->hasPrivateLinkage()) {
+ if (CannotUsePrivateLabel)
+ PrefixTy = LinkerPrivate;
+ else
+ PrefixTy = Private;
+ }
+
+ const DataLayout &DL = GV->getParent()->getDataLayout();
+ if (!GV->hasName()) {
+ // Get the ID for the global, assigning a new one if we haven't got one
+ // already.
+ unsigned &ID = AnonGlobalIDs[GV];
+ if (ID == 0)
+ ID = NextAnonGlobalID++;
+
+ // Must mangle the global into a unique ID.
+ getNameWithPrefixImpl(OS, "__unnamed_" + Twine(ID), DL, PrefixTy);
+ return;
+ }
+
+ StringRef Name = GV->getName();
+ char Prefix = DL.getGlobalPrefix();
+
+ // Mangle functions with Microsoft calling conventions specially. Only do
+ // this mangling for x86_64 vectorcall and 32-bit x86.
+ const Function *MSFunc = dyn_cast<Function>(GV);
+ if (Name.startswith("\01"))
+ MSFunc = nullptr; // Don't mangle when \01 is present.
+ CallingConv::ID CC =
+ MSFunc ? MSFunc->getCallingConv() : (unsigned)CallingConv::C;
+ if (!DL.hasMicrosoftFastStdCallMangling() &&
+ CC != CallingConv::X86_VectorCall)
+ MSFunc = nullptr;
+ if (MSFunc) {
+ if (CC == CallingConv::X86_FastCall)
+ Prefix = '@'; // fastcall functions have an @ prefix instead of _.
+ else if (CC == CallingConv::X86_VectorCall)
+ Prefix = '\0'; // vectorcall functions have no prefix.
+ }
+
+ getNameWithPrefixImpl(OS, Name, PrefixTy, DL, Prefix);
+
+ if (!MSFunc)
+ return;
+
+ // If we are supposed to add a microsoft-style suffix for stdcall, fastcall,
+ // or vectorcall, add it. These functions have a suffix of @N where N is the
+ // cumulative byte size of all of the parameters to the function in decimal.
+ if (CC == CallingConv::X86_VectorCall)
+ OS << '@'; // vectorcall functions use a double @ suffix.
+ FunctionType *FT = MSFunc->getFunctionType();
+ if (hasByteCountSuffix(CC) &&
+ // "Pure" variadic functions do not receive @0 suffix.
+ (!FT->isVarArg() || FT->getNumParams() == 0 ||
+ (FT->getNumParams() == 1 && MSFunc->hasStructRetAttr())))
+ addByteCountSuffix(OS, MSFunc, DL);
+}
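+
+// Illustrative results (assuming 32-bit x86 COFF, where pointers are 4
+// bytes): for a function f(int, int), stdcall mangles to "_f@8", fastcall
+// to "@f@8", and vectorcall to "f@@8".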
+
+void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const {
+ raw_svector_ostream OS(OutName);
+ getNameWithPrefix(OS, GV, CannotUsePrivateLabel);
+}
diff --git a/contrib/llvm/lib/IR/Metadata.cpp b/contrib/llvm/lib/IR/Metadata.cpp
new file mode 100644
index 0000000..9a9a501
--- /dev/null
+++ b/contrib/llvm/lib/IR/Metadata.cpp
@@ -0,0 +1,1322 @@
+//===- Metadata.cpp - Implement Metadata classes --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Metadata classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Metadata.h"
+#include "LLVMContextImpl.h"
+#include "MetadataImpl.h"
+#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
+
+using namespace llvm;
+
+MetadataAsValue::MetadataAsValue(Type *Ty, Metadata *MD)
+ : Value(Ty, MetadataAsValueVal), MD(MD) {
+ track();
+}
+
+MetadataAsValue::~MetadataAsValue() {
+ getType()->getContext().pImpl->MetadataAsValues.erase(MD);
+ untrack();
+}
+
+/// \brief Canonicalize metadata arguments to intrinsics.
+///
+/// To support bitcode upgrades (and assembly semantic sugar) for \a
+/// MetadataAsValue, we need to canonicalize certain metadata.
+///
+/// - nullptr is replaced by an empty MDNode.
+/// - An MDNode with a single null operand is replaced by an empty MDNode.
+/// - An MDNode whose only operand is a \a ConstantAsMetadata gets skipped.
+///
+/// This maintains readability of bitcode from when metadata was a type of
+/// value, and these bridges were unnecessary.
+static Metadata *canonicalizeMetadataForValue(LLVMContext &Context,
+ Metadata *MD) {
+ if (!MD)
+ // !{}
+ return MDNode::get(Context, None);
+
+ // Return early if this isn't a single-operand MDNode.
+ auto *N = dyn_cast<MDNode>(MD);
+ if (!N || N->getNumOperands() != 1)
+ return MD;
+
+ if (!N->getOperand(0))
+ // !{}
+ return MDNode::get(Context, None);
+
+ if (auto *C = dyn_cast<ConstantAsMetadata>(N->getOperand(0)))
+ // Look through the MDNode.
+ return C;
+
+ return MD;
+}
+
+MetadataAsValue *MetadataAsValue::get(LLVMContext &Context, Metadata *MD) {
+ MD = canonicalizeMetadataForValue(Context, MD);
+ auto *&Entry = Context.pImpl->MetadataAsValues[MD];
+ if (!Entry)
+ Entry = new MetadataAsValue(Type::getMetadataTy(Context), MD);
+ return Entry;
+}
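+
+// Illustrative property (hypothetical snippet; assumes an LLVMContext &Ctx):
+// wrappers are uniqued per canonicalized metadata, so repeated lookups
+// yield the same Value:
+//   auto *V1 = MetadataAsValue::get(Ctx, MDString::get(Ctx, "x"));
+//   auto *V2 = MetadataAsValue::get(Ctx, MDString::get(Ctx, "x"));
+//   assert(V1 == V2);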
+
+MetadataAsValue *MetadataAsValue::getIfExists(LLVMContext &Context,
+ Metadata *MD) {
+ MD = canonicalizeMetadataForValue(Context, MD);
+ auto &Store = Context.pImpl->MetadataAsValues;
+ return Store.lookup(MD);
+}
+
+void MetadataAsValue::handleChangedMetadata(Metadata *MD) {
+ LLVMContext &Context = getContext();
+ MD = canonicalizeMetadataForValue(Context, MD);
+ auto &Store = Context.pImpl->MetadataAsValues;
+
+ // Stop tracking the old metadata.
+ Store.erase(this->MD);
+ untrack();
+ this->MD = nullptr;
+
+ // Start tracking MD, or RAUW if necessary.
+ auto *&Entry = Store[MD];
+ if (Entry) {
+ replaceAllUsesWith(Entry);
+ delete this;
+ return;
+ }
+
+ this->MD = MD;
+ track();
+ Entry = this;
+}
+
+void MetadataAsValue::track() {
+ if (MD)
+ MetadataTracking::track(&MD, *MD, *this);
+}
+
+void MetadataAsValue::untrack() {
+ if (MD)
+ MetadataTracking::untrack(MD);
+}
+
+bool MetadataTracking::track(void *Ref, Metadata &MD, OwnerTy Owner) {
+ assert(Ref && "Expected live reference");
+ assert((Owner || *static_cast<Metadata **>(Ref) == &MD) &&
+ "Reference without owner must be direct");
+ if (auto *R = ReplaceableMetadataImpl::get(MD)) {
+ R->addRef(Ref, Owner);
+ return true;
+ }
+ return false;
+}
+
+void MetadataTracking::untrack(void *Ref, Metadata &MD) {
+ assert(Ref && "Expected live reference");
+ if (auto *R = ReplaceableMetadataImpl::get(MD))
+ R->dropRef(Ref);
+}
+
+bool MetadataTracking::retrack(void *Ref, Metadata &MD, void *New) {
+ assert(Ref && "Expected live reference");
+ assert(New && "Expected live reference");
+ assert(Ref != New && "Expected change");
+ if (auto *R = ReplaceableMetadataImpl::get(MD)) {
+ R->moveRef(Ref, New, MD);
+ return true;
+ }
+ return false;
+}
+
+bool MetadataTracking::isReplaceable(const Metadata &MD) {
+ return ReplaceableMetadataImpl::get(const_cast<Metadata &>(MD));
+}
+
+void ReplaceableMetadataImpl::addRef(void *Ref, OwnerTy Owner) {
+ bool WasInserted =
+ UseMap.insert(std::make_pair(Ref, std::make_pair(Owner, NextIndex)))
+ .second;
+ (void)WasInserted;
+ assert(WasInserted && "Expected to add a reference");
+
+ ++NextIndex;
+ assert(NextIndex != 0 && "Unexpected overflow");
+}
+
+void ReplaceableMetadataImpl::dropRef(void *Ref) {
+ bool WasErased = UseMap.erase(Ref);
+ (void)WasErased;
+ assert(WasErased && "Expected to drop a reference");
+}
+
+void ReplaceableMetadataImpl::moveRef(void *Ref, void *New,
+ const Metadata &MD) {
+ auto I = UseMap.find(Ref);
+ assert(I != UseMap.end() && "Expected to move a reference");
+ auto OwnerAndIndex = I->second;
+ UseMap.erase(I);
+ bool WasInserted = UseMap.insert(std::make_pair(New, OwnerAndIndex)).second;
+ (void)WasInserted;
+ assert(WasInserted && "Expected to add a reference");
+
+ // Check that the references are direct if there's no owner.
+ (void)MD;
+ assert((OwnerAndIndex.first || *static_cast<Metadata **>(Ref) == &MD) &&
+ "Reference without owner must be direct");
+ assert((OwnerAndIndex.first || *static_cast<Metadata **>(New) == &MD) &&
+ "Reference without owner must be direct");
+}
+
+void ReplaceableMetadataImpl::replaceAllUsesWith(Metadata *MD) {
+ assert(!(MD && isa<MDNode>(MD) && cast<MDNode>(MD)->isTemporary()) &&
+ "Expected non-temp node");
+ assert(CanReplace &&
+ "Attempted to replace Metadata marked for no replacement");
+
+ if (UseMap.empty())
+ return;
+
+ // Copy out uses since UseMap will get touched below.
+ typedef std::pair<void *, std::pair<OwnerTy, uint64_t>> UseTy;
+ SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
+ std::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+ return L.second.second < R.second.second;
+ });
+ for (const auto &Pair : Uses) {
+ // Check that this Ref hasn't disappeared after RAUW (when updating a
+ // previous Ref).
+ if (!UseMap.count(Pair.first))
+ continue;
+
+ OwnerTy Owner = Pair.second.first;
+ if (!Owner) {
+ // Update unowned tracking references directly.
+ Metadata *&Ref = *static_cast<Metadata **>(Pair.first);
+ Ref = MD;
+ if (MD)
+ MetadataTracking::track(Ref);
+ UseMap.erase(Pair.first);
+ continue;
+ }
+
+ // Check for MetadataAsValue.
+ if (Owner.is<MetadataAsValue *>()) {
+ Owner.get<MetadataAsValue *>()->handleChangedMetadata(MD);
+ continue;
+ }
+
+ // There's a Metadata owner -- dispatch.
+ Metadata *OwnerMD = Owner.get<Metadata *>();
+ switch (OwnerMD->getMetadataID()) {
+#define HANDLE_METADATA_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ cast<CLASS>(OwnerMD)->handleChangedOperand(Pair.first, MD); \
+ continue;
+#include "llvm/IR/Metadata.def"
+ default:
+ llvm_unreachable("Invalid metadata subclass");
+ }
+ }
+ assert(UseMap.empty() && "Expected all uses to be replaced");
+}
+
+void ReplaceableMetadataImpl::resolveAllUses(bool ResolveUsers) {
+ if (UseMap.empty())
+ return;
+
+ if (!ResolveUsers) {
+ UseMap.clear();
+ return;
+ }
+
+ // Copy out uses since UseMap could get touched below.
+ typedef std::pair<void *, std::pair<OwnerTy, uint64_t>> UseTy;
+ SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
+ std::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+ return L.second.second < R.second.second;
+ });
+ UseMap.clear();
+ for (const auto &Pair : Uses) {
+ auto Owner = Pair.second.first;
+ if (!Owner)
+ continue;
+ if (Owner.is<MetadataAsValue *>())
+ continue;
+
+ // Resolve MDNodes that point at this.
+ auto *OwnerMD = dyn_cast<MDNode>(Owner.get<Metadata *>());
+ if (!OwnerMD)
+ continue;
+ if (OwnerMD->isResolved())
+ continue;
+ OwnerMD->decrementUnresolvedOperandCount();
+ }
+}
+
+ReplaceableMetadataImpl *ReplaceableMetadataImpl::get(Metadata &MD) {
+ if (auto *N = dyn_cast<MDNode>(&MD))
+ return N->Context.getReplaceableUses();
+ return dyn_cast<ValueAsMetadata>(&MD);
+}
+
+static Function *getLocalFunction(Value *V) {
+ assert(V && "Expected value");
+ if (auto *A = dyn_cast<Argument>(V))
+ return A->getParent();
+ if (BasicBlock *BB = cast<Instruction>(V)->getParent())
+ return BB->getParent();
+ return nullptr;
+}
+
+ValueAsMetadata *ValueAsMetadata::get(Value *V) {
+ assert(V && "Unexpected null Value");
+
+ auto &Context = V->getContext();
+ auto *&Entry = Context.pImpl->ValuesAsMetadata[V];
+ if (!Entry) {
+ assert((isa<Constant>(V) || isa<Argument>(V) || isa<Instruction>(V)) &&
+ "Expected constant or function-local value");
+ assert(!V->IsUsedByMD &&
+ "Expected this to be the only metadata use");
+ V->IsUsedByMD = true;
+ if (auto *C = dyn_cast<Constant>(V))
+ Entry = new ConstantAsMetadata(C);
+ else
+ Entry = new LocalAsMetadata(V);
+ }
+
+ return Entry;
+}
+
+ValueAsMetadata *ValueAsMetadata::getIfExists(Value *V) {
+ assert(V && "Unexpected null Value");
+ return V->getContext().pImpl->ValuesAsMetadata.lookup(V);
+}
+
+void ValueAsMetadata::handleDeletion(Value *V) {
+ assert(V && "Expected valid value");
+
+ auto &Store = V->getType()->getContext().pImpl->ValuesAsMetadata;
+ auto I = Store.find(V);
+ if (I == Store.end())
+ return;
+
+ // Remove old entry from the map.
+ ValueAsMetadata *MD = I->second;
+ assert(MD && "Expected valid metadata");
+ assert(MD->getValue() == V && "Expected valid mapping");
+ Store.erase(I);
+
+ // Delete the metadata.
+ MD->replaceAllUsesWith(nullptr);
+ delete MD;
+}
+
+void ValueAsMetadata::handleRAUW(Value *From, Value *To) {
+ assert(From && "Expected valid value");
+ assert(To && "Expected valid value");
+ assert(From != To && "Expected changed value");
+ assert(From->getType() == To->getType() && "Unexpected type change");
+
+ LLVMContext &Context = From->getType()->getContext();
+ auto &Store = Context.pImpl->ValuesAsMetadata;
+ auto I = Store.find(From);
+ if (I == Store.end()) {
+ assert(!From->IsUsedByMD &&
+ "Expected From not to be used by metadata");
+ return;
+ }
+
+ // Remove old entry from the map.
+ assert(From->IsUsedByMD &&
+ "Expected From to be used by metadata");
+ From->IsUsedByMD = false;
+ ValueAsMetadata *MD = I->second;
+ assert(MD && "Expected valid metadata");
+ assert(MD->getValue() == From && "Expected valid mapping");
+ Store.erase(I);
+
+ if (isa<LocalAsMetadata>(MD)) {
+ if (auto *C = dyn_cast<Constant>(To)) {
+ // Local became a constant.
+ MD->replaceAllUsesWith(ConstantAsMetadata::get(C));
+ delete MD;
+ return;
+ }
+ if (getLocalFunction(From) && getLocalFunction(To) &&
+ getLocalFunction(From) != getLocalFunction(To)) {
+ // Function changed.
+ MD->replaceAllUsesWith(nullptr);
+ delete MD;
+ return;
+ }
+ } else if (!isa<Constant>(To)) {
+ // Changed to function-local value.
+ MD->replaceAllUsesWith(nullptr);
+ delete MD;
+ return;
+ }
+
+ auto *&Entry = Store[To];
+ if (Entry) {
+ // The target already exists.
+ MD->replaceAllUsesWith(Entry);
+ delete MD;
+ return;
+ }
+
+ // Update MD in place (and update the map entry).
+ assert(!To->IsUsedByMD &&
+ "Expected this to be the only metadata use");
+ To->IsUsedByMD = true;
+ MD->V = To;
+ Entry = MD;
+}
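+
+// A minimal sketch of how handleRAUW is reached in practice; V0, V1, and MD
+// are illustrative names, and both values are assumed to be instructions of
+// the same type within the same function:
+//
+//   ValueAsMetadata *MD = ValueAsMetadata::get(V0);
+//   V0->replaceAllUsesWith(V1);   // Value RAUW forwards to handleRAUW(V0, V1).
+//   assert(MD->getValue() == V1); // Updated in place if V1 had no entry yet.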
+
+//===----------------------------------------------------------------------===//
+// MDString implementation.
+//
+
+MDString *MDString::get(LLVMContext &Context, StringRef Str) {
+ auto &Store = Context.pImpl->MDStringCache;
+ auto I = Store.find(Str);
+ if (I != Store.end())
+ return &I->second;
+
+ auto *Entry =
+ StringMapEntry<MDString>::Create(Str, Store.getAllocator(), MDString());
+ bool WasInserted = Store.insert(Entry);
+ (void)WasInserted;
+ assert(WasInserted && "Expected entry to be inserted");
+ Entry->second.Entry = Entry;
+ return &Entry->second;
+}
+
+StringRef MDString::getString() const {
+ assert(Entry && "Expected to find string map entry");
+ return Entry->first();
+}
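+
+// Usage sketch (Ctx is an assumed LLVMContext): MDStrings are uniqued in the
+// per-context MDStringCache above, so repeated lookups of equal strings
+// return the same object.
+//
+//   MDString *S = MDString::get(Ctx, "key");
+//   assert(S == MDString::get(Ctx, "key"));
+//   assert(S->getString() == "key");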
+
+//===----------------------------------------------------------------------===//
+// MDNode implementation.
+//
+
+// Assert that the MDNode types will not be unaligned by the objects
+// prepended to them.
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ static_assert( \
+ llvm::AlignOf<uint64_t>::Alignment >= llvm::AlignOf<CLASS>::Alignment, \
+ "Alignment is insufficient after objects prepended to " #CLASS);
+#include "llvm/IR/Metadata.def"
+
+void *MDNode::operator new(size_t Size, unsigned NumOps) {
+ size_t OpSize = NumOps * sizeof(MDOperand);
+  // uint64_t is the most-aligned type we need to support (ensured by the
+  // static_assert above).
+ OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+ void *Ptr = reinterpret_cast<char *>(::operator new(OpSize + Size)) + OpSize;
+ MDOperand *O = static_cast<MDOperand *>(Ptr);
+ for (MDOperand *E = O - NumOps; O != E; --O)
+ (void)new (O - 1) MDOperand;
+ return Ptr;
+}
+
+void MDNode::operator delete(void *Mem) {
+ MDNode *N = static_cast<MDNode *>(Mem);
+ size_t OpSize = N->NumOperands * sizeof(MDOperand);
+ OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+
+ MDOperand *O = static_cast<MDOperand *>(Mem);
+ for (MDOperand *E = O - N->NumOperands; O != E; --O)
+ (O - 1)->~MDOperand();
+ ::operator delete(reinterpret_cast<char *>(Mem) - OpSize);
+}
+
+MDNode::MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
+ ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2)
+ : Metadata(ID, Storage), NumOperands(Ops1.size() + Ops2.size()),
+ NumUnresolved(0), Context(Context) {
+ unsigned Op = 0;
+ for (Metadata *MD : Ops1)
+ setOperand(Op++, MD);
+ for (Metadata *MD : Ops2)
+ setOperand(Op++, MD);
+
+ if (isDistinct())
+ return;
+
+ if (isUniqued())
+ // Check whether any operands are unresolved, requiring re-uniquing. If
+ // not, don't support RAUW.
+ if (!countUnresolvedOperands())
+ return;
+
+ this->Context.makeReplaceable(make_unique<ReplaceableMetadataImpl>(Context));
+}
+
+TempMDNode MDNode::clone() const {
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid MDNode subclass");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case CLASS##Kind: \
+ return cast<CLASS>(this)->cloneImpl();
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+static bool isOperandUnresolved(Metadata *Op) {
+ if (auto *N = dyn_cast_or_null<MDNode>(Op))
+ return !N->isResolved();
+ return false;
+}
+
+unsigned MDNode::countUnresolvedOperands() {
+ assert(NumUnresolved == 0 && "Expected unresolved ops to be uncounted");
+ NumUnresolved = std::count_if(op_begin(), op_end(), isOperandUnresolved);
+ return NumUnresolved;
+}
+
+void MDNode::makeUniqued() {
+ assert(isTemporary() && "Expected this to be temporary");
+ assert(!isResolved() && "Expected this to be unresolved");
+
+ // Enable uniquing callbacks.
+ for (auto &Op : mutable_operands())
+ Op.reset(Op.get(), this);
+
+ // Make this 'uniqued'.
+ Storage = Uniqued;
+ if (!countUnresolvedOperands())
+ resolve();
+
+ assert(isUniqued() && "Expected this to be uniqued");
+}
+
+void MDNode::makeDistinct() {
+ assert(isTemporary() && "Expected this to be temporary");
+ assert(!isResolved() && "Expected this to be unresolved");
+
+ // Pretend to be uniqued, resolve the node, and then store in distinct table.
+ Storage = Uniqued;
+ resolve();
+ storeDistinctInContext();
+
+ assert(isDistinct() && "Expected this to be distinct");
+ assert(isResolved() && "Expected this to be resolved");
+}
+
+void MDNode::resolve() {
+ assert(isUniqued() && "Expected this to be uniqued");
+ assert(!isResolved() && "Expected this to be unresolved");
+
+ // Move the map, so that this immediately looks resolved.
+ auto Uses = Context.takeReplaceableUses();
+ NumUnresolved = 0;
+ assert(isResolved() && "Expected this to be resolved");
+
+ // Drop RAUW support.
+ Uses->resolveAllUses();
+}
+
+void MDNode::resolveAfterOperandChange(Metadata *Old, Metadata *New) {
+ assert(NumUnresolved != 0 && "Expected unresolved operands");
+
+ // Check if an operand was resolved.
+ if (!isOperandUnresolved(Old)) {
+ if (isOperandUnresolved(New))
+ // An operand was un-resolved!
+ ++NumUnresolved;
+ } else if (!isOperandUnresolved(New))
+ decrementUnresolvedOperandCount();
+}
+
+void MDNode::decrementUnresolvedOperandCount() {
+ if (!--NumUnresolved)
+ // Last unresolved operand has just been resolved.
+ resolve();
+}
+
+void MDNode::resolveRecursivelyImpl(bool AllowTemps) {
+ if (isResolved())
+ return;
+
+ // Resolve this node immediately.
+ resolve();
+
+ // Resolve all operands.
+ for (const auto &Op : operands()) {
+ auto *N = dyn_cast_or_null<MDNode>(Op);
+ if (!N)
+ continue;
+
+ if (N->isTemporary() && AllowTemps)
+ continue;
+ assert(!N->isTemporary() &&
+ "Expected all forward declarations to be resolved");
+ if (!N->isResolved())
+ N->resolveCycles();
+ }
+}
+
+static bool hasSelfReference(MDNode *N) {
+ for (Metadata *MD : N->operands())
+ if (MD == N)
+ return true;
+ return false;
+}
+
+MDNode *MDNode::replaceWithPermanentImpl() {
+ switch (getMetadataID()) {
+ default:
+ // If this type isn't uniquable, replace with a distinct node.
+ return replaceWithDistinctImpl();
+
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ case CLASS##Kind: \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+
+ // Even if this type is uniquable, self-references have to be distinct.
+ if (hasSelfReference(this))
+ return replaceWithDistinctImpl();
+ return replaceWithUniquedImpl();
+}
+
+MDNode *MDNode::replaceWithUniquedImpl() {
+ // Try to uniquify in place.
+ MDNode *UniquedNode = uniquify();
+
+ if (UniquedNode == this) {
+ makeUniqued();
+ return this;
+ }
+
+ // Collision, so RAUW instead.
+ replaceAllUsesWith(UniquedNode);
+ deleteAsSubclass();
+ return UniquedNode;
+}
+
+MDNode *MDNode::replaceWithDistinctImpl() {
+ makeDistinct();
+ return this;
+}
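+
+// Sketch of the intended client pattern behind the replaceWith*Impl methods,
+// reached via MDNode::replaceWithUniqued and friends (Ctx and Ops are assumed
+// names for a context and an ArrayRef of operands):
+//
+//   TempMDTuple Temp = MDTuple::getTemporary(Ctx, Ops);
+//   // ... mutate Temp's operands while the node is under construction ...
+//   MDNode *N = MDNode::replaceWithUniqued(std::move(Temp));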
+
+void MDTuple::recalculateHash() {
+ setHash(MDTupleInfo::KeyTy::calculateHash(this));
+}
+
+void MDNode::dropAllReferences() {
+ for (unsigned I = 0, E = NumOperands; I != E; ++I)
+ setOperand(I, nullptr);
+ if (!isResolved()) {
+ Context.getReplaceableUses()->resolveAllUses(/* ResolveUsers */ false);
+ (void)Context.takeReplaceableUses();
+ }
+}
+
+void MDNode::handleChangedOperand(void *Ref, Metadata *New) {
+ unsigned Op = static_cast<MDOperand *>(Ref) - op_begin();
+ assert(Op < getNumOperands() && "Expected valid operand");
+
+ if (!isUniqued()) {
+ // This node is not uniqued. Just set the operand and be done with it.
+ setOperand(Op, New);
+ return;
+ }
+
+ // This node is uniqued.
+ eraseFromStore();
+
+ Metadata *Old = getOperand(Op);
+ setOperand(Op, New);
+
+ // Drop uniquing for self-reference cycles.
+ if (New == this) {
+ if (!isResolved())
+ resolve();
+ storeDistinctInContext();
+ return;
+ }
+
+ // Re-unique the node.
+ auto *Uniqued = uniquify();
+ if (Uniqued == this) {
+ if (!isResolved())
+ resolveAfterOperandChange(Old, New);
+ return;
+ }
+
+ // Collision.
+ if (!isResolved()) {
+ // Still unresolved, so RAUW.
+ //
+ // First, clear out all operands to prevent any recursion (similar to
+ // dropAllReferences(), but we still need the use-list).
+ for (unsigned O = 0, E = getNumOperands(); O != E; ++O)
+ setOperand(O, nullptr);
+ Context.getReplaceableUses()->replaceAllUsesWith(Uniqued);
+ deleteAsSubclass();
+ return;
+ }
+
+ // Store in non-uniqued form if RAUW isn't possible.
+ storeDistinctInContext();
+}
+
+void MDNode::deleteAsSubclass() {
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid subclass of MDNode");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case CLASS##Kind: \
+ delete cast<CLASS>(this); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+template <class T, class InfoT>
+static T *uniquifyImpl(T *N, DenseSet<T *, InfoT> &Store) {
+ if (T *U = getUniqued(Store, N))
+ return U;
+
+ Store.insert(N);
+ return N;
+}
+
+template <class NodeTy> struct MDNode::HasCachedHash {
+ typedef char Yes[1];
+ typedef char No[2];
+ template <class U, U Val> struct SFINAE {};
+
+ template <class U>
+ static Yes &check(SFINAE<void (U::*)(unsigned), &U::setHash> *);
+ template <class U> static No &check(...);
+
+ static const bool value = sizeof(check<NodeTy>(nullptr)) == sizeof(Yes);
+};
+
+MDNode *MDNode::uniquify() {
+ assert(!hasSelfReference(this) && "Cannot uniquify a self-referencing node");
+
+ // Try to insert into uniquing store.
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid or non-uniquable subclass of MDNode");
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ case CLASS##Kind: { \
+ CLASS *SubclassThis = cast<CLASS>(this); \
+ std::integral_constant<bool, HasCachedHash<CLASS>::value> \
+ ShouldRecalculateHash; \
+ dispatchRecalculateHash(SubclassThis, ShouldRecalculateHash); \
+ return uniquifyImpl(SubclassThis, getContext().pImpl->CLASS##s); \
+ }
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+void MDNode::eraseFromStore() {
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid or non-uniquable subclass of MDNode");
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ case CLASS##Kind: \
+ getContext().pImpl->CLASS##s.erase(cast<CLASS>(this)); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+MDTuple *MDTuple::getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs,
+ StorageType Storage, bool ShouldCreate) {
+ unsigned Hash = 0;
+ if (Storage == Uniqued) {
+ MDTupleInfo::KeyTy Key(MDs);
+ if (auto *N = getUniqued(Context.pImpl->MDTuples, Key))
+ return N;
+ if (!ShouldCreate)
+ return nullptr;
+ Hash = Key.getHash();
+ } else {
+ assert(ShouldCreate && "Expected non-uniqued nodes to always be created");
+ }
+
+ return storeImpl(new (MDs.size()) MDTuple(Context, Storage, Hash, MDs),
+ Storage, Context.pImpl->MDTuples);
+}
+
+void MDNode::deleteTemporary(MDNode *N) {
+ assert(N->isTemporary() && "Expected temporary node");
+ N->replaceAllUsesWith(nullptr);
+ N->deleteAsSubclass();
+}
+
+void MDNode::storeDistinctInContext() {
+ assert(isResolved() && "Expected resolved nodes");
+ Storage = Distinct;
+
+ // Reset the hash.
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid subclass of MDNode");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case CLASS##Kind: { \
+ std::integral_constant<bool, HasCachedHash<CLASS>::value> ShouldResetHash; \
+ dispatchResetHash(cast<CLASS>(this), ShouldResetHash); \
+ break; \
+ }
+#include "llvm/IR/Metadata.def"
+ }
+
+ getContext().pImpl->DistinctMDNodes.insert(this);
+}
+
+void MDNode::replaceOperandWith(unsigned I, Metadata *New) {
+ if (getOperand(I) == New)
+ return;
+
+ if (!isUniqued()) {
+ setOperand(I, New);
+ return;
+ }
+
+ handleChangedOperand(mutable_begin() + I, New);
+}
+
+void MDNode::setOperand(unsigned I, Metadata *New) {
+ assert(I < NumOperands);
+ mutable_begin()[I].reset(New, isUniqued() ? this : nullptr);
+}
+
+/// \brief Get a node, or a self-reference that looks like it.
+///
+/// Special handling for finding self-references, for use by \a
+/// MDNode::concatenate() and \a MDNode::intersect() to maintain behaviour from
+/// when self-referencing nodes were still uniqued. If the first operand has
+/// the same operands as \c Ops, return the first operand instead.
+static MDNode *getOrSelfReference(LLVMContext &Context,
+ ArrayRef<Metadata *> Ops) {
+ if (!Ops.empty())
+ if (MDNode *N = dyn_cast_or_null<MDNode>(Ops[0]))
+ if (N->getNumOperands() == Ops.size() && N == N->getOperand(0)) {
+ for (unsigned I = 1, E = Ops.size(); I != E; ++I)
+ if (Ops[I] != N->getOperand(I))
+ return MDNode::get(Context, Ops);
+ return N;
+ }
+
+ return MDNode::get(Context, Ops);
+}
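+
+// For illustration, the self-referential shape this helper preserves, in
+// textual IR (operand 0 is the node itself, as old-style uniqued scope lists
+// were written):
+//
+//   !0 = !{!0, !"illustrative.domain"}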
+
+MDNode *MDNode::concatenate(MDNode *A, MDNode *B) {
+ if (!A)
+ return B;
+ if (!B)
+ return A;
+
+ SmallVector<Metadata *, 4> MDs;
+ MDs.reserve(A->getNumOperands() + B->getNumOperands());
+ MDs.append(A->op_begin(), A->op_end());
+ MDs.append(B->op_begin(), B->op_end());
+
+ // FIXME: This preserves long-standing behaviour, but is it really the right
+ // behaviour? Or was that an unintended side-effect of node uniquing?
+ return getOrSelfReference(A->getContext(), MDs);
+}
+
+MDNode *MDNode::intersect(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ SmallVector<Metadata *, 4> MDs;
+ for (Metadata *MD : A->operands())
+ if (std::find(B->op_begin(), B->op_end(), MD) != B->op_end())
+ MDs.push_back(MD);
+
+ // FIXME: This preserves long-standing behaviour, but is it really the right
+ // behaviour? Or was that an unintended side-effect of node uniquing?
+ return getOrSelfReference(A->getContext(), MDs);
+}
+
+MDNode *MDNode::getMostGenericAliasScope(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ SmallVector<Metadata *, 4> MDs(B->op_begin(), B->op_end());
+ for (Metadata *MD : A->operands())
+ if (std::find(B->op_begin(), B->op_end(), MD) == B->op_end())
+ MDs.push_back(MD);
+
+ // FIXME: This preserves long-standing behaviour, but is it really the right
+ // behaviour? Or was that an unintended side-effect of node uniquing?
+ return getOrSelfReference(A->getContext(), MDs);
+}
+
+MDNode *MDNode::getMostGenericFPMath(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ APFloat AVal = mdconst::extract<ConstantFP>(A->getOperand(0))->getValueAPF();
+ APFloat BVal = mdconst::extract<ConstantFP>(B->getOperand(0))->getValueAPF();
+ if (AVal.compare(BVal) == APFloat::cmpLessThan)
+ return A;
+ return B;
+}
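+
+// Worked example: !fpmath carries a maximum-error bound in ULPs, so the node
+// safe for both instructions is the one with the smaller bound. Merging
+// !{float 2.5} with !{float 1.0} therefore yields the !{float 1.0} node.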
+
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+ return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
+static bool canBeMerged(const ConstantRange &A, const ConstantRange &B) {
+ return !A.intersectWith(B).isEmptySet() || isContiguous(A, B);
+}
+
+static bool tryMergeRange(SmallVectorImpl<ConstantInt *> &EndPoints,
+ ConstantInt *Low, ConstantInt *High) {
+ ConstantRange NewRange(Low->getValue(), High->getValue());
+ unsigned Size = EndPoints.size();
+ APInt LB = EndPoints[Size - 2]->getValue();
+ APInt LE = EndPoints[Size - 1]->getValue();
+ ConstantRange LastRange(LB, LE);
+ if (canBeMerged(NewRange, LastRange)) {
+ ConstantRange Union = LastRange.unionWith(NewRange);
+ Type *Ty = High->getType();
+ EndPoints[Size - 2] =
+ cast<ConstantInt>(ConstantInt::get(Ty, Union.getLower()));
+ EndPoints[Size - 1] =
+ cast<ConstantInt>(ConstantInt::get(Ty, Union.getUpper()));
+ return true;
+ }
+ return false;
+}
+
+static void addRange(SmallVectorImpl<ConstantInt *> &EndPoints,
+ ConstantInt *Low, ConstantInt *High) {
+ if (!EndPoints.empty())
+ if (tryMergeRange(EndPoints, Low, High))
+ return;
+
+ EndPoints.push_back(Low);
+ EndPoints.push_back(High);
+}
+
+MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
+ // Given two ranges, we want to compute the union of the ranges. This
+  // is slightly complicated by having to combine the intervals and merge
+ // the ones that overlap.
+
+ if (!A || !B)
+ return nullptr;
+
+ if (A == B)
+ return A;
+
+  // First, walk both lists in order of the lower boundary of each interval.
+  // At each step, try to merge the new interval into the last one we added.
+ SmallVector<ConstantInt *, 4> EndPoints;
+ int AI = 0;
+ int BI = 0;
+ int AN = A->getNumOperands() / 2;
+ int BN = B->getNumOperands() / 2;
+ while (AI < AN && BI < BN) {
+ ConstantInt *ALow = mdconst::extract<ConstantInt>(A->getOperand(2 * AI));
+ ConstantInt *BLow = mdconst::extract<ConstantInt>(B->getOperand(2 * BI));
+
+ if (ALow->getValue().slt(BLow->getValue())) {
+ addRange(EndPoints, ALow,
+ mdconst::extract<ConstantInt>(A->getOperand(2 * AI + 1)));
+ ++AI;
+ } else {
+ addRange(EndPoints, BLow,
+ mdconst::extract<ConstantInt>(B->getOperand(2 * BI + 1)));
+ ++BI;
+ }
+ }
+ while (AI < AN) {
+ addRange(EndPoints, mdconst::extract<ConstantInt>(A->getOperand(2 * AI)),
+ mdconst::extract<ConstantInt>(A->getOperand(2 * AI + 1)));
+ ++AI;
+ }
+ while (BI < BN) {
+ addRange(EndPoints, mdconst::extract<ConstantInt>(B->getOperand(2 * BI)),
+ mdconst::extract<ConstantInt>(B->getOperand(2 * BI + 1)));
+ ++BI;
+ }
+
+ // If we have more than 2 ranges (4 endpoints) we have to try to merge
+ // the last and first ones.
+ unsigned Size = EndPoints.size();
+ if (Size > 4) {
+ ConstantInt *FB = EndPoints[0];
+ ConstantInt *FE = EndPoints[1];
+ if (tryMergeRange(EndPoints, FB, FE)) {
+ for (unsigned i = 0; i < Size - 2; ++i) {
+ EndPoints[i] = EndPoints[i + 2];
+ }
+ EndPoints.resize(Size - 2);
+ }
+ }
+
+ // If in the end we have a single range, it is possible that it is now the
+ // full range. Just drop the metadata in that case.
+ if (EndPoints.size() == 2) {
+ ConstantRange Range(EndPoints[0]->getValue(), EndPoints[1]->getValue());
+ if (Range.isFullSet())
+ return nullptr;
+ }
+
+ SmallVector<Metadata *, 4> MDs;
+ MDs.reserve(EndPoints.size());
+ for (auto *I : EndPoints)
+ MDs.push_back(ConstantAsMetadata::get(I));
+ return MDNode::get(A->getContext(), MDs);
+}
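+
+// Worked example (i32 endpoints): A = !{i32 0, i32 10} and
+// B = !{i32 5, i32 20} overlap, so the result is the single merged range
+// !{i32 0, i32 20}. If the merged endpoints cover every value -- e.g. i8
+// ranges !{i8 0, i8 128} and !{i8 128, i8 0} -- the full set carries no
+// information and nullptr is returned instead.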
+
+MDNode *MDNode::getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ ConstantInt *AVal = mdconst::extract<ConstantInt>(A->getOperand(0));
+ ConstantInt *BVal = mdconst::extract<ConstantInt>(B->getOperand(0));
+ if (AVal->getZExtValue() < BVal->getZExtValue())
+ return A;
+ return B;
+}
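+
+// Worked example: both kinds guarantee "at least N", so only the smaller
+// guarantee survives a merge. For alignment nodes !{i64 16} and !{i64 8},
+// the !{i64 8} node is returned.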
+
+//===----------------------------------------------------------------------===//
+// NamedMDNode implementation.
+//
+
+static SmallVector<TrackingMDRef, 4> &getNMDOps(void *Operands) {
+ return *(SmallVector<TrackingMDRef, 4> *)Operands;
+}
+
+NamedMDNode::NamedMDNode(const Twine &N)
+ : Name(N.str()), Parent(nullptr),
+ Operands(new SmallVector<TrackingMDRef, 4>()) {}
+
+NamedMDNode::~NamedMDNode() {
+ dropAllReferences();
+ delete &getNMDOps(Operands);
+}
+
+unsigned NamedMDNode::getNumOperands() const {
+ return (unsigned)getNMDOps(Operands).size();
+}
+
+MDNode *NamedMDNode::getOperand(unsigned i) const {
+ assert(i < getNumOperands() && "Invalid Operand number!");
+ auto *N = getNMDOps(Operands)[i].get();
+ return cast_or_null<MDNode>(N);
+}
+
+void NamedMDNode::addOperand(MDNode *M) { getNMDOps(Operands).emplace_back(M); }
+
+void NamedMDNode::setOperand(unsigned I, MDNode *New) {
+ assert(I < getNumOperands() && "Invalid operand number");
+ getNMDOps(Operands)[I].reset(New);
+}
+
+void NamedMDNode::eraseFromParent() {
+ getParent()->eraseNamedMetadata(this);
+}
+
+void NamedMDNode::dropAllReferences() {
+ getNMDOps(Operands).clear();
+}
+
+StringRef NamedMDNode::getName() const {
+ return StringRef(Name);
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction Metadata method implementations.
+//
+void MDAttachmentMap::set(unsigned ID, MDNode &MD) {
+ for (auto &I : Attachments)
+ if (I.first == ID) {
+ I.second.reset(&MD);
+ return;
+ }
+ Attachments.emplace_back(std::piecewise_construct, std::make_tuple(ID),
+ std::make_tuple(&MD));
+}
+
+void MDAttachmentMap::erase(unsigned ID) {
+ if (empty())
+ return;
+
+  // Common case: removing the only/last attachment.
+ if (Attachments.back().first == ID) {
+ Attachments.pop_back();
+ return;
+ }
+
+ for (auto I = Attachments.begin(), E = std::prev(Attachments.end()); I != E;
+ ++I)
+ if (I->first == ID) {
+ *I = std::move(Attachments.back());
+ Attachments.pop_back();
+ return;
+ }
+}
+
+MDNode *MDAttachmentMap::lookup(unsigned ID) const {
+ for (const auto &I : Attachments)
+ if (I.first == ID)
+ return I.second;
+ return nullptr;
+}
+
+void MDAttachmentMap::getAll(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const {
+ Result.append(Attachments.begin(), Attachments.end());
+
+ // Sort the resulting array so it is stable.
+ if (Result.size() > 1)
+ array_pod_sort(Result.begin(), Result.end());
+}
+
+void Instruction::setMetadata(StringRef Kind, MDNode *Node) {
+ if (!Node && !hasMetadata())
+ return;
+ setMetadata(getContext().getMDKindID(Kind), Node);
+}
+
+MDNode *Instruction::getMetadataImpl(StringRef Kind) const {
+ return getMetadataImpl(getContext().getMDKindID(Kind));
+}
+
+void Instruction::dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs) {
+ SmallSet<unsigned, 5> KnownSet;
+ KnownSet.insert(KnownIDs.begin(), KnownIDs.end());
+
+ if (!hasMetadataHashEntry())
+ return; // Nothing to remove!
+
+ auto &InstructionMetadata = getContext().pImpl->InstructionMetadata;
+
+ if (KnownSet.empty()) {
+ // Just drop our entry at the store.
+ InstructionMetadata.erase(this);
+ setHasMetadataHashEntry(false);
+ return;
+ }
+
+ auto &Info = InstructionMetadata[this];
+ Info.remove_if([&KnownSet](const std::pair<unsigned, TrackingMDNodeRef> &I) {
+ return !KnownSet.count(I.first);
+ });
+
+ if (Info.empty()) {
+ // Drop our entry at the store.
+ InstructionMetadata.erase(this);
+ setHasMetadataHashEntry(false);
+ }
+}
+
+/// setMetadata - Set the metadata of the specified kind to the specified
+/// node. This updates/replaces metadata if already present, or removes it if
+/// Node is null.
+void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
+ if (!Node && !hasMetadata())
+ return;
+
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (KindID == LLVMContext::MD_dbg) {
+ DbgLoc = DebugLoc(Node);
+ return;
+ }
+
+ // Handle the case when we're adding/updating metadata on an instruction.
+ if (Node) {
+ auto &Info = getContext().pImpl->InstructionMetadata[this];
+ assert(!Info.empty() == hasMetadataHashEntry() &&
+ "HasMetadata bit is wonked");
+ if (Info.empty())
+ setHasMetadataHashEntry(true);
+ Info.set(KindID, *Node);
+ return;
+ }
+
+ // Otherwise, we're removing metadata from an instruction.
+ assert((hasMetadataHashEntry() ==
+ (getContext().pImpl->InstructionMetadata.count(this) > 0)) &&
+ "HasMetadata bit out of date!");
+ if (!hasMetadataHashEntry())
+ return; // Nothing to remove!
+ auto &Info = getContext().pImpl->InstructionMetadata[this];
+
+ // Handle removal of an existing value.
+ Info.erase(KindID);
+
+ if (!Info.empty())
+ return;
+
+ getContext().pImpl->InstructionMetadata.erase(this);
+ setHasMetadataHashEntry(false);
+}
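+
+// Usage sketch (I is an assumed Instruction*, Ctx its LLVMContext, and
+// "my.tag" an illustrative kind name):
+//
+//   I->setMetadata("my.tag", MDNode::get(Ctx, None)); // Attach empty tuple.
+//   I->setMetadata("my.tag", nullptr);                // Remove it again.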
+
+void Instruction::setAAMetadata(const AAMDNodes &N) {
+ setMetadata(LLVMContext::MD_tbaa, N.TBAA);
+ setMetadata(LLVMContext::MD_alias_scope, N.Scope);
+ setMetadata(LLVMContext::MD_noalias, N.NoAlias);
+}
+
+MDNode *Instruction::getMetadataImpl(unsigned KindID) const {
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (KindID == LLVMContext::MD_dbg)
+ return DbgLoc.getAsMDNode();
+
+ if (!hasMetadataHashEntry())
+ return nullptr;
+ auto &Info = getContext().pImpl->InstructionMetadata[this];
+ assert(!Info.empty() && "bit out of sync with hash table");
+
+ return Info.lookup(KindID);
+}
+
+void Instruction::getAllMetadataImpl(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const {
+ Result.clear();
+
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (DbgLoc) {
+ Result.push_back(
+ std::make_pair((unsigned)LLVMContext::MD_dbg, DbgLoc.getAsMDNode()));
+ if (!hasMetadataHashEntry()) return;
+ }
+
+ assert(hasMetadataHashEntry() &&
+ getContext().pImpl->InstructionMetadata.count(this) &&
+ "Shouldn't have called this");
+ const auto &Info = getContext().pImpl->InstructionMetadata.find(this)->second;
+ assert(!Info.empty() && "Shouldn't have called this");
+ Info.getAll(Result);
+}
+
+void Instruction::getAllMetadataOtherThanDebugLocImpl(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const {
+ Result.clear();
+ assert(hasMetadataHashEntry() &&
+ getContext().pImpl->InstructionMetadata.count(this) &&
+ "Shouldn't have called this");
+ const auto &Info = getContext().pImpl->InstructionMetadata.find(this)->second;
+ assert(!Info.empty() && "Shouldn't have called this");
+ Info.getAll(Result);
+}
+
+/// clearMetadataHashEntries - Clear all hashtable-based metadata from
+/// this instruction.
+void Instruction::clearMetadataHashEntries() {
+ assert(hasMetadataHashEntry() && "Caller should check");
+ getContext().pImpl->InstructionMetadata.erase(this);
+ setHasMetadataHashEntry(false);
+}
+
+MDNode *Function::getMetadata(unsigned KindID) const {
+ if (!hasMetadata())
+ return nullptr;
+ return getContext().pImpl->FunctionMetadata[this].lookup(KindID);
+}
+
+MDNode *Function::getMetadata(StringRef Kind) const {
+ if (!hasMetadata())
+ return nullptr;
+ return getMetadata(getContext().getMDKindID(Kind));
+}
+
+void Function::setMetadata(unsigned KindID, MDNode *MD) {
+ if (MD) {
+ if (!hasMetadata())
+ setHasMetadataHashEntry(true);
+
+ getContext().pImpl->FunctionMetadata[this].set(KindID, *MD);
+ return;
+ }
+
+ // Nothing to unset.
+ if (!hasMetadata())
+ return;
+
+ auto &Store = getContext().pImpl->FunctionMetadata[this];
+ Store.erase(KindID);
+ if (Store.empty())
+ clearMetadata();
+}
+
+void Function::setMetadata(StringRef Kind, MDNode *MD) {
+ if (!MD && !hasMetadata())
+ return;
+ setMetadata(getContext().getMDKindID(Kind), MD);
+}
+
+void Function::getAllMetadata(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
+ MDs.clear();
+
+ if (!hasMetadata())
+ return;
+
+ getContext().pImpl->FunctionMetadata[this].getAll(MDs);
+}
+
+void Function::dropUnknownMetadata(ArrayRef<unsigned> KnownIDs) {
+ if (!hasMetadata())
+ return;
+ if (KnownIDs.empty()) {
+ clearMetadata();
+ return;
+ }
+
+ SmallSet<unsigned, 5> KnownSet;
+ KnownSet.insert(KnownIDs.begin(), KnownIDs.end());
+
+ auto &Store = getContext().pImpl->FunctionMetadata[this];
+ assert(!Store.empty());
+
+ Store.remove_if([&KnownSet](const std::pair<unsigned, TrackingMDNodeRef> &I) {
+ return !KnownSet.count(I.first);
+ });
+
+ if (Store.empty())
+ clearMetadata();
+}
+
+void Function::clearMetadata() {
+ if (!hasMetadata())
+ return;
+ getContext().pImpl->FunctionMetadata.erase(this);
+ setHasMetadataHashEntry(false);
+}
+
+void Function::setSubprogram(DISubprogram *SP) {
+ setMetadata(LLVMContext::MD_dbg, SP);
+}
+
+DISubprogram *Function::getSubprogram() const {
+ return cast_or_null<DISubprogram>(getMetadata(LLVMContext::MD_dbg));
+}
diff --git a/contrib/llvm/lib/IR/MetadataImpl.h b/contrib/llvm/lib/IR/MetadataImpl.h
new file mode 100644
index 0000000..b913746
--- /dev/null
+++ b/contrib/llvm/lib/IR/MetadataImpl.h
@@ -0,0 +1,59 @@
+//===- MetadataImpl.h - Helpers for implementing metadata -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has private helpers for implementing metadata types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_METADATAIMPL_H
+#define LLVM_IR_METADATAIMPL_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Metadata.h"
+
+namespace llvm {
+
+template <class T, class InfoT>
+static T *getUniqued(DenseSet<T *, InfoT> &Store,
+ const typename InfoT::KeyTy &Key) {
+ auto I = Store.find_as(Key);
+ return I == Store.end() ? nullptr : *I;
+}
+
+template <class T> T *MDNode::storeImpl(T *N, StorageType Storage) {
+ switch (Storage) {
+ case Uniqued:
+ llvm_unreachable("Cannot unique without a uniquing-store");
+ case Distinct:
+ N->storeDistinctInContext();
+ break;
+ case Temporary:
+ break;
+ }
+ return N;
+}
+
+template <class T, class StoreT>
+T *MDNode::storeImpl(T *N, StorageType Storage, StoreT &Store) {
+ switch (Storage) {
+ case Uniqued:
+ Store.insert(N);
+ break;
+ case Distinct:
+ N->storeDistinctInContext();
+ break;
+ case Temporary:
+ break;
+ }
+ return N;
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/IR/Module.cpp b/contrib/llvm/lib/IR/Module.cpp
new file mode 100644
index 0000000..ac578d6
--- /dev/null
+++ b/contrib/llvm/lib/IR/Module.cpp
@@ -0,0 +1,487 @@
+//===-- Module.cpp - Implement the Module class ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Module class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Module.h"
+#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/TypeFinder.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/RandomNumberGenerator.h"
+#include <algorithm>
+#include <cstdarg>
+#include <cstdlib>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Methods to implement the globals and functions lists.
+//
+
+// Explicit instantiations of SymbolTableListTraits since some of the methods
+// are not in the public header file.
+template class llvm::SymbolTableListTraits<Function>;
+template class llvm::SymbolTableListTraits<GlobalVariable>;
+template class llvm::SymbolTableListTraits<GlobalAlias>;
+
+//===----------------------------------------------------------------------===//
+// Primitive Module methods.
+//
+
+Module::Module(StringRef MID, LLVMContext &C)
+ : Context(C), Materializer(), ModuleID(MID), DL("") {
+ ValSymTab = new ValueSymbolTable();
+ NamedMDSymTab = new StringMap<NamedMDNode *>();
+ Context.addModule(this);
+}
+
+Module::~Module() {
+ Context.removeModule(this);
+ dropAllReferences();
+ GlobalList.clear();
+ FunctionList.clear();
+ AliasList.clear();
+ NamedMDList.clear();
+ delete ValSymTab;
+ delete static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab);
+}
+
+RandomNumberGenerator *Module::createRNG(const Pass* P) const {
+ SmallString<32> Salt(P->getPassName());
+
+ // This RNG is guaranteed to produce the same random stream only
+  // when the Module ID, and thus the input filename, are the same. This
+ // might be problematic if the input filename extension changes
+ // (e.g. from .c to .bc or .ll).
+ //
+ // We could store this salt in NamedMetadata, but this would make
+ // the parameter non-const. This would unfortunately make this
+ // interface unusable by any Machine passes, since they only have a
+ // const reference to their IR Module. Alternatively we can always
+ // store salt metadata from the Module constructor.
+ Salt += sys::path::filename(getModuleIdentifier());
+
+ return new RandomNumberGenerator(Salt);
+}
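+
+// Usage sketch, assuming a Module M and a call site inside a Pass; the
+// caller owns the returned generator:
+//
+//   std::unique_ptr<RandomNumberGenerator> RNG(M.createRNG(this));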
+
+/// getNamedValue - Return the first global value in the module with
+/// the specified name, of arbitrary type. This method returns null
+/// if a global with the specified name is not found.
+GlobalValue *Module::getNamedValue(StringRef Name) const {
+ return cast_or_null<GlobalValue>(getValueSymbolTable().lookup(Name));
+}
+
+/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
+/// This ID is uniqued across modules in the current LLVMContext.
+unsigned Module::getMDKindID(StringRef Name) const {
+ return Context.getMDKindID(Name);
+}
+
+/// getMDKindNames - Populate client supplied SmallVector with the name for
+/// custom metadata IDs registered in this LLVMContext. ID #0 is not used,
+/// so it is filled in as an empty string.
+void Module::getMDKindNames(SmallVectorImpl<StringRef> &Result) const {
+ return Context.getMDKindNames(Result);
+}
+
+void Module::getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const {
+ return Context.getOperandBundleTags(Result);
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for easy access to the functions in the module.
+//
+
+// getOrInsertFunction - Look up the specified function in the module symbol
+// table. If it does not exist, add a prototype for the function and return
+// it. This is nice because it allows most passes to get away with not handling
+// the symbol table directly for this common task.
+//
+Constant *Module::getOrInsertFunction(StringRef Name,
+ FunctionType *Ty,
+ AttributeSet AttributeList) {
+ // See if we have a definition for the specified function already.
+ GlobalValue *F = getNamedValue(Name);
+ if (!F) {
+ // Nope, add it
+ Function *New = Function::Create(Ty, GlobalVariable::ExternalLinkage, Name);
+ if (!New->isIntrinsic()) // Intrinsics get attrs set on construction
+ New->setAttributes(AttributeList);
+ FunctionList.push_back(New);
+ return New; // Return the new prototype.
+ }
+
+ // If the function exists but has the wrong type, return a bitcast to the
+ // right type.
+ if (F->getType() != PointerType::getUnqual(Ty))
+ return ConstantExpr::getBitCast(F, PointerType::getUnqual(Ty));
+
+ // Otherwise, we just found the existing function or a prototype.
+ return F;
+}
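+
+// Usage sketch, assuming a Module M and an illustrative symbol name; the
+// dyn_cast guards against the wrong-type case, where a bitcast constant
+// expression is returned instead of a Function:
+//
+//   LLVMContext &Ctx = M.getContext();
+//   FunctionType *FTy =
+//       FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
+//   Constant *C = M.getOrInsertFunction("illustrative_fn", FTy);
+//   Function *F = dyn_cast<Function>(C);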
+
+Constant *Module::getOrInsertFunction(StringRef Name,
+ FunctionType *Ty) {
+ return getOrInsertFunction(Name, Ty, AttributeSet());
+}
+
+// getOrInsertFunction - Look up the specified function in the module symbol
+// table. If it does not exist, add a prototype for the function and return it.
+// This version of the method takes a null terminated list of function
+// arguments, which makes it easier for clients to use.
+//
+Constant *Module::getOrInsertFunction(StringRef Name,
+ AttributeSet AttributeList,
+ Type *RetTy, ...) {
+ va_list Args;
+ va_start(Args, RetTy);
+
+ // Build the list of argument types...
+ std::vector<Type*> ArgTys;
+ while (Type *ArgTy = va_arg(Args, Type*))
+ ArgTys.push_back(ArgTy);
+
+ va_end(Args);
+
+ // Build the function type and chain to the other getOrInsertFunction...
+ return getOrInsertFunction(Name,
+ FunctionType::get(RetTy, ArgTys, false),
+ AttributeList);
+}
+
+Constant *Module::getOrInsertFunction(StringRef Name,
+ Type *RetTy, ...) {
+ va_list Args;
+ va_start(Args, RetTy);
+
+ // Build the list of argument types...
+ std::vector<Type*> ArgTys;
+ while (Type *ArgTy = va_arg(Args, Type*))
+ ArgTys.push_back(ArgTy);
+
+ va_end(Args);
+
+ // Build the function type and chain to the other getOrInsertFunction...
+ return getOrInsertFunction(Name,
+ FunctionType::get(RetTy, ArgTys, false),
+ AttributeSet());
+}
+
+// getFunction - Look up the specified function in the module symbol table.
+// If it does not exist, return null.
+//
+Function *Module::getFunction(StringRef Name) const {
+ return dyn_cast_or_null<Function>(getNamedValue(Name));
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for easy access to the global variables in the module.
+//
+
+/// getGlobalVariable - Look up the specified global variable in the module
+/// symbol table. If it does not exist, return null. The type argument
+/// should be the underlying type of the global, i.e., it should not have
+/// the top-level PointerType, which represents the address of the global.
+/// If AllowLocal is set to true, this function will also return globals
+/// that have local linkage. By default, such globals are not returned.
+///
+GlobalVariable *Module::getGlobalVariable(StringRef Name, bool AllowLocal) {
+ if (GlobalVariable *Result =
+ dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)))
+ if (AllowLocal || !Result->hasLocalLinkage())
+ return Result;
+ return nullptr;
+}
+
+/// getOrInsertGlobal - Look up the specified global in the module symbol table.
+/// 1. If it does not exist, add a declaration of the global and return it.
+/// 2. Else, the global exists but has the wrong type: return the global
+///    with a constantexpr cast to the right type.
+/// 3. Finally, if the existing global is the correct declaration, return the
+/// existing global.
+Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
+ // See if we have a definition for the specified global already.
+ GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name));
+ if (!GV) {
+ // Nope, add it
+ GlobalVariable *New =
+ new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage,
+ nullptr, Name);
+ return New; // Return the new declaration.
+ }
+
+ // If the variable exists but has the wrong type, return a bitcast to the
+ // right type.
+ Type *GVTy = GV->getType();
+ PointerType *PTy = PointerType::get(Ty, GVTy->getPointerAddressSpace());
+ if (GVTy != PTy)
+ return ConstantExpr::getBitCast(GV, PTy);
+
+ // Otherwise, we just found the existing function or a prototype.
+ return GV;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for easy access to the global variables in the module.
+//
+
+// getNamedAlias - Look up the specified global in the module symbol table.
+// If it does not exist, return null.
+//
+GlobalAlias *Module::getNamedAlias(StringRef Name) const {
+ return dyn_cast_or_null<GlobalAlias>(getNamedValue(Name));
+}
+
+/// getNamedMetadata - Return the first NamedMDNode in the module with the
+/// specified name. This method returns null if a NamedMDNode with the
+/// specified name is not found.
+NamedMDNode *Module::getNamedMetadata(const Twine &Name) const {
+ SmallString<256> NameData;
+ StringRef NameRef = Name.toStringRef(NameData);
+ return static_cast<StringMap<NamedMDNode*> *>(NamedMDSymTab)->lookup(NameRef);
+}
+
+/// getOrInsertNamedMetadata - Return the first named MDNode in the module
+/// with the specified name. This method returns a new NamedMDNode if a
+/// NamedMDNode with the specified name is not found.
+NamedMDNode *Module::getOrInsertNamedMetadata(StringRef Name) {
+ NamedMDNode *&NMD =
+ (*static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab))[Name];
+ if (!NMD) {
+ NMD = new NamedMDNode(Name);
+ NMD->setParent(this);
+ NamedMDList.push_back(NMD);
+ }
+ return NMD;
+}
+
+/// eraseNamedMetadata - Remove the given NamedMDNode from this module and
+/// delete it.
+void Module::eraseNamedMetadata(NamedMDNode *NMD) {
+ static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab)->erase(NMD->getName());
+ NamedMDList.erase(NMD->getIterator());
+}
+
+bool Module::isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB) {
+ if (ConstantInt *Behavior = mdconst::dyn_extract_or_null<ConstantInt>(MD)) {
+ uint64_t Val = Behavior->getLimitedValue();
+ if (Val >= ModFlagBehaviorFirstVal && Val <= ModFlagBehaviorLastVal) {
+ MFB = static_cast<ModFlagBehavior>(Val);
+ return true;
+ }
+ }
+ return false;
+}
+
+/// getModuleFlagsMetadata - Returns the module flags in the provided vector.
+void Module::
+getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const {
+ const NamedMDNode *ModFlags = getModuleFlagsMetadata();
+ if (!ModFlags) return;
+
+ for (const MDNode *Flag : ModFlags->operands()) {
+ ModFlagBehavior MFB;
+ if (Flag->getNumOperands() >= 3 &&
+ isValidModFlagBehavior(Flag->getOperand(0), MFB) &&
+ dyn_cast_or_null<MDString>(Flag->getOperand(1))) {
+ // Check the operands of the MDNode before accessing the operands.
+ // The verifier will actually catch these failures.
+ MDString *Key = cast<MDString>(Flag->getOperand(1));
+ Metadata *Val = Flag->getOperand(2);
+ Flags.push_back(ModuleFlagEntry(MFB, Key, Val));
+ }
+ }
+}
+
+/// Return the corresponding value if Key appears in module flags, otherwise
+/// return null.
+Metadata *Module::getModuleFlag(StringRef Key) const {
+ SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
+ getModuleFlagsMetadata(ModuleFlags);
+ for (const ModuleFlagEntry &MFE : ModuleFlags) {
+ if (Key == MFE.Key->getString())
+ return MFE.Val;
+ }
+ return nullptr;
+}
+
+/// getModuleFlagsMetadata - Returns the NamedMDNode in the module that
+/// represents module-level flags. This method returns null if there are no
+/// module-level flags.
+NamedMDNode *Module::getModuleFlagsMetadata() const {
+ return getNamedMetadata("llvm.module.flags");
+}
+
+/// getOrInsertModuleFlagsMetadata - Returns the NamedMDNode in the module that
+/// represents module-level flags. If module-level flags aren't found, it
+/// creates the named metadata that contains them.
+NamedMDNode *Module::getOrInsertModuleFlagsMetadata() {
+ return getOrInsertNamedMetadata("llvm.module.flags");
+}
+
+/// addModuleFlag - Add a module-level flag to the module-level flags
+/// metadata. It will create the module-level flags named metadata if it doesn't
+/// already exist.
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ Metadata *Val) {
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ Metadata *Ops[3] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Behavior)),
+ MDString::get(Context, Key), Val};
+ getOrInsertModuleFlagsMetadata()->addOperand(MDNode::get(Context, Ops));
+}
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ Constant *Val) {
+ addModuleFlag(Behavior, Key, ConstantAsMetadata::get(Val));
+}
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ uint32_t Val) {
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ addModuleFlag(Behavior, Key, ConstantInt::get(Int32Ty, Val));
+}
+void Module::addModuleFlag(MDNode *Node) {
+ assert(Node->getNumOperands() == 3 &&
+ "Invalid number of operands for module flag!");
+ assert(mdconst::hasa<ConstantInt>(Node->getOperand(0)) &&
+ isa<MDString>(Node->getOperand(1)) &&
+ "Invalid operand types for module flag!");
+ getOrInsertModuleFlagsMetadata()->addOperand(Node);
+}
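+
+// Usage sketch, assuming a Module M and an illustrative flag name:
+//
+//   M.addModuleFlag(Module::Warning, "illustrative-flag", 1);
+//   Metadata *V = M.getModuleFlag("illustrative-flag"); // i32 1, wrapped.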
+
+void Module::setDataLayout(StringRef Desc) {
+ DL.reset(Desc);
+}
+
+void Module::setDataLayout(const DataLayout &Other) { DL = Other; }
+
+const DataLayout &Module::getDataLayout() const { return DL; }
+
+//===----------------------------------------------------------------------===//
+// Methods to control the materialization of GlobalValues in the Module.
+//
+void Module::setMaterializer(GVMaterializer *GVM) {
+ assert(!Materializer &&
+ "Module already has a GVMaterializer. Call materializeAll"
+ " to clear it out before setting another one.");
+ Materializer.reset(GVM);
+}
+
+std::error_code Module::materialize(GlobalValue *GV) {
+ if (!Materializer)
+ return std::error_code();
+
+ return Materializer->materialize(GV);
+}
+
+std::error_code Module::materializeAll() {
+ if (!Materializer)
+ return std::error_code();
+ std::unique_ptr<GVMaterializer> M = std::move(Materializer);
+ return M->materializeModule();
+}
+
+std::error_code Module::materializeMetadata() {
+ if (!Materializer)
+ return std::error_code();
+ return Materializer->materializeMetadata();
+}
+
+//===----------------------------------------------------------------------===//
+// Other module related stuff.
+//
+
+std::vector<StructType *> Module::getIdentifiedStructTypes() const {
+ // If we have a materializer, it is possible that some unread function
+ // uses a type that is currently not visible to a TypeFinder, so ask
+ // the materializer which types it created.
+ if (Materializer)
+ return Materializer->getIdentifiedStructTypes();
+
+ std::vector<StructType *> Ret;
+ TypeFinder SrcStructTypes;
+ SrcStructTypes.run(*this, true);
+ Ret.assign(SrcStructTypes.begin(), SrcStructTypes.end());
+ return Ret;
+}
+
+// dropAllReferences() - This function causes all the subelements to "let go"
+// of all references that they are maintaining. This allows one to 'delete' a
+// whole module at a time, even though there may be circular references... first
+// all references are dropped, and all use counts go to zero. Then everything
+// is deleted for real. Note that no operations are valid on an object that
+// has "dropped all references", except operator delete.
+//
+void Module::dropAllReferences() {
+ for (Function &F : *this)
+ F.dropAllReferences();
+
+ for (GlobalVariable &GV : globals())
+ GV.dropAllReferences();
+
+ for (GlobalAlias &GA : aliases())
+ GA.dropAllReferences();
+}
+
+unsigned Module::getDwarfVersion() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("Dwarf Version"));
+ if (!Val)
+ return 0;
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
+
+unsigned Module::getCodeViewFlag() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("CodeView"));
+ if (!Val)
+ return 0;
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
+
+Comdat *Module::getOrInsertComdat(StringRef Name) {
+ auto &Entry = *ComdatSymTab.insert(std::make_pair(Name, Comdat())).first;
+ Entry.second.Name = &Entry;
+ return &Entry.second;
+}
+
+PICLevel::Level Module::getPICLevel() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("PIC Level"));
+
+ if (!Val)
+ return PICLevel::Default;
+
+ return static_cast<PICLevel::Level>(
+ cast<ConstantInt>(Val->getValue())->getZExtValue());
+}
+
+void Module::setPICLevel(PICLevel::Level PL) {
+ addModuleFlag(ModFlagBehavior::Error, "PIC Level", PL);
+}
+
+void Module::setMaximumFunctionCount(uint64_t Count) {
+ addModuleFlag(ModFlagBehavior::Error, "MaxFunctionCount", Count);
+}
+
+Optional<uint64_t> Module::getMaximumFunctionCount() {
+ auto *Val =
+ cast_or_null<ConstantAsMetadata>(getModuleFlag("MaxFunctionCount"));
+ if (!Val)
+ return None;
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
diff --git a/contrib/llvm/lib/IR/Operator.cpp b/contrib/llvm/lib/IR/Operator.cpp
new file mode 100644
index 0000000..77dc680
--- /dev/null
+++ b/contrib/llvm/lib/IR/Operator.cpp
@@ -0,0 +1,44 @@
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+
+#include "ConstantsContext.h"
+
+namespace llvm {
+Type *GEPOperator::getSourceElementType() const {
+ if (auto *I = dyn_cast<GetElementPtrInst>(this))
+ return I->getSourceElementType();
+ return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
+}
+
+bool GEPOperator::accumulateConstantOffset(const DataLayout &DL,
+ APInt &Offset) const {
+ assert(Offset.getBitWidth() ==
+ DL.getPointerSizeInBits(getPointerAddressSpace()) &&
+ "The offset must have exactly as many bits as our pointer.");
+
+ for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ return false;
+ if (OpC->isZero())
+ continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = DL.getStructLayout(STy);
+ Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
+ continue;
+ }
+
+ // For array or vector indices, scale the index by the size of the type.
+ APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
+ Offset += Index * APInt(Offset.getBitWidth(),
+ DL.getTypeAllocSize(GTI.getIndexedType()));
+ }
+ return true;
+}
+}
diff --git a/contrib/llvm/lib/IR/Pass.cpp b/contrib/llvm/lib/IR/Pass.cpp
new file mode 100644
index 0000000..df45460
--- /dev/null
+++ b/contrib/llvm/lib/IR/Pass.cpp
@@ -0,0 +1,290 @@
+//===- Pass.cpp - LLVM Pass Infrastructure Implementation -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM Pass infrastructure. It is primarily
+// responsible for ensuring that passes are executed and batched together
+// optimally.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Pass.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LegacyPassNameParser.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "ir"
+
+//===----------------------------------------------------------------------===//
+// Pass Implementation
+//
+
+// Force out-of-line virtual method.
+Pass::~Pass() {
+ delete Resolver;
+}
+
+// Force out-of-line virtual method.
+ModulePass::~ModulePass() { }
+
+Pass *ModulePass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return createPrintModulePass(O, Banner);
+}
+
+PassManagerType ModulePass::getPotentialPassManagerType() const {
+ return PMT_ModulePassManager;
+}
+
+bool Pass::mustPreserveAnalysisID(char &AID) const {
+ return Resolver->getAnalysisIfAvailable(&AID, true) != nullptr;
+}
+
+// dumpPassStructure - Implement the -debug-pass=Structure option
+void Pass::dumpPassStructure(unsigned Offset) {
+ dbgs().indent(Offset*2) << getPassName() << "\n";
+}
+
+/// getPassName - Return a nice clean name for a pass. This is usually
+/// implemented in terms of the name that is registered by one of the
+/// Registration templates, but can be overridden directly.
+///
+const char *Pass::getPassName() const {
+ AnalysisID AID = getPassID();
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(AID);
+ if (PI)
+ return PI->getPassName();
+ return "Unnamed pass: implement Pass::getPassName()";
+}
+
+void Pass::preparePassManager(PMStack &) {
+ // By default, don't do anything.
+}
+
+PassManagerType Pass::getPotentialPassManagerType() const {
+ // Default implementation.
+ return PMT_Unknown;
+}
+
+void Pass::getAnalysisUsage(AnalysisUsage &) const {
+ // By default, no analysis results are used, all are invalidated.
+}
+
+void Pass::releaseMemory() {
+ // By default, don't do anything.
+}
+
+void Pass::verifyAnalysis() const {
+ // By default, don't do anything.
+}
+
+void *Pass::getAdjustedAnalysisPointer(AnalysisID AID) {
+ return this;
+}
+
+ImmutablePass *Pass::getAsImmutablePass() {
+ return nullptr;
+}
+
+PMDataManager *Pass::getAsPMDataManager() {
+ return nullptr;
+}
+
+void Pass::setResolver(AnalysisResolver *AR) {
+ assert(!Resolver && "Resolver is already set");
+ Resolver = AR;
+}
+
+// print - Print out the internal state of the pass. This is called by Analyze
+// to print out the contents of an analysis. Otherwise it is not necessary to
+// implement this method.
+//
+void Pass::print(raw_ostream &O,const Module*) const {
+ O << "Pass::print not implemented for pass: '" << getPassName() << "'!\n";
+}
+
+// dump - call print(cerr);
+void Pass::dump() const {
+ print(dbgs(), nullptr);
+}
+
+//===----------------------------------------------------------------------===//
+// ImmutablePass Implementation
+//
+// Force out-of-line virtual method.
+ImmutablePass::~ImmutablePass() { }
+
+void ImmutablePass::initializePass() {
+ // By default, don't do anything.
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionPass Implementation
+//
+
+Pass *FunctionPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return createPrintFunctionPass(O, Banner);
+}
+
+PassManagerType FunctionPass::getPotentialPassManagerType() const {
+ return PMT_FunctionPassManager;
+}
+
+bool FunctionPass::skipOptnoneFunction(const Function &F) const {
+ if (F.hasFnAttribute(Attribute::OptimizeNone)) {
+ DEBUG(dbgs() << "Skipping pass '" << getPassName()
+ << "' on function " << F.getName() << "\n");
+ return true;
+ }
+ return false;
+}
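+
+// Typical call site, sketched for an assumed pass MyPass:
+//
+//   bool MyPass::runOnFunction(Function &F) {
+//     if (skipOptnoneFunction(F))
+//       return false; // Leave optnone functions untouched.
+//     ...
+//   }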
+
+//===----------------------------------------------------------------------===//
+// BasicBlockPass Implementation
+//
+
+Pass *BasicBlockPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return createPrintBasicBlockPass(O, Banner);
+}
+
+bool BasicBlockPass::doInitialization(Function &) {
+ // By default, don't do anything.
+ return false;
+}
+
+bool BasicBlockPass::doFinalization(Function &) {
+ // By default, don't do anything.
+ return false;
+}
+
+bool BasicBlockPass::skipOptnoneFunction(const BasicBlock &BB) const {
+ const Function *F = BB.getParent();
+ if (F && F->hasFnAttribute(Attribute::OptimizeNone)) {
+ // Report this only once per function.
+ if (&BB == &F->getEntryBlock())
+ DEBUG(dbgs() << "Skipping pass '" << getPassName()
+ << "' on function " << F->getName() << "\n");
+ return true;
+ }
+ return false;
+}
+
+PassManagerType BasicBlockPass::getPotentialPassManagerType() const {
+ return PMT_BasicBlockPassManager;
+}
+
+const PassInfo *Pass::lookupPassInfo(const void *TI) {
+ return PassRegistry::getPassRegistry()->getPassInfo(TI);
+}
+
+const PassInfo *Pass::lookupPassInfo(StringRef Arg) {
+ return PassRegistry::getPassRegistry()->getPassInfo(Arg);
+}
+
+Pass *Pass::createPass(AnalysisID ID) {
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(ID);
+ if (!PI)
+ return nullptr;
+ return PI->createPass();
+}
+
+//===----------------------------------------------------------------------===//
+// Analysis Group Implementation Code
+//===----------------------------------------------------------------------===//
+
+// RegisterAGBase implementation
+//
+RegisterAGBase::RegisterAGBase(const char *Name, const void *InterfaceID,
+ const void *PassID, bool isDefault)
+ : PassInfo(Name, InterfaceID) {
+ PassRegistry::getPassRegistry()->registerAnalysisGroup(InterfaceID, PassID,
+ *this, isDefault);
+}
+
+//===----------------------------------------------------------------------===//
+// PassRegistrationListener implementation
+//
+
+// enumeratePasses - Iterate over the registered passes, calling the
+// passEnumerate callback on each PassInfo object.
+//
+void PassRegistrationListener::enumeratePasses() {
+ PassRegistry::getPassRegistry()->enumerateWith(this);
+}
+
+PassNameParser::PassNameParser(cl::Option &O)
+ : cl::parser<const PassInfo *>(O) {
+ PassRegistry::getPassRegistry()->addRegistrationListener(this);
+}
+
+PassNameParser::~PassNameParser() {
+ // This only gets called during static destruction, in which case the
+ // PassRegistry will have already been destroyed by llvm_shutdown(). So
+ // attempting to remove the registration listener is an error.
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisUsage Class Implementation
+//
+
+namespace {
+ struct GetCFGOnlyPasses : public PassRegistrationListener {
+ typedef AnalysisUsage::VectorType VectorType;
+ VectorType &CFGOnlyList;
+ GetCFGOnlyPasses(VectorType &L) : CFGOnlyList(L) {}
+
+ void passEnumerate(const PassInfo *P) override {
+ if (P->isCFGOnlyPass())
+ CFGOnlyList.push_back(P->getTypeInfo());
+ }
+ };
+}
+
+// setPreservesCFG - This function should be called by a pass if and only if
+// it does not:
+//
+// 1. Add or remove basic blocks from the function
+// 2. Modify terminator instructions in any way.
+//
+// This function annotates the AnalysisUsage info object to say that analyses
+// that only depend on the CFG are preserved by this pass.
+//
+void AnalysisUsage::setPreservesCFG() {
+ // Since this transformation doesn't modify the CFG, it preserves all analyses
+ // that only depend on the CFG (like dominators, loop info, etc...)
+ GetCFGOnlyPasses(Preserved).enumeratePasses();
+}
+
+AnalysisUsage &AnalysisUsage::addPreserved(StringRef Arg) {
+ const PassInfo *PI = Pass::lookupPassInfo(Arg);
+ // If the pass exists, preserve it. Otherwise silently do nothing.
+ if (PI) Preserved.push_back(PI->getTypeInfo());
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredID(const void *ID) {
+ Required.push_back(ID);
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredID(char &ID) {
+ Required.push_back(&ID);
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredTransitiveID(char &ID) {
+ Required.push_back(&ID);
+ RequiredTransitive.push_back(&ID);
+ return *this;
+}
diff --git a/contrib/llvm/lib/IR/PassManager.cpp b/contrib/llvm/lib/IR/PassManager.cpp
new file mode 100644
index 0000000..a5f407c
--- /dev/null
+++ b/contrib/llvm/lib/IR/PassManager.cpp
@@ -0,0 +1,43 @@
+//===- PassManager.cpp - Infrastructure for managing & running IR passes --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
+
+using namespace llvm;
+
+char FunctionAnalysisManagerModuleProxy::PassID;
+
+FunctionAnalysisManagerModuleProxy::Result
+FunctionAnalysisManagerModuleProxy::run(Module &M) {
+ assert(FAM->empty() && "Function analyses ran prior to the module proxy!");
+ return Result(*FAM);
+}
+
+FunctionAnalysisManagerModuleProxy::Result::~Result() {
+ // Clear out the analysis manager if we're being destroyed -- it means we
+ // didn't even see an invalidate call when we got invalidated.
+ FAM->clear();
+}
+
+bool FunctionAnalysisManagerModuleProxy::Result::invalidate(
+ Module &M, const PreservedAnalyses &PA) {
+  // If this proxy isn't marked as preserved, we can't even invalidate
+  // individual function analyses: there may be an invalid set of Function
+  // objects in the cache, making it impossible to incrementally preserve
+  // them. Just clear the entire manager.
+ if (!PA.preserved(ID()))
+ FAM->clear();
+
+ // Return false to indicate that this result is still a valid proxy.
+ return false;
+}
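+
+// A module pass that wants cached function analyses kept alive must mark
+// this proxy preserved; a hedged sketch of such a return value (assuming the
+// template form of PreservedAnalyses::preserve is available):
+//
+//   PreservedAnalyses PA;
+//   PA.preserve<FunctionAnalysisManagerModuleProxy>();
+//   return PA;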
+
+char ModuleAnalysisManagerFunctionProxy::PassID;
diff --git a/contrib/llvm/lib/IR/PassRegistry.cpp b/contrib/llvm/lib/IR/PassRegistry.cpp
new file mode 100644
index 0000000..b879fef
--- /dev/null
+++ b/contrib/llvm/lib/IR/PassRegistry.cpp
@@ -0,0 +1,130 @@
+//===- PassRegistry.cpp - Pass Registration Implementation ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PassRegistry, with which passes are registered on
+// initialization, and supports the PassManager in dependency resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/PassRegistry.h"
+#include "llvm/IR/Function.h"
+#include "llvm/PassSupport.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/RWMutex.h"
+#include <vector>
+
+using namespace llvm;
+
+// FIXME: We use ManagedStatic to erase the pass registrar on shutdown.
+// Unfortunately, passes are registered with static ctors, and having
+// llvm_shutdown clear this map prevents successful resurrection after
+// llvm_shutdown is run. Ideally we should find a solution so that we don't
+// leak the map, AND can still resurrect after shutdown.
+static ManagedStatic<PassRegistry> PassRegistryObj;
+PassRegistry *PassRegistry::getPassRegistry() {
+ return &*PassRegistryObj;
+}
+
+//===----------------------------------------------------------------------===//
+// Accessors
+//
+
+PassRegistry::~PassRegistry() {}
+
+const PassInfo *PassRegistry::getPassInfo(const void *TI) const {
+ sys::SmartScopedReader<true> Guard(Lock);
+ MapType::const_iterator I = PassInfoMap.find(TI);
+ return I != PassInfoMap.end() ? I->second : nullptr;
+}
+
+const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const {
+ sys::SmartScopedReader<true> Guard(Lock);
+ StringMapType::const_iterator I = PassInfoStringMap.find(Arg);
+ return I != PassInfoStringMap.end() ? I->second : nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Pass Registration mechanism
+//
+
+void PassRegistry::registerPass(const PassInfo &PI, bool ShouldFree) {
+ sys::SmartScopedWriter<true> Guard(Lock);
+ bool Inserted =
+ PassInfoMap.insert(std::make_pair(PI.getTypeInfo(), &PI)).second;
+ assert(Inserted && "Pass registered multiple times!");
+ (void)Inserted;
+ PassInfoStringMap[PI.getPassArgument()] = &PI;
+
+ // Notify any listeners.
+ for (auto *Listener : Listeners)
+ Listener->passRegistered(&PI);
+
+ if (ShouldFree)
+ ToFree.push_back(std::unique_ptr<const PassInfo>(&PI));
+}
+
+void PassRegistry::enumerateWith(PassRegistrationListener *L) {
+ sys::SmartScopedReader<true> Guard(Lock);
+ for (auto PassInfoPair : PassInfoMap)
+ L->passEnumerate(PassInfoPair.second);
+}
+
+/// Analysis Group Mechanisms.
+void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
+ const void *PassID,
+ PassInfo &Registeree, bool isDefault,
+ bool ShouldFree) {
+ PassInfo *InterfaceInfo = const_cast<PassInfo *>(getPassInfo(InterfaceID));
+ if (!InterfaceInfo) {
+ // First reference to Interface, register it now.
+ registerPass(Registeree);
+ InterfaceInfo = &Registeree;
+ }
+ assert(Registeree.isAnalysisGroup() &&
+ "Trying to join an analysis group that is a normal pass!");
+
+ if (PassID) {
+ PassInfo *ImplementationInfo = const_cast<PassInfo *>(getPassInfo(PassID));
+ assert(ImplementationInfo &&
+ "Must register pass before adding to AnalysisGroup!");
+
+ sys::SmartScopedWriter<true> Guard(Lock);
+
+ // Make sure we keep track of the fact that the implementation implements
+ // the interface.
+ ImplementationInfo->addInterfaceImplemented(InterfaceInfo);
+
+ if (isDefault) {
+ assert(InterfaceInfo->getNormalCtor() == nullptr &&
+ "Default implementation for analysis group already specified!");
+ assert(
+ ImplementationInfo->getNormalCtor() &&
+ "Cannot specify pass as default if it does not have a default ctor");
+ InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
+ InterfaceInfo->setTargetMachineCtor(
+ ImplementationInfo->getTargetMachineCtor());
+ }
+ }
+
+ if (ShouldFree)
+ ToFree.push_back(std::unique_ptr<const PassInfo>(&Registeree));
+}
+
+void PassRegistry::addRegistrationListener(PassRegistrationListener *L) {
+ sys::SmartScopedWriter<true> Guard(Lock);
+ Listeners.push_back(L);
+}
+
+void PassRegistry::removeRegistrationListener(PassRegistrationListener *L) {
+ sys::SmartScopedWriter<true> Guard(Lock);
+
+ auto I = std::find(Listeners.begin(), Listeners.end(), L);
+ Listeners.erase(I);
+}
diff --git a/contrib/llvm/lib/IR/Statepoint.cpp b/contrib/llvm/lib/IR/Statepoint.cpp
new file mode 100644
index 0000000..27a990e
--- /dev/null
+++ b/contrib/llvm/lib/IR/Statepoint.cpp
@@ -0,0 +1,61 @@
+//===-- IR/Statepoint.cpp -- gc.statepoint utilities ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains utility functions to help identify and work with the
+// gc.statepoint family of intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+bool llvm::isStatepoint(const ImmutableCallSite &CS) {
+ if (!CS.getInstruction()) {
+ // This is not a call site
+ return false;
+ }
+
+ const Function *F = CS.getCalledFunction();
+ return (F && F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
+}
+
+bool llvm::isStatepoint(const Value *inst) {
+ if (isa<InvokeInst>(inst) || isa<CallInst>(inst)) {
+ ImmutableCallSite CS(inst);
+ return isStatepoint(CS);
+ }
+ return false;
+}
+
+bool llvm::isStatepoint(const Value &inst) {
+ return isStatepoint(&inst);
+}
+
+bool llvm::isGCRelocate(const ImmutableCallSite &CS) {
+ return CS.getInstruction() && isa<GCRelocateInst>(CS.getInstruction());
+}
+
+bool llvm::isGCResult(const ImmutableCallSite &CS) {
+ if (!CS.getInstruction()) {
+ // This is not a call site
+ return false;
+ }
+
+ return isGCResult(CS.getInstruction());
+}
+
+bool llvm::isGCResult(const Value *inst) {
+ if (const CallInst *call = dyn_cast<CallInst>(inst)) {
+ if (Function *F = call->getCalledFunction()) {
+ return F->getIntrinsicID() == Intrinsic::experimental_gc_result;
+ }
+ }
+ return false;
+}
diff --git a/contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h b/contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h
new file mode 100644
index 0000000..50573d8
--- /dev/null
+++ b/contrib/llvm/lib/IR/SymbolTableListTraitsImpl.h
@@ -0,0 +1,114 @@
+//===-- llvm/SymbolTableListTraitsImpl.h - Implementation ------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the stickier parts of the SymbolTableListTraits class,
+// and is explicitly instantiated where needed to avoid defining all this code
+// in a widely used header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_SYMBOLTABLELISTTRAITSIMPL_H
+#define LLVM_LIB_IR_SYMBOLTABLELISTTRAITSIMPL_H
+
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/ValueSymbolTable.h"
+
+namespace llvm {
+
+/// setSymTabObject - This is called when (e.g.) the parent of a basic block
+/// changes. This requires us to remove all the instruction symtab entries from
+/// the current function and reinsert them into the new function.
+template <typename ValueSubClass>
+template <typename TPtr>
+void SymbolTableListTraits<ValueSubClass>::setSymTabObject(TPtr *Dest,
+ TPtr Src) {
+ // Get the old symtab and value list before doing the assignment.
+ ValueSymbolTable *OldST = getSymTab(getListOwner());
+
+ // Do it.
+ *Dest = Src;
+
+ // Get the new SymTab object.
+ ValueSymbolTable *NewST = getSymTab(getListOwner());
+
+ // If there is nothing to do, quick exit.
+ if (OldST == NewST) return;
+
+ // Move all the elements from the old symtab to the new one.
+ ListTy &ItemList = getList(getListOwner());
+ if (ItemList.empty()) return;
+
+ if (OldST) {
+ // Remove all entries from the previous symtab.
+ for (auto I = ItemList.begin(); I != ItemList.end(); ++I)
+ if (I->hasName())
+ OldST->removeValueName(I->getValueName());
+ }
+
+ if (NewST) {
+ // Add all of the items to the new symtab.
+ for (auto I = ItemList.begin(); I != ItemList.end(); ++I)
+ if (I->hasName())
+ NewST->reinsertValue(&*I);
+ }
+}
+
+template <typename ValueSubClass>
+void SymbolTableListTraits<ValueSubClass>::addNodeToList(ValueSubClass *V) {
+ assert(!V->getParent() && "Value already in a container!!");
+ ItemParentClass *Owner = getListOwner();
+ V->setParent(Owner);
+ if (V->hasName())
+ if (ValueSymbolTable *ST = getSymTab(Owner))
+ ST->reinsertValue(V);
+}
+
+template <typename ValueSubClass>
+void SymbolTableListTraits<ValueSubClass>::removeNodeFromList(
+ ValueSubClass *V) {
+ V->setParent(nullptr);
+ if (V->hasName())
+ if (ValueSymbolTable *ST = getSymTab(getListOwner()))
+ ST->removeValueName(V->getValueName());
+}
+
+template <typename ValueSubClass>
+void SymbolTableListTraits<ValueSubClass>::transferNodesFromList(
+ SymbolTableListTraits &L2, ilist_iterator<ValueSubClass> first,
+ ilist_iterator<ValueSubClass> last) {
+ // We only have to do work here if transferring instructions between BBs
+ ItemParentClass *NewIP = getListOwner(), *OldIP = L2.getListOwner();
+ if (NewIP == OldIP) return; // No work to do at all...
+
+ // We only have to update symbol table entries if we are transferring the
+ // instructions to a different symtab object...
+ ValueSymbolTable *NewST = getSymTab(NewIP);
+ ValueSymbolTable *OldST = getSymTab(OldIP);
+ if (NewST != OldST) {
+ for (; first != last; ++first) {
+ ValueSubClass &V = *first;
+ bool HasName = V.hasName();
+ if (OldST && HasName)
+ OldST->removeValueName(V.getValueName());
+ V.setParent(NewIP);
+ if (NewST && HasName)
+ NewST->reinsertValue(&V);
+ }
+ } else {
+ // Just transferring between blocks in the same function, simply update the
+ // parent fields in the instructions...
+ for (; first != last; ++first)
+ first->setParent(NewIP);
+ }
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/IR/Type.cpp b/contrib/llvm/lib/IR/Type.cpp
new file mode 100644
index 0000000..4c1baf5
--- /dev/null
+++ b/contrib/llvm/lib/IR/Type.cpp
@@ -0,0 +1,719 @@
+//===-- Type.cpp - Implement the Type class -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Type class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Type.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/IR/Module.h"
+#include <algorithm>
+#include <cstdarg>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Type Class Implementation
+//===----------------------------------------------------------------------===//
+
+Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) {
+ switch (IDNumber) {
+ case VoidTyID : return getVoidTy(C);
+ case HalfTyID : return getHalfTy(C);
+ case FloatTyID : return getFloatTy(C);
+ case DoubleTyID : return getDoubleTy(C);
+ case X86_FP80TyID : return getX86_FP80Ty(C);
+ case FP128TyID : return getFP128Ty(C);
+ case PPC_FP128TyID : return getPPC_FP128Ty(C);
+ case LabelTyID : return getLabelTy(C);
+ case MetadataTyID : return getMetadataTy(C);
+ case X86_MMXTyID : return getX86_MMXTy(C);
+ case TokenTyID : return getTokenTy(C);
+ default:
+ return nullptr;
+ }
+}
+
+/// getScalarType - If this is a vector type, return the element type;
+/// otherwise return this.
+Type *Type::getScalarType() const {
+ if (auto *VTy = dyn_cast<VectorType>(this))
+ return VTy->getElementType();
+ return const_cast<Type*>(this);
+}
+
+/// isIntegerTy - Return true if this is an IntegerType of the specified width.
+bool Type::isIntegerTy(unsigned Bitwidth) const {
+ return isIntegerTy() && cast<IntegerType>(this)->getBitWidth() == Bitwidth;
+}
+
+// canLosslesslyBitCastTo - Return true if this type can be converted to
+// 'Ty' without any reinterpretation of bits. For example, i8* to i32*.
+//
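+// More illustrative cases (a sketch, not an exhaustive list):
+//   <4 x i16> -> <2 x i32>  lossless: both are 64-bit vectors
+//   <2 x i32> -> x86_mmx    lossless: 64-bit vector to MMX
+//   i8*       -> i32*       lossless: same address space
+//   i32       -> float      not lossless by this predicate
+//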
+bool Type::canLosslesslyBitCastTo(Type *Ty) const {
+ // Identity cast means no change so return true
+ if (this == Ty)
+ return true;
+
+ // They are not convertible unless they are at least first class types
+ if (!this->isFirstClassType() || !Ty->isFirstClassType())
+ return false;
+
+ // Vector -> Vector conversions are always lossless if the two vector types
+ // have the same size, otherwise not. Also, 64-bit vector types can be
+ // converted to x86mmx.
+ if (auto *thisPTy = dyn_cast<VectorType>(this)) {
+ if (auto *thatPTy = dyn_cast<VectorType>(Ty))
+ return thisPTy->getBitWidth() == thatPTy->getBitWidth();
+ if (Ty->getTypeID() == Type::X86_MMXTyID &&
+ thisPTy->getBitWidth() == 64)
+ return true;
+ }
+
+ if (this->getTypeID() == Type::X86_MMXTyID)
+ if (auto *thatPTy = dyn_cast<VectorType>(Ty))
+ if (thatPTy->getBitWidth() == 64)
+ return true;
+
+ // At this point we have only various mismatches of the first class types
+ // remaining and ptr->ptr. Just select the lossless conversions. Everything
+ // else is not lossless. Conservatively assume we can't losslessly convert
+ // between pointers with different address spaces.
+ if (auto *PTy = dyn_cast<PointerType>(this)) {
+ if (auto *OtherPTy = dyn_cast<PointerType>(Ty))
+ return PTy->getAddressSpace() == OtherPTy->getAddressSpace();
+ return false;
+ }
+ return false; // Other types have no identity values
+}
+
+bool Type::isEmptyTy() const {
+ if (auto *ATy = dyn_cast<ArrayType>(this)) {
+ unsigned NumElements = ATy->getNumElements();
+ return NumElements == 0 || ATy->getElementType()->isEmptyTy();
+ }
+
+ if (auto *STy = dyn_cast<StructType>(this)) {
+ unsigned NumElements = STy->getNumElements();
+ for (unsigned i = 0; i < NumElements; ++i)
+ if (!STy->getElementType(i)->isEmptyTy())
+ return false;
+ return true;
+ }
+
+ return false;
+}
+
+unsigned Type::getPrimitiveSizeInBits() const {
+ switch (getTypeID()) {
+ case Type::HalfTyID: return 16;
+ case Type::FloatTyID: return 32;
+ case Type::DoubleTyID: return 64;
+ case Type::X86_FP80TyID: return 80;
+ case Type::FP128TyID: return 128;
+ case Type::PPC_FP128TyID: return 128;
+ case Type::X86_MMXTyID: return 64;
+ case Type::IntegerTyID: return cast<IntegerType>(this)->getBitWidth();
+ case Type::VectorTyID: return cast<VectorType>(this)->getBitWidth();
+ default: return 0;
+ }
+}
+
+/// getScalarSizeInBits - If this is a vector type, return the
+/// getPrimitiveSizeInBits value for the element type. Otherwise return the
+/// getPrimitiveSizeInBits value for this type.
+unsigned Type::getScalarSizeInBits() const {
+ return getScalarType()->getPrimitiveSizeInBits();
+}
+
+/// getFPMantissaWidth - Return the width of the mantissa of this type. This
+/// is only valid on floating point types. If the FP type does not
+/// have a stable mantissa (e.g. ppc long double), this method returns -1.
+int Type::getFPMantissaWidth() const {
+ if (auto *VTy = dyn_cast<VectorType>(this))
+ return VTy->getElementType()->getFPMantissaWidth();
+ assert(isFloatingPointTy() && "Not a floating point type!");
+ if (getTypeID() == HalfTyID) return 11;
+ if (getTypeID() == FloatTyID) return 24;
+ if (getTypeID() == DoubleTyID) return 53;
+ if (getTypeID() == X86_FP80TyID) return 64;
+ if (getTypeID() == FP128TyID) return 113;
+ assert(getTypeID() == PPC_FP128TyID && "unknown fp type");
+ return -1;
+}
+
+/// isSizedDerivedType - Derived types like structures and arrays are sized
+/// iff all of the members of the type are sized as well. Since asking for
+/// their size is relatively uncommon, move this operation out of line.
+bool Type::isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited) const {
+ if (auto *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType()->isSized(Visited);
+
+ if (auto *VTy = dyn_cast<VectorType>(this))
+ return VTy->getElementType()->isSized(Visited);
+
+ return cast<StructType>(this)->isSized(Visited);
+}
+
+//===----------------------------------------------------------------------===//
+// Primitive 'Type' data
+//===----------------------------------------------------------------------===//
+
+Type *Type::getVoidTy(LLVMContext &C) { return &C.pImpl->VoidTy; }
+Type *Type::getLabelTy(LLVMContext &C) { return &C.pImpl->LabelTy; }
+Type *Type::getHalfTy(LLVMContext &C) { return &C.pImpl->HalfTy; }
+Type *Type::getFloatTy(LLVMContext &C) { return &C.pImpl->FloatTy; }
+Type *Type::getDoubleTy(LLVMContext &C) { return &C.pImpl->DoubleTy; }
+Type *Type::getMetadataTy(LLVMContext &C) { return &C.pImpl->MetadataTy; }
+Type *Type::getTokenTy(LLVMContext &C) { return &C.pImpl->TokenTy; }
+Type *Type::getX86_FP80Ty(LLVMContext &C) { return &C.pImpl->X86_FP80Ty; }
+Type *Type::getFP128Ty(LLVMContext &C) { return &C.pImpl->FP128Ty; }
+Type *Type::getPPC_FP128Ty(LLVMContext &C) { return &C.pImpl->PPC_FP128Ty; }
+Type *Type::getX86_MMXTy(LLVMContext &C) { return &C.pImpl->X86_MMXTy; }
+
+IntegerType *Type::getInt1Ty(LLVMContext &C) { return &C.pImpl->Int1Ty; }
+IntegerType *Type::getInt8Ty(LLVMContext &C) { return &C.pImpl->Int8Ty; }
+IntegerType *Type::getInt16Ty(LLVMContext &C) { return &C.pImpl->Int16Ty; }
+IntegerType *Type::getInt32Ty(LLVMContext &C) { return &C.pImpl->Int32Ty; }
+IntegerType *Type::getInt64Ty(LLVMContext &C) { return &C.pImpl->Int64Ty; }
+IntegerType *Type::getInt128Ty(LLVMContext &C) { return &C.pImpl->Int128Ty; }
+
+IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) {
+ return IntegerType::get(C, N);
+}
+
+PointerType *Type::getHalfPtrTy(LLVMContext &C, unsigned AS) {
+ return getHalfTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getFloatPtrTy(LLVMContext &C, unsigned AS) {
+ return getFloatTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getDoublePtrTy(LLVMContext &C, unsigned AS) {
+ return getDoubleTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getX86_FP80PtrTy(LLVMContext &C, unsigned AS) {
+ return getX86_FP80Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getFP128PtrTy(LLVMContext &C, unsigned AS) {
+ return getFP128Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getPPC_FP128PtrTy(LLVMContext &C, unsigned AS) {
+ return getPPC_FP128Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getX86_MMXPtrTy(LLVMContext &C, unsigned AS) {
+ return getX86_MMXTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS) {
+ return getIntNTy(C, N)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt1PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt1Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt8PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt8Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt16PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt16Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt32PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt32Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt64PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt64Ty(C)->getPointerTo(AS);
+}
+
+
+//===----------------------------------------------------------------------===//
+// IntegerType Implementation
+//===----------------------------------------------------------------------===//
+
+IntegerType *IntegerType::get(LLVMContext &C, unsigned NumBits) {
+ assert(NumBits >= MIN_INT_BITS && "bitwidth too small");
+ assert(NumBits <= MAX_INT_BITS && "bitwidth too large");
+
+ // Check for the built-in integer types
+ switch (NumBits) {
+ case 1: return cast<IntegerType>(Type::getInt1Ty(C));
+ case 8: return cast<IntegerType>(Type::getInt8Ty(C));
+ case 16: return cast<IntegerType>(Type::getInt16Ty(C));
+ case 32: return cast<IntegerType>(Type::getInt32Ty(C));
+ case 64: return cast<IntegerType>(Type::getInt64Ty(C));
+ case 128: return cast<IntegerType>(Type::getInt128Ty(C));
+ default:
+ break;
+ }
+
+ IntegerType *&Entry = C.pImpl->IntegerTypes[NumBits];
+
+ if (!Entry)
+ Entry = new (C.pImpl->TypeAllocator) IntegerType(C, NumBits);
+
+ return Entry;
+}
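+
+// For instance, arbitrary widths are uniqued on demand (illustrative; Ctx is
+// an LLVMContext):
+//
+//   IntegerType *I17 = IntegerType::get(Ctx, 17); // the type 'i17'
+//   assert(I17 == IntegerType::get(Ctx, 17));     // same uniqued object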
+
+bool IntegerType::isPowerOf2ByteWidth() const {
+ unsigned BitWidth = getBitWidth();
+ return (BitWidth > 7) && isPowerOf2_32(BitWidth);
+}
+
+APInt IntegerType::getMask() const {
+ return APInt::getAllOnesValue(getBitWidth());
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionType Implementation
+//===----------------------------------------------------------------------===//
+
+FunctionType::FunctionType(Type *Result, ArrayRef<Type*> Params,
+ bool IsVarArgs)
+ : Type(Result->getContext(), FunctionTyID) {
+ Type **SubTys = reinterpret_cast<Type**>(this+1);
+ assert(isValidReturnType(Result) && "invalid return type for function");
+ setSubclassData(IsVarArgs);
+
+ SubTys[0] = Result;
+
+ for (unsigned i = 0, e = Params.size(); i != e; ++i) {
+ assert(isValidArgumentType(Params[i]) &&
+ "Not a valid type for function argument!");
+ SubTys[i+1] = Params[i];
+ }
+
+ ContainedTys = SubTys;
+ NumContainedTys = Params.size() + 1; // + 1 for result type
+}
+
+// FunctionType::get - The factory function for the FunctionType class.
+FunctionType *FunctionType::get(Type *ReturnType,
+ ArrayRef<Type*> Params, bool isVarArg) {
+ LLVMContextImpl *pImpl = ReturnType->getContext().pImpl;
+ FunctionTypeKeyInfo::KeyTy Key(ReturnType, Params, isVarArg);
+ auto I = pImpl->FunctionTypes.find_as(Key);
+ FunctionType *FT;
+
+ if (I == pImpl->FunctionTypes.end()) {
+ FT = (FunctionType*) pImpl->TypeAllocator.
+ Allocate(sizeof(FunctionType) + sizeof(Type*) * (Params.size() + 1),
+ AlignOf<FunctionType>::Alignment);
+ new (FT) FunctionType(ReturnType, Params, isVarArg);
+ pImpl->FunctionTypes.insert(FT);
+ } else {
+ FT = *I;
+ }
+
+ return FT;
+}
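+
+// Building the type 'i32 (i8*, i64, ...)' with this factory might look like
+// (a sketch; Ctx is an LLVMContext):
+//
+//   Type *Params[] = {Type::getInt8PtrTy(Ctx), Type::getInt64Ty(Ctx)};
+//   FunctionType *FT =
+//       FunctionType::get(Type::getInt32Ty(Ctx), Params, /*isVarArg=*/true);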
+
+FunctionType *FunctionType::get(Type *Result, bool isVarArg) {
+ return get(Result, None, isVarArg);
+}
+
+/// isValidReturnType - Return true if the specified type is valid as a return
+/// type.
+bool FunctionType::isValidReturnType(Type *RetTy) {
+ return !RetTy->isFunctionTy() && !RetTy->isLabelTy() &&
+ !RetTy->isMetadataTy();
+}
+
+/// isValidArgumentType - Return true if the specified type is valid as an
+/// argument type.
+bool FunctionType::isValidArgumentType(Type *ArgTy) {
+ return ArgTy->isFirstClassType();
+}
+
+//===----------------------------------------------------------------------===//
+// StructType Implementation
+//===----------------------------------------------------------------------===//
+
+// Primitive Constructors.
+
+StructType *StructType::get(LLVMContext &Context, ArrayRef<Type*> ETypes,
+ bool isPacked) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ AnonStructTypeKeyInfo::KeyTy Key(ETypes, isPacked);
+ auto I = pImpl->AnonStructTypes.find_as(Key);
+ StructType *ST;
+
+ if (I == pImpl->AnonStructTypes.end()) {
+ // Value not found. Create a new type!
+ ST = new (Context.pImpl->TypeAllocator) StructType(Context);
+ ST->setSubclassData(SCDB_IsLiteral); // Literal struct.
+ ST->setBody(ETypes, isPacked);
+ Context.pImpl->AnonStructTypes.insert(ST);
+ } else {
+ ST = *I;
+ }
+
+ return ST;
+}
+
+void StructType::setBody(ArrayRef<Type*> Elements, bool isPacked) {
+ assert(isOpaque() && "Struct body already set!");
+
+ setSubclassData(getSubclassData() | SCDB_HasBody);
+ if (isPacked)
+ setSubclassData(getSubclassData() | SCDB_Packed);
+
+ NumContainedTys = Elements.size();
+
+ if (Elements.empty()) {
+ ContainedTys = nullptr;
+ return;
+ }
+
+ ContainedTys = Elements.copy(getContext().pImpl->TypeAllocator).data();
+}
+
+void StructType::setName(StringRef Name) {
+ if (Name == getName()) return;
+
+ StringMap<StructType *> &SymbolTable = getContext().pImpl->NamedStructTypes;
+ typedef StringMap<StructType *>::MapEntryTy EntryTy;
+
+ // If this struct already had a name, remove its symbol table entry. Don't
+ // delete the data yet because it may be part of the new name.
+ if (SymbolTableEntry)
+ SymbolTable.remove((EntryTy *)SymbolTableEntry);
+
+ // If this is just removing the name, we're done.
+ if (Name.empty()) {
+ if (SymbolTableEntry) {
+ // Delete the old string data.
+ ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator());
+ SymbolTableEntry = nullptr;
+ }
+ return;
+ }
+
+ // Look up the entry for the name.
+ auto IterBool =
+ getContext().pImpl->NamedStructTypes.insert(std::make_pair(Name, this));
+
+  // While we have a name collision, keep appending a uniqued numeric suffix.
+ if (!IterBool.second) {
+ SmallString<64> TempStr(Name);
+ TempStr.push_back('.');
+ raw_svector_ostream TmpStream(TempStr);
+ unsigned NameSize = Name.size();
+
+ do {
+ TempStr.resize(NameSize + 1);
+ TmpStream << getContext().pImpl->NamedStructTypesUniqueID++;
+
+ IterBool = getContext().pImpl->NamedStructTypes.insert(
+ std::make_pair(TmpStream.str(), this));
+ } while (!IterBool.second);
+ }
+
+ // Delete the old string data.
+ if (SymbolTableEntry)
+ ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator());
+ SymbolTableEntry = &*IterBool.first;
+}
+
+//===----------------------------------------------------------------------===//
+// StructType Helper functions.
+
+StructType *StructType::create(LLVMContext &Context, StringRef Name) {
+ StructType *ST = new (Context.pImpl->TypeAllocator) StructType(Context);
+ if (!Name.empty())
+ ST->setName(Name);
+ return ST;
+}
+
+StructType *StructType::get(LLVMContext &Context, bool isPacked) {
+ return get(Context, None, isPacked);
+}
+
+StructType *StructType::get(Type *type, ...) {
+  assert(type && "Cannot create a struct type with no elements this way");
+ LLVMContext &Ctx = type->getContext();
+ va_list ap;
+ SmallVector<llvm::Type*, 8> StructFields;
+ va_start(ap, type);
+ while (type) {
+ StructFields.push_back(type);
+ type = va_arg(ap, llvm::Type*);
+ }
+ auto *Ret = llvm::StructType::get(Ctx, StructFields);
+ va_end(ap);
+ return Ret;
+}
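+
+// Note that the variadic overload above is terminated by a null Type*, so a
+// caller must pass a trailing nullptr (illustrative):
+//
+//   StructType *Pair =
+//       StructType::get(Type::getInt32Ty(Ctx), Type::getFloatTy(Ctx), nullptr);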
+
+StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements,
+ StringRef Name, bool isPacked) {
+ StructType *ST = create(Context, Name);
+ ST->setBody(Elements, isPacked);
+ return ST;
+}
+
+StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements) {
+ return create(Context, Elements, StringRef());
+}
+
+StructType *StructType::create(LLVMContext &Context) {
+ return create(Context, StringRef());
+}
+
+StructType *StructType::create(ArrayRef<Type*> Elements, StringRef Name,
+ bool isPacked) {
+ assert(!Elements.empty() &&
+ "This method may not be invoked with an empty list");
+ return create(Elements[0]->getContext(), Elements, Name, isPacked);
+}
+
+StructType *StructType::create(ArrayRef<Type*> Elements) {
+ assert(!Elements.empty() &&
+ "This method may not be invoked with an empty list");
+ return create(Elements[0]->getContext(), Elements, StringRef());
+}
+
+StructType *StructType::create(StringRef Name, Type *type, ...) {
+  assert(type && "Cannot create a struct type with no elements this way");
+ LLVMContext &Ctx = type->getContext();
+ va_list ap;
+ SmallVector<llvm::Type*, 8> StructFields;
+ va_start(ap, type);
+ while (type) {
+ StructFields.push_back(type);
+ type = va_arg(ap, llvm::Type*);
+ }
+ auto *Ret = llvm::StructType::create(Ctx, StructFields, Name);
+ va_end(ap);
+ return Ret;
+}
+
+bool StructType::isSized(SmallPtrSetImpl<Type*> *Visited) const {
+ if ((getSubclassData() & SCDB_IsSized) != 0)
+ return true;
+ if (isOpaque())
+ return false;
+
+ if (Visited && !Visited->insert(const_cast<StructType*>(this)).second)
+ return false;
+
+ // Okay, our struct is sized if all of the elements are, but if one of the
+ // elements is opaque, the struct isn't sized *yet*, but may become sized in
+ // the future, so just bail out without caching.
+ for (element_iterator I = element_begin(), E = element_end(); I != E; ++I)
+ if (!(*I)->isSized(Visited))
+ return false;
+
+ // Here we cheat a bit and cast away const-ness. The goal is to memoize when
+ // we find a sized type, as types can only move from opaque to sized, not the
+ // other way.
+ const_cast<StructType*>(this)->setSubclassData(
+ getSubclassData() | SCDB_IsSized);
+ return true;
+}
+
+StringRef StructType::getName() const {
+ assert(!isLiteral() && "Literal structs never have names");
+ if (!SymbolTableEntry) return StringRef();
+
+ return ((StringMapEntry<StructType*> *)SymbolTableEntry)->getKey();
+}
+
+void StructType::setBody(Type *type, ...) {
+  assert(type && "Cannot create a struct type with no elements this way");
+ va_list ap;
+ SmallVector<llvm::Type*, 8> StructFields;
+ va_start(ap, type);
+ while (type) {
+ StructFields.push_back(type);
+ type = va_arg(ap, llvm::Type*);
+ }
+ setBody(StructFields);
+ va_end(ap);
+}
+
+bool StructType::isValidElementType(Type *ElemTy) {
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
+ !ElemTy->isTokenTy();
+}
+
+/// isLayoutIdentical - Return true if this is layout identical to the
+/// specified struct.
+bool StructType::isLayoutIdentical(StructType *Other) const {
+ if (this == Other) return true;
+
+ if (isPacked() != Other->isPacked())
+ return false;
+
+ return elements() == Other->elements();
+}
+
+/// getTypeByName - Return the type with the specified name, or null if there
+/// is none by that name.
+StructType *Module::getTypeByName(StringRef Name) const {
+ return getContext().pImpl->NamedStructTypes.lookup(Name);
+}
+
+
+//===----------------------------------------------------------------------===//
+// CompositeType Implementation
+//===----------------------------------------------------------------------===//
+
+Type *CompositeType::getTypeAtIndex(const Value *V) const {
+ if (auto *STy = dyn_cast<StructType>(this)) {
+ unsigned Idx =
+ (unsigned)cast<Constant>(V)->getUniqueInteger().getZExtValue();
+ assert(indexValid(Idx) && "Invalid structure index!");
+ return STy->getElementType(Idx);
+ }
+
+ return cast<SequentialType>(this)->getElementType();
+}
+
+Type *CompositeType::getTypeAtIndex(unsigned Idx) const {
+ if (auto *STy = dyn_cast<StructType>(this)) {
+ assert(indexValid(Idx) && "Invalid structure index!");
+ return STy->getElementType(Idx);
+ }
+
+ return cast<SequentialType>(this)->getElementType();
+}
+
+bool CompositeType::indexValid(const Value *V) const {
+ if (auto *STy = dyn_cast<StructType>(this)) {
+ // Structure indexes require (vectors of) 32-bit integer constants. In the
+ // vector case all of the indices must be equal.
+ if (!V->getType()->getScalarType()->isIntegerTy(32))
+ return false;
+ const Constant *C = dyn_cast<Constant>(V);
+ if (C && V->getType()->isVectorTy())
+ C = C->getSplatValue();
+ const ConstantInt *CU = dyn_cast_or_null<ConstantInt>(C);
+ return CU && CU->getZExtValue() < STy->getNumElements();
+ }
+
+ // Sequential types can be indexed by any integer.
+ return V->getType()->isIntOrIntVectorTy();
+}
+
+bool CompositeType::indexValid(unsigned Idx) const {
+ if (auto *STy = dyn_cast<StructType>(this))
+ return Idx < STy->getNumElements();
+ // Sequential types can be indexed by any integer.
+ return true;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ArrayType Implementation
+//===----------------------------------------------------------------------===//
+
+ArrayType::ArrayType(Type *ElType, uint64_t NumEl)
+ : SequentialType(ArrayTyID, ElType) {
+ NumElements = NumEl;
+}
+
+ArrayType *ArrayType::get(Type *ElementType, uint64_t NumElements) {
+ assert(isValidElementType(ElementType) && "Invalid type for array element!");
+
+ LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
+ ArrayType *&Entry =
+ pImpl->ArrayTypes[std::make_pair(ElementType, NumElements)];
+
+ if (!Entry)
+ Entry = new (pImpl->TypeAllocator) ArrayType(ElementType, NumElements);
+ return Entry;
+}
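+
+// e.g. ArrayType::get(Type::getInt8Ty(Ctx), 16) yields '[16 x i8]'; a second
+// identical request returns the same uniqued object.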
+
+bool ArrayType::isValidElementType(Type *ElemTy) {
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
+ !ElemTy->isTokenTy();
+}
+
+//===----------------------------------------------------------------------===//
+// VectorType Implementation
+//===----------------------------------------------------------------------===//
+
+VectorType::VectorType(Type *ElType, unsigned NumEl)
+ : SequentialType(VectorTyID, ElType) {
+ NumElements = NumEl;
+}
+
+VectorType *VectorType::get(Type *ElementType, unsigned NumElements) {
+ assert(NumElements > 0 && "#Elements of a VectorType must be greater than 0");
+ assert(isValidElementType(ElementType) && "Element type of a VectorType must "
+ "be an integer, floating point, or "
+ "pointer type.");
+
+ LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
+  VectorType *&Entry =
+      pImpl->VectorTypes[std::make_pair(ElementType, NumElements)];
+
+ if (!Entry)
+ Entry = new (pImpl->TypeAllocator) VectorType(ElementType, NumElements);
+ return Entry;
+}
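+
+// e.g. VectorType::get(Type::getFloatTy(Ctx), 4) yields '<4 x float>'.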
+
+bool VectorType::isValidElementType(Type *ElemTy) {
+ return ElemTy->isIntegerTy() || ElemTy->isFloatingPointTy() ||
+ ElemTy->isPointerTy();
+}
+
+//===----------------------------------------------------------------------===//
+// PointerType Implementation
+//===----------------------------------------------------------------------===//
+
+PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) {
+ assert(EltTy && "Can't get a pointer to <null> type!");
+ assert(isValidElementType(EltTy) && "Invalid type for pointer element!");
+
+ LLVMContextImpl *CImpl = EltTy->getContext().pImpl;
+
+ // Since AddressSpace #0 is the common case, we special case it.
+ PointerType *&Entry = AddressSpace == 0 ? CImpl->PointerTypes[EltTy]
+ : CImpl->ASPointerTypes[std::make_pair(EltTy, AddressSpace)];
+
+ if (!Entry)
+ Entry = new (CImpl->TypeAllocator) PointerType(EltTy, AddressSpace);
+ return Entry;
+}
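+
+// e.g. PointerType::get(Type::getInt8Ty(Ctx), 1) yields 'i8 addrspace(1)*';
+// Type::getInt8PtrTy(Ctx) is the common addrspace(0) shorthand.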
+
+
+PointerType::PointerType(Type *E, unsigned AddrSpace)
+ : SequentialType(PointerTyID, E) {
+#ifndef NDEBUG
+ const unsigned oldNCT = NumContainedTys;
+#endif
+ setSubclassData(AddrSpace);
+ // Check for miscompile. PR11652.
+ assert(oldNCT == NumContainedTys && "bitfield written out of bounds?");
+}
+
+PointerType *Type::getPointerTo(unsigned addrs) const {
+ return PointerType::get(const_cast<Type*>(this), addrs);
+}
+
+bool PointerType::isValidElementType(Type *ElemTy) {
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy() && !ElemTy->isTokenTy();
+}
+
+bool PointerType::isLoadableOrStorableType(Type *ElemTy) {
+ return isValidElementType(ElemTy) && !ElemTy->isFunctionTy();
+}
diff --git a/contrib/llvm/lib/IR/TypeFinder.cpp b/contrib/llvm/lib/IR/TypeFinder.cpp
new file mode 100644
index 0000000..b5bdab0
--- /dev/null
+++ b/contrib/llvm/lib/IR/TypeFinder.cpp
@@ -0,0 +1,174 @@
+//===-- TypeFinder.cpp - Implement the TypeFinder class -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TypeFinder class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/TypeFinder.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+using namespace llvm;
+
+void TypeFinder::run(const Module &M, bool onlyNamed) {
+ OnlyNamed = onlyNamed;
+
+ // Get types from global variables.
+ for (Module::const_global_iterator I = M.global_begin(),
+ E = M.global_end(); I != E; ++I) {
+ incorporateType(I->getType());
+ if (I->hasInitializer())
+ incorporateValue(I->getInitializer());
+ }
+
+ // Get types from aliases.
+ for (Module::const_alias_iterator I = M.alias_begin(),
+ E = M.alias_end(); I != E; ++I) {
+ incorporateType(I->getType());
+ if (const Value *Aliasee = I->getAliasee())
+ incorporateValue(Aliasee);
+ }
+
+ // Get types from functions.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDForInst;
+ for (Module::const_iterator FI = M.begin(), E = M.end(); FI != E; ++FI) {
+ incorporateType(FI->getType());
+
+ for (const Use &U : FI->operands())
+ incorporateValue(U.get());
+
+ // First incorporate the arguments.
+ for (Function::const_arg_iterator AI = FI->arg_begin(),
+ AE = FI->arg_end(); AI != AE; ++AI)
+ incorporateValue(&*AI);
+
+ for (Function::const_iterator BB = FI->begin(), E = FI->end();
+ BB != E;++BB)
+ for (BasicBlock::const_iterator II = BB->begin(),
+ E = BB->end(); II != E; ++II) {
+ const Instruction &I = *II;
+
+ // Incorporate the type of the instruction.
+ incorporateType(I.getType());
+
+        // Incorporate non-instruction operand types. (Operands that are
+        // themselves instructions are handled when this loop visits them.)
+ for (User::const_op_iterator OI = I.op_begin(), OE = I.op_end();
+ OI != OE; ++OI)
+ if (*OI && !isa<Instruction>(OI))
+ incorporateValue(*OI);
+
+ // Incorporate types hiding in metadata.
+ I.getAllMetadataOtherThanDebugLoc(MDForInst);
+ for (unsigned i = 0, e = MDForInst.size(); i != e; ++i)
+ incorporateMDNode(MDForInst[i].second);
+
+ MDForInst.clear();
+ }
+ }
+
+ for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end(); I != E; ++I) {
+ const NamedMDNode *NMD = &*I;
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ incorporateMDNode(NMD->getOperand(i));
+ }
+}
+
+void TypeFinder::clear() {
+ VisitedConstants.clear();
+ VisitedTypes.clear();
+ StructTypes.clear();
+}
+
+/// incorporateType - This method adds the type to the list of used structures
+/// if it's not in there already.
+void TypeFinder::incorporateType(Type *Ty) {
+ // Check to see if we've already visited this type.
+ if (!VisitedTypes.insert(Ty).second)
+ return;
+
+ SmallVector<Type *, 4> TypeWorklist;
+ TypeWorklist.push_back(Ty);
+ do {
+ Ty = TypeWorklist.pop_back_val();
+
+ // If this is a structure or opaque type, add a name for the type.
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ if (!OnlyNamed || STy->hasName())
+ StructTypes.push_back(STy);
+
+ // Add all unvisited subtypes to worklist for processing
+ for (Type::subtype_reverse_iterator I = Ty->subtype_rbegin(),
+ E = Ty->subtype_rend();
+ I != E; ++I)
+ if (VisitedTypes.insert(*I).second)
+ TypeWorklist.push_back(*I);
+ } while (!TypeWorklist.empty());
+}
+
+/// incorporateValue - This method is used to walk operand lists finding types
+/// hiding in constant expressions and other operands that won't be walked in
+/// other ways. GlobalValues, basic blocks, instructions, and inst operands are
+/// all explicitly enumerated.
+void TypeFinder::incorporateValue(const Value *V) {
+ if (const auto *M = dyn_cast<MetadataAsValue>(V)) {
+ if (const auto *N = dyn_cast<MDNode>(M->getMetadata()))
+ return incorporateMDNode(N);
+ if (const auto *MDV = dyn_cast<ValueAsMetadata>(M->getMetadata()))
+ return incorporateValue(MDV->getValue());
+ return;
+ }
+
+ if (!isa<Constant>(V) || isa<GlobalValue>(V)) return;
+
+ // Already visited?
+ if (!VisitedConstants.insert(V).second)
+ return;
+
+ // Check this type.
+ incorporateType(V->getType());
+
+ // If this is an instruction, we incorporate it separately.
+ if (isa<Instruction>(V))
+ return;
+
+ // Look in operands for types.
+ const User *U = cast<User>(V);
+ for (Constant::const_op_iterator I = U->op_begin(),
+ E = U->op_end(); I != E;++I)
+ incorporateValue(*I);
+}
+
+/// incorporateMDNode - This method is used to walk the operands of an MDNode to
+/// find types hiding within.
+void TypeFinder::incorporateMDNode(const MDNode *V) {
+ // Already visited?
+ if (!VisitedMetadata.insert(V).second)
+ return;
+
+ // Look in operands for types.
+ for (unsigned i = 0, e = V->getNumOperands(); i != e; ++i) {
+ Metadata *Op = V->getOperand(i);
+ if (!Op)
+ continue;
+ if (auto *N = dyn_cast<MDNode>(Op)) {
+ incorporateMDNode(N);
+ continue;
+ }
+ if (auto *C = dyn_cast<ConstantAsMetadata>(Op)) {
+ incorporateValue(C->getValue());
+ continue;
+ }
+ }
+}
diff --git a/contrib/llvm/lib/IR/Use.cpp b/contrib/llvm/lib/IR/Use.cpp
new file mode 100644
index 0000000..cae845d
--- /dev/null
+++ b/contrib/llvm/lib/IR/Use.cpp
@@ -0,0 +1,127 @@
+//===-- Use.cpp - Implement the Use class ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include <new>
+
+namespace llvm {
+
+void Use::swap(Use &RHS) {
+ if (Val == RHS.Val)
+ return;
+
+ if (Val)
+ removeFromList();
+
+ Value *OldVal = Val;
+ if (RHS.Val) {
+ RHS.removeFromList();
+ Val = RHS.Val;
+ Val->addUse(*this);
+ } else {
+ Val = nullptr;
+ }
+
+ if (OldVal) {
+ RHS.Val = OldVal;
+ RHS.Val->addUse(RHS);
+ } else {
+ RHS.Val = nullptr;
+ }
+}
+
+User *Use::getUser() const {
+ const Use *End = getImpliedUser();
+ const UserRef *ref = reinterpret_cast<const UserRef *>(End);
+ return ref->getInt() ? ref->getPointer()
+ : reinterpret_cast<User *>(const_cast<Use *>(End));
+}
+
+unsigned Use::getOperandNo() const {
+ return this - getUser()->op_begin();
+}
+
+// Sets up the waymarking algorithm's tags for a series of Uses. See the
+// algorithm details here:
+//
+// http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
+//
+Use *Use::initTags(Use *const Start, Use *Stop) {
+ ptrdiff_t Done = 0;
+ while (Done < 20) {
+ if (Start == Stop--)
+ return Start;
+ static const PrevPtrTag tags[20] = {
+ fullStopTag, oneDigitTag, stopTag, oneDigitTag, oneDigitTag,
+ stopTag, zeroDigitTag, oneDigitTag, oneDigitTag, stopTag,
+ zeroDigitTag, oneDigitTag, zeroDigitTag, oneDigitTag, stopTag,
+ oneDigitTag, oneDigitTag, oneDigitTag, oneDigitTag, stopTag};
+ new (Stop) Use(tags[Done++]);
+ }
+
+ ptrdiff_t Count = Done;
+ while (Start != Stop) {
+ --Stop;
+ if (!Count) {
+ new (Stop) Use(stopTag);
+ ++Done;
+ Count = Done;
+ } else {
+ new (Stop) Use(PrevPtrTag(Count & 1));
+ Count >>= 1;
+ ++Done;
+ }
+ }
+
+ return Start;
+}
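+
+// A worked illustration of the tags above: for a three-operand User the tags
+// written (operand 0 through operand 2) are stopTag, oneDigitTag,
+// fullStopTag. A walk from any Use thus moves toward the User and either
+// hits fullStopTag (the end) or decodes the digit run following a stopTag as
+// a binary offset to the end.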
+
+void Use::zap(Use *Start, const Use *Stop, bool del) {
+ while (Start != Stop)
+ (--Stop)->~Use();
+ if (del)
+ ::operator delete(Start);
+}
+
+const Use *Use::getImpliedUser() const {
+ const Use *Current = this;
+
+ while (true) {
+ unsigned Tag = (Current++)->Prev.getInt();
+ switch (Tag) {
+ case zeroDigitTag:
+ case oneDigitTag:
+ continue;
+
+ case stopTag: {
+ ++Current;
+ ptrdiff_t Offset = 1;
+ while (true) {
+ unsigned Tag = Current->Prev.getInt();
+ switch (Tag) {
+ case zeroDigitTag:
+ case oneDigitTag:
+ ++Current;
+ Offset = (Offset << 1) + Tag;
+ continue;
+ default:
+ return Current + Offset;
+ }
+ }
+ }
+
+ case fullStopTag:
+ return Current;
+ }
+ }
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/lib/IR/User.cpp b/contrib/llvm/lib/IR/User.cpp
new file mode 100644
index 0000000..a75abe6
--- /dev/null
+++ b/contrib/llvm/lib/IR/User.cpp
@@ -0,0 +1,205 @@
+//===-- User.cpp - Implement the User class -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/User.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Operator.h"
+
+namespace llvm {
+class BasicBlock;
+
+//===----------------------------------------------------------------------===//
+// User Class
+//===----------------------------------------------------------------------===//
+
+void User::anchor() {}
+
+void User::replaceUsesOfWith(Value *From, Value *To) {
+  if (From == To) return; // Nothing to do.
+
+ assert((!isa<Constant>(this) || isa<GlobalValue>(this)) &&
+ "Cannot call User::replaceUsesOfWith on a constant!");
+
+ for (unsigned i = 0, E = getNumOperands(); i != E; ++i)
+    if (getOperand(i) == From) { // Is this operand pointing to From?
+ // The side effects of this setOperand call include linking to
+ // "To", adding "this" to the uses list of To, and
+ // most importantly, removing "this" from the use list of "From".
+ setOperand(i, To); // Fix it now...
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// User allocHungoffUses Implementation
+//===----------------------------------------------------------------------===//
+
+void User::allocHungoffUses(unsigned N, bool IsPhi) {
+ assert(HasHungOffUses && "alloc must have hung off uses");
+
+ static_assert(AlignOf<Use>::Alignment >= AlignOf<Use::UserRef>::Alignment,
+ "Alignment is insufficient for 'hung-off-uses' pieces");
+ static_assert(AlignOf<Use::UserRef>::Alignment >=
+ AlignOf<BasicBlock *>::Alignment,
+ "Alignment is insufficient for 'hung-off-uses' pieces");
+
+ // Allocate the array of Uses, followed by a pointer (with bottom bit set) to
+ // the User.
+ size_t size = N * sizeof(Use) + sizeof(Use::UserRef);
+ if (IsPhi)
+ size += N * sizeof(BasicBlock *);
+ Use *Begin = static_cast<Use*>(::operator new(size));
+ Use *End = Begin + N;
+ (void) new(End) Use::UserRef(const_cast<User*>(this), 1);
+ setOperandList(Use::initTags(Begin, End));
+}
+
+void User::growHungoffUses(unsigned NewNumUses, bool IsPhi) {
+ assert(HasHungOffUses && "realloc must have hung off uses");
+
+ unsigned OldNumUses = getNumOperands();
+
+ // We don't support shrinking the number of uses. We wouldn't have enough
+  // space to copy the old uses into the new space.
+ assert(NewNumUses > OldNumUses && "realloc must grow num uses");
+
+ Use *OldOps = getOperandList();
+ allocHungoffUses(NewNumUses, IsPhi);
+ Use *NewOps = getOperandList();
+
+ // Now copy from the old operands list to the new one.
+ std::copy(OldOps, OldOps + OldNumUses, NewOps);
+
+ // If this is a Phi, then we need to copy the BB pointers too.
+ if (IsPhi) {
+ auto *OldPtr =
+ reinterpret_cast<char *>(OldOps + OldNumUses) + sizeof(Use::UserRef);
+ auto *NewPtr =
+ reinterpret_cast<char *>(NewOps + NewNumUses) + sizeof(Use::UserRef);
+ std::copy(OldPtr, OldPtr + (OldNumUses * sizeof(BasicBlock *)), NewPtr);
+ }
+ Use::zap(OldOps, OldOps + OldNumUses, true);
+}
+
+
+// This is a private struct used by `User` to track the co-allocated descriptor
+// section.
+struct DescriptorInfo {
+ intptr_t SizeInBytes;
+};
+
+ArrayRef<const uint8_t> User::getDescriptor() const {
+ auto MutableARef = const_cast<User *>(this)->getDescriptor();
+ return {MutableARef.begin(), MutableARef.end()};
+}
+
+MutableArrayRef<uint8_t> User::getDescriptor() {
+ assert(HasDescriptor && "Don't call otherwise!");
+ assert(!HasHungOffUses && "Invariant!");
+
+ auto *DI = reinterpret_cast<DescriptorInfo *>(getIntrusiveOperands()) - 1;
+ assert(DI->SizeInBytes != 0 && "Should not have had a descriptor otherwise!");
+
+ return MutableArrayRef<uint8_t>(
+ reinterpret_cast<uint8_t *>(DI) - DI->SizeInBytes, DI->SizeInBytes);
+}
+
+//===----------------------------------------------------------------------===//
+// User operator new Implementations
+//===----------------------------------------------------------------------===//
+
+void *User::allocateFixedOperandUser(size_t Size, unsigned Us,
+ unsigned DescBytes) {
+ assert(Us < (1u << NumUserOperandsBits) && "Too many operands");
+
+ static_assert(sizeof(DescriptorInfo) % sizeof(void *) == 0, "Required below");
+
+ unsigned DescBytesToAllocate =
+ DescBytes == 0 ? 0 : (DescBytes + sizeof(DescriptorInfo));
+ assert(DescBytesToAllocate % sizeof(void *) == 0 &&
+ "We need this to satisfy alignment constraints for Uses");
+
+ uint8_t *Storage = static_cast<uint8_t *>(
+ ::operator new(Size + sizeof(Use) * Us + DescBytesToAllocate));
+ Use *Start = reinterpret_cast<Use *>(Storage + DescBytesToAllocate);
+ Use *End = Start + Us;
+ User *Obj = reinterpret_cast<User*>(End);
+ Obj->NumUserOperands = Us;
+ Obj->HasHungOffUses = false;
+ Obj->HasDescriptor = DescBytes != 0;
+ Use::initTags(Start, End);
+
+ if (DescBytes != 0) {
+ auto *DescInfo = reinterpret_cast<DescriptorInfo *>(Storage + DescBytes);
+ DescInfo->SizeInBytes = DescBytes;
+ }
+
+ return Obj;
+}
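+
+// The resulting layout, sketched left-to-right in memory (the descriptor
+// region exists only when DescBytes != 0):
+//
+//   [ descriptor bytes ][ DescriptorInfo ][ Use[0] .. Use[Us-1] ][ User ]
+//   ^ Storage                             ^ Start                ^ Obj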
+
+void *User::operator new(size_t Size, unsigned Us) {
+ return allocateFixedOperandUser(Size, Us, 0);
+}
+
+void *User::operator new(size_t Size, unsigned Us, unsigned DescBytes) {
+ return allocateFixedOperandUser(Size, Us, DescBytes);
+}
+
+void *User::operator new(size_t Size) {
+ // Allocate space for a single Use*
+ void *Storage = ::operator new(Size + sizeof(Use *));
+ Use **HungOffOperandList = static_cast<Use **>(Storage);
+ User *Obj = reinterpret_cast<User *>(HungOffOperandList + 1);
+ Obj->NumUserOperands = 0;
+ Obj->HasHungOffUses = true;
+ Obj->HasDescriptor = false;
+ *HungOffOperandList = nullptr;
+ return Obj;
+}
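+
+// Hung-off layout sketch: only a single pointer slot precedes the User; the
+// Use array lives in a separate allocation (see allocHungoffUses) so it can
+// be grown later:
+//
+//   [ Use* HungOffOperandList ][ User ]  ->  [ Use[0] .. Use[N-1] ][ UserRef ]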
+
+//===----------------------------------------------------------------------===//
+// User operator delete Implementation
+//===----------------------------------------------------------------------===//
+
+void User::operator delete(void *Usr) {
+ // Hung off uses use a single Use* before the User, while other subclasses
+ // use a Use[] allocated prior to the user.
+ User *Obj = static_cast<User *>(Usr);
+ if (Obj->HasHungOffUses) {
+ assert(!Obj->HasDescriptor && "not supported!");
+
+ Use **HungOffOperandList = static_cast<Use **>(Usr) - 1;
+ // drop the hung off uses.
+ Use::zap(*HungOffOperandList, *HungOffOperandList + Obj->NumUserOperands,
+ /* Delete */ true);
+ ::operator delete(HungOffOperandList);
+ } else if (Obj->HasDescriptor) {
+ Use *UseBegin = static_cast<Use *>(Usr) - Obj->NumUserOperands;
+ Use::zap(UseBegin, UseBegin + Obj->NumUserOperands, /* Delete */ false);
+
+ auto *DI = reinterpret_cast<DescriptorInfo *>(UseBegin) - 1;
+ uint8_t *Storage = reinterpret_cast<uint8_t *>(DI) - DI->SizeInBytes;
+ ::operator delete(Storage);
+ } else {
+ Use *Storage = static_cast<Use *>(Usr) - Obj->NumUserOperands;
+ Use::zap(Storage, Storage + Obj->NumUserOperands,
+ /* Delete */ false);
+ ::operator delete(Storage);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Operator Class
+//===----------------------------------------------------------------------===//
+
+Operator::~Operator() {
+ llvm_unreachable("should never destroy an Operator");
+}
+
+} // End llvm namespace
diff --git a/contrib/llvm/lib/IR/Value.cpp b/contrib/llvm/lib/IR/Value.cpp
new file mode 100644
index 0000000..eb9deb6
--- /dev/null
+++ b/contrib/llvm/lib/IR/Value.cpp
@@ -0,0 +1,767 @@
+//===-- Value.cpp - Implement the Value class -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Value, ValueHandle, and User classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Value.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Value Class
+//===----------------------------------------------------------------------===//
+static inline Type *checkType(Type *Ty) {
+ assert(Ty && "Value defined with a null type: Error!");
+ return Ty;
+}
+
+Value::Value(Type *ty, unsigned scid)
+ : VTy(checkType(ty)), UseList(nullptr), SubclassID(scid),
+ HasValueHandle(0), SubclassOptionalData(0), SubclassData(0),
+ NumUserOperands(0), IsUsedByMD(false), HasName(false) {
+ // FIXME: Why isn't this in the subclass gunk??
+ // Note, we cannot call isa<CallInst> before the CallInst has been
+ // constructed.
+ if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke)
+ assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
+ "invalid CallInst type!");
+ else if (SubclassID != BasicBlockVal &&
+ (SubclassID < ConstantFirstVal || SubclassID > ConstantLastVal))
+ assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
+ "Cannot create non-first-class values except for constants!");
+}
+
+Value::~Value() {
+ // Notify all ValueHandles (if present) that this value is going away.
+ if (HasValueHandle)
+ ValueHandleBase::ValueIsDeleted(this);
+ if (isUsedByMetadata())
+ ValueAsMetadata::handleDeletion(this);
+
+#ifndef NDEBUG // Only in -g mode...
+ // Check to make sure that there are no uses of this value that are still
+ // around when the value is destroyed. If there are, then we have a dangling
+ // reference and something is wrong. This code is here to print out where
+ // the value is still being referenced.
+ //
+ if (!use_empty()) {
+ dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
+ for (auto *U : users())
+ dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
+ }
+#endif
+ assert(use_empty() && "Uses remain when a value is destroyed!");
+
+ // If this value is named, destroy the name. This should not be in a symtab
+ // at this point.
+ destroyValueName();
+}
+
+void Value::destroyValueName() {
+ ValueName *Name = getValueName();
+ if (Name)
+ Name->Destroy();
+ setValueName(nullptr);
+}
+
+bool Value::hasNUses(unsigned N) const {
+ const_use_iterator UI = use_begin(), E = use_end();
+
+ for (; N; --N, ++UI)
+ if (UI == E) return false; // Too few.
+ return UI == E;
+}
+
+bool Value::hasNUsesOrMore(unsigned N) const {
+ const_use_iterator UI = use_begin(), E = use_end();
+
+ for (; N; --N, ++UI)
+ if (UI == E) return false; // Too few.
+
+ return true;
+}
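+
+// Usage sketch: prefer these bounded checks over getNumUses() when only a
+// small count matters, since they stop after about N steps instead of
+// walking the entire use list. Assuming an existing Value *V:
+// \code
+//   if (V->hasNUses(1))
+//     ...; // V has exactly one use
+//   if (V->hasNUsesOrMore(2))
+//     ...; // V has at least two uses
+// \endcode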
+
+bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
+ // This can be computed either by scanning the instructions in BB, or by
+ // scanning the use list of this Value. Both lists can be very long, but
+ // usually one is quite short.
+ //
+ // Scan both lists simultaneously until one is exhausted. This limits the
+ // search to the shorter list.
+ BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
+ const_user_iterator UI = user_begin(), UE = user_end();
+ for (; BI != BE && UI != UE; ++BI, ++UI) {
+ // Scan basic block: Check if this Value is used by the instruction at BI.
+ if (std::find(BI->op_begin(), BI->op_end(), this) != BI->op_end())
+ return true;
+ // Scan use list: Check if the use at UI is in BB.
+ const Instruction *User = dyn_cast<Instruction>(*UI);
+ if (User && User->getParent() == BB)
+ return true;
+ }
+ return false;
+}
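+
+// Usage sketch: thanks to the simultaneous scan above, this stays cheap as
+// long as either the block or the use list is short. Assuming an existing
+// Instruction *I:
+// \code
+//   if (I->isUsedInBasicBlock(I->getParent()))
+//     ...; // some user of I lives in I's own block
+// \endcode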
+
+unsigned Value::getNumUses() const {
+ return (unsigned)std::distance(use_begin(), use_end());
+}
+
+static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
+ ST = nullptr;
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (BasicBlock *P = I->getParent())
+ if (Function *PP = P->getParent())
+ ST = &PP->getValueSymbolTable();
+ } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
+ if (Function *P = BB->getParent())
+ ST = &P->getValueSymbolTable();
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (Module *P = GV->getParent())
+ ST = &P->getValueSymbolTable();
+ } else if (Argument *A = dyn_cast<Argument>(V)) {
+ if (Function *P = A->getParent())
+ ST = &P->getValueSymbolTable();
+ } else {
+ assert(isa<Constant>(V) && "Unknown value type!");
+ return true; // no name is settable for this.
+ }
+ return false;
+}
+
+ValueName *Value::getValueName() const {
+ if (!HasName) return nullptr;
+
+ LLVMContext &Ctx = getContext();
+ auto I = Ctx.pImpl->ValueNames.find(this);
+ assert(I != Ctx.pImpl->ValueNames.end() &&
+ "No name entry found!");
+
+ return I->second;
+}
+
+void Value::setValueName(ValueName *VN) {
+ LLVMContext &Ctx = getContext();
+
+ assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
+ "HasName bit out of sync!");
+
+ if (!VN) {
+ if (HasName)
+ Ctx.pImpl->ValueNames.erase(this);
+ HasName = false;
+ return;
+ }
+
+ HasName = true;
+ Ctx.pImpl->ValueNames[this] = VN;
+}
+
+StringRef Value::getName() const {
+ // Make sure the empty string is still a C string. For historical reasons,
+ // some clients want to call .data() on the result and expect it to be null
+ // terminated.
+ if (!hasName())
+ return StringRef("", 0);
+ return getValueName()->getKey();
+}
+
+void Value::setNameImpl(const Twine &NewName) {
+ // Fast path for common IRBuilder case of setName("") when there is no name.
+ if (NewName.isTriviallyEmpty() && !hasName())
+ return;
+
+ SmallString<256> NameData;
+ StringRef NameRef = NewName.toStringRef(NameData);
+ assert(NameRef.find_first_of(0) == StringRef::npos &&
+ "Null bytes are not allowed in names");
+
+ // Name isn't changing?
+ if (getName() == NameRef)
+ return;
+
+ assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");
+
+ // Get the symbol table to update for this object.
+ ValueSymbolTable *ST;
+ if (getSymTab(this, ST))
+ return; // Cannot set a name on this value (e.g. constant).
+
+ if (!ST) { // No symbol table to update? Just do the change.
+ if (NameRef.empty()) {
+ // Free the name for this value.
+ destroyValueName();
+ return;
+ }
+
+ // NOTE: Could optimize the shrinking-name case to avoid deallocating and
+ // then reallocating.
+ destroyValueName();
+
+ // Create the new name.
+ setValueName(ValueName::Create(NameRef));
+ getValueName()->setValue(this);
+ return;
+ }
+
+ // NOTE: Could optimize the shrinking-name case to avoid deallocating and
+ // then reallocating.
+ if (hasName()) {
+ // Remove old name.
+ ST->removeValueName(getValueName());
+ destroyValueName();
+
+ if (NameRef.empty())
+ return;
+ }
+
+ // Name is changing to something new.
+ setValueName(ST->createValueName(NameRef, this));
+}
+
+void Value::setName(const Twine &NewName) {
+ setNameImpl(NewName);
+ if (Function *F = dyn_cast<Function>(this))
+ F->recalculateIntrinsicID();
+}
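+
+// Usage sketch: setName() takes any Twine and relies on the symbol table to
+// auto-rename on collision, so naming two values identically is safe.
+// Assuming Instructions *A and *B in the same function:
+// \code
+//   A->setName("tmp");
+//   B->setName("tmp"); // uniqued by the ValueSymbolTable, e.g. to "tmp1"
+// \endcode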
+
+void Value::takeName(Value *V) {
+ ValueSymbolTable *ST = nullptr;
+ // If this value has a name, drop it.
+ if (hasName()) {
+ // Get the symtab this is in.
+ if (getSymTab(this, ST)) {
+ // We can't set a name on this value, but we need to clear V's name if
+ // it has one.
+ if (V->hasName()) V->setName("");
+ return; // Cannot set a name on this value (e.g. constant).
+ }
+
+ // Remove old name.
+ if (ST)
+ ST->removeValueName(getValueName());
+ destroyValueName();
+ }
+
+ // Now we know that this has no name.
+
+ // If V has no name either, we're done.
+ if (!V->hasName()) return;
+
+ // Get this value's symtab if we didn't look it up above.
+ if (!ST) {
+ if (getSymTab(this, ST)) {
+ // Clear V's name.
+ V->setName("");
+ return; // Cannot set a name on this value (e.g. constant).
+ }
+ }
+
+ // Get V's symtab; this should always succeed because V has a name.
+ ValueSymbolTable *VST;
+ bool Failure = getSymTab(V, VST);
+ assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;
+
+ // If these values are both in the same symtab, we can do this very fast.
+ // This works even if both values have no symtab yet.
+ if (ST == VST) {
+ // Take the name!
+ setValueName(V->getValueName());
+ V->setValueName(nullptr);
+ getValueName()->setValue(this);
+ return;
+ }
+
+ // Otherwise, things are slightly more complex. Remove V's name from VST and
+ // then reinsert it into ST.
+
+ if (VST)
+ VST->removeValueName(V->getValueName());
+ setValueName(V->getValueName());
+ V->setValueName(nullptr);
+ getValueName()->setValue(this);
+
+ if (ST)
+ ST->reinsertValue(this);
+}
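+
+// Usage sketch: the common replacement idiom steals the old value's name
+// before erasing it, keeping the IR readable without a symbol-table clash.
+// Assuming Instructions *NewI and *OldI of the same type:
+// \code
+//   NewI->takeName(OldI);
+//   OldI->replaceAllUsesWith(NewI);
+//   OldI->eraseFromParent();
+// \endcode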
+
+#ifndef NDEBUG
+void Value::assertModuleIsMaterialized() const {
+ const GlobalValue *GV = dyn_cast<GlobalValue>(this);
+ if (!GV)
+ return;
+ const Module *M = GV->getParent();
+ if (!M)
+ return;
+ assert(M->isMaterialized());
+}
+
+static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
+ Constant *C) {
+ if (!Cache.insert(Expr).second)
+ return false;
+
+ for (auto &O : Expr->operands()) {
+ if (O == C)
+ return true;
+ auto *CE = dyn_cast<ConstantExpr>(O);
+ if (!CE)
+ continue;
+ if (contains(Cache, CE, C))
+ return true;
+ }
+ return false;
+}
+
+static bool contains(Value *Expr, Value *V) {
+ if (Expr == V)
+ return true;
+
+ auto *C = dyn_cast<Constant>(V);
+ if (!C)
+ return false;
+
+ auto *CE = dyn_cast<ConstantExpr>(Expr);
+ if (!CE)
+ return false;
+
+ SmallPtrSet<ConstantExpr *, 4> Cache;
+ return contains(Cache, CE, C);
+}
+#endif
+
+void Value::replaceAllUsesWith(Value *New) {
+ assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
+ assert(!contains(New, this) &&
+ "this->replaceAllUsesWith(expr(this)) is NOT valid!");
+ assert(New->getType() == getType() &&
+ "replaceAllUses of value with new value of different type!");
+
+ // Notify all ValueHandles (if present) that this value is going away.
+ if (HasValueHandle)
+ ValueHandleBase::ValueIsRAUWd(this, New);
+ if (isUsedByMetadata())
+ ValueAsMetadata::handleRAUW(this, New);
+
+ while (!use_empty()) {
+ Use &U = *UseList;
+ // Must handle Constants specially, we cannot call replaceUsesOfWith on a
+ // constant because they are uniqued.
+ if (auto *C = dyn_cast<Constant>(U.getUser())) {
+ if (!isa<GlobalValue>(C)) {
+ C->handleOperandChange(this, New, &U);
+ continue;
+ }
+ }
+
+ U.set(New);
+ }
+
+ if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
+ BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
+}
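+
+// Worked example: because constants are uniqued, a use inside a non-global
+// Constant cannot be rewritten in place. If @old is RAUW'd with @new, a
+// constant user such as
+//   getelementptr inbounds (i8, i8* @old, i64 4)
+// is rebuilt by handleOperandChange() as a fresh uniqued expression over
+// @new; that is why the loop above special-cases non-GlobalValue Constants.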
+
+// Like replaceAllUsesWith except it does not handle constants or basic blocks.
+// This routine leaves uses within BB.
+void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
+ assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
+ assert(!contains(New, this) &&
+ "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
+ assert(New->getType() == getType() &&
+ "replaceUses of value with new value of different type!");
+ assert(BB && "Basic block that may contain a use of 'New' must be defined\n");
+
+ use_iterator UI = use_begin(), E = use_end();
+ for (; UI != E;) {
+ Use &U = *UI;
+ ++UI;
+ auto *Usr = dyn_cast<Instruction>(U.getUser());
+ if (Usr && Usr->getParent() == BB)
+ continue;
+ U.set(New);
+ }
+ return;
+}
+
+namespace {
+// Various metrics for how much to strip off of pointers.
+enum PointerStripKind {
+ PSK_ZeroIndices,
+ PSK_ZeroIndicesAndAliases,
+ PSK_InBoundsConstantIndices,
+ PSK_InBounds
+};
+
+template <PointerStripKind StripKind>
+static Value *stripPointerCastsAndOffsets(Value *V) {
+ if (!V->getType()->isPointerTy())
+ return V;
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<Value *, 4> Visited;
+
+ Visited.insert(V);
+ do {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ switch (StripKind) {
+ case PSK_ZeroIndicesAndAliases:
+ case PSK_ZeroIndices:
+ if (!GEP->hasAllZeroIndices())
+ return V;
+ break;
+ case PSK_InBoundsConstantIndices:
+ if (!GEP->hasAllConstantIndices())
+ return V;
+ // fallthrough
+ case PSK_InBounds:
+ if (!GEP->isInBounds())
+ return V;
+ break;
+ }
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast ||
+ Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (StripKind == PSK_ZeroIndices || GA->mayBeOverridden())
+ return V;
+ V = GA->getAliasee();
+ } else {
+ return V;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ } while (Visited.insert(V).second);
+
+ return V;
+}
+} // namespace
+
+Value *Value::stripPointerCasts() {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
+}
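+
+// Worked example: given
+//   %p = bitcast [4 x i8]* @buf to i8*
+// calling stripPointerCasts() on %p returns @buf, and an all-zero GEP of
+// @buf is stripped the same way. An offsetting GEP such as
+//   %q = getelementptr i8, i8* %p, i64 1
+// is not stripped, because its indices are not all zero.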
+
+Value *Value::stripPointerCastsNoFollowAliases() {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
+}
+
+Value *Value::stripInBoundsConstantOffsets() {
+ return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
+}
+
+Value *Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
+ APInt &Offset) {
+ if (!getType()->isPointerTy())
+ return this;
+
+ assert(Offset.getBitWidth() == DL.getPointerSizeInBits(cast<PointerType>(
+ getType())->getAddressSpace()) &&
+ "The offset must have exactly as many bits as our pointer.");
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<Value *, 4> Visited;
+ Visited.insert(this);
+ Value *V = this;
+ do {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (!GEP->isInBounds())
+ return V;
+ APInt GEPOffset(Offset);
+ if (!GEP->accumulateConstantOffset(DL, GEPOffset))
+ return V;
+ Offset = GEPOffset;
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ V = GA->getAliasee();
+ } else {
+ return V;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ } while (Visited.insert(V).second);
+
+ return V;
+}
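+
+// Usage sketch, assuming a DataLayout &DL and a pointer-typed Value *V; note
+// that the APInt must be pre-sized to the pointer width, per the assert
+// above:
+// \code
+//   APInt Offset(DL.getPointerTypeSizeInBits(V->getType()), 0);
+//   Value *Base = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+//   // Base plus Offset bytes re-derives V whenever only inbounds constant
+//   // GEPs, bitcasts, and aliases were looked through.
+// \endcode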
+
+Value *Value::stripInBoundsOffsets() {
+ return stripPointerCastsAndOffsets<PSK_InBounds>(this);
+}
+
+Value *Value::DoPHITranslation(const BasicBlock *CurBB,
+ const BasicBlock *PredBB) {
+ PHINode *PN = dyn_cast<PHINode>(this);
+ if (PN && PN->getParent() == CurBB)
+ return PN->getIncomingValueForBlock(PredBB);
+ return this;
+}
+
+LLVMContext &Value::getContext() const { return VTy->getContext(); }
+
+void Value::reverseUseList() {
+ if (!UseList || !UseList->Next)
+ // No need to reverse 0 or 1 uses.
+ return;
+
+ Use *Head = UseList;
+ Use *Current = UseList->Next;
+ Head->Next = nullptr;
+ while (Current) {
+ Use *Next = Current->Next;
+ Current->Next = Head;
+ Head->setPrev(&Current->Next);
+ Head = Current;
+ Current = Next;
+ }
+ UseList = Head;
+ Head->setPrev(&UseList);
+}
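+
+// Worked trace: a use list (U0 -> U1 -> U2) is reversed by detaching U0 as
+// the initial head and then pushing each following node in front, yielding
+// (U2 -> U1 -> U0) with every prev pointer rewired to match. This is useful
+// when a deterministic use-list order is needed, e.g. for use-list-order
+// preservation in bitcode.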
+
+//===----------------------------------------------------------------------===//
+// ValueHandleBase Class
+//===----------------------------------------------------------------------===//
+
+void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
+ assert(List && "Handle list is null?");
+
+ // Splice ourselves into the list.
+ Next = *List;
+ *List = this;
+ setPrevPtr(List);
+ if (Next) {
+ Next->setPrevPtr(&Next);
+ assert(V == Next->V && "Added to wrong list?");
+ }
+}
+
+void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
+ assert(List && "Must insert after existing node");
+
+ Next = List->Next;
+ setPrevPtr(&List->Next);
+ List->Next = this;
+ if (Next)
+ Next->setPrevPtr(&Next);
+}
+
+void ValueHandleBase::AddToUseList() {
+ assert(V && "Null pointer doesn't have a use list!");
+
+ LLVMContextImpl *pImpl = V->getContext().pImpl;
+
+ if (V->HasValueHandle) {
+ // If this value already has a ValueHandle, then it must be in the
+ // ValueHandles map already.
+ ValueHandleBase *&Entry = pImpl->ValueHandles[V];
+ assert(Entry && "Value doesn't have any handles?");
+ AddToExistingUseList(&Entry);
+ return;
+ }
+
+ // Ok, it doesn't have any handles yet, so we must insert it into the
+ // DenseMap. However, doing this insertion could cause the DenseMap to
+ // reallocate itself, which would invalidate all of the PrevP pointers that
+ // point into the old table. Handle this by checking for reallocation and
+ // updating the stale pointers only if needed.
+ DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
+ const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();
+
+ ValueHandleBase *&Entry = Handles[V];
+ assert(!Entry && "Value really did already have handles?");
+ AddToExistingUseList(&Entry);
+ V->HasValueHandle = true;
+
+ // If reallocation didn't happen or if this was the first insertion, don't
+ // walk the table.
+ if (Handles.isPointerIntoBucketsArray(OldBucketPtr) ||
+ Handles.size() == 1) {
+ return;
+ }
+
+ // Okay, reallocation did happen. Fix the Prev Pointers.
+ for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
+ E = Handles.end(); I != E; ++I) {
+ assert(I->second && I->first == I->second->V &&
+ "List invariant broken!");
+ I->second->setPrevPtr(&I->second);
+ }
+}
+
+void ValueHandleBase::RemoveFromUseList() {
+ assert(V && V->HasValueHandle &&
+ "Pointer doesn't have a use list!");
+
+ // Unlink this from its use list.
+ ValueHandleBase **PrevPtr = getPrevPtr();
+ assert(*PrevPtr == this && "List invariant broken");
+
+ *PrevPtr = Next;
+ if (Next) {
+ assert(Next->getPrevPtr() == &Next && "List invariant broken");
+ Next->setPrevPtr(PrevPtr);
+ return;
+ }
+
+ // If the Next pointer was null, then it is possible that this was the last
+ // ValueHandle watching V. If so, delete its entry from the ValueHandles
+ // map.
+ LLVMContextImpl *pImpl = V->getContext().pImpl;
+ DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
+ if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
+ Handles.erase(V);
+ V->HasValueHandle = false;
+ }
+}
+
+
+void ValueHandleBase::ValueIsDeleted(Value *V) {
+ assert(V->HasValueHandle && "Should only be called if ValueHandles present");
+
+ // Get the linked list base, which is guaranteed to exist since the
+ // HasValueHandle flag is set.
+ LLVMContextImpl *pImpl = V->getContext().pImpl;
+ ValueHandleBase *Entry = pImpl->ValueHandles[V];
+ assert(Entry && "Value bit set but no entries exist");
+
+ // We use a local ValueHandleBase as an iterator so that ValueHandles can add
+ // and remove themselves from the list without breaking our iteration. This
+ // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
+ // Note that we deliberately do not support the case where dropping a value
+ // handle results in a new value handle being permanently added to the list
+ // (as might occur in theory for CallbackVH's): the new value handle will not
+ // be processed and the checking code will mete out righteous punishment if
+ // the handle is still present once we have finished processing all the other
+ // value handles (it is fine to momentarily add then remove a value handle).
+ for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
+ Iterator.RemoveFromUseList();
+ Iterator.AddToExistingUseListAfter(Entry);
+ assert(Entry->Next == &Iterator && "Loop invariant broken.");
+
+ switch (Entry->getKind()) {
+ case Assert:
+ break;
+ case Tracking:
+ // Mark that this value has been deleted by setting it to an invalid Value
+ // pointer.
+ Entry->operator=(DenseMapInfo<Value *>::getTombstoneKey());
+ break;
+ case Weak:
+ // Weak just goes to null, which will unlink it from the list.
+ Entry->operator=(nullptr);
+ break;
+ case Callback:
+ // Forward to the subclass's implementation.
+ static_cast<CallbackVH*>(Entry)->deleted();
+ break;
+ }
+ }
+
+ // All callbacks, weak references, and assertingVHs should be dropped by now.
+ if (V->HasValueHandle) {
+#ifndef NDEBUG // Only in +Asserts mode...
+ dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
+ << "\n";
+ if (pImpl->ValueHandles[V]->getKind() == Assert)
+ llvm_unreachable("An asserting value handle still pointed to this"
+ " value!");
+
+#endif
+ llvm_unreachable("All references to V were not removed?");
+ }
+}
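+
+// Usage sketch: this is the mechanism that lets a WeakVH observe deletion
+// instead of dangling. Assuming an Instruction *I:
+// \code
+//   WeakVH WH(I);
+//   I->eraseFromParent(); // ValueIsDeleted() nulls out WH
+//   if (!WH)
+//     ...; // the value is gone
+// \endcode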
+
+
+void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
+ assert(Old->HasValueHandle && "Should only be called if ValueHandles present");
+ assert(Old != New && "Changing value into itself!");
+ assert(Old->getType() == New->getType() &&
+ "replaceAllUses of value with new value of different type!");
+
+ // Get the linked list base, which is guaranteed to exist since the
+ // HasValueHandle flag is set.
+ LLVMContextImpl *pImpl = Old->getContext().pImpl;
+ ValueHandleBase *Entry = pImpl->ValueHandles[Old];
+
+ assert(Entry && "Value bit set but no entries exist");
+
+ // We use a local ValueHandleBase as an iterator so that
+ // ValueHandles can add and remove themselves from the list without
+ // breaking our iteration. This is not really an AssertingVH; we
+ // just have to give ValueHandleBase some kind.
+ for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
+ Iterator.RemoveFromUseList();
+ Iterator.AddToExistingUseListAfter(Entry);
+ assert(Entry->Next == &Iterator && "Loop invariant broken.");
+
+ switch (Entry->getKind()) {
+ case Assert:
+ // Asserting handle does not follow RAUW implicitly.
+ break;
+ case Tracking:
+ // Tracking goes to new value like a WeakVH. Note that this may make it
+ // something incompatible with its templated type. We don't want to have a
+ // virtual (or inline) interface to handle this though, so instead we make
+ // the TrackingVH accessors guarantee that a client never sees this value.
+
+ // FALLTHROUGH
+ case Weak:
+ // Weak goes to the new value, which will unlink it from Old's list.
+ Entry->operator=(New);
+ break;
+ case Callback:
+ // Forward to the subclass's implementation.
+ static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
+ break;
+ }
+ }
+
+#ifndef NDEBUG
+ // If any new tracking or weak value handles were added while processing the
+ // list, then complain about it now.
+ if (Old->HasValueHandle)
+ for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
+ switch (Entry->getKind()) {
+ case Tracking:
+ case Weak:
+ dbgs() << "After RAUW from " << *Old->getType() << " %"
+ << Old->getName() << " to " << *New->getType() << " %"
+ << New->getName() << "\n";
+ llvm_unreachable("A tracking or weak value handle still pointed to the"
+ " old value!\n");
+ default:
+ break;
+ }
+#endif
+}
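+
+// Usage sketch: under this scheme a WeakVH follows RAUW while an AssertingVH
+// deliberately stays behind. Assuming values Old and New of the same type:
+// \code
+//   WeakVH WH(Old);
+//   Old->replaceAllUsesWith(New); // ValueIsRAUWd() retargets WH
+//   assert(WH == New);
+// \endcode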
+
+// Pin the vtable to this file.
+void CallbackVH::anchor() {}
diff --git a/contrib/llvm/lib/IR/ValueSymbolTable.cpp b/contrib/llvm/lib/IR/ValueSymbolTable.cpp
new file mode 100644
index 0000000..deb6e75
--- /dev/null
+++ b/contrib/llvm/lib/IR/ValueSymbolTable.cpp
@@ -0,0 +1,107 @@
+//===-- ValueSymbolTable.cpp - Implement the ValueSymbolTable class -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ValueSymbolTable class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "valuesymtab"
+
+// Class destructor
+ValueSymbolTable::~ValueSymbolTable() {
+#ifndef NDEBUG // Only do this in +Asserts builds...
+ for (iterator VI = vmap.begin(), VE = vmap.end(); VI != VE; ++VI)
+ dbgs() << "Value still in symbol table! Type = '"
+ << *VI->getValue()->getType() << "' Name = '"
+ << VI->getKeyData() << "'\n";
+ assert(vmap.empty() && "Values remain in symbol table!");
+#endif
+}
+
+ValueName *ValueSymbolTable::makeUniqueName(Value *V,
+ SmallString<256> &UniqueName) {
+ unsigned BaseSize = UniqueName.size();
+ while (1) {
+ // Trim any suffix off and append the next number.
+ UniqueName.resize(BaseSize);
+ raw_svector_ostream S(UniqueName);
+ if (isa<GlobalValue>(V))
+ S << ".";
+ S << ++LastUnique;
+
+ // Try to insert the vmap entry with this suffix.
+ auto IterBool = vmap.insert(std::make_pair(UniqueName, V));
+ if (IterBool.second)
+ return &*IterBool.first;
+ }
+}
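+
+// Worked example: a second local named "x" is retried as "x1", then "x2",
+// and so on (LastUnique is shared across the whole table, so the numbers are
+// not per-name), while a colliding GlobalValue "g" becomes "g.1" thanks to
+// the "." separator above.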
+
+// Insert a value into the symbol table with the specified name...
+//
+void ValueSymbolTable::reinsertValue(Value* V) {
+ assert(V->hasName() && "Can't insert nameless Value into symbol table");
+
+ // Try inserting the name, assuming it won't conflict.
+ if (vmap.insert(V->getValueName())) {
+ //DEBUG(dbgs() << " Inserted value: " << V->getValueName() << ": " << *V << "\n");
+ return;
+ }
+
+ // Otherwise, there is a naming conflict. Rename this value.
+ SmallString<256> UniqueName(V->getName().begin(), V->getName().end());
+
+ // The name is already in use; just free it so we can allocate a new name.
+ V->getValueName()->Destroy();
+
+ ValueName *VN = makeUniqueName(V, UniqueName);
+ V->setValueName(VN);
+}
+
+void ValueSymbolTable::removeValueName(ValueName *V) {
+ //DEBUG(dbgs() << " Removing Value: " << V->getKeyData() << "\n");
+ // Remove the value from the symbol table.
+ vmap.remove(V);
+}
+
+/// createValueName - This method attempts to create a value name and insert
+/// it into the symbol table with the specified name. If it conflicts, it
+/// auto-renames the name and returns that instead.
+ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
+ // In the common case, the name is not already in the symbol table.
+ auto IterBool = vmap.insert(std::make_pair(Name, V));
+ if (IterBool.second) {
+ //DEBUG(dbgs() << " Inserted value: " << Entry.getKeyData() << ": "
+ // << *V << "\n");
+ return &*IterBool.first;
+ }
+
+ // Otherwise, there is a naming conflict. Rename this value.
+ SmallString<256> UniqueName(Name.begin(), Name.end());
+ return makeUniqueName(V, UniqueName);
+}
+
+
+// dump - print out the symbol table
+//
+void ValueSymbolTable::dump() const {
+ //DEBUG(dbgs() << "ValueSymbolTable:\n");
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ //DEBUG(dbgs() << " '" << I->getKeyData() << "' = ");
+ I->getValue()->dump();
+ //DEBUG(dbgs() << "\n");
+ }
+}
diff --git a/contrib/llvm/lib/IR/ValueTypes.cpp b/contrib/llvm/lib/IR/ValueTypes.cpp
new file mode 100644
index 0000000..f293230
--- /dev/null
+++ b/contrib/llvm/lib/IR/ValueTypes.cpp
@@ -0,0 +1,316 @@
+//===----------- ValueTypes.cpp - Implementation of EVT methods -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements methods in the CodeGen/ValueTypes.h header.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+EVT EVT::changeExtendedTypeToInteger() const {
+ LLVMContext &Context = LLVMTy->getContext();
+ return getIntegerVT(Context, getSizeInBits());
+}
+
+EVT EVT::changeExtendedVectorElementTypeToInteger() const {
+ LLVMContext &Context = LLVMTy->getContext();
+ EVT IntTy = getIntegerVT(Context, getVectorElementType().getSizeInBits());
+ return getVectorVT(Context, IntTy, getVectorNumElements());
+}
+
+EVT EVT::getExtendedIntegerVT(LLVMContext &Context, unsigned BitWidth) {
+ EVT VT;
+ VT.LLVMTy = IntegerType::get(Context, BitWidth);
+ assert(VT.isExtended() && "Type is not extended!");
+ return VT;
+}
+
+EVT EVT::getExtendedVectorVT(LLVMContext &Context, EVT VT,
+ unsigned NumElements) {
+ EVT ResultVT;
+ ResultVT.LLVMTy = VectorType::get(VT.getTypeForEVT(Context), NumElements);
+ assert(ResultVT.isExtended() && "Type is not extended!");
+ return ResultVT;
+}
+
+bool EVT::isExtendedFloatingPoint() const {
+ assert(isExtended() && "Type is not extended!");
+ return LLVMTy->isFPOrFPVectorTy();
+}
+
+bool EVT::isExtendedInteger() const {
+ assert(isExtended() && "Type is not extended!");
+ return LLVMTy->isIntOrIntVectorTy();
+}
+
+bool EVT::isExtendedVector() const {
+ assert(isExtended() && "Type is not extended!");
+ return LLVMTy->isVectorTy();
+}
+
+bool EVT::isExtended16BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 16;
+}
+
+bool EVT::isExtended32BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 32;
+}
+
+bool EVT::isExtended64BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 64;
+}
+
+bool EVT::isExtended128BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 128;
+}
+
+bool EVT::isExtended256BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 256;
+}
+
+bool EVT::isExtended512BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 512;
+}
+
+bool EVT::isExtended1024BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 1024;
+}
+
+bool EVT::isExtended2048BitVector() const {
+ return isExtendedVector() && getExtendedSizeInBits() == 2048;
+}
+
+EVT EVT::getExtendedVectorElementType() const {
+ assert(isExtended() && "Type is not extended!");
+ return EVT::getEVT(cast<VectorType>(LLVMTy)->getElementType());
+}
+
+unsigned EVT::getExtendedVectorNumElements() const {
+ assert(isExtended() && "Type is not extended!");
+ return cast<VectorType>(LLVMTy)->getNumElements();
+}
+
+unsigned EVT::getExtendedSizeInBits() const {
+ assert(isExtended() && "Type is not extended!");
+ if (IntegerType *ITy = dyn_cast<IntegerType>(LLVMTy))
+ return ITy->getBitWidth();
+ if (VectorType *VTy = dyn_cast<VectorType>(LLVMTy))
+ return VTy->getBitWidth();
+ llvm_unreachable("Unrecognized extended type!");
+}
+
+/// getEVTString - This function returns the value type as a string, e.g. "i32".
+std::string EVT::getEVTString() const {
+ switch (V.SimpleTy) {
+ default:
+ if (isVector())
+ return "v" + utostr(getVectorNumElements()) +
+ getVectorElementType().getEVTString();
+ if (isInteger())
+ return "i" + utostr(getSizeInBits());
+ llvm_unreachable("Invalid EVT!");
+ case MVT::i1: return "i1";
+ case MVT::i8: return "i8";
+ case MVT::i16: return "i16";
+ case MVT::i32: return "i32";
+ case MVT::i64: return "i64";
+ case MVT::i128: return "i128";
+ case MVT::f16: return "f16";
+ case MVT::f32: return "f32";
+ case MVT::f64: return "f64";
+ case MVT::f80: return "f80";
+ case MVT::f128: return "f128";
+ case MVT::ppcf128: return "ppcf128";
+ case MVT::isVoid: return "isVoid";
+ case MVT::Other: return "ch";
+ case MVT::Glue: return "glue";
+ case MVT::x86mmx: return "x86mmx";
+ case MVT::v2i1: return "v2i1";
+ case MVT::v4i1: return "v4i1";
+ case MVT::v8i1: return "v8i1";
+ case MVT::v16i1: return "v16i1";
+ case MVT::v32i1: return "v32i1";
+ case MVT::v64i1: return "v64i1";
+ case MVT::v512i1: return "v512i1";
+ case MVT::v1024i1: return "v1024i1";
+ case MVT::v1i8: return "v1i8";
+ case MVT::v2i8: return "v2i8";
+ case MVT::v4i8: return "v4i8";
+ case MVT::v8i8: return "v8i8";
+ case MVT::v16i8: return "v16i8";
+ case MVT::v32i8: return "v32i8";
+ case MVT::v64i8: return "v64i8";
+ case MVT::v128i8: return "v128i8";
+ case MVT::v256i8: return "v256i8";
+ case MVT::v1i16: return "v1i16";
+ case MVT::v2i16: return "v2i16";
+ case MVT::v4i16: return "v4i16";
+ case MVT::v8i16: return "v8i16";
+ case MVT::v16i16: return "v16i16";
+ case MVT::v32i16: return "v32i16";
+ case MVT::v64i16: return "v64i16";
+ case MVT::v128i16: return "v128i16";
+ case MVT::v1i32: return "v1i32";
+ case MVT::v2i32: return "v2i32";
+ case MVT::v4i32: return "v4i32";
+ case MVT::v8i32: return "v8i32";
+ case MVT::v16i32: return "v16i32";
+ case MVT::v32i32: return "v32i32";
+ case MVT::v64i32: return "v64i32";
+ case MVT::v1i64: return "v1i64";
+ case MVT::v2i64: return "v2i64";
+ case MVT::v4i64: return "v4i64";
+ case MVT::v8i64: return "v8i64";
+ case MVT::v16i64: return "v16i64";
+ case MVT::v32i64: return "v32i64";
+ case MVT::v1i128: return "v1i128";
+ case MVT::v1f32: return "v1f32";
+ case MVT::v2f32: return "v2f32";
+ case MVT::v2f16: return "v2f16";
+ case MVT::v4f16: return "v4f16";
+ case MVT::v8f16: return "v8f16";
+ case MVT::v4f32: return "v4f32";
+ case MVT::v8f32: return "v8f32";
+ case MVT::v16f32: return "v16f32";
+ case MVT::v1f64: return "v1f64";
+ case MVT::v2f64: return "v2f64";
+ case MVT::v4f64: return "v4f64";
+ case MVT::v8f64: return "v8f64";
+ case MVT::Metadata:return "Metadata";
+ case MVT::Untyped: return "Untyped";
+ }
+}
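+
+// Usage sketch, assuming an LLVMContext &Ctx: extended types stringify
+// through the default case just like simple ones.
+// \code
+//   EVT::getEVT(Type::getInt32Ty(Ctx)).getEVTString(); // "i32"
+//   EVT::getIntegerVT(Ctx, 37).getEVTString();         // "i37" (extended)
+// \endcode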
+
+/// getTypeForEVT - This method returns an LLVM type corresponding to the
+/// specified EVT. For integer types, this returns the (signless) LLVM
+/// integer type of that width. Note that this will abort for types that
+/// cannot be represented.
+Type *EVT::getTypeForEVT(LLVMContext &Context) const {
+ switch (V.SimpleTy) {
+ default:
+ assert(isExtended() && "Type is not extended!");
+ return LLVMTy;
+ case MVT::isVoid: return Type::getVoidTy(Context);
+ case MVT::i1: return Type::getInt1Ty(Context);
+ case MVT::i8: return Type::getInt8Ty(Context);
+ case MVT::i16: return Type::getInt16Ty(Context);
+ case MVT::i32: return Type::getInt32Ty(Context);
+ case MVT::i64: return Type::getInt64Ty(Context);
+ case MVT::i128: return IntegerType::get(Context, 128);
+ case MVT::f16: return Type::getHalfTy(Context);
+ case MVT::f32: return Type::getFloatTy(Context);
+ case MVT::f64: return Type::getDoubleTy(Context);
+ case MVT::f80: return Type::getX86_FP80Ty(Context);
+ case MVT::f128: return Type::getFP128Ty(Context);
+ case MVT::ppcf128: return Type::getPPC_FP128Ty(Context);
+ case MVT::x86mmx: return Type::getX86_MMXTy(Context);
+ case MVT::v2i1: return VectorType::get(Type::getInt1Ty(Context), 2);
+ case MVT::v4i1: return VectorType::get(Type::getInt1Ty(Context), 4);
+ case MVT::v8i1: return VectorType::get(Type::getInt1Ty(Context), 8);
+ case MVT::v16i1: return VectorType::get(Type::getInt1Ty(Context), 16);
+ case MVT::v32i1: return VectorType::get(Type::getInt1Ty(Context), 32);
+ case MVT::v64i1: return VectorType::get(Type::getInt1Ty(Context), 64);
+ case MVT::v512i1: return VectorType::get(Type::getInt1Ty(Context), 512);
+ case MVT::v1024i1: return VectorType::get(Type::getInt1Ty(Context), 1024);
+ case MVT::v1i8: return VectorType::get(Type::getInt8Ty(Context), 1);
+ case MVT::v2i8: return VectorType::get(Type::getInt8Ty(Context), 2);
+ case MVT::v4i8: return VectorType::get(Type::getInt8Ty(Context), 4);
+ case MVT::v8i8: return VectorType::get(Type::getInt8Ty(Context), 8);
+ case MVT::v16i8: return VectorType::get(Type::getInt8Ty(Context), 16);
+ case MVT::v32i8: return VectorType::get(Type::getInt8Ty(Context), 32);
+ case MVT::v64i8: return VectorType::get(Type::getInt8Ty(Context), 64);
+ case MVT::v128i8: return VectorType::get(Type::getInt8Ty(Context), 128);
+ case MVT::v256i8: return VectorType::get(Type::getInt8Ty(Context), 256);
+ case MVT::v1i16: return VectorType::get(Type::getInt16Ty(Context), 1);
+ case MVT::v2i16: return VectorType::get(Type::getInt16Ty(Context), 2);
+ case MVT::v4i16: return VectorType::get(Type::getInt16Ty(Context), 4);
+ case MVT::v8i16: return VectorType::get(Type::getInt16Ty(Context), 8);
+ case MVT::v16i16: return VectorType::get(Type::getInt16Ty(Context), 16);
+ case MVT::v32i16: return VectorType::get(Type::getInt16Ty(Context), 32);
+ case MVT::v64i16: return VectorType::get(Type::getInt16Ty(Context), 64);
+ case MVT::v128i16: return VectorType::get(Type::getInt16Ty(Context), 128);
+ case MVT::v1i32: return VectorType::get(Type::getInt32Ty(Context), 1);
+ case MVT::v2i32: return VectorType::get(Type::getInt32Ty(Context), 2);
+ case MVT::v4i32: return VectorType::get(Type::getInt32Ty(Context), 4);
+ case MVT::v8i32: return VectorType::get(Type::getInt32Ty(Context), 8);
+ case MVT::v16i32: return VectorType::get(Type::getInt32Ty(Context), 16);
+ case MVT::v32i32: return VectorType::get(Type::getInt32Ty(Context), 32);
+ case MVT::v64i32: return VectorType::get(Type::getInt32Ty(Context), 64);
+ case MVT::v1i64: return VectorType::get(Type::getInt64Ty(Context), 1);
+ case MVT::v2i64: return VectorType::get(Type::getInt64Ty(Context), 2);
+ case MVT::v4i64: return VectorType::get(Type::getInt64Ty(Context), 4);
+ case MVT::v8i64: return VectorType::get(Type::getInt64Ty(Context), 8);
+ case MVT::v16i64: return VectorType::get(Type::getInt64Ty(Context), 16);
+ case MVT::v32i64: return VectorType::get(Type::getInt64Ty(Context), 32);
+ case MVT::v1i128: return VectorType::get(Type::getInt128Ty(Context), 1);
+ case MVT::v2f16: return VectorType::get(Type::getHalfTy(Context), 2);
+ case MVT::v4f16: return VectorType::get(Type::getHalfTy(Context), 4);
+ case MVT::v8f16: return VectorType::get(Type::getHalfTy(Context), 8);
+ case MVT::v1f32: return VectorType::get(Type::getFloatTy(Context), 1);
+ case MVT::v2f32: return VectorType::get(Type::getFloatTy(Context), 2);
+ case MVT::v4f32: return VectorType::get(Type::getFloatTy(Context), 4);
+ case MVT::v8f32: return VectorType::get(Type::getFloatTy(Context), 8);
+ case MVT::v16f32: return VectorType::get(Type::getFloatTy(Context), 16);
+ case MVT::v1f64: return VectorType::get(Type::getDoubleTy(Context), 1);
+ case MVT::v2f64: return VectorType::get(Type::getDoubleTy(Context), 2);
+ case MVT::v4f64: return VectorType::get(Type::getDoubleTy(Context), 4);
+ case MVT::v8f64: return VectorType::get(Type::getDoubleTy(Context), 8);
+ case MVT::Metadata: return Type::getMetadataTy(Context);
+ }
+}
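+
+// Usage sketch, assuming an LLVMContext &Ctx: EVTs and IR types round-trip
+// through this method and getEVT().
+// \code
+//   Type *T = EVT(MVT::v4f32).getTypeForEVT(Ctx); // <4 x float>
+//   EVT Back = EVT::getEVT(T);                    // MVT::v4f32 again
+// \endcode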
+
+/// Return the value type corresponding to the specified type. This returns all
+/// pointers as MVT::iPTR. If HandleUnknown is true, unknown types are returned
+/// as Other, otherwise they are invalid.
+MVT MVT::getVT(Type *Ty, bool HandleUnknown) {
+ switch (Ty->getTypeID()) {
+ default:
+ if (HandleUnknown) return MVT(MVT::Other);
+ llvm_unreachable("Unknown type!");
+ case Type::VoidTyID:
+ return MVT::isVoid;
+ case Type::IntegerTyID:
+ return getIntegerVT(cast<IntegerType>(Ty)->getBitWidth());
+ case Type::HalfTyID: return MVT(MVT::f16);
+ case Type::FloatTyID: return MVT(MVT::f32);
+ case Type::DoubleTyID: return MVT(MVT::f64);
+ case Type::X86_FP80TyID: return MVT(MVT::f80);
+ case Type::X86_MMXTyID: return MVT(MVT::x86mmx);
+ case Type::FP128TyID: return MVT(MVT::f128);
+ case Type::PPC_FP128TyID: return MVT(MVT::ppcf128);
+ case Type::PointerTyID: return MVT(MVT::iPTR);
+ case Type::VectorTyID: {
+ VectorType *VTy = cast<VectorType>(Ty);
+ return getVectorVT(
+ getVT(VTy->getElementType(), false), VTy->getNumElements());
+ }
+ }
+}
+
+/// getEVT - Return the value type corresponding to the specified type. This
+/// returns all pointers as MVT::iPTR. If HandleUnknown is true, unknown types
+/// are returned as Other, otherwise they are invalid.
+EVT EVT::getEVT(Type *Ty, bool HandleUnknown) {
+ switch (Ty->getTypeID()) {
+ default:
+ return MVT::getVT(Ty, HandleUnknown);
+ case Type::IntegerTyID:
+ return getIntegerVT(Ty->getContext(), cast<IntegerType>(Ty)->getBitWidth());
+ case Type::VectorTyID: {
+ VectorType *VTy = cast<VectorType>(Ty);
+ return getVectorVT(Ty->getContext(), getEVT(VTy->getElementType(), false),
+ VTy->getNumElements());
+ }
+ }
+}
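+
+// Note the division of labor: MVT::getVT() can only produce simple types (an
+// odd width such as i37 comes back as an invalid MVT rather than an extended
+// type), whereas getEVT() falls back to an extended EVT wrapping the IR
+// type, which is why the integer and vector cases are re-dispatched here
+// instead of being delegated to MVT::getVT().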
diff --git a/contrib/llvm/lib/IR/Verifier.cpp b/contrib/llvm/lib/IR/Verifier.cpp
new file mode 100644
index 0000000..9198b0e
--- /dev/null
+++ b/contrib/llvm/lib/IR/Verifier.cpp
@@ -0,0 +1,4268 @@
+//===-- Verifier.cpp - Implement the Module Verifier ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function verifier interface, which can be used for
+// some sanity checking of input to the system.
+//
+// Note that this does not provide full `Java style' security and verification;
+// instead it just tries to ensure that code is well-formed.
+//
+// * Both of a binary operator's parameters are of the same type
+// * Verify that the indices of mem access instructions match other operands
+// * Verify that arithmetic and other things are only performed on first-class
+// types. Verify that shifts & logicals only happen on integrals, for example.
+// * All of the constants in a switch statement are of the correct type
+// * The code is in valid SSA form
+// * It should be illegal to put a label into any other type (like a structure)
+// or to return one. [except constant arrays!]
+// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
+// * PHI nodes must have an entry for each predecessor, with no extras.
+// * PHI nodes must be the first thing in a basic block, all grouped together
+// * PHI nodes must have at least one entry
+// * All basic blocks should only end with terminator insts, not contain them
+// * The entry node to a function must not have predecessors
+// * All Instructions must be embedded into a basic block
+// * Functions cannot take a void-typed parameter
+// * Verify that a function's argument list agrees with its declared type.
+// * It is illegal to specify a name for a void value.
+// * It is illegal to have an internal global value with no initializer
+// * It is illegal to have a ret instruction that returns a value that does not
+// agree with the function return value type.
+// * Function call argument types match the function prototype
+// * A landing pad is defined by a landingpad instruction, and can be jumped to
+// only by the unwind edge of an invoke instruction.
+// * A landingpad instruction must be the first non-PHI instruction in the
+// block.
+// * Landingpad instructions must be in a function with a personality function.
+// * All other things that are tested by asserts spread about the code...
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Verifier.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdarg>
+using namespace llvm;
+
+static cl::opt<bool> VerifyDebugInfo("verify-debug-info", cl::init(true));
+
+namespace {
+struct VerifierSupport {
+ raw_ostream &OS;
+ const Module *M;
+
+ /// \brief Track the brokenness of the module while recursively visiting.
+ bool Broken;
+
+ explicit VerifierSupport(raw_ostream &OS)
+ : OS(OS), M(nullptr), Broken(false) {}
+
+private:
+ template <class NodeTy> void Write(const ilist_iterator<NodeTy> &I) {
+ Write(&*I);
+ }
+
+ void Write(const Module *M) {
+ if (!M)
+ return;
+ OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
+ }
+
+ void Write(const Value *V) {
+ if (!V)
+ return;
+ if (isa<Instruction>(V)) {
+ OS << *V << '\n';
+ } else {
+ V->printAsOperand(OS, true, M);
+ OS << '\n';
+ }
+ }
+ void Write(ImmutableCallSite CS) {
+ Write(CS.getInstruction());
+ }
+
+ void Write(const Metadata *MD) {
+ if (!MD)
+ return;
+ MD->print(OS, M);
+ OS << '\n';
+ }
+
+ template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
+ Write(MD.get());
+ }
+
+ void Write(const NamedMDNode *NMD) {
+ if (!NMD)
+ return;
+ NMD->print(OS);
+ OS << '\n';
+ }
+
+ void Write(Type *T) {
+ if (!T)
+ return;
+ OS << ' ' << *T;
+ }
+
+ void Write(const Comdat *C) {
+ if (!C)
+ return;
+ OS << *C;
+ }
+
+ template <typename T> void Write(ArrayRef<T> Vs) {
+ for (const T &V : Vs)
+ Write(V);
+ }
+
+ template <typename T1, typename... Ts>
+ void WriteTs(const T1 &V1, const Ts &... Vs) {
+ Write(V1);
+ WriteTs(Vs...);
+ }
+
+ template <typename... Ts> void WriteTs() {}
+
+public:
+ /// \brief A check failed, so print out the condition and the message.
+ ///
+ /// This provides a nice place to put a breakpoint if you want to see why
+ /// something is not correct.
+ void CheckFailed(const Twine &Message) {
+ OS << Message << '\n';
+ Broken = true;
+ }
+
+ /// \brief A check failed (with values to print).
+ ///
+ /// This calls the Message-only version so that the above is easier to set a
+ /// breakpoint on.
+ template <typename T1, typename... Ts>
+ void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
+ CheckFailed(Message);
+ WriteTs(V1, Vs...);
+ }
+};
+
+class Verifier : public InstVisitor<Verifier>, VerifierSupport {
+ friend class InstVisitor<Verifier>;
+
+ LLVMContext *Context;
+ DominatorTree DT;
+
+ /// \brief When verifying a basic block, keep track of all of the
+ /// instructions we have seen so far.
+ ///
+ /// This allows us to do efficient dominance checks for the case when an
+ /// instruction has an operand that is an instruction in the same block.
+ SmallPtrSet<Instruction *, 16> InstsInThisBlock;
+
+ /// \brief Keep track of the metadata nodes that have been checked already.
+ SmallPtrSet<const Metadata *, 32> MDNodes;
+
+ /// \brief Track unresolved string-based type references.
+ SmallDenseMap<const MDString *, const MDNode *, 32> UnresolvedTypeRefs;
+
+ /// \brief The result type for a landingpad.
+ Type *LandingPadResultTy;
+
+ /// \brief Whether we've seen a call to @llvm.localescape in this function
+ /// already.
+ bool SawFrameEscape;
+
+ /// Stores the count of how many objects were passed to llvm.localescape for a
+ /// given function and the largest index passed to llvm.localrecover.
+ DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
+
+ // Maps catchswitches and cleanuppads that unwind to siblings to the
+ // terminators that indicate the unwind, used to detect cycles therein.
+ MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo;
+
+ /// Cache of constants visited in search of ConstantExprs.
+ SmallPtrSet<const Constant *, 32> ConstantExprVisited;
+
+ void checkAtomicMemAccessSize(const Module *M, Type *Ty,
+ const Instruction *I);
+public:
+ explicit Verifier(raw_ostream &OS)
+ : VerifierSupport(OS), Context(nullptr), LandingPadResultTy(nullptr),
+ SawFrameEscape(false) {}
+
+ bool verify(const Function &F) {
+ M = F.getParent();
+ Context = &M->getContext();
+
+ // First ensure the function is well-enough formed to compute dominance
+ // information.
+ if (F.empty()) {
+ OS << "Function '" << F.getName()
+ << "' does not contain an entry block!\n";
+ return false;
+ }
+ for (Function::const_iterator I = F.begin(), E = F.end(); I != E; ++I) {
+ if (I->empty() || !I->back().isTerminator()) {
+ OS << "Basic Block in function '" << F.getName()
+ << "' does not have terminator!\n";
+ I->printAsOperand(OS, true);
+ OS << "\n";
+ return false;
+ }
+ }
+
+ // Now directly compute a dominance tree. We don't rely on the pass
+ // manager to provide this: doing so would expose us to a potentially
+ // out-of-date dominator tree and would make it significantly more complex
+ // to run this code outside of a pass manager.
+ // FIXME: It's really gross that we have to cast away constness here.
+ DT.recalculate(const_cast<Function &>(F));
+
+ Broken = false;
+ // FIXME: We strip const here because the inst visitor strips const.
+ visit(const_cast<Function &>(F));
+ verifySiblingFuncletUnwinds();
+ InstsInThisBlock.clear();
+ LandingPadResultTy = nullptr;
+ SawFrameEscape = false;
+ SiblingFuncletInfo.clear();
+
+ return !Broken;
+ }
+
+ bool verify(const Module &M) {
+ this->M = &M;
+ Context = &M.getContext();
+ Broken = false;
+
+ // Scan through, checking all of the external functions' linkage now...
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ visitGlobalValue(*I);
+
+ // Check to make sure function prototypes are okay.
+ if (I->isDeclaration())
+ visitFunction(*I);
+ }
+
+ // Now that we've visited every function, verify that we never asked to
+ // recover a frame index that wasn't escaped.
+ verifyFrameRecoverIndices();
+
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I)
+ visitGlobalVariable(*I);
+
+ for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
+ I != E; ++I)
+ visitGlobalAlias(*I);
+
+ for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end();
+ I != E; ++I)
+ visitNamedMDNode(*I);
+
+ for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
+ visitComdat(SMEC.getValue());
+
+ visitModuleFlags(M);
+ visitModuleIdents(M);
+
+ // Verify type references last.
+ verifyTypeRefs();
+
+ return !Broken;
+ }
+
+private:
+ // Verification methods...
+ void visitGlobalValue(const GlobalValue &GV);
+ void visitGlobalVariable(const GlobalVariable &GV);
+ void visitGlobalAlias(const GlobalAlias &GA);
+ void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
+ void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
+ const GlobalAlias &A, const Constant &C);
+ void visitNamedMDNode(const NamedMDNode &NMD);
+ void visitMDNode(const MDNode &MD);
+ void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
+ void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
+ void visitComdat(const Comdat &C);
+ void visitModuleIdents(const Module &M);
+ void visitModuleFlags(const Module &M);
+ void visitModuleFlag(const MDNode *Op,
+ DenseMap<const MDString *, const MDNode *> &SeenIDs,
+ SmallVectorImpl<const MDNode *> &Requirements);
+ void visitFunction(const Function &F);
+ void visitBasicBlock(BasicBlock &BB);
+ void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
+ void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
+
+ template <class Ty> bool isValidMetadataArray(const MDTuple &N);
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
+#include "llvm/IR/Metadata.def"
+ void visitDIScope(const DIScope &N);
+ void visitDIVariable(const DIVariable &N);
+ void visitDILexicalBlockBase(const DILexicalBlockBase &N);
+ void visitDITemplateParameter(const DITemplateParameter &N);
+
+ void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
+
+ /// \brief Check for a valid string-based type reference.
+ ///
+ /// Checks if \c MD is a string-based type reference. If it is, keeps track
+ /// of it (and its user, \c N) for error messages later.
+ bool isValidUUID(const MDNode &N, const Metadata *MD);
+
+ /// \brief Check for a valid type reference.
+ ///
+ /// Checks for subclasses of \a DIType, or \a isValidUUID().
+ bool isTypeRef(const MDNode &N, const Metadata *MD);
+
+ /// \brief Check for a valid scope reference.
+ ///
+ /// Checks for subclasses of \a DIScope, or \a isValidUUID().
+ bool isScopeRef(const MDNode &N, const Metadata *MD);
+
+ /// \brief Check for a valid debug info reference.
+ ///
+ /// Checks for subclasses of \a DINode, or \a isValidUUID().
+ bool isDIRef(const MDNode &N, const Metadata *MD);
+
+ // InstVisitor overrides...
+ using InstVisitor<Verifier>::visit;
+ void visit(Instruction &I);
+
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
+ void visitPHINode(PHINode &PN);
+ void visitBinaryOperator(BinaryOperator &B);
+ void visitICmpInst(ICmpInst &IC);
+ void visitFCmpInst(FCmpInst &FC);
+ void visitExtractElementInst(ExtractElementInst &EI);
+ void visitInsertElementInst(InsertElementInst &EI);
+ void visitShuffleVectorInst(ShuffleVectorInst &EI);
+ void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
+ void visitCallInst(CallInst &CI);
+ void visitInvokeInst(InvokeInst &II);
+ void visitGetElementPtrInst(GetElementPtrInst &GEP);
+ void visitLoadInst(LoadInst &LI);
+ void visitStoreInst(StoreInst &SI);
+ void verifyDominatesUse(Instruction &I, unsigned i);
+ void visitInstruction(Instruction &I);
+ void visitTerminatorInst(TerminatorInst &I);
+ void visitBranchInst(BranchInst &BI);
+ void visitReturnInst(ReturnInst &RI);
+ void visitSwitchInst(SwitchInst &SI);
+ void visitIndirectBrInst(IndirectBrInst &BI);
+ void visitSelectInst(SelectInst &SI);
+ void visitUserOp1(Instruction &I);
+ void visitUserOp2(Instruction &I) { visitUserOp1(I); }
+ void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
+ template <class DbgIntrinsicTy>
+ void visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII);
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
+ void visitAtomicRMWInst(AtomicRMWInst &RMWI);
+ void visitFenceInst(FenceInst &FI);
+ void visitAllocaInst(AllocaInst &AI);
+ void visitExtractValueInst(ExtractValueInst &EVI);
+ void visitInsertValueInst(InsertValueInst &IVI);
+ void visitEHPadPredecessors(Instruction &I);
+ void visitLandingPadInst(LandingPadInst &LPI);
+ void visitCatchPadInst(CatchPadInst &CPI);
+ void visitCatchReturnInst(CatchReturnInst &CatchReturn);
+ void visitCleanupPadInst(CleanupPadInst &CPI);
+ void visitFuncletPadInst(FuncletPadInst &FPI);
+ void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
+ void visitCleanupReturnInst(CleanupReturnInst &CRI);
+
+ void VerifyCallSite(CallSite CS);
+ void verifyMustTailCall(CallInst &CI);
+ bool PerformTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
+ unsigned ArgNo, std::string &Suffix);
+ bool VerifyIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ SmallVectorImpl<Type *> &ArgTys);
+ bool VerifyIntrinsicIsVarArg(bool isVarArg,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos);
+ bool VerifyAttributeCount(AttributeSet Attrs, unsigned Params);
+ void VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx, bool isFunction,
+ const Value *V);
+ void VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
+ bool isReturnValue, const Value *V);
+ void VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
+ const Value *V);
+ void VerifyFunctionMetadata(
+ const SmallVector<std::pair<unsigned, MDNode *>, 4> MDs);
+
+ void visitConstantExprsRecursively(const Constant *EntryC);
+ void visitConstantExpr(const ConstantExpr *CE);
+ void VerifyStatepoint(ImmutableCallSite CS);
+ void verifyFrameRecoverIndices();
+ void verifySiblingFuncletUnwinds();
+
+ // Module-level debug info verification...
+ void verifyTypeRefs();
+ template <class MapTy>
+ void verifyBitPieceExpression(const DbgInfoIntrinsic &I,
+ const MapTy &TypeRefs);
+ void visitUnresolvedTypeRef(const MDString *S, const MDNode *N);
+};
+} // End anonymous namespace
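+
+// Usage sketch: clients normally reach the verifier through the free
+// functions declared in llvm/IR/Verifier.h rather than instantiating the
+// class above directly. Both return true when the IR is broken:
+// \code
+//   if (verifyModule(M, &errs()))
+//     report_fatal_error("module failed verification");
+//   if (verifyFunction(F, &errs()))
+//     ...; // F is malformed
+// \endcode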
+
+// Assert - We know that cond should be true; if not, print an error message.
+#define Assert(C, ...) \
+ do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (0)
+
+void Verifier::visit(Instruction &I) {
+ for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
+ Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
+ InstVisitor<Verifier>::visit(I);
+}
+
+
+void Verifier::visitGlobalValue(const GlobalValue &GV) {
+ Assert(!GV.isDeclaration() || GV.hasExternalLinkage() ||
+ GV.hasExternalWeakLinkage(),
+ "Global is external, but doesn't have external or weak linkage!", &GV);
+
+ Assert(GV.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &GV);
+ Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
+ "Only global variables can have appending linkage!", &GV);
+
+ if (GV.hasAppendingLinkage()) {
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
+ Assert(GVar && GVar->getValueType()->isArrayTy(),
+ "Only global arrays can have appending linkage!", GVar);
+ }
+
+ if (GV.isDeclarationForLinker())
+ Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
+}
+
+void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
+ if (GV.hasInitializer()) {
+ Assert(GV.getInitializer()->getType() == GV.getType()->getElementType(),
+ "Global variable initializer type does not match global "
+ "variable type!",
+ &GV);
+
+ // If the global has common linkage, it must have a zero initializer and
+ // cannot be constant.
+ if (GV.hasCommonLinkage()) {
+ Assert(GV.getInitializer()->isNullValue(),
+ "'common' global must have a zero initializer!", &GV);
+ Assert(!GV.isConstant(), "'common' global may not be marked constant!",
+ &GV);
+ Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
+ }
+ } else {
+ Assert(GV.hasExternalLinkage() || GV.hasExternalWeakLinkage(),
+ "invalid linkage type for global declaration", &GV);
+ }
+
+ if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
+ GV.getName() == "llvm.global_dtors")) {
+ Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
+ "invalid linkage for intrinsic global variable", &GV);
+ // Don't worry about emitting an error for it not being an array,
+ // visitGlobalValue will complain on appending non-array.
+ if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
+ StructType *STy = dyn_cast<StructType>(ATy->getElementType());
+ PointerType *FuncPtrTy =
+ FunctionType::get(Type::getVoidTy(*Context), false)->getPointerTo();
+ // FIXME: Reject the 2-field form in LLVM 4.0.
+ Assert(STy &&
+ (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
+ STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
+ STy->getTypeAtIndex(1) == FuncPtrTy,
+ "wrong type for intrinsic global variable", &GV);
+ if (STy->getNumElements() == 3) {
+ Type *ETy = STy->getTypeAtIndex(2);
+ Assert(ETy->isPointerTy() &&
+ cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
+ "wrong type for intrinsic global variable", &GV);
+ }
+ }
+ }
+
+ if (GV.hasName() && (GV.getName() == "llvm.used" ||
+ GV.getName() == "llvm.compiler.used")) {
+ Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
+ "invalid linkage for intrinsic global variable", &GV);
+ Type *GVType = GV.getValueType();
+ if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
+ PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
+ Assert(PTy, "wrong type for intrinsic global variable", &GV);
+ if (GV.hasInitializer()) {
+ const Constant *Init = GV.getInitializer();
+ const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
+        Assert(InitArray, "wrong initializer for intrinsic global variable",
+ Init);
+ for (unsigned i = 0, e = InitArray->getNumOperands(); i != e; ++i) {
+ Value *V = Init->getOperand(i)->stripPointerCastsNoFollowAliases();
+ Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
+ isa<GlobalAlias>(V),
+ "invalid llvm.used member", V);
+ Assert(V->hasName(), "members of llvm.used must be named", V);
+ }
+ }
+ }
+ }
+
+ Assert(!GV.hasDLLImportStorageClass() ||
+ (GV.isDeclaration() && GV.hasExternalLinkage()) ||
+ GV.hasAvailableExternallyLinkage(),
+ "Global is marked as dllimport, but not external", &GV);
+
+ if (!GV.hasInitializer()) {
+ visitGlobalValue(GV);
+ return;
+ }
+
+ // Walk any aggregate initializers looking for bitcasts between address spaces
+ visitConstantExprsRecursively(GV.getInitializer());
+
+ visitGlobalValue(GV);
+}
+
+void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
+ SmallPtrSet<const GlobalAlias*, 4> Visited;
+ Visited.insert(&GA);
+ visitAliaseeSubExpr(Visited, GA, C);
+}
+
+void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
+ const GlobalAlias &GA, const Constant &C) {
+ if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
+ Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
+ &GA);
+
+ if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
+ Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
+
+ Assert(!GA2->mayBeOverridden(), "Alias cannot point to a weak alias",
+ &GA);
+ } else {
+ // Only continue verifying subexpressions of GlobalAliases.
+ // Do not recurse into global initializers.
+ return;
+ }
+ }
+
+ if (const auto *CE = dyn_cast<ConstantExpr>(&C))
+ visitConstantExprsRecursively(CE);
+
+ for (const Use &U : C.operands()) {
+ Value *V = &*U;
+ if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
+ visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
+ else if (const auto *C2 = dyn_cast<Constant>(V))
+ visitAliaseeSubExpr(Visited, GA, *C2);
+ }
+}
+
+void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
+ Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
+ "Alias should have private, internal, linkonce, weak, linkonce_odr, "
+ "weak_odr, or external linkage!",
+ &GA);
+ const Constant *Aliasee = GA.getAliasee();
+ Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
+ Assert(GA.getType() == Aliasee->getType(),
+ "Alias and aliasee types should match!", &GA);
+
+ Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
+ "Aliasee should be either GlobalValue or ConstantExpr", &GA);
+
+ visitAliaseeSubExpr(GA, *Aliasee);
+
+ visitGlobalValue(GA);
+}
+
+void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i) {
+ MDNode *MD = NMD.getOperand(i);
+
+ if (NMD.getName() == "llvm.dbg.cu") {
+ Assert(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
+ }
+
+ if (!MD)
+ continue;
+
+ visitMDNode(*MD);
+ }
+}
+
+void Verifier::visitMDNode(const MDNode &MD) {
+ // Only visit each node once. Metadata can be mutually recursive, so this
+ // avoids infinite recursion here, as well as being an optimization.
+ if (!MDNodes.insert(&MD).second)
+ return;
+
+ switch (MD.getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid MDNode subclass");
+ case Metadata::MDTupleKind:
+ break;
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ visit##CLASS(cast<CLASS>(MD)); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+
+ for (unsigned i = 0, e = MD.getNumOperands(); i != e; ++i) {
+ Metadata *Op = MD.getOperand(i);
+ if (!Op)
+ continue;
+ Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
+ &MD, Op);
+ if (auto *N = dyn_cast<MDNode>(Op)) {
+ visitMDNode(*N);
+ continue;
+ }
+ if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
+ visitValueAsMetadata(*V, nullptr);
+ continue;
+ }
+ }
+
+ // Check these last, so we diagnose problems in operands first.
+ Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
+ Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
+}
+
+void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
+ Assert(MD.getValue(), "Expected valid value", &MD);
+ Assert(!MD.getValue()->getType()->isMetadataTy(),
+ "Unexpected metadata round-trip through values", &MD, MD.getValue());
+
+ auto *L = dyn_cast<LocalAsMetadata>(&MD);
+ if (!L)
+ return;
+
+ Assert(F, "function-local metadata used outside a function", L);
+
+ // If this was an instruction, bb, or argument, verify that it is in the
+ // function that we expect.
+ Function *ActualF = nullptr;
+ if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
+ Assert(I->getParent(), "function-local metadata not in basic block", L, I);
+ ActualF = I->getParent()->getParent();
+ } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
+ ActualF = BB->getParent();
+ else if (Argument *A = dyn_cast<Argument>(L->getValue()))
+ ActualF = A->getParent();
+ assert(ActualF && "Unimplemented function local metadata case!");
+
+ Assert(ActualF == F, "function-local metadata used in wrong function", L);
+}
+
+void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
+ Metadata *MD = MDV.getMetadata();
+ if (auto *N = dyn_cast<MDNode>(MD)) {
+ visitMDNode(*N);
+ return;
+ }
+
+ // Only visit each node once. Metadata can be mutually recursive, so this
+ // avoids infinite recursion here, as well as being an optimization.
+ if (!MDNodes.insert(MD).second)
+ return;
+
+ if (auto *V = dyn_cast<ValueAsMetadata>(MD))
+ visitValueAsMetadata(*V, F);
+}
+
+bool Verifier::isValidUUID(const MDNode &N, const Metadata *MD) {
+ auto *S = dyn_cast<MDString>(MD);
+ if (!S)
+ return false;
+ if (S->getString().empty())
+ return false;
+
+ // Keep track of names of types referenced via UUID so we can check that they
+ // actually exist.
+ UnresolvedTypeRefs.insert(std::make_pair(S, &N));
+ return true;
+}
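+
+// Illustration (assumed syntax): a UUID-style type reference appears as a
+// bare MDString where a type is expected, e.g.
+//   !0 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !"_ZTS3Foo")
+// verifyTypeRefs() later checks that some DICompositeType declares the
+// matching identifier "_ZTS3Foo".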
+
+/// \brief Check if a value can be a reference to a type.
+bool Verifier::isTypeRef(const MDNode &N, const Metadata *MD) {
+ return !MD || isValidUUID(N, MD) || isa<DIType>(MD);
+}
+
+/// \brief Check if a value can be a ScopeRef.
+bool Verifier::isScopeRef(const MDNode &N, const Metadata *MD) {
+ return !MD || isValidUUID(N, MD) || isa<DIScope>(MD);
+}
+
+/// \brief Check if a value can be a debug info ref.
+bool Verifier::isDIRef(const MDNode &N, const Metadata *MD) {
+ return !MD || isValidUUID(N, MD) || isa<DINode>(MD);
+}
+
+template <class Ty>
+bool isValidMetadataArrayImpl(const MDTuple &N, bool AllowNull) {
+ for (Metadata *MD : N.operands()) {
+ if (MD) {
+ if (!isa<Ty>(MD))
+ return false;
+ } else {
+ if (!AllowNull)
+ return false;
+ }
+ }
+ return true;
+}
+
+template <class Ty>
+bool isValidMetadataArray(const MDTuple &N) {
+ return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ false);
+}
+
+template <class Ty>
+bool isValidMetadataNullArray(const MDTuple &N) {
+ return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ true);
+}
+
+void Verifier::visitDILocation(const DILocation &N) {
+ Assert(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
+ "location requires a valid scope", &N, N.getRawScope());
+ if (auto *IA = N.getRawInlinedAt())
+ Assert(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
+}
+
+void Verifier::visitGenericDINode(const GenericDINode &N) {
+ Assert(N.getTag(), "invalid tag", &N);
+}
+
+void Verifier::visitDIScope(const DIScope &N) {
+ if (auto *F = N.getRawFile())
+ Assert(isa<DIFile>(F), "invalid file", &N, F);
+}
+
+void Verifier::visitDISubrange(const DISubrange &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
+ Assert(N.getCount() >= -1, "invalid subrange count", &N);
+}
+
+void Verifier::visitDIEnumerator(const DIEnumerator &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
+}
+
+void Verifier::visitDIBasicType(const DIBasicType &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_base_type ||
+ N.getTag() == dwarf::DW_TAG_unspecified_type,
+ "invalid tag", &N);
+}
+
+void Verifier::visitDIDerivedType(const DIDerivedType &N) {
+ // Common scope checks.
+ visitDIScope(N);
+
+ Assert(N.getTag() == dwarf::DW_TAG_typedef ||
+ N.getTag() == dwarf::DW_TAG_pointer_type ||
+ N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
+ N.getTag() == dwarf::DW_TAG_reference_type ||
+ N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
+ N.getTag() == dwarf::DW_TAG_const_type ||
+ N.getTag() == dwarf::DW_TAG_volatile_type ||
+ N.getTag() == dwarf::DW_TAG_restrict_type ||
+ N.getTag() == dwarf::DW_TAG_member ||
+ N.getTag() == dwarf::DW_TAG_inheritance ||
+ N.getTag() == dwarf::DW_TAG_friend,
+ "invalid tag", &N);
+ if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
+ Assert(isTypeRef(N, N.getExtraData()), "invalid pointer to member type", &N,
+ N.getExtraData());
+ }
+
+ Assert(isScopeRef(N, N.getScope()), "invalid scope", &N, N.getScope());
+ Assert(isTypeRef(N, N.getBaseType()), "invalid base type", &N,
+ N.getBaseType());
+}
+
+static bool hasConflictingReferenceFlags(unsigned Flags) {
+ return (Flags & DINode::FlagLValueReference) &&
+ (Flags & DINode::FlagRValueReference);
+}
+
+void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
+ auto *Params = dyn_cast<MDTuple>(&RawParams);
+ Assert(Params, "invalid template params", &N, &RawParams);
+ for (Metadata *Op : Params->operands()) {
+ Assert(Op && isa<DITemplateParameter>(Op), "invalid template parameter", &N,
+ Params, Op);
+ }
+}
+
+void Verifier::visitDICompositeType(const DICompositeType &N) {
+ // Common scope checks.
+ visitDIScope(N);
+
+ Assert(N.getTag() == dwarf::DW_TAG_array_type ||
+ N.getTag() == dwarf::DW_TAG_structure_type ||
+ N.getTag() == dwarf::DW_TAG_union_type ||
+ N.getTag() == dwarf::DW_TAG_enumeration_type ||
+ N.getTag() == dwarf::DW_TAG_class_type,
+ "invalid tag", &N);
+
+ Assert(isScopeRef(N, N.getScope()), "invalid scope", &N, N.getScope());
+ Assert(isTypeRef(N, N.getBaseType()), "invalid base type", &N,
+ N.getBaseType());
+
+ Assert(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
+ "invalid composite elements", &N, N.getRawElements());
+ Assert(isTypeRef(N, N.getRawVTableHolder()), "invalid vtable holder", &N,
+ N.getRawVTableHolder());
+ Assert(!hasConflictingReferenceFlags(N.getFlags()), "invalid reference flags",
+ &N);
+ if (auto *Params = N.getRawTemplateParams())
+ visitTemplateParams(N, *Params);
+
+ if (N.getTag() == dwarf::DW_TAG_class_type ||
+ N.getTag() == dwarf::DW_TAG_union_type) {
+ Assert(N.getFile() && !N.getFile()->getFilename().empty(),
+ "class/union requires a filename", &N, N.getFile());
+ }
+}
+
+void Verifier::visitDISubroutineType(const DISubroutineType &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
+ if (auto *Types = N.getRawTypeArray()) {
+ Assert(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
+ for (Metadata *Ty : N.getTypeArray()->operands()) {
+ Assert(isTypeRef(N, Ty), "invalid subroutine type ref", &N, Types, Ty);
+ }
+ }
+ Assert(!hasConflictingReferenceFlags(N.getFlags()), "invalid reference flags",
+ &N);
+}
+
+void Verifier::visitDIFile(const DIFile &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
+}
+
+void Verifier::visitDICompileUnit(const DICompileUnit &N) {
+ Assert(N.isDistinct(), "compile units must be distinct", &N);
+ Assert(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
+
+ // Don't bother verifying the compilation directory or producer string
+ // as those could be empty.
+ Assert(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
+ N.getRawFile());
+ Assert(!N.getFile()->getFilename().empty(), "invalid filename", &N,
+ N.getFile());
+
+ if (auto *Array = N.getRawEnumTypes()) {
+ Assert(isa<MDTuple>(Array), "invalid enum list", &N, Array);
+ for (Metadata *Op : N.getEnumTypes()->operands()) {
+ auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
+ Assert(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
+ "invalid enum type", &N, N.getEnumTypes(), Op);
+ }
+ }
+ if (auto *Array = N.getRawRetainedTypes()) {
+ Assert(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
+ for (Metadata *Op : N.getRetainedTypes()->operands()) {
+ Assert(Op && isa<DIType>(Op), "invalid retained type", &N, Op);
+ }
+ }
+ if (auto *Array = N.getRawSubprograms()) {
+ Assert(isa<MDTuple>(Array), "invalid subprogram list", &N, Array);
+ for (Metadata *Op : N.getSubprograms()->operands()) {
+ Assert(Op && isa<DISubprogram>(Op), "invalid subprogram ref", &N, Op);
+ }
+ }
+ if (auto *Array = N.getRawGlobalVariables()) {
+ Assert(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
+ for (Metadata *Op : N.getGlobalVariables()->operands()) {
+ Assert(Op && isa<DIGlobalVariable>(Op), "invalid global variable ref", &N,
+ Op);
+ }
+ }
+ if (auto *Array = N.getRawImportedEntities()) {
+ Assert(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
+ for (Metadata *Op : N.getImportedEntities()->operands()) {
+ Assert(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref", &N,
+ Op);
+ }
+ }
+ if (auto *Array = N.getRawMacros()) {
+ Assert(isa<MDTuple>(Array), "invalid macro list", &N, Array);
+ for (Metadata *Op : N.getMacros()->operands()) {
+ Assert(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
+ }
+ }
+}
+
+void Verifier::visitDISubprogram(const DISubprogram &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
+ Assert(isScopeRef(N, N.getRawScope()), "invalid scope", &N, N.getRawScope());
+ if (auto *T = N.getRawType())
+ Assert(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
+ Assert(isTypeRef(N, N.getRawContainingType()), "invalid containing type", &N,
+ N.getRawContainingType());
+ if (auto *Params = N.getRawTemplateParams())
+ visitTemplateParams(N, *Params);
+ if (auto *S = N.getRawDeclaration()) {
+ Assert(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
+ "invalid subprogram declaration", &N, S);
+ }
+ if (auto *RawVars = N.getRawVariables()) {
+ auto *Vars = dyn_cast<MDTuple>(RawVars);
+ Assert(Vars, "invalid variable list", &N, RawVars);
+ for (Metadata *Op : Vars->operands()) {
+ Assert(Op && isa<DILocalVariable>(Op), "invalid local variable", &N, Vars,
+ Op);
+ }
+ }
+ Assert(!hasConflictingReferenceFlags(N.getFlags()), "invalid reference flags",
+ &N);
+
+ if (N.isDefinition())
+ Assert(N.isDistinct(), "subprogram definitions must be distinct", &N);
+}
+
+void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
+ Assert(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
+ "invalid local scope", &N, N.getRawScope());
+}
+
+void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
+ visitDILexicalBlockBase(N);
+
+ Assert(N.getLine() || !N.getColumn(),
+ "cannot have column info without line info", &N);
+}
+
+void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
+ visitDILexicalBlockBase(N);
+}
+
+void Verifier::visitDINamespace(const DINamespace &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
+ if (auto *S = N.getRawScope())
+ Assert(isa<DIScope>(S), "invalid scope ref", &N, S);
+}
+
+void Verifier::visitDIMacro(const DIMacro &N) {
+ Assert(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
+ N.getMacinfoType() == dwarf::DW_MACINFO_undef,
+ "invalid macinfo type", &N);
+ Assert(!N.getName().empty(), "anonymous macro", &N);
+ if (!N.getValue().empty()) {
+ assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
+ }
+}
+
+void Verifier::visitDIMacroFile(const DIMacroFile &N) {
+ Assert(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
+ "invalid macinfo type", &N);
+ if (auto *F = N.getRawFile())
+ Assert(isa<DIFile>(F), "invalid file", &N, F);
+
+ if (auto *Array = N.getRawElements()) {
+ Assert(isa<MDTuple>(Array), "invalid macro list", &N, Array);
+ for (Metadata *Op : N.getElements()->operands()) {
+ Assert(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
+ }
+ }
+}
+
+void Verifier::visitDIModule(const DIModule &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
+ Assert(!N.getName().empty(), "anonymous module", &N);
+}
+
+void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
+ Assert(isTypeRef(N, N.getType()), "invalid type ref", &N, N.getType());
+}
+
+void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
+ visitDITemplateParameter(N);
+
+ Assert(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
+ &N);
+}
+
+void Verifier::visitDITemplateValueParameter(
+ const DITemplateValueParameter &N) {
+ visitDITemplateParameter(N);
+
+ Assert(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
+ N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
+ N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
+ "invalid tag", &N);
+}
+
+void Verifier::visitDIVariable(const DIVariable &N) {
+ if (auto *S = N.getRawScope())
+ Assert(isa<DIScope>(S), "invalid scope", &N, S);
+ Assert(isTypeRef(N, N.getRawType()), "invalid type ref", &N, N.getRawType());
+ if (auto *F = N.getRawFile())
+ Assert(isa<DIFile>(F), "invalid file", &N, F);
+}
+
+void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
+ // Checks common to all variables.
+ visitDIVariable(N);
+
+ Assert(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
+ Assert(!N.getName().empty(), "missing global variable name", &N);
+ if (auto *V = N.getRawVariable()) {
+ Assert(isa<ConstantAsMetadata>(V) &&
+ !isa<Function>(cast<ConstantAsMetadata>(V)->getValue()),
+ "invalid global varaible ref", &N, V);
+ }
+ if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
+ Assert(isa<DIDerivedType>(Member), "invalid static data member declaration",
+ &N, Member);
+ }
+}
+
+void Verifier::visitDILocalVariable(const DILocalVariable &N) {
+ // Checks common to all variables.
+ visitDIVariable(N);
+
+ Assert(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
+ Assert(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
+ "local variable requires a valid scope", &N, N.getRawScope());
+}
+
+void Verifier::visitDIExpression(const DIExpression &N) {
+ Assert(N.isValid(), "invalid expression", &N);
+}
+
+void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
+ if (auto *T = N.getRawType())
+ Assert(isTypeRef(N, T), "invalid type ref", &N, T);
+ if (auto *F = N.getRawFile())
+ Assert(isa<DIFile>(F), "invalid file", &N, F);
+}
+
+void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
+ Assert(N.getTag() == dwarf::DW_TAG_imported_module ||
+ N.getTag() == dwarf::DW_TAG_imported_declaration,
+ "invalid tag", &N);
+ if (auto *S = N.getRawScope())
+ Assert(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
+ Assert(isDIRef(N, N.getEntity()), "invalid imported entity", &N,
+ N.getEntity());
+}
+
+void Verifier::visitComdat(const Comdat &C) {
+ // The Module is invalid if the GlobalValue has private linkage. Entities
+ // with private linkage don't have entries in the symbol table.
+ if (const GlobalValue *GV = M->getNamedValue(C.getName()))
+ Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
+ GV);
+}
+
+void Verifier::visitModuleIdents(const Module &M) {
+ const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
+ if (!Idents)
+ return;
+
+  // llvm.ident takes a list of metadata entries. Each entry holds exactly one
+  // string. Scan each llvm.ident entry and make sure this requirement is met.
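+  // For example (illustrative version string), a valid module might carry:
+  //   !llvm.ident = !{!0}
+  //   !0 = !{!"clang version 3.8.0"}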
+ for (unsigned i = 0, e = Idents->getNumOperands(); i != e; ++i) {
+ const MDNode *N = Idents->getOperand(i);
+ Assert(N->getNumOperands() == 1,
+ "incorrect number of operands in llvm.ident metadata", N);
+ Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
+ ("invalid value for llvm.ident metadata entry operand"
+ "(the operand should be a string)"),
+ N->getOperand(0));
+ }
+}
+
+void Verifier::visitModuleFlags(const Module &M) {
+ const NamedMDNode *Flags = M.getModuleFlagsMetadata();
+ if (!Flags) return;
+
+ // Scan each flag, and track the flags and requirements.
+ DenseMap<const MDString*, const MDNode*> SeenIDs;
+ SmallVector<const MDNode*, 16> Requirements;
+ for (unsigned I = 0, E = Flags->getNumOperands(); I != E; ++I) {
+ visitModuleFlag(Flags->getOperand(I), SeenIDs, Requirements);
+ }
+
+ // Validate that the requirements in the module are valid.
+ for (unsigned I = 0, E = Requirements.size(); I != E; ++I) {
+ const MDNode *Requirement = Requirements[I];
+ const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
+ const Metadata *ReqValue = Requirement->getOperand(1);
+
+ const MDNode *Op = SeenIDs.lookup(Flag);
+ if (!Op) {
+ CheckFailed("invalid requirement on flag, flag is not present in module",
+ Flag);
+ continue;
+ }
+
+ if (Op->getOperand(2) != ReqValue) {
+ CheckFailed(("invalid requirement on flag, "
+ "flag does not have the required value"),
+ Flag);
+ continue;
+ }
+ }
+}
+
+void
+Verifier::visitModuleFlag(const MDNode *Op,
+ DenseMap<const MDString *, const MDNode *> &SeenIDs,
+ SmallVectorImpl<const MDNode *> &Requirements) {
+ // Each module flag should have three arguments, the merge behavior (a
+ // constant int), the flag ID (an MDString), and the value.
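+  // Illustrative well-formed flags (names and values assumed):
+  //   !0 = !{i32 1, !"wchar_size", i32 4}              ; Error(1) behavior
+  //   !1 = !{i32 3, !"req", !{!"wchar_size", i32 4}}   ; Require(3) behavior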
+ Assert(Op->getNumOperands() == 3,
+ "incorrect number of operands in module flag", Op);
+ Module::ModFlagBehavior MFB;
+ if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
+ Assert(
+ mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
+ "invalid behavior operand in module flag (expected constant integer)",
+ Op->getOperand(0));
+ Assert(false,
+ "invalid behavior operand in module flag (unexpected constant)",
+ Op->getOperand(0));
+ }
+ MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
+ Assert(ID, "invalid ID operand in module flag (expected metadata string)",
+ Op->getOperand(1));
+
+ // Sanity check the values for behaviors with additional requirements.
+ switch (MFB) {
+ case Module::Error:
+ case Module::Warning:
+ case Module::Override:
+ // These behavior types accept any value.
+ break;
+
+ case Module::Require: {
+ // The value should itself be an MDNode with two operands, a flag ID (an
+ // MDString), and a value.
+ MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
+ Assert(Value && Value->getNumOperands() == 2,
+ "invalid value for 'require' module flag (expected metadata pair)",
+ Op->getOperand(2));
+ Assert(isa<MDString>(Value->getOperand(0)),
+ ("invalid value for 'require' module flag "
+ "(first value operand should be a string)"),
+ Value->getOperand(0));
+
+ // Append it to the list of requirements, to check once all module flags are
+ // scanned.
+ Requirements.push_back(Value);
+ break;
+ }
+
+ case Module::Append:
+ case Module::AppendUnique: {
+ // These behavior types require the operand be an MDNode.
+ Assert(isa<MDNode>(Op->getOperand(2)),
+ "invalid value for 'append'-type module flag "
+ "(expected a metadata node)",
+ Op->getOperand(2));
+ break;
+ }
+ }
+
+  // Unless this is a "require" flag, check that the ID is unique.
+ if (MFB != Module::Require) {
+ bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
+ Assert(Inserted,
+ "module flag identifiers must be unique (or of 'require' type)", ID);
+ }
+}
+
+void Verifier::VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
+ bool isFunction, const Value *V) {
+ unsigned Slot = ~0U;
+ for (unsigned I = 0, E = Attrs.getNumSlots(); I != E; ++I)
+ if (Attrs.getSlotIndex(I) == Idx) {
+ Slot = I;
+ break;
+ }
+
+ assert(Slot != ~0U && "Attribute set inconsistency!");
+
+ for (AttributeSet::iterator I = Attrs.begin(Slot), E = Attrs.end(Slot);
+ I != E; ++I) {
+ if (I->isStringAttribute())
+ continue;
+
+ if (I->getKindAsEnum() == Attribute::NoReturn ||
+ I->getKindAsEnum() == Attribute::NoUnwind ||
+ I->getKindAsEnum() == Attribute::NoInline ||
+ I->getKindAsEnum() == Attribute::AlwaysInline ||
+ I->getKindAsEnum() == Attribute::OptimizeForSize ||
+ I->getKindAsEnum() == Attribute::StackProtect ||
+ I->getKindAsEnum() == Attribute::StackProtectReq ||
+ I->getKindAsEnum() == Attribute::StackProtectStrong ||
+ I->getKindAsEnum() == Attribute::SafeStack ||
+ I->getKindAsEnum() == Attribute::NoRedZone ||
+ I->getKindAsEnum() == Attribute::NoImplicitFloat ||
+ I->getKindAsEnum() == Attribute::Naked ||
+ I->getKindAsEnum() == Attribute::InlineHint ||
+ I->getKindAsEnum() == Attribute::StackAlignment ||
+ I->getKindAsEnum() == Attribute::UWTable ||
+ I->getKindAsEnum() == Attribute::NonLazyBind ||
+ I->getKindAsEnum() == Attribute::ReturnsTwice ||
+ I->getKindAsEnum() == Attribute::SanitizeAddress ||
+ I->getKindAsEnum() == Attribute::SanitizeThread ||
+ I->getKindAsEnum() == Attribute::SanitizeMemory ||
+ I->getKindAsEnum() == Attribute::MinSize ||
+ I->getKindAsEnum() == Attribute::NoDuplicate ||
+ I->getKindAsEnum() == Attribute::Builtin ||
+ I->getKindAsEnum() == Attribute::NoBuiltin ||
+ I->getKindAsEnum() == Attribute::Cold ||
+ I->getKindAsEnum() == Attribute::OptimizeNone ||
+ I->getKindAsEnum() == Attribute::JumpTable ||
+ I->getKindAsEnum() == Attribute::Convergent ||
+ I->getKindAsEnum() == Attribute::ArgMemOnly ||
+ I->getKindAsEnum() == Attribute::NoRecurse ||
+ I->getKindAsEnum() == Attribute::InaccessibleMemOnly ||
+ I->getKindAsEnum() == Attribute::InaccessibleMemOrArgMemOnly) {
+ if (!isFunction) {
+ CheckFailed("Attribute '" + I->getAsString() +
+ "' only applies to functions!", V);
+ return;
+ }
+ } else if (I->getKindAsEnum() == Attribute::ReadOnly ||
+ I->getKindAsEnum() == Attribute::ReadNone) {
+ if (Idx == 0) {
+ CheckFailed("Attribute '" + I->getAsString() +
+ "' does not apply to function returns");
+ return;
+ }
+ } else if (isFunction) {
+ CheckFailed("Attribute '" + I->getAsString() +
+ "' does not apply to functions!", V);
+ return;
+ }
+ }
+}
+
+// VerifyParameterAttrs - Check the given attributes for an argument or return
+// value of the specified type. The value V is printed in error messages.
+void Verifier::VerifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
+ bool isReturnValue, const Value *V) {
+ if (!Attrs.hasAttributes(Idx))
+ return;
+
+ VerifyAttributeTypes(Attrs, Idx, false, V);
+
+ if (isReturnValue)
+ Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
+ !Attrs.hasAttribute(Idx, Attribute::Nest) &&
+ !Attrs.hasAttribute(Idx, Attribute::StructRet) &&
+ !Attrs.hasAttribute(Idx, Attribute::NoCapture) &&
+ !Attrs.hasAttribute(Idx, Attribute::Returned) &&
+ !Attrs.hasAttribute(Idx, Attribute::InAlloca),
+ "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', and "
+ "'returned' do not apply to return values!",
+ V);
+
+ // Check for mutually incompatible attributes. Only inreg is compatible with
+ // sret.
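+  // E.g. (illustrative, hypothetical signatures): both
+  //   define void @f(i32* byval inalloca %p)
+  //   define void @g(i8* sret returned %p)
+  // would be rejected by the checks below.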
+ unsigned AttrCount = 0;
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::ByVal);
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::InAlloca);
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::StructRet) ||
+ Attrs.hasAttribute(Idx, Attribute::InReg);
+ AttrCount += Attrs.hasAttribute(Idx, Attribute::Nest);
+ Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
+ "and 'sret' are incompatible!",
+ V);
+
+ Assert(!(Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
+ Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
+ "Attributes "
+ "'inalloca and readonly' are incompatible!",
+ V);
+
+ Assert(!(Attrs.hasAttribute(Idx, Attribute::StructRet) &&
+ Attrs.hasAttribute(Idx, Attribute::Returned)),
+ "Attributes "
+ "'sret and returned' are incompatible!",
+ V);
+
+ Assert(!(Attrs.hasAttribute(Idx, Attribute::ZExt) &&
+ Attrs.hasAttribute(Idx, Attribute::SExt)),
+ "Attributes "
+ "'zeroext and signext' are incompatible!",
+ V);
+
+ Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) &&
+ Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
+ "Attributes "
+ "'readnone and readonly' are incompatible!",
+ V);
+
+ Assert(!(Attrs.hasAttribute(Idx, Attribute::NoInline) &&
+ Attrs.hasAttribute(Idx, Attribute::AlwaysInline)),
+ "Attributes "
+ "'noinline and alwaysinline' are incompatible!",
+ V);
+
+ Assert(!AttrBuilder(Attrs, Idx)
+ .overlaps(AttributeFuncs::typeIncompatible(Ty)),
+ "Wrong types for attribute: " +
+ AttributeSet::get(*Context, Idx,
+ AttributeFuncs::typeIncompatible(Ty)).getAsString(Idx),
+ V);
+
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ SmallPtrSet<Type*, 4> Visited;
+ if (!PTy->getElementType()->isSized(&Visited)) {
+ Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
+ !Attrs.hasAttribute(Idx, Attribute::InAlloca),
+ "Attributes 'byval' and 'inalloca' do not support unsized types!",
+ V);
+ }
+ } else {
+ Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal),
+ "Attribute 'byval' only applies to parameters with pointer type!",
+ V);
+ }
+}
+
+// VerifyFunctionAttrs - Check parameter attributes against a function type.
+// The value V is printed in error messages.
+void Verifier::VerifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
+ const Value *V) {
+ if (Attrs.isEmpty())
+ return;
+
+ bool SawNest = false;
+ bool SawReturned = false;
+ bool SawSRet = false;
+
+ for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
+ unsigned Idx = Attrs.getSlotIndex(i);
+
+ Type *Ty;
+ if (Idx == 0)
+ Ty = FT->getReturnType();
+ else if (Idx-1 < FT->getNumParams())
+ Ty = FT->getParamType(Idx-1);
+ else
+ break; // VarArgs attributes, verified elsewhere.
+
+ VerifyParameterAttrs(Attrs, Idx, Ty, Idx == 0, V);
+
+ if (Idx == 0)
+ continue;
+
+ if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
+ Assert(!SawNest, "More than one parameter has attribute nest!", V);
+ SawNest = true;
+ }
+
+ if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
+ Assert(!SawReturned, "More than one parameter has attribute returned!",
+ V);
+ Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
+ "Incompatible "
+ "argument and return types for 'returned' attribute",
+ V);
+ SawReturned = true;
+ }
+
+ if (Attrs.hasAttribute(Idx, Attribute::StructRet)) {
+ Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
+ Assert(Idx == 1 || Idx == 2,
+ "Attribute 'sret' is not on first or second parameter!", V);
+ SawSRet = true;
+ }
+
+ if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) {
+ Assert(Idx == FT->getNumParams(), "inalloca isn't on the last parameter!",
+ V);
+ }
+ }
+
+ if (!Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ return;
+
+ VerifyAttributeTypes(Attrs, AttributeSet::FunctionIndex, true, V);
+
+ Assert(
+ !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
+ Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly)),
+ "Attributes 'readnone and readonly' are incompatible!", V);
+
+ Assert(
+ !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
+ Attrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::InaccessibleMemOrArgMemOnly)),
+ "Attributes 'readnone and inaccessiblemem_or_argmemonly' are incompatible!", V);
+
+ Assert(
+ !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
+ Attrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::InaccessibleMemOnly)),
+ "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
+
+ Assert(
+ !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline) &&
+ Attrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::AlwaysInline)),
+ "Attributes 'noinline and alwaysinline' are incompatible!", V);
+
+ if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeNone)) {
+ Assert(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline),
+ "Attribute 'optnone' requires 'noinline'!", V);
+
+ Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeForSize),
+ "Attributes 'optsize and optnone' are incompatible!", V);
+
+ Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize),
+ "Attributes 'minsize and optnone' are incompatible!", V);
+ }
+
+ if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::JumpTable)) {
+ const GlobalValue *GV = cast<GlobalValue>(V);
+ Assert(GV->hasUnnamedAddr(),
+ "Attribute 'jumptable' requires 'unnamed_addr'", V);
+ }
+}
+
+void Verifier::VerifyFunctionMetadata(
+ const SmallVector<std::pair<unsigned, MDNode *>, 4> MDs) {
+ if (MDs.empty())
+ return;
+
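+  // The only form checked here, for illustration (counts assumed):
+  //   define void @f() !prof !0 { ... }
+  //   !0 = !{!"function_entry_count", i64 100}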
+ for (unsigned i = 0; i < MDs.size(); i++) {
+ if (MDs[i].first == LLVMContext::MD_prof) {
+ MDNode *MD = MDs[i].second;
+ Assert(MD->getNumOperands() == 2,
+ "!prof annotations should have exactly 2 operands", MD);
+
+ // Check first operand.
+ Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
+ MD);
+ Assert(isa<MDString>(MD->getOperand(0)),
+ "expected string with name of the !prof annotation", MD);
+ MDString *MDS = cast<MDString>(MD->getOperand(0));
+ StringRef ProfName = MDS->getString();
+ Assert(ProfName.equals("function_entry_count"),
+ "first operand should be 'function_entry_count'", MD);
+
+ // Check second operand.
+ Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
+ MD);
+ Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
+ "expected integer argument to function_entry_count", MD);
+ }
+ }
+}
+
+void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
+ if (!ConstantExprVisited.insert(EntryC).second)
+ return;
+
+ SmallVector<const Constant *, 16> Stack;
+ Stack.push_back(EntryC);
+
+ while (!Stack.empty()) {
+ const Constant *C = Stack.pop_back_val();
+
+ // Check this constant expression.
+ if (const auto *CE = dyn_cast<ConstantExpr>(C))
+ visitConstantExpr(CE);
+
+ // Visit all sub-expressions.
+ for (const Use &U : C->operands()) {
+ const auto *OpC = dyn_cast<Constant>(U);
+ if (!OpC)
+ continue;
+ if (isa<GlobalValue>(OpC))
+ continue; // Global values get visited separately.
+ if (!ConstantExprVisited.insert(OpC).second)
+ continue;
+ Stack.push_back(OpC);
+ }
+ }
+}
+
+void Verifier::visitConstantExpr(const ConstantExpr *CE) {
+ if (CE->getOpcode() != Instruction::BitCast)
+ return;
+
+ Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
+ CE->getType()),
+ "Invalid bitcast", CE);
+}
+
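+// Assumed index layout for AttributeSet slots (for orientation only):
+// parameters use indices 1..NumParams, index 0 (ReturnIndex) is the return
+// value, and AttributeSet::FunctionIndex (~0U) marks function attributes.
+// Slots are kept sorted by index, so only the last one or two slots can
+// legally lie beyond the formal parameters.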
+bool Verifier::VerifyAttributeCount(AttributeSet Attrs, unsigned Params) {
+ if (Attrs.getNumSlots() == 0)
+ return true;
+
+ unsigned LastSlot = Attrs.getNumSlots() - 1;
+ unsigned LastIndex = Attrs.getSlotIndex(LastSlot);
+ if (LastIndex <= Params
+ || (LastIndex == AttributeSet::FunctionIndex
+ && (LastSlot == 0 || Attrs.getSlotIndex(LastSlot - 1) <= Params)))
+ return true;
+
+ return false;
+}
+
+/// \brief Verify that statepoint intrinsic is well formed.
+void Verifier::VerifyStatepoint(ImmutableCallSite CS) {
+ assert(CS.getCalledFunction() &&
+ CS.getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::experimental_gc_statepoint);
+
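+  // Assumed operand layout, reconstructed from the checks below (trailing GC
+  // args are not counted in the length fields):
+  //   token @llvm.experimental.gc.statepoint(i64 ID, i32 NumPatchBytes,
+  //       FTy* Target, i32 NumCallArgs, i32 Flags, CallArgs...,
+  //       i32 NumTransitionArgs, TransitionArgs...,
+  //       i32 NumDeoptArgs, DeoptArgs..., GCArgs...)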
+ const Instruction &CI = *CS.getInstruction();
+
+ Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
+ !CS.onlyAccessesArgMemory(),
+ "gc.statepoint must read and write all memory to preserve "
+ "reordering restrictions required by safepoint semantics",
+ &CI);
+
+ const Value *IDV = CS.getArgument(0);
+ Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
+ &CI);
+
+ const Value *NumPatchBytesV = CS.getArgument(1);
+ Assert(isa<ConstantInt>(NumPatchBytesV),
+ "gc.statepoint number of patchable bytes must be a constant integer",
+ &CI);
+ const int64_t NumPatchBytes =
+ cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
+ assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
+  Assert(NumPatchBytes >= 0,
+         "gc.statepoint number of patchable bytes must be non-negative", &CI);
+
+ const Value *Target = CS.getArgument(2);
+ auto *PT = dyn_cast<PointerType>(Target->getType());
+ Assert(PT && PT->getElementType()->isFunctionTy(),
+ "gc.statepoint callee must be of function pointer type", &CI, Target);
+ FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
+
+ const Value *NumCallArgsV = CS.getArgument(3);
+ Assert(isa<ConstantInt>(NumCallArgsV),
+ "gc.statepoint number of arguments to underlying call "
+ "must be constant integer",
+ &CI);
+ const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
+ Assert(NumCallArgs >= 0,
+ "gc.statepoint number of arguments to underlying call "
+ "must be positive",
+ &CI);
+ const int NumParams = (int)TargetFuncType->getNumParams();
+ if (TargetFuncType->isVarArg()) {
+ Assert(NumCallArgs >= NumParams,
+ "gc.statepoint mismatch in number of vararg call args", &CI);
+
+ // TODO: Remove this limitation
+ Assert(TargetFuncType->getReturnType()->isVoidTy(),
+ "gc.statepoint doesn't support wrapping non-void "
+ "vararg functions yet",
+ &CI);
+ } else
+ Assert(NumCallArgs == NumParams,
+ "gc.statepoint mismatch in number of call args", &CI);
+
+ const Value *FlagsV = CS.getArgument(4);
+ Assert(isa<ConstantInt>(FlagsV),
+ "gc.statepoint flags must be constant integer", &CI);
+ const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
+ Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
+ "unknown flag used in gc.statepoint flags argument", &CI);
+
+ // Verify that the types of the call parameter arguments match
+ // the type of the wrapped callee.
+ for (int i = 0; i < NumParams; i++) {
+ Type *ParamType = TargetFuncType->getParamType(i);
+ Type *ArgType = CS.getArgument(5 + i)->getType();
+ Assert(ArgType == ParamType,
+ "gc.statepoint call argument does not match wrapped "
+ "function type",
+ &CI);
+ }
+
+ const int EndCallArgsInx = 4 + NumCallArgs;
+
+ const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
+ Assert(isa<ConstantInt>(NumTransitionArgsV),
+ "gc.statepoint number of transition arguments "
+ "must be constant integer",
+ &CI);
+ const int NumTransitionArgs =
+ cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
+ Assert(NumTransitionArgs >= 0,
+ "gc.statepoint number of transition arguments must be positive", &CI);
+ const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
+
+ const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
+ Assert(isa<ConstantInt>(NumDeoptArgsV),
+ "gc.statepoint number of deoptimization arguments "
+ "must be constant integer",
+ &CI);
+ const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
+ Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
+ "must be positive",
+ &CI);
+
+ const int ExpectedNumArgs =
+ 7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
+ Assert(ExpectedNumArgs <= (int)CS.arg_size(),
+ "gc.statepoint too few arguments according to length fields", &CI);
+
+ // Check that the only uses of this gc.statepoint are gc.result or
+ // gc.relocate calls which are tied to this statepoint and thus part
+ // of the same statepoint sequence
+ for (const User *U : CI.users()) {
+ const CallInst *Call = dyn_cast<const CallInst>(U);
+ Assert(Call, "illegal use of statepoint token", &CI, U);
+ if (!Call) continue;
+ Assert(isa<GCRelocateInst>(Call) || isGCResult(Call),
+ "gc.result or gc.relocate are the only value uses"
+ "of a gc.statepoint",
+ &CI, U);
+ if (isGCResult(Call)) {
+ Assert(Call->getArgOperand(0) == &CI,
+ "gc.result connected to wrong gc.statepoint", &CI, Call);
+ } else if (isa<GCRelocateInst>(Call)) {
+ Assert(Call->getArgOperand(0) == &CI,
+ "gc.relocate connected to wrong gc.statepoint", &CI, Call);
+ }
+ }
+
+ // Note: It is legal for a single derived pointer to be listed multiple
+ // times. It's non-optimal, but it is legal. It can also happen after
+ // insertion if we strip a bitcast away.
+ // Note: It is really tempting to check that each base is relocated and
+ // that a derived pointer is never reused as a base pointer. This turns
+ // out to be problematic since optimizations run after safepoint insertion
+ // can recognize equality properties that the insertion logic doesn't know
+ // about. See example statepoint.ll in the verifier subdirectory
+}
+
+void Verifier::verifyFrameRecoverIndices() {
+ for (auto &Counts : FrameEscapeInfo) {
+ Function *F = Counts.first;
+ unsigned EscapedObjectCount = Counts.second.first;
+ unsigned MaxRecoveredIndex = Counts.second.second;
+ Assert(MaxRecoveredIndex <= EscapedObjectCount,
+ "all indices passed to llvm.localrecover must be less than the "
+ "number of arguments passed ot llvm.localescape in the parent "
+ "function",
+ F);
+ }
+}
+
+static Instruction *getSuccPad(TerminatorInst *Terminator) {
+ BasicBlock *UnwindDest;
+ if (auto *II = dyn_cast<InvokeInst>(Terminator))
+ UnwindDest = II->getUnwindDest();
+ else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
+ UnwindDest = CSI->getUnwindDest();
+ else
+ UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
+ return UnwindDest->getFirstNonPHI();
+}
+
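+// Illustration (assumed CFG): if cleanuppad %a unwinds to cleanuppad %b and
+// %b unwinds back to %a, the walk below finds %a already in the Active set
+// and reports the cycle %a -> %b -> %a.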
+void Verifier::verifySiblingFuncletUnwinds() {
+ SmallPtrSet<Instruction *, 8> Visited;
+ SmallPtrSet<Instruction *, 8> Active;
+ for (const auto &Pair : SiblingFuncletInfo) {
+ Instruction *PredPad = Pair.first;
+ if (Visited.count(PredPad))
+ continue;
+ Active.insert(PredPad);
+ TerminatorInst *Terminator = Pair.second;
+ do {
+ Instruction *SuccPad = getSuccPad(Terminator);
+ if (Active.count(SuccPad)) {
+ // Found a cycle; report error
+ Instruction *CyclePad = SuccPad;
+ SmallVector<Instruction *, 8> CycleNodes;
+ do {
+ CycleNodes.push_back(CyclePad);
+ TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad];
+ if (CycleTerminator != CyclePad)
+ CycleNodes.push_back(CycleTerminator);
+ CyclePad = getSuccPad(CycleTerminator);
+ } while (CyclePad != SuccPad);
+ Assert(false, "EH pads can't handle each other's exceptions",
+ ArrayRef<Instruction *>(CycleNodes));
+ }
+ // Don't re-walk a node we've already checked
+ if (!Visited.insert(SuccPad).second)
+ break;
+ // Walk to this successor if it has a map entry.
+ PredPad = SuccPad;
+ auto TermI = SiblingFuncletInfo.find(PredPad);
+ if (TermI == SiblingFuncletInfo.end())
+ break;
+ Terminator = TermI->second;
+ Active.insert(PredPad);
+ } while (true);
+ // Each node only has one successor, so we've walked all the active
+ // nodes' successors.
+ Active.clear();
+ }
+}
+
+// visitFunction - Verify that a function is ok.
+//
+void Verifier::visitFunction(const Function &F) {
+ // Check function arguments.
+ FunctionType *FT = F.getFunctionType();
+ unsigned NumArgs = F.arg_size();
+
+ Assert(Context == &F.getContext(),
+ "Function context does not match Module context!", &F);
+
+ Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
+ Assert(FT->getNumParams() == NumArgs,
+ "# formal arguments must match # of arguments for function type!", &F,
+ FT);
+ Assert(F.getReturnType()->isFirstClassType() ||
+ F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
+ "Functions cannot return aggregate values!", &F);
+
+ Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
+ "Invalid struct return type!", &F);
+
+ AttributeSet Attrs = F.getAttributes();
+
+ Assert(VerifyAttributeCount(Attrs, FT->getNumParams()),
+ "Attribute after last parameter!", &F);
+
+ // Check function attributes.
+ VerifyFunctionAttrs(FT, Attrs, &F);
+
+ // On function declarations/definitions, we do not support the builtin
+  // attribute. We do not check this in VerifyFunctionAttrs since that
+  // checks attributes that can or cannot ever appear on functions.
+ Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::Builtin),
+ "Attribute 'builtin' can only be applied to a callsite.", &F);
+
+ // Check that this function meets the restrictions on this calling convention.
+  // Sometimes varargs is used for perfect-forwarding thunks, so some of these
+ // restrictions can be lifted.
+ switch (F.getCallingConv()) {
+ default:
+ case CallingConv::C:
+ break;
+ case CallingConv::Fast:
+ case CallingConv::Cold:
+ case CallingConv::Intel_OCL_BI:
+ case CallingConv::PTX_Kernel:
+ case CallingConv::PTX_Device:
+ Assert(!F.isVarArg(), "Calling convention does not support varargs or "
+ "perfect forwarding!",
+ &F);
+ break;
+ }
+
+  bool isLLVMdotName = F.getName().startswith("llvm.");
+
+ // Check that the argument values match the function type for this function...
+ unsigned i = 0;
+ for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
+ ++I, ++i) {
+ Assert(I->getType() == FT->getParamType(i),
+ "Argument value does not match function argument type!", I,
+ FT->getParamType(i));
+ Assert(I->getType()->isFirstClassType(),
+ "Function arguments must have first-class types!", I);
+ if (!isLLVMdotName) {
+ Assert(!I->getType()->isMetadataTy(),
+ "Function takes metadata but isn't an intrinsic", I, &F);
+ Assert(!I->getType()->isTokenTy(),
+ "Function takes token but isn't an intrinsic", I, &F);
+ }
+ }
+
+ if (!isLLVMdotName)
+ Assert(!F.getReturnType()->isTokenTy(),
+ "Functions returns a token but isn't an intrinsic", &F);
+
+ // Get the function metadata attachments.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ F.getAllMetadata(MDs);
+ assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
+ VerifyFunctionMetadata(MDs);
+
+ // Check validity of the personality function
+ if (F.hasPersonalityFn()) {
+ auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
+ if (Per)
+ Assert(Per->getParent() == F.getParent(),
+ "Referencing personality function in another module!",
+ &F, F.getParent(), Per, Per->getParent());
+ }
+
+ if (F.isMaterializable()) {
+ // Function has a body somewhere we can't see.
+ Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
+ MDs.empty() ? nullptr : MDs.front().second);
+ } else if (F.isDeclaration()) {
+ Assert(F.hasExternalLinkage() || F.hasExternalWeakLinkage(),
+ "invalid linkage type for function declaration", &F);
+ Assert(MDs.empty(), "function without a body cannot have metadata", &F,
+ MDs.empty() ? nullptr : MDs.front().second);
+ Assert(!F.hasPersonalityFn(),
+ "Function declaration shouldn't have a personality routine", &F);
+ } else {
+ // Verify that this function (which has a body) is not named "llvm.*". It
+ // is not legal to define intrinsics.
+ Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
+
+ // Check the entry node
+ const BasicBlock *Entry = &F.getEntryBlock();
+ Assert(pred_empty(Entry),
+ "Entry block to function must not have predecessors!", Entry);
+
+ // The address of the entry block cannot be taken, unless it is dead.
+ if (Entry->hasAddressTaken()) {
+ Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
+ "blockaddress may not be used with the entry block!", Entry);
+ }
+
+ // Visit metadata attachments.
+ for (const auto &I : MDs) {
+ // Verify that the attachment is legal.
+ switch (I.first) {
+ default:
+ break;
+ case LLVMContext::MD_dbg:
+ Assert(isa<DISubprogram>(I.second),
+ "function !dbg attachment must be a subprogram", &F, I.second);
+ break;
+ }
+
+ // Verify the metadata itself.
+ visitMDNode(*I.second);
+ }
+ }
+
+ // If this function is actually an intrinsic, verify that it is only used in
+ // direct call/invokes, never having its "address taken".
+ // Only do this if the module is materialized, otherwise we don't have all the
+ // uses.
+ if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
+ const User *U;
+ if (F.hasAddressTaken(&U))
+ Assert(0, "Invalid user of intrinsic instruction!", U);
+ }
+
+ Assert(!F.hasDLLImportStorageClass() ||
+ (F.isDeclaration() && F.hasExternalLinkage()) ||
+ F.hasAvailableExternallyLinkage(),
+ "Function is marked as dllimport, but not external.", &F);
+
+ auto *N = F.getSubprogram();
+ if (!N)
+ return;
+
+  // Check that all !dbg attachments lead back to N (or, at least, another
+ // subprogram that describes the same function).
+ //
+ // FIXME: Check this incrementally while visiting !dbg attachments.
+ // FIXME: Only check when N is the canonical subprogram for F.
+ SmallPtrSet<const MDNode *, 32> Seen;
+ for (auto &BB : F)
+ for (auto &I : BB) {
+ // Be careful about using DILocation here since we might be dealing with
+ // broken code (this is the Verifier after all).
+ DILocation *DL =
+ dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode());
+ if (!DL)
+ continue;
+ if (!Seen.insert(DL).second)
+ continue;
+
+ DILocalScope *Scope = DL->getInlinedAtScope();
+ if (Scope && !Seen.insert(Scope).second)
+ continue;
+
+ DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
+
+ // Scope and SP could be the same MDNode and we don't want to skip
+ // validation in that case
+ if (SP && ((Scope != SP) && !Seen.insert(SP).second))
+ continue;
+
+ // FIXME: Once N is canonical, check "SP == &N".
+      Assert(SP && SP->describes(&F),
+ "!dbg attachment points at wrong subprogram for function", N, &F,
+ &I, DL, Scope, SP);
+ }
+}
+
+// visitBasicBlock - Verify that a basic block is well formed...
+//
+void Verifier::visitBasicBlock(BasicBlock &BB) {
+ InstsInThisBlock.clear();
+
+ // Ensure that basic blocks have terminators!
+ Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
+
+ // Check constraints that this basic block imposes on all of the PHI nodes in
+ // it.
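+  // For instance (illustrative, with predecessors %a and %b of this block),
+  //   %v = phi i32 [ 0, %a ], [ 1, %b ]
+  // must name each predecessor edge exactly once; repeated blocks are only
+  // tolerated when they carry identical incoming values.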
+ if (isa<PHINode>(BB.front())) {
+ SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
+ SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
+ std::sort(Preds.begin(), Preds.end());
+ PHINode *PN;
+ for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I));++I) {
+ // Ensure that PHI nodes have at least one entry!
+ Assert(PN->getNumIncomingValues() != 0,
+ "PHI nodes must have at least one entry. If the block is dead, "
+ "the PHI should be removed!",
+ PN);
+ Assert(PN->getNumIncomingValues() == Preds.size(),
+ "PHINode should have one entry for each predecessor of its "
+ "parent basic block!",
+ PN);
+
+ // Get and sort all incoming values in the PHI node...
+ Values.clear();
+ Values.reserve(PN->getNumIncomingValues());
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ Values.push_back(std::make_pair(PN->getIncomingBlock(i),
+ PN->getIncomingValue(i)));
+ std::sort(Values.begin(), Values.end());
+
+ for (unsigned i = 0, e = Values.size(); i != e; ++i) {
+ // Check to make sure that if there is more than one entry for a
+ // particular basic block in this PHI node, that the incoming values are
+ // all identical.
+ //
+ Assert(i == 0 || Values[i].first != Values[i - 1].first ||
+ Values[i].second == Values[i - 1].second,
+ "PHI node has multiple entries for the same basic block with "
+ "different incoming values!",
+ PN, Values[i].first, Values[i].second, Values[i - 1].second);
+
+ // Check to make sure that the predecessors and PHI node entries are
+ // matched up.
+ Assert(Values[i].first == Preds[i],
+ "PHI node entries do not match predecessors!", PN,
+ Values[i].first, Preds[i]);
+ }
+ }
+ }
+
+ // Check that all instructions have their parent pointers set up correctly.
+  for (auto &I : BB)
+    Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
+}
+
+void Verifier::visitTerminatorInst(TerminatorInst &I) {
+ // Ensure that terminators only exist at the end of the basic block.
+ Assert(&I == I.getParent()->getTerminator(),
+ "Terminator found in the middle of a basic block!", I.getParent());
+ visitInstruction(I);
+}
+
+void Verifier::visitBranchInst(BranchInst &BI) {
+ if (BI.isConditional()) {
+ Assert(BI.getCondition()->getType()->isIntegerTy(1),
+ "Branch condition is not 'i1' type!", &BI, BI.getCondition());
+ }
+ visitTerminatorInst(BI);
+}
+
+void Verifier::visitReturnInst(ReturnInst &RI) {
+ Function *F = RI.getParent()->getParent();
+ unsigned N = RI.getNumOperands();
+ if (F->getReturnType()->isVoidTy())
+ Assert(N == 0,
+ "Found return instr that returns non-void in Function of void "
+ "return type!",
+ &RI, F->getReturnType());
+ else
+ Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
+ "Function return type does not match operand "
+ "type of return inst!",
+ &RI, F->getReturnType());
+
+ // Check to make sure that the return value has necessary properties for
+ // terminators...
+ visitTerminatorInst(RI);
+}
+
+void Verifier::visitSwitchInst(SwitchInst &SI) {
+ // Check to make sure that all of the constants in the switch instruction
+ // have the same type as the switched-on value.
+ Type *SwitchTy = SI.getCondition()->getType();
+ SmallPtrSet<ConstantInt*, 32> Constants;
+ for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) {
+ Assert(i.getCaseValue()->getType() == SwitchTy,
+ "Switch constants must all be same type as switch value!", &SI);
+ Assert(Constants.insert(i.getCaseValue()).second,
+ "Duplicate integer as switch case", &SI, i.getCaseValue());
+ }
+
+ visitTerminatorInst(SI);
+}
+
+void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
+ Assert(BI.getAddress()->getType()->isPointerTy(),
+ "Indirectbr operand must have pointer type!", &BI);
+ for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
+ Assert(BI.getDestination(i)->getType()->isLabelTy(),
+ "Indirectbr destinations must all have pointer type!", &BI);
+
+ visitTerminatorInst(BI);
+}
+
+void Verifier::visitSelectInst(SelectInst &SI) {
+ Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
+ SI.getOperand(2)),
+ "Invalid operands for select instruction!", &SI);
+
+ Assert(SI.getTrueValue()->getType() == SI.getType(),
+ "Select values must have same type as select instruction!", &SI);
+ visitInstruction(SI);
+}
+
+/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
+/// a pass; if any exist, it's an error.
+///
+void Verifier::visitUserOp1(Instruction &I) {
+ Assert(0, "User-defined operators should not live outside of a pass!", &I);
+}
+
+void Verifier::visitTruncInst(TruncInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
+  Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces an integer", &I);
+ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "trunc source and destination must both be a vector or neither", &I);
+ Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
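+  // Illustration: "%t = trunc i32 %x to i8" satisfies all of the above, while
+  // "trunc i8 %x to i32" fails the size check.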
+
+ visitInstruction(I);
+}
+
+void Verifier::visitZExtInst(ZExtInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
+ Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
+ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "zext source and destination must both be a vector or neither", &I);
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitSExtInst(SExtInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
+ Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
+ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "sext source and destination must both be a vector or neither", &I);
+ Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPTruncInst(FPTruncInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
+ Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
+ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "fptrunc source and destination must both be a vector or neither", &I);
+ Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPExtInst(FPExtInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
+ Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
+ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "fpext source and destination must both be a vector or neither", &I);
+ Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitUIToFPInst(UIToFPInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Assert(SrcVec == DstVec,
+ "UIToFP source and dest must both be vector or scalar", &I);
+ Assert(SrcTy->isIntOrIntVectorTy(),
+ "UIToFP source must be integer or integer vector", &I);
+ Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
+ &I);
+
+ if (SrcVec && DstVec)
+ Assert(cast<VectorType>(SrcTy)->getNumElements() ==
+ cast<VectorType>(DestTy)->getNumElements(),
+ "UIToFP source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitSIToFPInst(SIToFPInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Assert(SrcVec == DstVec,
+ "SIToFP source and dest must both be vector or scalar", &I);
+ Assert(SrcTy->isIntOrIntVectorTy(),
+ "SIToFP source must be integer or integer vector", &I);
+ Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
+ &I);
+
+ if (SrcVec && DstVec)
+ Assert(cast<VectorType>(SrcTy)->getNumElements() ==
+ cast<VectorType>(DestTy)->getNumElements(),
+ "SIToFP source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPToUIInst(FPToUIInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Assert(SrcVec == DstVec,
+ "FPToUI source and dest must both be vector or scalar", &I);
+ Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
+ &I);
+ Assert(DestTy->isIntOrIntVectorTy(),
+ "FPToUI result must be integer or integer vector", &I);
+
+ if (SrcVec && DstVec)
+ Assert(cast<VectorType>(SrcTy)->getNumElements() ==
+ cast<VectorType>(DestTy)->getNumElements(),
+ "FPToUI source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPToSIInst(FPToSIInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Assert(SrcVec == DstVec,
+ "FPToSI source and dest must both be vector or scalar", &I);
+ Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
+ &I);
+ Assert(DestTy->isIntOrIntVectorTy(),
+ "FPToSI result must be integer or integer vector", &I);
+
+ if (SrcVec && DstVec)
+ Assert(cast<VectorType>(SrcTy)->getNumElements() ==
+ cast<VectorType>(DestTy)->getNumElements(),
+ "FPToSI source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ Assert(SrcTy->getScalarType()->isPointerTy(),
+ "PtrToInt source must be pointer", &I);
+ Assert(DestTy->getScalarType()->isIntegerTy(),
+ "PtrToInt result must be integral", &I);
+ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
+ &I);
+
+ if (SrcTy->isVectorTy()) {
+    VectorType *VSrc = cast<VectorType>(SrcTy);
+    VectorType *VDest = cast<VectorType>(DestTy);
+ Assert(VSrc->getNumElements() == VDest->getNumElements(),
+ "PtrToInt Vector width mismatch", &I);
+ }
+
+ visitInstruction(I);
+}
+
+void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ Assert(SrcTy->getScalarType()->isIntegerTy(),
+ "IntToPtr source must be an integral", &I);
+ Assert(DestTy->getScalarType()->isPointerTy(),
+ "IntToPtr result must be a pointer", &I);
+ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
+ &I);
+ if (SrcTy->isVectorTy()) {
+    VectorType *VSrc = cast<VectorType>(SrcTy);
+    VectorType *VDest = cast<VectorType>(DestTy);
+ Assert(VSrc->getNumElements() == VDest->getNumElements(),
+ "IntToPtr Vector width mismatch", &I);
+ }
+ visitInstruction(I);
+}
+
+void Verifier::visitBitCastInst(BitCastInst &I) {
+ Assert(
+ CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
+ "Invalid bitcast", &I);
+ visitInstruction(I);
+}
+
+void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
+ &I);
+ Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
+ &I);
+ Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
+ "AddrSpaceCast must be between different address spaces", &I);
+ if (SrcTy->isVectorTy())
+ Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
+ "AddrSpaceCast vector pointer number of elements mismatch", &I);
+ visitInstruction(I);
+}
+
+/// visitPHINode - Ensure that a PHI node is well formed.
+///
+void Verifier::visitPHINode(PHINode &PN) {
+ // Ensure that the PHI nodes are all grouped together at the top of the block.
+  // This can be tested by checking whether the instruction before this one is
+  // either nonexistent (because this is begin()) or a PHI node. If not, then
+  // there is some other instruction before a PHI.
+ Assert(&PN == &PN.getParent()->front() ||
+ isa<PHINode>(--BasicBlock::iterator(&PN)),
+ "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
+
+ // Check that a PHI doesn't yield a Token.
+ Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
+
+ // Check that all of the values of the PHI node have the same type as the
+ // result, and that the incoming blocks are really basic blocks.
+ for (Value *IncValue : PN.incoming_values()) {
+ Assert(PN.getType() == IncValue->getType(),
+ "PHI node operands are not the same type as the result!", &PN);
+ }
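+
+  // For example, in
+  //   %p = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
+  // both %a and %b must be i32 to match the type of %p itself.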
+
+ // All other PHI node constraints are checked in the visitBasicBlock method.
+
+ visitInstruction(PN);
+}
+
+void Verifier::VerifyCallSite(CallSite CS) {
+ Instruction *I = CS.getInstruction();
+
+ Assert(CS.getCalledValue()->getType()->isPointerTy(),
+ "Called function must be a pointer!", I);
+ PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
+
+ Assert(FPTy->getElementType()->isFunctionTy(),
+ "Called function is not pointer to function type!", I);
+
+ Assert(FPTy->getElementType() == CS.getFunctionType(),
+ "Called function is not the same type as the call!", I);
+
+ FunctionType *FTy = CS.getFunctionType();
+
+ // Verify that the correct number of arguments are being passed
+ if (FTy->isVarArg())
+ Assert(CS.arg_size() >= FTy->getNumParams(),
+ "Called function requires more parameters than were provided!", I);
+ else
+ Assert(CS.arg_size() == FTy->getNumParams(),
+ "Incorrect number of arguments passed to called function!", I);
+
+ // Verify that all arguments to the call match the function type.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+ Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
+ "Call parameter type does not match function signature!",
+ CS.getArgument(i), FTy->getParamType(i), I);
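+
+  // For example, passing an i8 argument to a callee declared as
+  //   declare void @f(i32)
+  // trips this check; any conversion must be made explicit in the IR.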
+
+ AttributeSet Attrs = CS.getAttributes();
+
+ Assert(VerifyAttributeCount(Attrs, CS.arg_size()),
+ "Attribute after last parameter!", I);
+
+ // Verify call attributes.
+ VerifyFunctionAttrs(FTy, Attrs, I);
+
+ // Conservatively check the inalloca argument.
+ // We have a bug if we can find that there is an underlying alloca without
+ // inalloca.
+ if (CS.hasInAllocaArgument()) {
+ Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
+ if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
+ Assert(AI->isUsedWithInAlloca(),
+ "inalloca argument for call has mismatched alloca", AI, I);
+ }
+
+ if (FTy->isVarArg()) {
+ // FIXME? is 'nest' even legal here?
+ bool SawNest = false;
+ bool SawReturned = false;
+
+ for (unsigned Idx = 1; Idx < 1 + FTy->getNumParams(); ++Idx) {
+ if (Attrs.hasAttribute(Idx, Attribute::Nest))
+ SawNest = true;
+ if (Attrs.hasAttribute(Idx, Attribute::Returned))
+ SawReturned = true;
+ }
+
+ // Check attributes on the varargs part.
+ for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) {
+ Type *Ty = CS.getArgument(Idx-1)->getType();
+ VerifyParameterAttrs(Attrs, Idx, Ty, false, I);
+
+ if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
+ Assert(!SawNest, "More than one parameter has attribute nest!", I);
+ SawNest = true;
+ }
+
+ if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
+ Assert(!SawReturned, "More than one parameter has attribute returned!",
+ I);
+ Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
+ "Incompatible argument and return types for 'returned' "
+ "attribute",
+ I);
+ SawReturned = true;
+ }
+
+ Assert(!Attrs.hasAttribute(Idx, Attribute::StructRet),
+ "Attribute 'sret' cannot be used for vararg call arguments!", I);
+
+ if (Attrs.hasAttribute(Idx, Attribute::InAlloca))
+ Assert(Idx == CS.arg_size(), "inalloca isn't on the last argument!", I);
+ }
+ }
+
+ // Verify that there's no metadata unless it's a direct call to an intrinsic.
+ if (CS.getCalledFunction() == nullptr ||
+ !CS.getCalledFunction()->getName().startswith("llvm.")) {
+ for (Type *ParamTy : FTy->params()) {
+ Assert(!ParamTy->isMetadataTy(),
+ "Function has metadata parameter but isn't an intrinsic", I);
+ Assert(!ParamTy->isTokenTy(),
+ "Function has token parameter but isn't an intrinsic", I);
+ }
+ }
+
+ // Verify that indirect calls don't return tokens.
+ if (CS.getCalledFunction() == nullptr)
+ Assert(!FTy->getReturnType()->isTokenTy(),
+ "Return type cannot be token for indirect call!");
+
+ if (Function *F = CS.getCalledFunction())
+ if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
+ visitIntrinsicCallSite(ID, CS);
+
+ // Verify that a callsite has at most one "deopt" and one "funclet" operand
+ // bundle.
+ bool FoundDeoptBundle = false, FoundFuncletBundle = false;
+ for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
+ OperandBundleUse BU = CS.getOperandBundleAt(i);
+ uint32_t Tag = BU.getTagID();
+ if (Tag == LLVMContext::OB_deopt) {
+ Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
+ FoundDeoptBundle = true;
+ }
+ if (Tag == LLVMContext::OB_funclet) {
+ Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
+ FoundFuncletBundle = true;
+ Assert(BU.Inputs.size() == 1,
+ "Expected exactly one funclet bundle operand", I);
+ Assert(isa<FuncletPadInst>(BU.Inputs.front()),
+ "Funclet bundle operands should correspond to a FuncletPadInst",
+ I);
+ }
+ }
+
+ visitInstruction(*I);
+}
+
+/// Two types are "congruent" if they are identical, or if they are both pointer
+/// types with different pointee types and the same address space.
+static bool isTypeCongruent(Type *L, Type *R) {
+ if (L == R)
+ return true;
+ PointerType *PL = dyn_cast<PointerType>(L);
+ PointerType *PR = dyn_cast<PointerType>(R);
+ if (!PL || !PR)
+ return false;
+ return PL->getAddressSpace() == PR->getAddressSpace();
+}
+
+static AttrBuilder getParameterABIAttributes(int I, AttributeSet Attrs) {
+ static const Attribute::AttrKind ABIAttrs[] = {
+ Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
+ Attribute::InReg, Attribute::Returned};
+ AttrBuilder Copy;
+ for (auto AK : ABIAttrs) {
+ if (Attrs.hasAttribute(I + 1, AK))
+ Copy.addAttribute(AK);
+ }
+ if (Attrs.hasAttribute(I + 1, Attribute::Alignment))
+ Copy.addAlignmentAttr(Attrs.getParamAlignment(I + 1));
+ return Copy;
+}
+
+void Verifier::verifyMustTailCall(CallInst &CI) {
+ Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
+
+ // - The caller and callee prototypes must match. Pointer types of
+ // parameters or return types may differ in pointee type, but not
+ // address space.
+ Function *F = CI.getParent()->getParent();
+ FunctionType *CallerTy = F->getFunctionType();
+ FunctionType *CalleeTy = CI.getFunctionType();
+ Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
+ "cannot guarantee tail call due to mismatched parameter counts", &CI);
+ Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
+ "cannot guarantee tail call due to mismatched varargs", &CI);
+ Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
+ "cannot guarantee tail call due to mismatched return types", &CI);
+ for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
+ Assert(
+ isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
+ "cannot guarantee tail call due to mismatched parameter types", &CI);
+ }
+
+ // - The calling conventions of the caller and callee must match.
+ Assert(F->getCallingConv() == CI.getCallingConv(),
+ "cannot guarantee tail call due to mismatched calling conv", &CI);
+
+ // - All ABI-impacting function attributes, such as sret, byval, inreg,
+ // returned, and inalloca, must match.
+ AttributeSet CallerAttrs = F->getAttributes();
+ AttributeSet CalleeAttrs = CI.getAttributes();
+ for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
+ AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
+ AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
+ Assert(CallerABIAttrs == CalleeABIAttrs,
+ "cannot guarantee tail call due to mismatched ABI impacting "
+ "function attributes",
+ &CI, CI.getOperand(I));
+ }
+
+ // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
+ // or a pointer bitcast followed by a ret instruction.
+ // - The ret instruction must return the (possibly bitcasted) value
+ // produced by the call or void.
+ Value *RetVal = &CI;
+ Instruction *Next = CI.getNextNode();
+
+ // Handle the optional bitcast.
+ if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
+ Assert(BI->getOperand(0) == RetVal,
+ "bitcast following musttail call must use the call", BI);
+ RetVal = BI;
+ Next = BI->getNextNode();
+ }
+
+ // Check the return.
+ ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
+ Assert(Ret, "musttail call must be precede a ret with an optional bitcast",
+ &CI);
+ Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
+ "musttail call result must be returned", Ret);
+}
+
+void Verifier::visitCallInst(CallInst &CI) {
+ VerifyCallSite(&CI);
+
+ if (CI.isMustTailCall())
+ verifyMustTailCall(CI);
+}
+
+void Verifier::visitInvokeInst(InvokeInst &II) {
+ VerifyCallSite(&II);
+
+ // Verify that the first non-PHI instruction of the unwind destination is an
+ // exception handling instruction.
+ Assert(
+ II.getUnwindDest()->isEHPad(),
+ "The unwind destination does not have an exception handling instruction!",
+ &II);
+
+ visitTerminatorInst(II);
+}
+
+/// visitBinaryOperator - Check that both arguments to the binary operator are
+/// of the same type!
+///
+void Verifier::visitBinaryOperator(BinaryOperator &B) {
+ Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
+ "Both operands to a binary operator are not of the same type!", &B);
+
+ switch (B.getOpcode()) {
+ // Check that integer arithmetic operators are only used with
+ // integral operands.
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::SRem:
+ case Instruction::URem:
+ Assert(B.getType()->isIntOrIntVectorTy(),
+ "Integer arithmetic operators only work with integral types!", &B);
+ Assert(B.getType() == B.getOperand(0)->getType(),
+ "Integer arithmetic operators must have same type "
+ "for operands and result!",
+ &B);
+ break;
+ // Check that floating-point arithmetic operators are only used with
+ // floating-point operands.
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ Assert(B.getType()->isFPOrFPVectorTy(),
+ "Floating-point arithmetic operators only work with "
+ "floating-point types!",
+ &B);
+ Assert(B.getType() == B.getOperand(0)->getType(),
+ "Floating-point arithmetic operators must have same type "
+ "for operands and result!",
+ &B);
+ break;
+ // Check that logical operators are only used with integral operands.
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ Assert(B.getType()->isIntOrIntVectorTy(),
+ "Logical operators only work with integral types!", &B);
+ Assert(B.getType() == B.getOperand(0)->getType(),
+ "Logical operators must have same type for operands and result!",
+ &B);
+ break;
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ Assert(B.getType()->isIntOrIntVectorTy(),
+ "Shifts only work with integral types!", &B);
+ Assert(B.getType() == B.getOperand(0)->getType(),
+ "Shift return type must be same as operands!", &B);
+ break;
+ default:
+ llvm_unreachable("Unknown BinaryOperator opcode!");
+ }
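+
+  // These opcode checks matter mostly for IR built through the C++ API; for
+  // example, an 'and' whose operands are floating point, or an 'add' of two
+  // pointers, is caught here rather than by the IR parser.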
+
+ visitInstruction(B);
+}
+
+void Verifier::visitICmpInst(ICmpInst &IC) {
+ // Check that the operands are the same type
+ Type *Op0Ty = IC.getOperand(0)->getType();
+ Type *Op1Ty = IC.getOperand(1)->getType();
+ Assert(Op0Ty == Op1Ty,
+ "Both operands to ICmp instruction are not of the same type!", &IC);
+ // Check that the operands are the right type
+ Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(),
+ "Invalid operand types for ICmp instruction", &IC);
+ // Check that the predicate is valid.
+ Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
+ IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
+ "Invalid predicate in ICmp instruction!", &IC);
+
+ visitInstruction(IC);
+}
+
+void Verifier::visitFCmpInst(FCmpInst &FC) {
+ // Check that the operands are the same type
+ Type *Op0Ty = FC.getOperand(0)->getType();
+ Type *Op1Ty = FC.getOperand(1)->getType();
+ Assert(Op0Ty == Op1Ty,
+ "Both operands to FCmp instruction are not of the same type!", &FC);
+ // Check that the operands are the right type
+ Assert(Op0Ty->isFPOrFPVectorTy(),
+ "Invalid operand types for FCmp instruction", &FC);
+ // Check that the predicate is valid.
+ Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
+ FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
+ "Invalid predicate in FCmp instruction!", &FC);
+
+ visitInstruction(FC);
+}
+
+void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
+ Assert(
+ ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
+ "Invalid extractelement operands!", &EI);
+ visitInstruction(EI);
+}
+
+void Verifier::visitInsertElementInst(InsertElementInst &IE) {
+ Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
+ IE.getOperand(2)),
+ "Invalid insertelement operands!", &IE);
+ visitInstruction(IE);
+}
+
+void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
+ Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
+ SV.getOperand(2)),
+ "Invalid shufflevector operands!", &SV);
+ visitInstruction(SV);
+}
+
+void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
+ Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
+
+  Assert(isa<PointerType>(TargetTy),
+         "GEP base pointer is not a pointer or a vector of pointers", &GEP);
+ Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
+ SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
+ Type *ElTy =
+ GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
+ Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
+
+ Assert(GEP.getType()->getScalarType()->isPointerTy() &&
+ GEP.getResultElementType() == ElTy,
+ "GEP is not of right type for indices!", &GEP, ElTy);
+
+ if (GEP.getType()->isVectorTy()) {
+ // Additional checks for vector GEPs.
+ unsigned GEPWidth = GEP.getType()->getVectorNumElements();
+ if (GEP.getPointerOperandType()->isVectorTy())
+ Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
+ "Vector GEP result width doesn't match operand's", &GEP);
+ for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
+ Type *IndexTy = Idxs[i]->getType();
+ if (IndexTy->isVectorTy()) {
+ unsigned IndexWidth = IndexTy->getVectorNumElements();
+ Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
+ }
+      Assert(IndexTy->getScalarType()->isIntegerTy(),
+             "All GEP indices should be of integer type", &GEP);
+ }
+ }
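+
+  // For example, a well-formed vector GEP looks like
+  //   %ps = getelementptr i32, <4 x i32*> %base, <4 x i64> %idx
+  // where the result is <4 x i32*> and every vector operand has width 4.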
+ visitInstruction(GEP);
+}
+
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+ return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
+void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
+ assert(Range &&
+ Range == I.getMetadata(LLVMContext::MD_range) &&
+ "precondition violation");
+
+ unsigned NumOperands = Range->getNumOperands();
+ Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
+ unsigned NumRanges = NumOperands / 2;
+ Assert(NumRanges >= 1, "It should have at least one range!", Range);
+
+ ConstantRange LastRange(1); // Dummy initial value
+ for (unsigned i = 0; i < NumRanges; ++i) {
+ ConstantInt *Low =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
+ Assert(Low, "The lower limit must be an integer!", Low);
+ ConstantInt *High =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
+ Assert(High, "The upper limit must be an integer!", High);
+ Assert(High->getType() == Low->getType() && High->getType() == Ty,
+ "Range types must match instruction type!", &I);
+
+ APInt HighV = High->getValue();
+ APInt LowV = Low->getValue();
+ ConstantRange CurRange(LowV, HighV);
+    Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
+           "Range must not be empty or full!", Range);
+ if (i != 0) {
+ Assert(CurRange.intersectWith(LastRange).isEmptySet(),
+ "Intervals are overlapping", Range);
+ Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
+ Range);
+ Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
+ Range);
+ }
+ LastRange = ConstantRange(LowV, HighV);
+ }
+ if (NumRanges > 2) {
+ APInt FirstLow =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
+ APInt FirstHigh =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
+ ConstantRange FirstRange(FirstLow, FirstHigh);
+ Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
+ "Intervals are overlapping", Range);
+ Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
+ Range);
+ }
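+
+  // For example, a well-formed two-interval attachment looks like
+  //   %v = load i8, i8* %p, !range !0
+  //   !0 = !{i8 0, i8 2, i8 4, i8 6}
+  // encoding the half-open intervals [0,2) and [4,6): in order, and neither
+  // overlapping nor contiguous.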
+}
+
+void Verifier::checkAtomicMemAccessSize(const Module *M, Type *Ty,
+ const Instruction *I) {
+ unsigned Size = M->getDataLayout().getTypeSizeInBits(Ty);
+  Assert(Size >= 8, "atomic memory access' size must be at least one byte",
+         Ty, I);
+ Assert(!(Size & (Size - 1)),
+ "atomic memory access' operand must have a power-of-two size", Ty, I);
+}
+
+void Verifier::visitLoadInst(LoadInst &LI) {
+ PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
+ Assert(PTy, "Load operand must be a pointer.", &LI);
+ Type *ElTy = LI.getType();
+ Assert(LI.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &LI);
+ if (LI.isAtomic()) {
+ Assert(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
+ "Load cannot have Release ordering", &LI);
+ Assert(LI.getAlignment() != 0,
+ "Atomic load must specify explicit alignment", &LI);
+ Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
+ ElTy->isFloatingPointTy(),
+ "atomic load operand must have integer, pointer, or floating point "
+ "type!",
+ ElTy, &LI);
+ checkAtomicMemAccessSize(M, ElTy, &LI);
+ } else {
+ Assert(LI.getSynchScope() == CrossThread,
+ "Non-atomic load cannot have SynchronizationScope specified", &LI);
+ }
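+
+  // For example,
+  //   %v = load atomic i32, i32* %p release, align 4
+  // is rejected: release and acq_rel orderings apply only to writes.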
+
+ visitInstruction(LI);
+}
+
+void Verifier::visitStoreInst(StoreInst &SI) {
+ PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
+ Assert(PTy, "Store operand must be a pointer.", &SI);
+ Type *ElTy = PTy->getElementType();
+ Assert(ElTy == SI.getOperand(0)->getType(),
+ "Stored value type does not match pointer operand type!", &SI, ElTy);
+ Assert(SI.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &SI);
+ if (SI.isAtomic()) {
+ Assert(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
+ "Store cannot have Acquire ordering", &SI);
+ Assert(SI.getAlignment() != 0,
+ "Atomic store must specify explicit alignment", &SI);
+ Assert(ElTy->isIntegerTy() || ElTy->isPointerTy() ||
+ ElTy->isFloatingPointTy(),
+ "atomic store operand must have integer, pointer, or floating point "
+ "type!",
+ ElTy, &SI);
+ checkAtomicMemAccessSize(M, ElTy, &SI);
+ } else {
+ Assert(SI.getSynchScope() == CrossThread,
+ "Non-atomic store cannot have SynchronizationScope specified", &SI);
+ }
+ visitInstruction(SI);
+}
+
+void Verifier::visitAllocaInst(AllocaInst &AI) {
+ SmallPtrSet<Type*, 4> Visited;
+ PointerType *PTy = AI.getType();
+ Assert(PTy->getAddressSpace() == 0,
+ "Allocation instruction pointer not in the generic address space!",
+ &AI);
+ Assert(AI.getAllocatedType()->isSized(&Visited),
+ "Cannot allocate unsized type", &AI);
+ Assert(AI.getArraySize()->getType()->isIntegerTy(),
+ "Alloca array size must have integer type", &AI);
+ Assert(AI.getAlignment() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &AI);
+
+ visitInstruction(AI);
+}
+
+void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
+
+ // FIXME: more conditions???
+ Assert(CXI.getSuccessOrdering() != NotAtomic,
+ "cmpxchg instructions must be atomic.", &CXI);
+ Assert(CXI.getFailureOrdering() != NotAtomic,
+ "cmpxchg instructions must be atomic.", &CXI);
+ Assert(CXI.getSuccessOrdering() != Unordered,
+ "cmpxchg instructions cannot be unordered.", &CXI);
+ Assert(CXI.getFailureOrdering() != Unordered,
+ "cmpxchg instructions cannot be unordered.", &CXI);
+  Assert(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
+         "cmpxchg instructions must be at least as constrained on success "
+         "as fail",
+         &CXI);
+ Assert(CXI.getFailureOrdering() != Release &&
+ CXI.getFailureOrdering() != AcquireRelease,
+ "cmpxchg failure ordering cannot include release semantics", &CXI);
+
+ PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
+ Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
+ Type *ElTy = PTy->getElementType();
+ Assert(ElTy->isIntegerTy(), "cmpxchg operand must have integer type!", &CXI,
+ ElTy);
+ checkAtomicMemAccessSize(M, ElTy, &CXI);
+ Assert(ElTy == CXI.getOperand(1)->getType(),
+ "Expected value type does not match pointer operand type!", &CXI,
+ ElTy);
+ Assert(ElTy == CXI.getOperand(2)->getType(),
+ "Stored value type does not match pointer operand type!", &CXI, ElTy);
+ visitInstruction(CXI);
+}
+
+void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
+ Assert(RMWI.getOrdering() != NotAtomic,
+ "atomicrmw instructions must be atomic.", &RMWI);
+ Assert(RMWI.getOrdering() != Unordered,
+ "atomicrmw instructions cannot be unordered.", &RMWI);
+ PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
+ Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
+ Type *ElTy = PTy->getElementType();
+ Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!",
+ &RMWI, ElTy);
+ checkAtomicMemAccessSize(M, ElTy, &RMWI);
+ Assert(ElTy == RMWI.getOperand(1)->getType(),
+ "Argument value type does not match pointer operand type!", &RMWI,
+ ElTy);
+ Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
+ RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
+ "Invalid binary operation!", &RMWI);
+ visitInstruction(RMWI);
+}
+
+void Verifier::visitFenceInst(FenceInst &FI) {
+ const AtomicOrdering Ordering = FI.getOrdering();
+ Assert(Ordering == Acquire || Ordering == Release ||
+ Ordering == AcquireRelease || Ordering == SequentiallyConsistent,
+ "fence instructions may only have "
+ "acquire, release, acq_rel, or seq_cst ordering.",
+ &FI);
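+
+  // For example, 'fence acquire' is accepted, while 'fence monotonic' is
+  // rejected: a monotonic fence would impose no ordering at all.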
+ visitInstruction(FI);
+}
+
+void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
+ Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
+ EVI.getIndices()) == EVI.getType(),
+ "Invalid ExtractValueInst operands!", &EVI);
+
+ visitInstruction(EVI);
+}
+
+void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
+ Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
+ IVI.getIndices()) ==
+ IVI.getOperand(1)->getType(),
+ "Invalid InsertValueInst operands!", &IVI);
+
+ visitInstruction(IVI);
+}
+
+static Value *getParentPad(Value *EHPad) {
+ if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
+ return FPI->getParentPad();
+
+ return cast<CatchSwitchInst>(EHPad)->getParentPad();
+}
+
+void Verifier::visitEHPadPredecessors(Instruction &I) {
+ assert(I.isEHPad());
+
+ BasicBlock *BB = I.getParent();
+ Function *F = BB->getParent();
+
+ Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
+
+ if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
+ // The landingpad instruction defines its parent as a landing pad block. The
+ // landing pad block may be branched to only by the unwind edge of an
+ // invoke.
+ for (BasicBlock *PredBB : predecessors(BB)) {
+ const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
+ Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
+ "Block containing LandingPadInst must be jumped to "
+ "only by the unwind edge of an invoke.",
+ LPI);
+ }
+ return;
+ }
+ if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
+ if (!pred_empty(BB))
+ Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
+ "Block containg CatchPadInst must be jumped to "
+ "only by its catchswitch.",
+ CPI);
+ Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
+ "Catchswitch cannot unwind to one of its catchpads",
+ CPI->getCatchSwitch(), CPI);
+ return;
+ }
+
+ // Verify that each pred has a legal terminator with a legal to/from EH
+ // pad relationship.
+ Instruction *ToPad = &I;
+ Value *ToPadParent = getParentPad(ToPad);
+ for (BasicBlock *PredBB : predecessors(BB)) {
+ TerminatorInst *TI = PredBB->getTerminator();
+ Value *FromPad;
+ if (auto *II = dyn_cast<InvokeInst>(TI)) {
+ Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
+ "EH pad must be jumped to via an unwind edge", ToPad, II);
+ if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
+ FromPad = Bundle->Inputs[0];
+ else
+ FromPad = ConstantTokenNone::get(II->getContext());
+ } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
+ FromPad = CRI->getCleanupPad();
+ Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
+ } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
+ FromPad = CSI;
+ } else {
+ Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
+ }
+
+ // The edge may exit from zero or more nested pads.
+ for (;; FromPad = getParentPad(FromPad)) {
+ Assert(FromPad != ToPad,
+ "EH pad cannot handle exceptions raised within it", FromPad, TI);
+ if (FromPad == ToPadParent) {
+ // This is a legal unwind edge.
+ break;
+ }
+ Assert(!isa<ConstantTokenNone>(FromPad),
+ "A single unwind edge may only enter one EH pad", TI);
+ }
+ }
+}
+
+void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
+ // The landingpad instruction is ill-formed if it doesn't have any clauses and
+ // isn't a cleanup.
+ Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
+ "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
+
+ visitEHPadPredecessors(LPI);
+
+ if (!LandingPadResultTy)
+ LandingPadResultTy = LPI.getType();
+ else
+ Assert(LandingPadResultTy == LPI.getType(),
+ "The landingpad instruction should have a consistent result type "
+ "inside a function.",
+ &LPI);
+
+ Function *F = LPI.getParent()->getParent();
+ Assert(F->hasPersonalityFn(),
+ "LandingPadInst needs to be in a function with a personality.", &LPI);
+
+ // The landingpad instruction must be the first non-PHI instruction in the
+ // block.
+ Assert(LPI.getParent()->getLandingPadInst() == &LPI,
+ "LandingPadInst not the first non-PHI instruction in the block.",
+ &LPI);
+
+ for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
+ Constant *Clause = LPI.getClause(i);
+ if (LPI.isCatch(i)) {
+ Assert(isa<PointerType>(Clause->getType()),
+ "Catch operand does not have pointer type!", &LPI);
+ } else {
+ Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
+ Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
+ "Filter operand is not an array of constants!", &LPI);
+ }
+ }
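+
+  // For example, a landingpad mixing both clause kinds looks like
+  //   %lp = landingpad { i8*, i32 }
+  //           catch i8* @TypeInfoA
+  //           filter [1 x i8*] [i8* @TypeInfoB]
+  // where catch operands are pointers and filter operands are constant arrays.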
+
+ visitInstruction(LPI);
+}
+
+void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
+ visitEHPadPredecessors(CPI);
+
+ BasicBlock *BB = CPI.getParent();
+
+ Function *F = BB->getParent();
+ Assert(F->hasPersonalityFn(),
+ "CatchPadInst needs to be in a function with a personality.", &CPI);
+
+ Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
+ "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
+ CPI.getParentPad());
+
+ // The catchpad instruction must be the first non-PHI instruction in the
+ // block.
+ Assert(BB->getFirstNonPHI() == &CPI,
+ "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
+
+ visitFuncletPadInst(CPI);
+}
+
+void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
+ Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
+ "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
+ CatchReturn.getOperand(0));
+
+ visitTerminatorInst(CatchReturn);
+}
+
+void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
+ visitEHPadPredecessors(CPI);
+
+ BasicBlock *BB = CPI.getParent();
+
+ Function *F = BB->getParent();
+ Assert(F->hasPersonalityFn(),
+ "CleanupPadInst needs to be in a function with a personality.", &CPI);
+
+ // The cleanuppad instruction must be the first non-PHI instruction in the
+ // block.
+ Assert(BB->getFirstNonPHI() == &CPI,
+ "CleanupPadInst not the first non-PHI instruction in the block.",
+ &CPI);
+
+ auto *ParentPad = CPI.getParentPad();
+ Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
+ "CleanupPadInst has an invalid parent.", &CPI);
+
+ visitFuncletPadInst(CPI);
+}
+
+void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
+ User *FirstUser = nullptr;
+ Value *FirstUnwindPad = nullptr;
+ SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
+ while (!Worklist.empty()) {
+ FuncletPadInst *CurrentPad = Worklist.pop_back_val();
+ Value *UnresolvedAncestorPad = nullptr;
+ for (User *U : CurrentPad->users()) {
+ BasicBlock *UnwindDest;
+ if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
+ UnwindDest = CRI->getUnwindDest();
+ } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
+ // We allow catchswitch unwind to caller to nest
+ // within an outer pad that unwinds somewhere else,
+ // because catchswitch doesn't have a nounwind variant.
+ // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
+ if (CSI->unwindsToCaller())
+ continue;
+ UnwindDest = CSI->getUnwindDest();
+ } else if (auto *II = dyn_cast<InvokeInst>(U)) {
+ UnwindDest = II->getUnwindDest();
+ } else if (isa<CallInst>(U)) {
+ // Calls which don't unwind may be found inside funclet
+ // pads that unwind somewhere else. We don't *require*
+ // such calls to be annotated nounwind.
+ continue;
+ } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
+ // The unwind dest for a cleanup can only be found by
+ // recursive search. Add it to the worklist, and we'll
+ // search for its first use that determines where it unwinds.
+ Worklist.push_back(CPI);
+ continue;
+ } else {
+ Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
+ continue;
+ }
+
+ Value *UnwindPad;
+ bool ExitsFPI;
+ if (UnwindDest) {
+ UnwindPad = UnwindDest->getFirstNonPHI();
+ Value *UnwindParent = getParentPad(UnwindPad);
+ // Ignore unwind edges that don't exit CurrentPad.
+ if (UnwindParent == CurrentPad)
+ continue;
+ // Determine whether the original funclet pad is exited,
+ // and if we are scanning nested pads determine how many
+ // of them are exited so we can stop searching their
+ // children.
+ Value *ExitedPad = CurrentPad;
+ ExitsFPI = false;
+ do {
+ if (ExitedPad == &FPI) {
+ ExitsFPI = true;
+ // Now we can resolve any ancestors of CurrentPad up to
+ // FPI, but not including FPI since we need to make sure
+ // to check all direct users of FPI for consistency.
+ UnresolvedAncestorPad = &FPI;
+ break;
+ }
+ Value *ExitedParent = getParentPad(ExitedPad);
+ if (ExitedParent == UnwindParent) {
+ // ExitedPad is the ancestor-most pad which this unwind
+ // edge exits, so we can resolve up to it, meaning that
+ // ExitedParent is the first ancestor still unresolved.
+ UnresolvedAncestorPad = ExitedParent;
+ break;
+ }
+ ExitedPad = ExitedParent;
+ } while (!isa<ConstantTokenNone>(ExitedPad));
+ } else {
+ // Unwinding to caller exits all pads.
+ UnwindPad = ConstantTokenNone::get(FPI.getContext());
+ ExitsFPI = true;
+ UnresolvedAncestorPad = &FPI;
+ }
+
+ if (ExitsFPI) {
+ // This unwind edge exits FPI. Make sure it agrees with other
+ // such edges.
+ if (FirstUser) {
+ Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
+ "pad must have the same unwind "
+ "dest",
+ &FPI, U, FirstUser);
+ } else {
+ FirstUser = U;
+ FirstUnwindPad = UnwindPad;
+ // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
+ if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
+ getParentPad(UnwindPad) == getParentPad(&FPI))
+ SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U);
+ }
+ }
+ // Make sure we visit all uses of FPI, but for nested pads stop as
+ // soon as we know where they unwind to.
+ if (CurrentPad != &FPI)
+ break;
+ }
+ if (UnresolvedAncestorPad) {
+ if (CurrentPad == UnresolvedAncestorPad) {
+ // When CurrentPad is FPI itself, we don't mark it as resolved even if
+ // we've found an unwind edge that exits it, because we need to verify
+ // all direct uses of FPI.
+ assert(CurrentPad == &FPI);
+ continue;
+ }
+ // Pop off the worklist any nested pads that we've found an unwind
+ // destination for. The pads on the worklist are the uncles,
+ // great-uncles, etc. of CurrentPad. We've found an unwind destination
+ // for all ancestors of CurrentPad up to but not including
+ // UnresolvedAncestorPad.
+ Value *ResolvedPad = CurrentPad;
+ while (!Worklist.empty()) {
+ Value *UnclePad = Worklist.back();
+ Value *AncestorPad = getParentPad(UnclePad);
+ // Walk ResolvedPad up the ancestor list until we either find the
+ // uncle's parent or the last resolved ancestor.
+ while (ResolvedPad != AncestorPad) {
+ Value *ResolvedParent = getParentPad(ResolvedPad);
+ if (ResolvedParent == UnresolvedAncestorPad) {
+ break;
+ }
+ ResolvedPad = ResolvedParent;
+ }
+ // If the resolved ancestor search didn't find the uncle's parent,
+ // then the uncle is not yet resolved.
+ if (ResolvedPad != AncestorPad)
+ break;
+ // This uncle is resolved, so pop it from the worklist.
+ Worklist.pop_back();
+ }
+ }
+ }
+
+ if (FirstUnwindPad) {
+ if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
+ BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
+ Value *SwitchUnwindPad;
+ if (SwitchUnwindDest)
+ SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
+ else
+ SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
+ Assert(SwitchUnwindPad == FirstUnwindPad,
+ "Unwind edges out of a catch must have the same unwind dest as "
+ "the parent catchswitch",
+ &FPI, FirstUser, CatchSwitch);
+ }
+ }
+
+ visitInstruction(FPI);
+}
+
+void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
+ visitEHPadPredecessors(CatchSwitch);
+
+ BasicBlock *BB = CatchSwitch.getParent();
+
+ Function *F = BB->getParent();
+ Assert(F->hasPersonalityFn(),
+ "CatchSwitchInst needs to be in a function with a personality.",
+ &CatchSwitch);
+
+ // The catchswitch instruction must be the first non-PHI instruction in the
+ // block.
+ Assert(BB->getFirstNonPHI() == &CatchSwitch,
+ "CatchSwitchInst not the first non-PHI instruction in the block.",
+ &CatchSwitch);
+
+ auto *ParentPad = CatchSwitch.getParentPad();
+ Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
+ "CatchSwitchInst has an invalid parent.", ParentPad);
+
+ if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
+ Instruction *I = UnwindDest->getFirstNonPHI();
+ Assert(I->isEHPad() && !isa<LandingPadInst>(I),
+ "CatchSwitchInst must unwind to an EH block which is not a "
+ "landingpad.",
+ &CatchSwitch);
+
+ // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
+ if (getParentPad(I) == ParentPad)
+ SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
+ }
+
+ Assert(CatchSwitch.getNumHandlers() != 0,
+ "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
+
+ for (BasicBlock *Handler : CatchSwitch.handlers()) {
+ Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
+ "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
+ }
+
+ visitTerminatorInst(CatchSwitch);
+}
+
+void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
+ Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
+ "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
+ CRI.getOperand(0));
+
+ if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
+ Instruction *I = UnwindDest->getFirstNonPHI();
+ Assert(I->isEHPad() && !isa<LandingPadInst>(I),
+ "CleanupReturnInst must unwind to an EH block which is not a "
+ "landingpad.",
+ &CRI);
+ }
+
+ visitTerminatorInst(CRI);
+}
+
+void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
+ Instruction *Op = cast<Instruction>(I.getOperand(i));
+  // If we have an invalid invoke, don't try to compute the dominance.
+ // We already reject it in the invoke specific checks and the dominance
+ // computation doesn't handle multiple edges.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
+ if (II->getNormalDest() == II->getUnwindDest())
+ return;
+ }
+
+ const Use &U = I.getOperandUse(i);
+ Assert(InstsInThisBlock.count(Op) || DT.dominates(Op, U),
+ "Instruction does not dominate all uses!", Op, &I);
+}
+
+void Verifier::visitDereferenceableMetadata(Instruction &I, MDNode *MD) {
+ Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
+ "apply only to pointer types", &I);
+ Assert(isa<LoadInst>(I),
+ "dereferenceable, dereferenceable_or_null apply only to load"
+ " instructions, use attributes for calls or invokes", &I);
+ Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
+ "take one operand!", &I);
+ ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
+ Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
+ "dereferenceable_or_null metadata value must be an i64!", &I);
+}
+
+/// visitInstruction - Verify that an instruction is well formed.
+///
+void Verifier::visitInstruction(Instruction &I) {
+ BasicBlock *BB = I.getParent();
+ Assert(BB, "Instruction not embedded in basic block!", &I);
+
+ if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
+ for (User *U : I.users()) {
+ Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
+ "Only PHI nodes may reference their own value!", &I);
+ }
+ }
+
+ // Check that void typed values don't have names
+ Assert(!I.getType()->isVoidTy() || !I.hasName(),
+ "Instruction has a name, but provides a void value!", &I);
+
+ // Check that the return value of the instruction is either void or a legal
+ // value type.
+ Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
+ "Instruction returns a non-scalar type!", &I);
+
+ // Check that the instruction doesn't produce metadata. Calls are already
+ // checked against the callee type.
+ Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
+ "Invalid use of metadata!", &I);
+
+ // Check that all uses of the instruction, if they are instructions
+ // themselves, actually have parent basic blocks. If the use is not an
+ // instruction, it is an error!
+ for (Use &U : I.uses()) {
+ if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
+ Assert(Used->getParent() != nullptr,
+ "Instruction referencing"
+ " instruction not embedded in a basic block!",
+ &I, Used);
+ else {
+ CheckFailed("Use of instruction is not an instruction!", U);
+ return;
+ }
+ }
+
+ for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
+ Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
+
+ // Check to make sure that only first-class-values are operands to
+ // instructions.
+ if (!I.getOperand(i)->getType()->isFirstClassType()) {
+ Assert(0, "Instruction operands must be first-class values!", &I);
+ }
+
+ if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
+ // Check to make sure that the "address of" an intrinsic function is never
+ // taken.
+ Assert(
+ !F->isIntrinsic() ||
+ i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ? e - 3 : 0),
+ "Cannot take the address of an intrinsic!", &I);
+ Assert(
+ !F->isIntrinsic() || isa<CallInst>(I) ||
+ F->getIntrinsicID() == Intrinsic::donothing ||
+ F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
+ F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
+ F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint,
+ "Cannot invoke an intrinsinc other than"
+ " donothing or patchpoint",
+ &I);
+ Assert(F->getParent() == M, "Referencing function in another module!",
+ &I, M, F, F->getParent());
+ } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
+ Assert(OpBB->getParent() == BB->getParent(),
+ "Referring to a basic block in another function!", &I);
+ } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
+ Assert(OpArg->getParent() == BB->getParent(),
+ "Referring to an argument in another function!", &I);
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
+      Assert(GV->getParent() == M, "Referencing global in another module!",
+             &I, M, GV, GV->getParent());
+ } else if (isa<Instruction>(I.getOperand(i))) {
+ verifyDominatesUse(I, i);
+ } else if (isa<InlineAsm>(I.getOperand(i))) {
+ Assert((i + 1 == e && isa<CallInst>(I)) ||
+ (i + 3 == e && isa<InvokeInst>(I)),
+ "Cannot take the address of an inline asm!", &I);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
+ if (CE->getType()->isPtrOrPtrVectorTy()) {
+ // If we have a ConstantExpr pointer, we need to see if it came from an
+ // illegal bitcast (inttoptr <constant int> )
+ visitConstantExprsRecursively(CE);
+ }
+ }
+ }
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
+ Assert(I.getType()->isFPOrFPVectorTy(),
+ "fpmath requires a floating point result!", &I);
+ Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
+ if (ConstantFP *CFP0 =
+ mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
+ APFloat Accuracy = CFP0->getValueAPF();
+ Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
+ "fpmath accuracy not a positive number!", &I);
+ } else {
+ Assert(false, "invalid fpmath accuracy!", &I);
+ }
+ }
+
+ if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
+ Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
+ "Ranges are only for loads, calls and invokes!", &I);
+ visitRangeMetadata(I, Range, I.getType());
+ }
+
+ if (I.getMetadata(LLVMContext::MD_nonnull)) {
+ Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
+ &I);
+ Assert(isa<LoadInst>(I),
+ "nonnull applies only to load instructions, use attributes"
+ " for calls or invokes",
+ &I);
+ }
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
+ visitDereferenceableMetadata(I, MD);
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
+ visitDereferenceableMetadata(I, MD);
+
+ if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
+ Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
+ &I);
+ Assert(isa<LoadInst>(I), "align applies only to load instructions, "
+ "use attributes for calls or invokes", &I);
+ Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
+ ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
+ Assert(CI && CI->getType()->isIntegerTy(64),
+ "align metadata value must be an i64!", &I);
+ uint64_t Align = CI->getZExtValue();
+ Assert(isPowerOf2_64(Align),
+ "align metadata value must be a power of 2!", &I);
+ Assert(Align <= Value::MaximumAlignment,
+ "alignment is larger that implementation defined limit", &I);
+ }
+
+ if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
+ Assert(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
+ visitMDNode(*N);
+ }
+
+ InstsInThisBlock.insert(&I);
+}
+
+/// VerifyIntrinsicType - Verify that the specified type (which comes from an
+/// intrinsic argument or return value) matches the type constraints specified
+/// by the .td file (e.g. an "any integer" argument really is an integer).
+///
+/// This returns true on error but does not print a message.
+bool Verifier::VerifyIntrinsicType(Type *Ty,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ SmallVectorImpl<Type*> &ArgTys) {
+ using namespace Intrinsic;
+
+ // If we ran out of descriptors, there are too many arguments.
+ if (Infos.empty()) return true;
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+
+ switch (D.Kind) {
+ case IITDescriptor::Void: return !Ty->isVoidTy();
+ case IITDescriptor::VarArg: return true;
+ case IITDescriptor::MMX: return !Ty->isX86_MMXTy();
+ case IITDescriptor::Token: return !Ty->isTokenTy();
+ case IITDescriptor::Metadata: return !Ty->isMetadataTy();
+ case IITDescriptor::Half: return !Ty->isHalfTy();
+ case IITDescriptor::Float: return !Ty->isFloatTy();
+ case IITDescriptor::Double: return !Ty->isDoubleTy();
+ case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
+ case IITDescriptor::Vector: {
+ VectorType *VT = dyn_cast<VectorType>(Ty);
+ return !VT || VT->getNumElements() != D.Vector_Width ||
+ VerifyIntrinsicType(VT->getElementType(), Infos, ArgTys);
+ }
+ case IITDescriptor::Pointer: {
+ PointerType *PT = dyn_cast<PointerType>(Ty);
+ return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace ||
+ VerifyIntrinsicType(PT->getElementType(), Infos, ArgTys);
+ }
+
+ case IITDescriptor::Struct: {
+ StructType *ST = dyn_cast<StructType>(Ty);
+ if (!ST || ST->getNumElements() != D.Struct_NumElements)
+ return true;
+
+ for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
+ if (VerifyIntrinsicType(ST->getElementType(i), Infos, ArgTys))
+ return true;
+ return false;
+ }
+
+ case IITDescriptor::Argument:
+ // Two cases here - If this is the second occurrence of an argument, verify
+ // that the later instance matches the previous instance.
+ if (D.getArgumentNumber() < ArgTys.size())
+ return Ty != ArgTys[D.getArgumentNumber()];
+
+ // Otherwise, if this is the first instance of an argument, record it and
+ // verify the "Any" kind.
+ assert(D.getArgumentNumber() == ArgTys.size() && "Table consistency error");
+ ArgTys.push_back(Ty);
+
+ switch (D.getArgumentKind()) {
+ case IITDescriptor::AK_Any: return false; // Success
+ case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy();
+ case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy();
+ case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty);
+ case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty);
+ }
+ llvm_unreachable("all argument kinds not covered");
+
+ case IITDescriptor::ExtendArgument: {
+ // This may only be used when referring to a previous vector argument.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return true;
+
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
+ NewTy = VectorType::getExtendedElementVectorType(VTy);
+ else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
+ NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
+ else
+ return true;
+
+ return Ty != NewTy;
+ }
+ case IITDescriptor::TruncArgument: {
+ // This may only be used when referring to a previous vector argument.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return true;
+
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
+ NewTy = VectorType::getTruncatedElementVectorType(VTy);
+ else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
+ NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
+ else
+ return true;
+
+ return Ty != NewTy;
+ }
+ case IITDescriptor::HalfVecArgument:
+ // This may only be used when referring to a previous vector argument.
+ return D.getArgumentNumber() >= ArgTys.size() ||
+ !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
+ VectorType::getHalfElementsVectorType(
+ cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
+ case IITDescriptor::SameVecWidthArgument: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return true;
+ VectorType * ReferenceType =
+ dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
+ VectorType *ThisArgType = dyn_cast<VectorType>(Ty);
+ if (!ThisArgType || !ReferenceType ||
+ (ReferenceType->getVectorNumElements() !=
+ ThisArgType->getVectorNumElements()))
+ return true;
+ return VerifyIntrinsicType(ThisArgType->getVectorElementType(),
+ Infos, ArgTys);
+ }
+ case IITDescriptor::PtrToArgument: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return true;
+ Type * ReferenceType = ArgTys[D.getArgumentNumber()];
+ PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
+ return (!ThisArgType || ThisArgType->getElementType() != ReferenceType);
+ }
+ case IITDescriptor::VecOfPtrsToElt: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return true;
+ VectorType * ReferenceType =
+ dyn_cast<VectorType> (ArgTys[D.getArgumentNumber()]);
+ VectorType *ThisArgVecTy = dyn_cast<VectorType>(Ty);
+ if (!ThisArgVecTy || !ReferenceType ||
+ (ReferenceType->getVectorNumElements() !=
+ ThisArgVecTy->getVectorNumElements()))
+ return true;
+ PointerType *ThisArgEltTy =
+ dyn_cast<PointerType>(ThisArgVecTy->getVectorElementType());
+ if (!ThisArgEltTy)
+ return true;
+ return ThisArgEltTy->getElementType() !=
+ ReferenceType->getVectorElementType();
+ }
+ }
+ llvm_unreachable("unhandled");
+}
+
+/// \brief Verify if the intrinsic has variable arguments.
+/// This method is intended to be called after all the fixed arguments have been
+/// verified first.
+///
+/// This method returns true on error and does not print an error message.
+bool
+Verifier::VerifyIntrinsicIsVarArg(bool isVarArg,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos) {
+ using namespace Intrinsic;
+
+ // If there are no descriptors left, then it can't be a vararg.
+ if (Infos.empty())
+ return isVarArg;
+
+ // There should be only one descriptor remaining at this point.
+ if (Infos.size() != 1)
+ return true;
+
+ // Check and verify the descriptor.
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+ if (D.Kind == IITDescriptor::VarArg)
+ return !isVarArg;
+
+ return true;
+}
+
+/// Allow intrinsics to be verified in different ways.
+void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
+ Function *IF = CS.getCalledFunction();
+ Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
+ IF);
+
+ // Verify that the intrinsic prototype lines up with what the .td files
+ // describe.
+ FunctionType *IFTy = IF->getFunctionType();
+ bool IsVarArg = IFTy->isVarArg();
+
+ SmallVector<Intrinsic::IITDescriptor, 8> Table;
+ getIntrinsicInfoTableEntries(ID, Table);
+ ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
+
+ SmallVector<Type *, 4> ArgTys;
+ Assert(!VerifyIntrinsicType(IFTy->getReturnType(), TableRef, ArgTys),
+ "Intrinsic has incorrect return type!", IF);
+ for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
+ Assert(!VerifyIntrinsicType(IFTy->getParamType(i), TableRef, ArgTys),
+ "Intrinsic has incorrect argument type!", IF);
+
+ // Verify if the intrinsic call matches the vararg property.
+ if (IsVarArg)
+ Assert(!VerifyIntrinsicIsVarArg(IsVarArg, TableRef),
+ "Intrinsic was not defined with variable arguments!", IF);
+ else
+ Assert(!VerifyIntrinsicIsVarArg(IsVarArg, TableRef),
+ "Callsite was not defined with variable arguments!", IF);
+
+ // All descriptors should be absorbed by now.
+ Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
+
+ // Now that we have the intrinsic ID and the actual argument types (and we
+ // know they are legal for the intrinsic!) get the intrinsic name through the
+ // usual means. This allows us to verify the mangling of argument types into
+ // the name.
+ const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
+ Assert(ExpectedName == IF->getName(),
+ "Intrinsic name not mangled correctly for type arguments! "
+ "Should be: " +
+ ExpectedName,
+ IF);
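+
+ // As an illustrative sketch (not taken from this file): for an overloaded
+ // intrinsic such as llvm.memcpy, the argument types are mangled into the
+ // name, so a matching declaration looks like
+ //
+ //   declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
+ //
+ // and a function named @llvm.memcpy.p0i8.p0i8.i32 whose length argument is
+ // i64 would fail the name check above.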
+
+ // If the intrinsic takes MDNode arguments, verify that they are either
+ // global or local to *this* function.
+ for (Value *V : CS.args())
+ if (auto *MD = dyn_cast<MetadataAsValue>(V))
+ visitMetadataAsValue(*MD, CS.getCaller());
+
+ switch (ID) {
+ default:
+ break;
+ case Intrinsic::ctlz: // llvm.ctlz
+ case Intrinsic::cttz: // llvm.cttz
+ Assert(isa<ConstantInt>(CS.getArgOperand(1)),
+ "is_zero_undef argument of bit counting intrinsics must be a "
+ "constant int",
+ CS);
+ break;
+ case Intrinsic::dbg_declare: // llvm.dbg.declare
+ Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
+ "invalid llvm.dbg.declare intrinsic call 1", CS);
+ visitDbgIntrinsic("declare", cast<DbgDeclareInst>(*CS.getInstruction()));
+ break;
+ case Intrinsic::dbg_value: // llvm.dbg.value
+ visitDbgIntrinsic("value", cast<DbgValueInst>(*CS.getInstruction()));
+ break;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset: {
+ ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3));
+ Assert(AlignCI,
+ "alignment argument of memory intrinsics must be a constant int",
+ CS);
+ const APInt &AlignVal = AlignCI->getValue();
+ Assert(AlignCI->isZero() || AlignVal.isPowerOf2(),
+ "alignment argument of memory intrinsics must be a power of 2", CS);
+ Assert(isa<ConstantInt>(CS.getArgOperand(4)),
+ "isvolatile argument of memory intrinsics must be a constant int",
+ CS);
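+    // For example (an illustrative call, not from this file), both trailing
+    // operands must be immediates:
+    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n,
+    //                                        i32 4, i1 false)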
+ break;
+ }
+ case Intrinsic::gcroot:
+ case Intrinsic::gcwrite:
+ case Intrinsic::gcread:
+ if (ID == Intrinsic::gcroot) {
+ AllocaInst *AI =
+ dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
+ Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
+ Assert(isa<Constant>(CS.getArgOperand(1)),
+ "llvm.gcroot parameter #2 must be a constant.", CS);
+ if (!AI->getAllocatedType()->isPointerTy()) {
+ Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
+ "llvm.gcroot parameter #1 must either be a pointer alloca, "
+ "or argument #2 must be a non-null constant.",
+ CS);
+ }
+ }
+
+ Assert(CS.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", CS);
+ break;
+ case Intrinsic::init_trampoline:
+ Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
+ "llvm.init_trampoline parameter #2 must resolve to a function.",
+ CS);
+ break;
+ case Intrinsic::prefetch:
+ Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
+ isa<ConstantInt>(CS.getArgOperand(2)) &&
+ cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
+ cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
+ "invalid arguments to llvm.prefetch", CS);
+ break;
+ case Intrinsic::stackprotector:
+ Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
+ "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
+ break;
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ Assert(isa<ConstantInt>(CS.getArgOperand(0)),
+ "size argument of memory use markers must be a constant integer",
+ CS);
+ break;
+ case Intrinsic::invariant_end:
+ Assert(isa<ConstantInt>(CS.getArgOperand(1)),
+ "llvm.invariant.end parameter #2 must be a constant integer", CS);
+ break;
+
+ case Intrinsic::localescape: {
+ BasicBlock *BB = CS.getParent();
+ Assert(BB == &BB->getParent()->front(),
+ "llvm.localescape used outside of entry block", CS);
+ Assert(!SawFrameEscape,
+ "multiple calls to llvm.localescape in one function", CS);
+ for (Value *Arg : CS.args()) {
+ if (isa<ConstantPointerNull>(Arg))
+ continue; // Null values are allowed as placeholders.
+ auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
+ Assert(AI && AI->isStaticAlloca(),
+ "llvm.localescape only accepts static allocas", CS);
+ }
+ FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
+ SawFrameEscape = true;
+ break;
+ }
+ case Intrinsic::localrecover: {
+ Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
+ Function *Fn = dyn_cast<Function>(FnArg);
+ Assert(Fn && !Fn->isDeclaration(),
+ "llvm.localrecover first "
+ "argument must be function defined in this module",
+ CS);
+ auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+ Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
+ CS);
+ auto &Entry = FrameEscapeInfo[Fn];
+ Entry.second = unsigned(
+ std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
+ break;
+ }
+
+ case Intrinsic::experimental_gc_statepoint:
+ Assert(!CS.isInlineAsm(),
+ "gc.statepoint support for inline assembly unimplemented", CS);
+ Assert(CS.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", CS);
+
+ VerifyStatepoint(CS);
+ break;
+ case Intrinsic::experimental_gc_result: {
+ Assert(CS.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", CS);
+ // Are we tied to a statepoint properly?
+ CallSite StatepointCS(CS.getArgOperand(0));
+ const Function *StatepointFn =
+ StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
+ Assert(StatepointFn && StatepointFn->isDeclaration() &&
+ StatepointFn->getIntrinsicID() ==
+ Intrinsic::experimental_gc_statepoint,
+ "gc.result operand #1 must be from a statepoint", CS,
+ CS.getArgOperand(0));
+
+ // Assert that result type matches wrapped callee.
+ const Value *Target = StatepointCS.getArgument(2);
+ auto *PT = cast<PointerType>(Target->getType());
+ auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
+ Assert(CS.getType() == TargetFuncType->getReturnType(),
+ "gc.result result type does not match wrapped callee", CS);
+ break;
+ }
+ case Intrinsic::experimental_gc_relocate: {
+ Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
+
+ Assert(isa<PointerType>(CS.getType()->getScalarType()),
+ "gc.relocate must return a pointer or a vector of pointers", CS);
+
+ // Check that this relocate is correctly tied to the statepoint
+
+ // This is the case for a relocate on the unwinding path of an invoke
+ // statepoint.
+ if (LandingPadInst *LandingPad =
+ dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
+
+ const BasicBlock *InvokeBB =
+ LandingPad->getParent()->getUniquePredecessor();
+
+ // Landingpad relocates should have only one predecessor, terminated by
+ // an invoke statepoint.
+ Assert(InvokeBB, "safepoints should have unique landingpads",
+ LandingPad->getParent());
+ Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
+ InvokeBB);
+ Assert(isStatepoint(InvokeBB->getTerminator()),
+ "gc relocate should be linked to a statepoint", InvokeBB);
+ } else {
+ // In all other cases the relocate should be tied to the statepoint
+ // directly. This covers relocates on the normal return path of an invoke
+ // statepoint and relocates of a call statepoint.
+ auto Token = CS.getArgOperand(0);
+ Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
+ "gc relocate is incorrectly tied to the statepoint", CS, Token);
+ }
+
+ // Verify the rest of the relocate arguments.
+
+ ImmutableCallSite StatepointCS(
+ cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
+
+ // Both the base and derived must be piped through the safepoint
+ Value *Base = CS.getArgOperand(1);
+ Assert(isa<ConstantInt>(Base),
+ "gc.relocate operand #2 must be integer offset", CS);
+
+ Value *Derived = CS.getArgOperand(2);
+ Assert(isa<ConstantInt>(Derived),
+ "gc.relocate operand #3 must be integer offset", CS);
+
+ const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
+ const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
+ // Check the bounds
+ Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
+ "gc.relocate: statepoint base index out of bounds", CS);
+ Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
+ "gc.relocate: statepoint derived index out of bounds", CS);
+
+ // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
+ // section of the statepoint's argument list.
+ Assert(StatepointCS.arg_size() > 0,
+ "gc.statepoint: insufficient arguments");
+ Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
+ "gc.statement: number of call arguments must be constant integer");
+ const unsigned NumCallArgs =
+ cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
+ Assert(StatepointCS.arg_size() > NumCallArgs + 5,
+ "gc.statepoint: mismatch in number of call arguments");
+ Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
+ "gc.statepoint: number of transition arguments must be "
+ "a constant integer");
+ const int NumTransitionArgs =
+ cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
+ ->getZExtValue();
+ const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
+ Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
+ "gc.statepoint: number of deoptimization arguments must be "
+ "a constant integer");
+ const int NumDeoptArgs =
+ cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))->getZExtValue();
+ const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
+ const int GCParamArgsEnd = StatepointCS.arg_size();
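+    // A sketch of the statepoint operand layout that the index arithmetic
+    // above assumes (illustrative; see the gc.statepoint documentation for
+    // the normative form):
+    //   [0] id  [1] num patch bytes  [2] call target
+    //   [3] number of call arguments  [4] flags
+    //   [5, 5 + NumCallArgs)       the call arguments
+    //   [NumCallArgs + 5]          number of transition arguments
+    //   [DeoptArgsStart]           number of deoptimization arguments
+    //   [GCParamArgsStart, GCParamArgsEnd)  gc parameters, where BaseIndex
+    //                              and DerivedIndex must point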
+ Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
+ "gc.relocate: statepoint base index doesn't fall within the "
+ "'gc parameters' section of the statepoint call",
+ CS);
+ Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
+ "gc.relocate: statepoint derived index doesn't fall within the "
+ "'gc parameters' section of the statepoint call",
+ CS);
+
+ // The relocated value must be a pointer type or a vector-of-pointer type,
+ // but gc_relocate does not need to return the same pointer type as the
+ // relocated pointer. It can be cast to the correct type later if desired.
+ // However, the two must have the same address space and 'vectorness'.
+ GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
+ Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(),
+ "gc.relocate: relocated value must be a gc pointer", CS);
+
+ auto ResultType = CS.getType();
+ auto DerivedType = Relocate.getDerivedPtr()->getType();
+ Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
+ "gc.relocate: vector relocates to vector and pointer to pointer", CS);
+ Assert(ResultType->getPointerAddressSpace() ==
+ DerivedType->getPointerAddressSpace(),
+ "gc.relocate: relocating a pointer shouldn't change its address space", CS);
+ break;
+ }
+ case Intrinsic::eh_exceptioncode:
+ case Intrinsic::eh_exceptionpointer: {
+ Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
+ "eh.exceptionpointer argument must be a catchpad", CS);
+ break;
+ }
+ }
+}
+
+/// \brief Carefully grab the subprogram from a local scope.
+///
+/// This walks the local scope chain up to the enclosing subprogram, avoiding
+/// the built-in assertions that would typically fire.
+static DISubprogram *getSubprogram(Metadata *LocalScope) {
+ if (!LocalScope)
+ return nullptr;
+
+ if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
+ return SP;
+
+ if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
+ return getSubprogram(LB->getRawScope());
+
+ // Just return null; broken scope chains are checked elsewhere.
+ assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
+ return nullptr;
+}
+
+template <class DbgIntrinsicTy>
+void Verifier::visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII) {
+ auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
+ Assert(isa<ValueAsMetadata>(MD) ||
+ (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
+ "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
+ Assert(isa<DILocalVariable>(DII.getRawVariable()),
+ "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
+ DII.getRawVariable());
+ Assert(isa<DIExpression>(DII.getRawExpression()),
+ "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
+ DII.getRawExpression());
+
+ // Ignore broken !dbg attachments; they're checked elsewhere.
+ if (MDNode *N = DII.getDebugLoc().getAsMDNode())
+ if (!isa<DILocation>(N))
+ return;
+
+ BasicBlock *BB = DII.getParent();
+ Function *F = BB ? BB->getParent() : nullptr;
+
+ // The scopes for variables and !dbg attachments must agree.
+ DILocalVariable *Var = DII.getVariable();
+ DILocation *Loc = DII.getDebugLoc();
+ Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
+ &DII, BB, F);
+
+ DISubprogram *VarSP = getSubprogram(Var->getRawScope());
+ DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
+ if (!VarSP || !LocSP)
+ return; // Broken scope chains are checked elsewhere.
+
+ Assert(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
+ " variable and !dbg attachment",
+ &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
+ Loc->getScope()->getSubprogram());
+}
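+
+// A call that passes these checks looks roughly like this illustrative
+// sketch (the metadata operands are abbreviated):
+//
+//   call void @llvm.dbg.value(metadata i32 %x, i64 0,
+//                             metadata !DILocalVariable(name: "x", ...),
+//                             metadata !DIExpression()), !dbg !10
+//
+// where the variable's scope and the !dbg location resolve to the same
+// DISubprogram.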
+
+template <class MapTy>
+static uint64_t getVariableSize(const DILocalVariable &V, const MapTy &Map) {
+ // Be careful of broken types (checked elsewhere).
+ const Metadata *RawType = V.getRawType();
+ while (RawType) {
+ // Try to get the size directly.
+ if (auto *T = dyn_cast<DIType>(RawType))
+ if (uint64_t Size = T->getSizeInBits())
+ return Size;
+
+ if (auto *DT = dyn_cast<DIDerivedType>(RawType)) {
+ // Look at the base type.
+ RawType = DT->getRawBaseType();
+ continue;
+ }
+
+ if (auto *S = dyn_cast<MDString>(RawType)) {
+ // Don't error on missing types (checked elsewhere).
+ RawType = Map.lookup(S);
+ continue;
+ }
+
+ // Missing type or size.
+ break;
+ }
+
+ // Fail gracefully.
+ return 0;
+}
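+
+// For instance, a variable whose type is a typedef (a DIDerivedType) of i32
+// usually carries no size itself, so the walk above follows the base type
+// chain and returns 32.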
+
+template <class MapTy>
+void Verifier::verifyBitPieceExpression(const DbgInfoIntrinsic &I,
+ const MapTy &TypeRefs) {
+ DILocalVariable *V;
+ DIExpression *E;
+ if (auto *DVI = dyn_cast<DbgValueInst>(&I)) {
+ V = dyn_cast_or_null<DILocalVariable>(DVI->getRawVariable());
+ E = dyn_cast_or_null<DIExpression>(DVI->getRawExpression());
+ } else {
+ auto *DDI = cast<DbgDeclareInst>(&I);
+ V = dyn_cast_or_null<DILocalVariable>(DDI->getRawVariable());
+ E = dyn_cast_or_null<DIExpression>(DDI->getRawExpression());
+ }
+
+ // We don't know whether this intrinsic verified correctly.
+ if (!V || !E || !E->isValid())
+ return;
+
+ // Nothing to do if this isn't a bit piece expression.
+ if (!E->isBitPiece())
+ return;
+
+ // The frontend helps out GDB by emitting the members of local anonymous
+ // unions as artificial local variables with shared storage. When SROA splits
+ // the storage for artificial local variables that are smaller than the entire
+ // union, the overhang piece will be outside of the allotted space for the
+ // variable and this check fails.
+ // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
+ if (V->isArtificial())
+ return;
+
+ // If there's no size, the type is broken, but that should be checked
+ // elsewhere.
+ uint64_t VarSize = getVariableSize(*V, TypeRefs);
+ if (!VarSize)
+ return;
+
+ unsigned PieceSize = E->getBitPieceSize();
+ unsigned PieceOffset = E->getBitPieceOffset();
+ Assert(PieceSize + PieceOffset <= VarSize,
+ "piece is larger than or outside of variable", &I, V, E);
+ Assert(PieceSize != VarSize, "piece covers entire variable", &I, V, E);
+}
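+
+// An illustrative sketch: for a 64-bit variable, an expression such as
+//   !DIExpression(DW_OP_bit_piece, 0, 32)   ; offset 0, size 32
+// is accepted, while an offset/size of 48/32 extends past the variable and
+// 0/64 covers it entirely; both are rejected by the checks above.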
+
+void Verifier::visitUnresolvedTypeRef(const MDString *S, const MDNode *N) {
+ // This is in its own function so we get an error for each bad type ref (not
+ // just the first).
+ Assert(false, "unresolved type ref", S, N);
+}
+
+void Verifier::verifyTypeRefs() {
+ auto *CUs = M->getNamedMetadata("llvm.dbg.cu");
+ if (!CUs)
+ return;
+
+ // Visit all the compile units again to map the type references.
+ SmallDenseMap<const MDString *, const DIType *, 32> TypeRefs;
+ for (auto *CU : CUs->operands())
+ if (auto Ts = cast<DICompileUnit>(CU)->getRetainedTypes())
+ for (DIType *Op : Ts)
+ if (auto *T = dyn_cast_or_null<DICompositeType>(Op))
+ if (auto *S = T->getRawIdentifier()) {
+ UnresolvedTypeRefs.erase(S);
+ TypeRefs.insert(std::make_pair(S, T));
+ }
+
+ // Verify debug info intrinsic bit piece expressions. This needs a second
+ // pass through the instructions, since we haven't built TypeRefs yet when
+ // verifying functions, and simply queuing the DbgInfoIntrinsics to evaluate
+ // later would queue up some that could be deleted in the meantime.
+ for (const Function &F : *M)
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I))
+ verifyBitPieceExpression(*DII, TypeRefs);
+
+ // Return early if all typerefs were resolved.
+ if (UnresolvedTypeRefs.empty())
+ return;
+
+ // Sort the unresolved references by name so the output is deterministic.
+ typedef std::pair<const MDString *, const MDNode *> TypeRef;
+ SmallVector<TypeRef, 32> Unresolved(UnresolvedTypeRefs.begin(),
+ UnresolvedTypeRefs.end());
+ std::sort(Unresolved.begin(), Unresolved.end(),
+ [](const TypeRef &LHS, const TypeRef &RHS) {
+ return LHS.first->getString() < RHS.first->getString();
+ });
+
+ // Visit the unresolved refs (printing out the errors).
+ for (const TypeRef &TR : Unresolved)
+ visitUnresolvedTypeRef(TR.first, TR.second);
+}
+
+//===----------------------------------------------------------------------===//
+// Implement the public interfaces to this file...
+//===----------------------------------------------------------------------===//
+
+bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
+ Function &F = const_cast<Function &>(f);
+ assert(!F.isDeclaration() && "Cannot verify external functions");
+
+ raw_null_ostream NullStr;
+ Verifier V(OS ? *OS : NullStr);
+
+ // Note that this function's return value is inverted from what you would
+ // expect of a function called "verify".
+ return !V.verify(F);
+}
+
+bool llvm::verifyModule(const Module &M, raw_ostream *OS) {
+ raw_null_ostream NullStr;
+ Verifier V(OS ? *OS : NullStr);
+
+ bool Broken = false;
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
+ if (!I->isDeclaration() && !I->isMaterializable())
+ Broken |= !V.verify(*I);
+
+ // Note that this function's return value is inverted from what you would
+ // expect of a function called "verify".
+ return !V.verify(M) || Broken;
+}
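+
+// Typical usage, given the inverted return convention noted above (a
+// sketch):
+//
+//   if (verifyModule(M, &errs()))
+//     report_fatal_error("Broken module");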
+
+namespace {
+struct VerifierLegacyPass : public FunctionPass {
+ static char ID;
+
+ Verifier V;
+ bool FatalErrors;
+
+ VerifierLegacyPass() : FunctionPass(ID), V(dbgs()), FatalErrors(true) {
+ initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+ explicit VerifierLegacyPass(bool FatalErrors)
+ : FunctionPass(ID), V(dbgs()), FatalErrors(FatalErrors) {
+ initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (!V.verify(F) && FatalErrors)
+ report_fatal_error("Broken function found, compilation aborted!");
+
+ return false;
+ }
+
+ bool doFinalization(Module &M) override {
+ if (!V.verify(M) && FatalErrors)
+ report_fatal_error("Broken module found, compilation aborted!");
+
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+}
+
+char VerifierLegacyPass::ID = 0;
+INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
+
+FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
+ return new VerifierLegacyPass(FatalErrors);
+}
+
+PreservedAnalyses VerifierPass::run(Module &M) {
+ if (verifyModule(M, &dbgs()) && FatalErrors)
+ report_fatal_error("Broken module found, compilation aborted!");
+
+ return PreservedAnalyses::all();
+}
+
+PreservedAnalyses VerifierPass::run(Function &F) {
+ if (verifyFunction(F, &dbgs()) && FatalErrors)
+ report_fatal_error("Broken function found, compilation aborted!");
+
+ return PreservedAnalyses::all();
+}
diff --git a/contrib/llvm/lib/IR/module.modulemap b/contrib/llvm/lib/IR/module.modulemap
new file mode 100644
index 0000000..9698e91
--- /dev/null
+++ b/contrib/llvm/lib/IR/module.modulemap
@@ -0,0 +1 @@
+module IR { requires cplusplus umbrella "." module * { export * } }