Diffstat (limited to 'contrib/llvm/lib/IR')
-rw-r--r--  contrib/llvm/lib/IR/AsmWriter.cpp | 381
-rw-r--r--  contrib/llvm/lib/IR/AttributeImpl.h | 178
-rw-r--r--  contrib/llvm/lib/IR/AttributeSetNode.h | 106
-rw-r--r--  contrib/llvm/lib/IR/Attributes.cpp | 1028
-rw-r--r--  contrib/llvm/lib/IR/AutoUpgrade.cpp | 954
-rw-r--r--  contrib/llvm/lib/IR/BasicBlock.cpp | 91
-rw-r--r--  contrib/llvm/lib/IR/Comdat.cpp | 6
-rw-r--r--  contrib/llvm/lib/IR/ConstantFold.cpp | 96
-rw-r--r--  contrib/llvm/lib/IR/ConstantRange.cpp | 313
-rw-r--r--  contrib/llvm/lib/IR/Constants.cpp | 230
-rw-r--r--  contrib/llvm/lib/IR/ConstantsContext.h | 87
-rw-r--r--  contrib/llvm/lib/IR/Core.cpp | 122
-rw-r--r--  contrib/llvm/lib/IR/DIBuilder.cpp | 155
-rw-r--r--  contrib/llvm/lib/IR/DataLayout.cpp | 158
-rw-r--r--  contrib/llvm/lib/IR/DebugInfo.cpp | 121
-rw-r--r--  contrib/llvm/lib/IR/DebugInfoMetadata.cpp | 158
-rw-r--r--  contrib/llvm/lib/IR/DebugLoc.cpp | 37
-rw-r--r--  contrib/llvm/lib/IR/DiagnosticInfo.cpp | 209
-rw-r--r--  contrib/llvm/lib/IR/DiagnosticPrinter.cpp | 4
-rw-r--r--  contrib/llvm/lib/IR/Dominators.cpp | 75
-rw-r--r--  contrib/llvm/lib/IR/Function.cpp | 410
-rw-r--r--  contrib/llvm/lib/IR/GCOV.cpp | 64
-rw-r--r--  contrib/llvm/lib/IR/Globals.cpp | 71
-rw-r--r--  contrib/llvm/lib/IR/IRBuilder.cpp | 148
-rw-r--r--  contrib/llvm/lib/IR/IRPrintingPasses.cpp | 6
-rw-r--r--  contrib/llvm/lib/IR/InlineAsm.cpp | 43
-rw-r--r--  contrib/llvm/lib/IR/Instruction.cpp | 188
-rw-r--r--  contrib/llvm/lib/IR/Instructions.cpp | 527
-rw-r--r--  contrib/llvm/lib/IR/IntrinsicInst.cpp | 54
-rw-r--r--  contrib/llvm/lib/IR/LLVMContext.cpp | 38
-rw-r--r--  contrib/llvm/lib/IR/LLVMContextImpl.cpp | 68
-rw-r--r--  contrib/llvm/lib/IR/LLVMContextImpl.h | 246
-rw-r--r--  contrib/llvm/lib/IR/LegacyPassManager.cpp | 66
-rw-r--r--  contrib/llvm/lib/IR/MDBuilder.cpp | 13
-rw-r--r--  contrib/llvm/lib/IR/Mangler.cpp | 32
-rw-r--r--  contrib/llvm/lib/IR/Metadata.cpp | 87
-rw-r--r--  contrib/llvm/lib/IR/Module.cpp | 103
-rw-r--r--  contrib/llvm/lib/IR/ModuleSummaryIndex.cpp | 73
-rw-r--r--  contrib/llvm/lib/IR/Operator.cpp | 14
-rw-r--r--  contrib/llvm/lib/IR/OptBisect.cpp | 35
-rw-r--r--  contrib/llvm/lib/IR/Pass.cpp | 2
-rw-r--r--  contrib/llvm/lib/IR/PassManager.cpp | 2
-rw-r--r--  contrib/llvm/lib/IR/PassRegistry.cpp | 2
-rw-r--r--  contrib/llvm/lib/IR/SafepointIRVerifier.cpp | 437
-rw-r--r--  contrib/llvm/lib/IR/Statepoint.cpp | 19
-rw-r--r--  contrib/llvm/lib/IR/Type.cpp | 77
-rw-r--r--  contrib/llvm/lib/IR/TypeFinder.cpp | 11
-rw-r--r--  contrib/llvm/lib/IR/User.cpp | 10
-rw-r--r--  contrib/llvm/lib/IR/Value.cpp | 151
-rw-r--r--  contrib/llvm/lib/IR/ValueSymbolTable.cpp | 11
-rw-r--r--  contrib/llvm/lib/IR/ValueTypes.cpp | 2
-rw-r--r--  contrib/llvm/lib/IR/Verifier.cpp | 767
52 files changed, 4976 insertions(+), 3310 deletions(-)
diff --git a/contrib/llvm/lib/IR/AsmWriter.cpp b/contrib/llvm/lib/IR/AsmWriter.cpp
index eecef94..170bc54 100644
--- a/contrib/llvm/lib/IR/AsmWriter.cpp
+++ b/contrib/llvm/lib/IR/AsmWriter.cpp
@@ -1,5 +1,4 @@
-
-//===-- AsmWriter.cpp - Printing LLVM as an assembly file -----------------===//
+//===- AsmWriter.cpp - Printing LLVM as an assembly file ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,62 +14,105 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/IR/Argument.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalIndirectSymbol.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/InlineAsm.h"
-#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/Type.h"
#include "llvm/IR/TypeFinder.h"
+#include "llvm/IR/Use.h"
#include "llvm/IR/UseListOrder.h"
-#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
#include <cctype>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
using namespace llvm;
// Make virtual table appear in this compilation unit.
-AssemblyAnnotationWriter::~AssemblyAnnotationWriter() {}
+AssemblyAnnotationWriter::~AssemblyAnnotationWriter() = default;
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
namespace {
+
struct OrderMap {
DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
unsigned size() const { return IDs.size(); }
std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; }
+
std::pair<unsigned, bool> lookup(const Value *V) const {
return IDs.lookup(V);
}
+
void index(const Value *V) {
// Explicitly sequence get-size and insert-value operations to avoid UB.
unsigned ID = IDs.size() + 1;
IDs[V].first = ID;
}
};
-}
+
+} // end anonymous namespace
static void orderValue(const Value *V, OrderMap &OM) {
if (OM.lookup(V).first)
@@ -138,7 +180,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
unsigned ID, const OrderMap &OM,
UseListOrderStack &Stack) {
// Predict use-list order for this one.
- typedef std::pair<const Use *, unsigned> Entry;
+ using Entry = std::pair<const Use *, unsigned>;
SmallVector<Entry, 64> List;
for (const Use &U : V->uses())
// Check if this user will be serialized.
@@ -323,7 +365,7 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::PTX_Kernel: Out << "ptx_kernel"; break;
case CallingConv::PTX_Device: Out << "ptx_device"; break;
case CallingConv::X86_64_SysV: Out << "x86_64_sysvcc"; break;
- case CallingConv::X86_64_Win64: Out << "x86_64_win64cc"; break;
+ case CallingConv::Win64: Out << "win64cc"; break;
case CallingConv::SPIR_FUNC: Out << "spir_func"; break;
case CallingConv::SPIR_KERNEL: Out << "spir_kernel"; break;
case CallingConv::Swift: Out << "swiftcc"; break;
@@ -331,6 +373,7 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::HHVM: Out << "hhvmcc"; break;
case CallingConv::HHVM_C: Out << "hhvm_ccc"; break;
case CallingConv::AMDGPU_VS: Out << "amdgpu_vs"; break;
+ case CallingConv::AMDGPU_HS: Out << "amdgpu_hs"; break;
case CallingConv::AMDGPU_GS: Out << "amdgpu_gs"; break;
case CallingConv::AMDGPU_PS: Out << "amdgpu_ps"; break;
case CallingConv::AMDGPU_CS: Out << "amdgpu_cs"; break;
@@ -419,13 +462,10 @@ static void PrintLLVMName(raw_ostream &OS, const Value *V) {
isa<GlobalValue>(V) ? GlobalPrefix : LocalPrefix);
}
-
namespace {
+
class TypePrinting {
- TypePrinting(const TypePrinting &) = delete;
- void operator=(const TypePrinting&) = delete;
public:
-
/// NamedTypes - The named types that are used by the current module.
TypeFinder NamedTypes;
@@ -433,6 +473,8 @@ public:
DenseMap<StructType*, unsigned> NumberedTypes;
TypePrinting() = default;
+ TypePrinting(const TypePrinting &) = delete;
+ TypePrinting &operator=(const TypePrinting &) = delete;
void incorporateTypes(const Module &M);
@@ -440,7 +482,8 @@ public:
void printStructBody(StructType *Ty, raw_ostream &OS);
};
-} // namespace
+
+} // end anonymous namespace
void TypePrinting::incorporateTypes(const Module &M) {
NamedTypes.run(M, false);
@@ -572,6 +615,7 @@ void TypePrinting::printStructBody(StructType *STy, raw_ostream &OS) {
}
namespace llvm {
+
//===----------------------------------------------------------------------===//
// SlotTracker Class: Enumerate slot numbers for unnamed values
//===----------------------------------------------------------------------===//
@@ -580,32 +624,33 @@ namespace llvm {
class SlotTracker {
public:
/// ValueMap - A mapping of Values to slot numbers.
- typedef DenseMap<const Value*, unsigned> ValueMap;
+ using ValueMap = DenseMap<const Value *, unsigned>;
private:
/// TheModule - The module for which we are holding slot numbers.
const Module* TheModule;
/// TheFunction - The function for which we are holding slot numbers.
- const Function* TheFunction;
- bool FunctionProcessed;
+ const Function* TheFunction = nullptr;
+ bool FunctionProcessed = false;
bool ShouldInitializeAllMetadata;
/// mMap - The slot map for the module level data.
ValueMap mMap;
- unsigned mNext;
+ unsigned mNext = 0;
/// fMap - The slot map for the function level data.
ValueMap fMap;
- unsigned fNext;
+ unsigned fNext = 0;
/// mdnMap - Map for MDNodes.
DenseMap<const MDNode*, unsigned> mdnMap;
- unsigned mdnNext;
+ unsigned mdnNext = 0;
/// asMap - The slot map for attribute sets.
DenseMap<AttributeSet, unsigned> asMap;
- unsigned asNext;
+ unsigned asNext = 0;
+
public:
/// Construct from a module.
///
@@ -614,6 +659,7 @@ public:
/// within a function (even if no functions have been initialized).
explicit SlotTracker(const Module *M,
bool ShouldInitializeAllMetadata = false);
+
/// Construct from a function, starting out in incorp state.
///
/// If \c ShouldInitializeAllMetadata, initializes all metadata in all
@@ -622,6 +668,9 @@ public:
explicit SlotTracker(const Function *F,
bool ShouldInitializeAllMetadata = false);
+ SlotTracker(const SlotTracker &) = delete;
+ SlotTracker &operator=(const SlotTracker &) = delete;
+
/// Return the slot number of the specified value in it's type
/// plane. If something is not in the SlotTracker, return -1.
int getLocalSlot(const Value *V);
@@ -644,14 +693,16 @@ public:
void purgeFunction();
/// MDNode map iterators.
- typedef DenseMap<const MDNode*, unsigned>::iterator mdn_iterator;
+ using mdn_iterator = DenseMap<const MDNode*, unsigned>::iterator;
+
mdn_iterator mdn_begin() { return mdnMap.begin(); }
mdn_iterator mdn_end() { return mdnMap.end(); }
unsigned mdn_size() const { return mdnMap.size(); }
bool mdn_empty() const { return mdnMap.empty(); }
/// AttributeSet map iterators.
- typedef DenseMap<AttributeSet, unsigned>::iterator as_iterator;
+ using as_iterator = DenseMap<AttributeSet, unsigned>::iterator;
+
as_iterator as_begin() { return asMap.begin(); }
as_iterator as_end() { return asMap.end(); }
unsigned as_size() const { return asMap.size(); }
@@ -689,11 +740,9 @@ private:
/// Add all of the metadata from an instruction.
void processInstructionMetadata(const Instruction &I);
-
- SlotTracker(const SlotTracker &) = delete;
- void operator=(const SlotTracker &) = delete;
};
-} // namespace llvm
+
+} // end namespace llvm
ModuleSlotTracker::ModuleSlotTracker(SlotTracker &Machine, const Module *M,
const Function *F)
@@ -704,7 +753,7 @@ ModuleSlotTracker::ModuleSlotTracker(const Module *M,
: ShouldCreateStorage(M),
ShouldInitializeAllMetadata(ShouldInitializeAllMetadata), M(M) {}
-ModuleSlotTracker::~ModuleSlotTracker() {}
+ModuleSlotTracker::~ModuleSlotTracker() = default;
SlotTracker *ModuleSlotTracker::getMachine() {
if (!ShouldCreateStorage)
@@ -771,17 +820,13 @@ static SlotTracker *createSlotTracker(const Value *V) {
// Module level constructor. Causes the contents of the Module (sans functions)
// to be added to the slot table.
SlotTracker::SlotTracker(const Module *M, bool ShouldInitializeAllMetadata)
- : TheModule(M), TheFunction(nullptr), FunctionProcessed(false),
- ShouldInitializeAllMetadata(ShouldInitializeAllMetadata), mNext(0),
- fNext(0), mdnNext(0), asNext(0) {}
+ : TheModule(M), ShouldInitializeAllMetadata(ShouldInitializeAllMetadata) {}
// Function level constructor. Causes the contents of the Module and the one
// function provided to be added to the slot table.
SlotTracker::SlotTracker(const Function *F, bool ShouldInitializeAllMetadata)
: TheModule(F ? F->getParent() : nullptr), TheFunction(F),
- FunctionProcessed(false),
- ShouldInitializeAllMetadata(ShouldInitializeAllMetadata), mNext(0),
- fNext(0), mdnNext(0), asNext(0) {}
+ ShouldInitializeAllMetadata(ShouldInitializeAllMetadata) {}
inline void SlotTracker::initialize() {
if (TheModule) {
@@ -803,6 +848,9 @@ void SlotTracker::processModule() {
if (!Var.hasName())
CreateModuleSlot(&Var);
processGlobalObjectMetadata(Var);
+ auto Attrs = Var.getAttributes();
+ if (Attrs.hasAttributes())
+ CreateAttributeSetSlot(Attrs);
}
for (const GlobalAlias &A : TheModule->aliases()) {
@@ -832,7 +880,7 @@ void SlotTracker::processModule() {
// Add all the function attributes to the table.
// FIXME: Add attributes of other objects?
AttributeSet FnAttrs = F.getAttributes().getFnAttributes();
- if (FnAttrs.hasAttributes(AttributeSet::FunctionIndex))
+ if (FnAttrs.hasAttributes())
CreateAttributeSetSlot(FnAttrs);
}
@@ -867,15 +915,10 @@ void SlotTracker::processFunction() {
// We allow direct calls to any llvm.foo function here, because the
// target may not be linked into the optimizer.
- if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
- // Add all the call attributes to the table.
- AttributeSet Attrs = CI->getAttributes().getFnAttributes();
- if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
- CreateAttributeSetSlot(Attrs);
- } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+ if (auto CS = ImmutableCallSite(&I)) {
// Add all the call attributes to the table.
- AttributeSet Attrs = II->getAttributes().getFnAttributes();
- if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ AttributeSet Attrs = CS.getAttributes().getFnAttributes();
+ if (Attrs.hasAttributes())
CreateAttributeSetSlot(Attrs);
}
}
@@ -949,7 +992,6 @@ int SlotTracker::getMetadataSlot(const MDNode *N) {
return MI == mdnMap.end() ? -1 : (int)MI->second;
}
-
/// getLocalSlot - Get the slot number for a value that is local to a function.
int SlotTracker::getLocalSlot(const Value *V) {
assert(!isa<Constant>(V) && "Can't get a constant or global slot with this!");
@@ -1016,8 +1058,7 @@ void SlotTracker::CreateMetadataSlot(const MDNode *N) {
}
void SlotTracker::CreateAttributeSetSlot(AttributeSet AS) {
- assert(AS.hasAttributes(AttributeSet::FunctionIndex) &&
- "Doesn't need a slot!");
+ assert(AS.hasAttributes() && "Doesn't need a slot!");
as_iterator I = asMap.find(AS);
if (I != asMap.end())
@@ -1073,6 +1114,8 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
Out << " nsz";
if (FPO->hasAllowReciprocal())
Out << " arcp";
+ if (FPO->hasAllowContract())
+ Out << " contract";
}
}
@@ -1106,35 +1149,34 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle() ||
- &CFP->getValueAPF().getSemantics() == &APFloat::IEEEdouble()) {
+ const APFloat &APF = CFP->getValueAPF();
+ if (&APF.getSemantics() == &APFloat::IEEEsingle() ||
+ &APF.getSemantics() == &APFloat::IEEEdouble()) {
// We would like to output the FP constant value in exponential notation,
// but we cannot do this if doing so will lose precision. Check here to
// make sure that we only output it in exponential format if we can parse
// the value back and get the same value.
//
bool ignored;
- bool isDouble = &CFP->getValueAPF().getSemantics()==&APFloat::IEEEdouble();
- bool isInf = CFP->getValueAPF().isInfinity();
- bool isNaN = CFP->getValueAPF().isNaN();
+ bool isDouble = &APF.getSemantics() == &APFloat::IEEEdouble();
+ bool isInf = APF.isInfinity();
+ bool isNaN = APF.isNaN();
if (!isInf && !isNaN) {
- double Val = isDouble ? CFP->getValueAPF().convertToDouble() :
- CFP->getValueAPF().convertToFloat();
+ double Val = isDouble ? APF.convertToDouble() : APF.convertToFloat();
SmallString<128> StrVal;
- raw_svector_ostream(StrVal) << Val;
-
+ APF.toString(StrVal, 6, 0, false);
// Check to make sure that the stringized number is not some string like
// "Inf" or NaN, that atof will accept, but the lexer will not. Check
// that the string matches the "[-+]?[0-9]" regex.
//
- if ((StrVal[0] >= '0' && StrVal[0] <= '9') ||
- ((StrVal[0] == '-' || StrVal[0] == '+') &&
- (StrVal[1] >= '0' && StrVal[1] <= '9'))) {
- // Reparse stringized version!
- if (APFloat(APFloat::IEEEdouble(), StrVal).convertToDouble() == Val) {
- Out << StrVal;
- return;
- }
+ assert(((StrVal[0] >= '0' && StrVal[0] <= '9') ||
+ ((StrVal[0] == '-' || StrVal[0] == '+') &&
+ (StrVal[1] >= '0' && StrVal[1] <= '9'))) &&
+ "[-+]?[0-9] regex does not match!");
+ // Reparse stringized version!
+ if (APFloat(APFloat::IEEEdouble(), StrVal).convertToDouble() == Val) {
+ Out << StrVal;
+ return;
}
}
// Otherwise we could not reparse it to exactly the same value, so we must
@@ -1143,7 +1185,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
// x86, so we must not use these types.
static_assert(sizeof(double) == sizeof(uint64_t),
"assuming that double is 64 bits!");
- APFloat apf = CFP->getValueAPF();
+ APFloat apf = APF;
// Floats are represented in ASCII IR as double, convert.
if (!isDouble)
apf.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
@@ -1156,27 +1198,27 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
// These appear as a magic letter identifying the type, then a
// fixed number of hex digits.
Out << "0x";
- APInt API = CFP->getValueAPF().bitcastToAPInt();
- if (&CFP->getValueAPF().getSemantics() == &APFloat::x87DoubleExtended()) {
+ APInt API = APF.bitcastToAPInt();
+ if (&APF.getSemantics() == &APFloat::x87DoubleExtended()) {
Out << 'K';
Out << format_hex_no_prefix(API.getHiBits(16).getZExtValue(), 4,
/*Upper=*/true);
Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
/*Upper=*/true);
return;
- } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEquad()) {
+ } else if (&APF.getSemantics() == &APFloat::IEEEquad()) {
Out << 'L';
Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
/*Upper=*/true);
Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
/*Upper=*/true);
- } else if (&CFP->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble()) {
+ } else if (&APF.getSemantics() == &APFloat::PPCDoubleDouble()) {
Out << 'M';
Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
/*Upper=*/true);
Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
/*Upper=*/true);
- } else if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEhalf()) {
+ } else if (&APF.getSemantics() == &APFloat::IEEEhalf()) {
Out << 'H';
Out << format_hex_no_prefix(API.getZExtValue(), 4,
/*Upper=*/true);
@@ -1248,7 +1290,6 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
-
if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
if (CS->getType()->isPacked())
Out << '<';
@@ -1381,11 +1422,14 @@ static void writeMDTuple(raw_ostream &Out, const MDTuple *Node,
}
namespace {
+
struct FieldSeparator {
- bool Skip;
+ bool Skip = true;
const char *Sep;
- FieldSeparator(const char *Sep = ", ") : Skip(true), Sep(Sep) {}
+
+ FieldSeparator(const char *Sep = ", ") : Sep(Sep) {}
};
+
raw_ostream &operator<<(raw_ostream &OS, FieldSeparator &FS) {
if (FS.Skip) {
FS.Skip = false;
@@ -1393,19 +1437,20 @@ raw_ostream &operator<<(raw_ostream &OS, FieldSeparator &FS) {
}
return OS << FS.Sep;
}
+
struct MDFieldPrinter {
raw_ostream &Out;
FieldSeparator FS;
- TypePrinting *TypePrinter;
- SlotTracker *Machine;
- const Module *Context;
+ TypePrinting *TypePrinter = nullptr;
+ SlotTracker *Machine = nullptr;
+ const Module *Context = nullptr;
- explicit MDFieldPrinter(raw_ostream &Out)
- : Out(Out), TypePrinter(nullptr), Machine(nullptr), Context(nullptr) {}
+ explicit MDFieldPrinter(raw_ostream &Out) : Out(Out) {}
MDFieldPrinter(raw_ostream &Out, TypePrinting *TypePrinter,
SlotTracker *Machine, const Module *Context)
: Out(Out), TypePrinter(TypePrinter), Machine(Machine), Context(Context) {
}
+
void printTag(const DINode *N);
void printMacinfoType(const DIMacroNode *N);
void printChecksumKind(const DIFile *N);
@@ -1422,7 +1467,8 @@ struct MDFieldPrinter {
bool ShouldSkipZero = true);
void printEmissionKind(StringRef Name, DICompileUnit::DebugEmissionKind EK);
};
-} // end namespace
+
+} // end anonymous namespace
void MDFieldPrinter::printTag(const DINode *N) {
Out << FS << "tag: ";
@@ -1518,7 +1564,6 @@ void MDFieldPrinter::printEmissionKind(StringRef Name,
Out << FS << Name << ": " << DICompileUnit::EmissionKindString(EK);
}
-
template <class IntTy, class Stringifier>
void MDFieldPrinter::printDwarfEnum(StringRef Name, IntTy Value,
Stringifier toString, bool ShouldSkipZero) {
@@ -1614,6 +1659,9 @@ static void writeDIDerivedType(raw_ostream &Out, const DIDerivedType *N,
Printer.printInt("offset", N->getOffsetInBits());
Printer.printDIFlags("flags", N->getFlags());
Printer.printMetadata("extraData", N->getRawExtraData());
+ if (const auto &DWARFAddressSpace = N->getDWARFAddressSpace())
+ Printer.printInt("dwarfAddressSpace", *DWARFAddressSpace,
+ /* ShouldSkipZero */ false);
Out << ")";
}
@@ -1688,6 +1736,8 @@ static void writeDICompileUnit(raw_ostream &Out, const DICompileUnit *N,
Printer.printMetadata("macros", N->getRawMacros());
Printer.printInt("dwoId", N->getDWOId());
Printer.printBool("splitDebugInlining", N->getSplitDebugInlining(), true);
+ Printer.printBool("debugInfoForProfiling", N->getDebugInfoForProfiling(),
+ false);
Out << ")";
}
@@ -1718,6 +1768,7 @@ static void writeDISubprogram(raw_ostream &Out, const DISubprogram *N,
Printer.printMetadata("templateParams", N->getRawTemplateParams());
Printer.printMetadata("declaration", N->getRawDeclaration());
Printer.printMetadata("variables", N->getRawVariables());
+ Printer.printMetadata("thrownTypes", N->getRawThrownTypes());
Out << ")";
}
@@ -1754,8 +1805,6 @@ static void writeDINamespace(raw_ostream &Out, const DINamespace *N,
MDFieldPrinter Printer(Out, TypePrinter, Machine, Context);
Printer.printString("name", N->getName());
Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
- Printer.printMetadata("file", N->getRawFile());
- Printer.printInt("line", N->getLine());
Printer.printBool("exportSymbols", N->getExportSymbols(), false);
Out << ")";
}
@@ -1915,11 +1964,11 @@ static void writeDIImportedEntity(raw_ostream &Out, const DIImportedEntity *N,
Printer.printString("name", N->getName());
Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
Printer.printMetadata("entity", N->getRawEntity());
+ Printer.printMetadata("file", N->getRawFile());
Printer.printInt("line", N->getLine());
Out << ")";
}
-
static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
TypePrinting *TypePrinter,
SlotTracker *Machine,
@@ -2058,6 +2107,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
}
namespace {
+
class AssemblyWriter {
formatted_raw_ostream &Out;
const Module *TheModule;
@@ -2070,6 +2120,8 @@ class AssemblyWriter {
bool ShouldPreserveUseListOrder;
UseListOrderStack UseListOrders;
SmallVector<StringRef, 8> MDNames;
+ /// Synchronization scope names registered with LLVMContext.
+ SmallVector<StringRef, 8> SSNs;
public:
/// Construct an AssemblyWriter with an external SlotTracker
@@ -2083,12 +2135,17 @@ public:
void printModule(const Module *M);
void writeOperand(const Value *Op, bool PrintType);
- void writeParamOperand(const Value *Operand, AttributeSet Attrs,unsigned Idx);
+ void writeParamOperand(const Value *Operand, AttributeSet Attrs);
void writeOperandBundles(ImmutableCallSite CS);
- void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
- void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ void writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID);
+ void writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID);
+ void writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
void writeAllMDNodes();
void writeMDNode(unsigned Slot, const MDNode *Node);
@@ -2099,7 +2156,7 @@ public:
void printIndirectSymbol(const GlobalIndirectSymbol *GIS);
void printComdat(const Comdat *C);
void printFunction(const Function *F);
- void printArgument(const Argument *FA, AttributeSet Attrs, unsigned Idx);
+ void printArgument(const Argument *FA, AttributeSet Attrs);
void printBasicBlock(const BasicBlock *BB);
void printInstructionLine(const Instruction &I);
void printInstruction(const Instruction &I);
@@ -2121,7 +2178,8 @@ private:
// intrinsic indicating base and derived pointer names.
void printGCRelocateComment(const GCRelocateInst &Relocate);
};
-} // namespace
+
+} // end anonymous namespace
AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac,
const Module *M, AssemblyAnnotationWriter *AAW,
@@ -2149,36 +2207,48 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
-void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
- if (Ordering == AtomicOrdering::NotAtomic)
- return;
+void AssemblyWriter::writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID) {
+ switch (SSID) {
+ case SyncScope::System: {
+ break;
+ }
+ default: {
+ if (SSNs.empty())
+ Context.getSyncScopeNames(SSNs);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
+ Out << " syncscope(\"";
+ PrintEscapedString(SSNs[SSID], Out);
+ Out << "\")";
+ break;
+ }
}
+}
+
+void AssemblyWriter::writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID) {
+ if (Ordering == AtomicOrdering::NotAtomic)
+ return;
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(Ordering);
}
-void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+void AssemblyWriter::writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
FailureOrdering != AtomicOrdering::NotAtomic);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
- }
-
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(SuccessOrdering);
Out << " " << toIRString(FailureOrdering);
}
void AssemblyWriter::writeParamOperand(const Value *Operand,
- AttributeSet Attrs, unsigned Idx) {
+ AttributeSet Attrs) {
if (!Operand) {
Out << "<null operand!>";
return;
@@ -2187,8 +2257,8 @@ void AssemblyWriter::writeParamOperand(const Value *Operand,
// Print the type
TypePrinter.print(Operand->getType(), Out);
// Print parameter attributes list
- if (Attrs.hasAttributes(Idx))
- Out << ' ' << Attrs.getAsString(Idx);
+ if (Attrs.hasAttributes())
+ Out << ' ' << Attrs.getAsString();
Out << ' ';
// Print the operand
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
@@ -2501,6 +2571,10 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
GV->getAllMetadata(MDs);
printMetadataAttachments(MDs, ", ");
+ auto Attrs = GV->getAttributes();
+ if (Attrs.hasAttributes())
+ Out << " #" << Machine.getAttributeGroupSlot(Attrs);
+
printInfoComment(*GV);
}
@@ -2586,7 +2660,6 @@ void AssemblyWriter::printTypeIdentities() {
}
/// printFunction - Print all aspects of a function.
-///
void AssemblyWriter::printFunction(const Function *F) {
// Print out the return type and name.
Out << '\n';
@@ -2596,19 +2669,12 @@ void AssemblyWriter::printFunction(const Function *F) {
if (F->isMaterializable())
Out << "; Materializable\n";
- const AttributeSet &Attrs = F->getAttributes();
- if (Attrs.hasAttributes(AttributeSet::FunctionIndex)) {
+ const AttributeList &Attrs = F->getAttributes();
+ if (Attrs.hasAttributes(AttributeList::FunctionIndex)) {
AttributeSet AS = Attrs.getFnAttributes();
std::string AttrStr;
- unsigned Idx = 0;
- for (unsigned E = AS.getNumSlots(); Idx != E; ++Idx)
- if (AS.getSlotIndex(Idx) == AttributeSet::FunctionIndex)
- break;
-
- for (AttributeSet::iterator I = AS.begin(Idx), E = AS.end(Idx);
- I != E; ++I) {
- Attribute Attr = *I;
+ for (const Attribute &Attr : AS) {
if (!Attr.isStringAttribute()) {
if (!AttrStr.empty()) AttrStr += ' ';
AttrStr += Attr.getAsString();
@@ -2641,8 +2707,8 @@ void AssemblyWriter::printFunction(const Function *F) {
}
FunctionType *FT = F->getFunctionType();
- if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
- Out << Attrs.getAsString(AttributeSet::ReturnIndex) << ' ';
+ if (Attrs.hasAttributes(AttributeList::ReturnIndex))
+ Out << Attrs.getAsString(AttributeList::ReturnIndex) << ' ';
TypePrinter.print(F->getReturnType(), Out);
Out << ' ';
WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent());
@@ -2658,17 +2724,17 @@ void AssemblyWriter::printFunction(const Function *F) {
// Output type...
TypePrinter.print(FT->getParamType(I), Out);
- if (Attrs.hasAttributes(I + 1))
- Out << ' ' << Attrs.getAsString(I + 1);
+ AttributeSet ArgAttrs = Attrs.getParamAttributes(I);
+ if (ArgAttrs.hasAttributes())
+ Out << ' ' << ArgAttrs.getAsString();
}
} else {
// The arguments are meaningful here, print them in detail.
- unsigned Idx = 1;
for (const Argument &Arg : F->args()) {
// Insert commas as we go... the first arg doesn't get a comma
- if (Idx != 1)
+ if (Arg.getArgNo() != 0)
Out << ", ";
- printArgument(&Arg, Attrs, Idx++);
+ printArgument(&Arg, Attrs.getParamAttributes(Arg.getArgNo()));
}
}
@@ -2681,7 +2747,7 @@ void AssemblyWriter::printFunction(const Function *F) {
StringRef UA = getUnnamedAddrEncoding(F->getUnnamedAddr());
if (!UA.empty())
Out << ' ' << UA;
- if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ if (Attrs.hasAttributes(AttributeList::FunctionIndex))
Out << " #" << Machine.getAttributeGroupSlot(Attrs.getFnAttributes());
if (F->hasSection()) {
Out << " section \"";
@@ -2729,15 +2795,13 @@ void AssemblyWriter::printFunction(const Function *F) {
/// printArgument - This member is called for every argument that is passed into
/// the function. Simply print it out
-///
-void AssemblyWriter::printArgument(const Argument *Arg,
- AttributeSet Attrs, unsigned Idx) {
+void AssemblyWriter::printArgument(const Argument *Arg, AttributeSet Attrs) {
// Output type...
TypePrinter.print(Arg->getType(), Out);
// Output parameter attributes list
- if (Attrs.hasAttributes(Idx))
- Out << ' ' << Attrs.getAsString(Idx);
+ if (Attrs.hasAttributes())
+ Out << ' ' << Attrs.getAsString();
// Output name, if available...
if (Arg->hasName()) {
@@ -2747,7 +2811,6 @@ void AssemblyWriter::printArgument(const Argument *Arg,
}
/// printBasicBlock - This member is called for each basic block in a method.
-///
void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
if (BB->hasName()) { // Print out the label if it exists...
Out << "\n";
@@ -2813,7 +2876,6 @@ void AssemblyWriter::printGCRelocateComment(const GCRelocateInst &Relocate) {
/// printInfoComment - Print a little comment after the instruction indicating
/// which slot it occupies.
-///
void AssemblyWriter::printInfoComment(const Value &V) {
if (const auto *Relocate = dyn_cast<GCRelocateInst>(&V))
printGCRelocateComment(*Relocate);
@@ -2901,12 +2963,11 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << ", ";
writeOperand(SI.getDefaultDest(), true);
Out << " [";
- for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
- i != e; ++i) {
+ for (auto Case : SI.cases()) {
Out << "\n ";
- writeOperand(i.getCaseValue(), true);
+ writeOperand(Case.getCaseValue(), true);
Out << ", ";
- writeOperand(i.getCaseSuccessor(), true);
+ writeOperand(Case.getCaseSuccessor(), true);
}
Out << "\n ]";
} else if (isa<IndirectBrInst>(I)) {
@@ -3015,10 +3076,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Operand = CI->getCalledValue();
FunctionType *FTy = CI->getFunctionType();
Type *RetTy = FTy->getReturnType();
- const AttributeSet &PAL = CI->getAttributes();
+ const AttributeList &PAL = CI->getAttributes();
- if (PAL.hasAttributes(AttributeSet::ReturnIndex))
- Out << ' ' << PAL.getAsString(AttributeSet::ReturnIndex);
+ if (PAL.hasAttributes(AttributeList::ReturnIndex))
+ Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
// If possible, print out the short form of the call instruction. We can
// only do this if the first argument is a pointer to a nonvararg function,
@@ -3032,7 +3093,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
for (unsigned op = 0, Eop = CI->getNumArgOperands(); op < Eop; ++op) {
if (op > 0)
Out << ", ";
- writeParamOperand(CI->getArgOperand(op), PAL, op + 1);
+ writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op));
}
// Emit an ellipsis if this is a musttail call in a vararg function. This
@@ -3043,16 +3104,15 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << ", ...";
Out << ')';
- if (PAL.hasAttributes(AttributeSet::FunctionIndex))
+ if (PAL.hasAttributes(AttributeList::FunctionIndex))
Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes());
writeOperandBundles(CI);
-
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
Operand = II->getCalledValue();
FunctionType *FTy = II->getFunctionType();
Type *RetTy = FTy->getReturnType();
- const AttributeSet &PAL = II->getAttributes();
+ const AttributeList &PAL = II->getAttributes();
// Print the calling convention being used.
if (II->getCallingConv() != CallingConv::C) {
@@ -3060,8 +3120,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
PrintCallingConv(II->getCallingConv(), Out);
}
- if (PAL.hasAttributes(AttributeSet::ReturnIndex))
- Out << ' ' << PAL.getAsString(AttributeSet::ReturnIndex);
+ if (PAL.hasAttributes(AttributeList::ReturnIndex))
+ Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
// If possible, print out the short form of the invoke instruction. We can
// only do this if the first argument is a pointer to a nonvararg function,
@@ -3075,11 +3135,11 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
for (unsigned op = 0, Eop = II->getNumArgOperands(); op < Eop; ++op) {
if (op)
Out << ", ";
- writeParamOperand(II->getArgOperand(op), PAL, op + 1);
+ writeParamOperand(II->getArgOperand(op), PAL.getParamAttributes(op));
}
Out << ')';
- if (PAL.hasAttributes(AttributeSet::FunctionIndex))
+ if (PAL.hasAttributes(AttributeList::FunctionIndex))
Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttributes());
writeOperandBundles(II);
@@ -3088,7 +3148,6 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(II->getNormalDest(), true);
Out << " unwind ";
writeOperand(II->getUnwindDest(), true);
-
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
Out << ' ';
if (AI->isUsedWithInAlloca())
@@ -3109,6 +3168,11 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (AI->getAlignment()) {
Out << ", align " << AI->getAlignment();
}
+
+ unsigned AddrSpace = AI->getType()->getAddressSpace();
+ if (AddrSpace != 0) {
+ Out << ", addrspace(" << AddrSpace << ')';
+ }
} else if (isa<CastInst>(I)) {
if (Operand) {
Out << ' ';
@@ -3171,21 +3235,22 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print atomic ordering/alignment for memory operations
if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->isAtomic())
- writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID());
if (LI->getAlignment())
Out << ", align " << LI->getAlignment();
} else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
if (SI->isAtomic())
- writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID());
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
- writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
- CXI->getSynchScope());
+ writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(),
+ CXI->getFailureOrdering(), CXI->getSyncScopeID());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
- writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
+ writeAtomic(RMWI->getContext(), RMWI->getOrdering(),
+ RMWI->getSyncScopeID());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
- writeAtomic(FI->getOrdering(), FI->getSynchScope());
+ writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID());
}
// Print Metadata info.
@@ -3242,7 +3307,7 @@ void AssemblyWriter::printMDNodeBody(const MDNode *Node) {
}
void AssemblyWriter::writeAllAttributeGroups() {
- std::vector<std::pair<AttributeSet, unsigned> > asVec;
+ std::vector<std::pair<AttributeSet, unsigned>> asVec;
asVec.resize(Machine.as_size());
for (SlotTracker::as_iterator I = Machine.as_begin(), E = Machine.as_end();
@@ -3251,7 +3316,7 @@ void AssemblyWriter::writeAllAttributeGroups() {
for (const auto &I : asVec)
Out << "attributes #" << I.second << " = { "
- << I.first.getAsString(AttributeSet::FunctionIndex, true) << " }\n";
+ << I.first.getAsString(true) << " }\n";
}
void AssemblyWriter::printUseListOrder(const UseListOrder &Order) {
@@ -3535,6 +3600,7 @@ void Metadata::print(raw_ostream &OS, ModuleSlotTracker &MST,
printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ false);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Value::dump - allow easy printing of Values from the debugger.
LLVM_DUMP_METHOD
void Value::dump() const { print(dbgs(), /*IsForDebug=*/true); dbgs() << '\n'; }
@@ -3566,3 +3632,4 @@ void Metadata::dump(const Module *M) const {
print(dbgs(), M, /*IsForDebug=*/true);
dbgs() << '\n';
}
+#endif
diff --git a/contrib/llvm/lib/IR/AttributeImpl.h b/contrib/llvm/lib/IR/AttributeImpl.h
index d0d2710..9c7b61f 100644
--- a/contrib/llvm/lib/IR/AttributeImpl.h
+++ b/contrib/llvm/lib/IR/AttributeImpl.h
@@ -1,4 +1,4 @@
-//===-- AttributeImpl.h - Attribute Internals -------------------*- C++ -*-===//
+//===- AttributeImpl.h - Attribute Internals --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,15 +16,12 @@
#ifndef LLVM_LIB_IR_ATTRIBUTEIMPL_H
#define LLVM_LIB_IR_ATTRIBUTEIMPL_H
-#include "AttributeSetNode.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/Support/TrailingObjects.h"
-#include <algorithm>
#include <cassert>
-#include <climits>
#include <cstddef>
#include <cstdint>
#include <string>
@@ -81,11 +78,13 @@ public:
else
Profile(ID, getKindAsString(), getValueAsString());
}
+
static void Profile(FoldingSetNodeID &ID, Attribute::AttrKind Kind,
uint64_t Val) {
ID.AddInteger(Kind);
if (Val) ID.AddInteger(Val);
}
+
static void Profile(FoldingSetNodeID &ID, StringRef Kind, StringRef Values) {
ID.AddString(Kind);
if (!Values.empty()) ID.AddString(Values);
@@ -101,6 +100,7 @@ public:
class EnumAttributeImpl : public AttributeImpl {
virtual void anchor();
+
Attribute::AttrKind Kind;
protected:
@@ -115,9 +115,10 @@ public:
};
class IntAttributeImpl : public EnumAttributeImpl {
- void anchor() override;
uint64_t Val;
+ void anchor() override;
+
public:
IntAttributeImpl(Attribute::AttrKind Kind, uint64_t Val)
: EnumAttributeImpl(IntAttrEntry, Kind), Val(Val) {
@@ -133,6 +134,7 @@ public:
class StringAttributeImpl : public AttributeImpl {
virtual void anchor();
+
std::string Kind;
std::string Val;
@@ -144,122 +146,112 @@ public:
StringRef getStringValue() const { return Val; }
};
-typedef std::pair<unsigned, AttributeSetNode *> IndexAttrPair;
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class represents a group of attributes that apply to one
+/// element: function, return type, or parameter.
+class AttributeSetNode final
+ : public FoldingSetNode,
+ private TrailingObjects<AttributeSetNode, Attribute> {
+ friend TrailingObjects;
+
+ /// Bitset with a bit for each available attribute Attribute::AttrKind.
+ uint64_t AvailableAttrs;
+ unsigned NumAttrs; ///< Number of attributes in this node.
+
+ AttributeSetNode(ArrayRef<Attribute> Attrs);
+
+public:
+ // AttributesSetNode is uniqued, these should not be available.
+ AttributeSetNode(const AttributeSetNode &) = delete;
+ AttributeSetNode &operator=(const AttributeSetNode &) = delete;
+
+ void operator delete(void *p) { ::operator delete(p); }
+
+ static AttributeSetNode *get(LLVMContext &C, const AttrBuilder &B);
+
+ static AttributeSetNode *get(LLVMContext &C, ArrayRef<Attribute> Attrs);
+
+ /// \brief Return the number of attributes this AttributeList contains.
+ unsigned getNumAttributes() const { return NumAttrs; }
+
+ bool hasAttribute(Attribute::AttrKind Kind) const {
+ return AvailableAttrs & ((uint64_t)1) << Kind;
+ }
+ bool hasAttribute(StringRef Kind) const;
+ bool hasAttributes() const { return NumAttrs != 0; }
+
+ Attribute getAttribute(Attribute::AttrKind Kind) const;
+ Attribute getAttribute(StringRef Kind) const;
+
+ unsigned getAlignment() const;
+ unsigned getStackAlignment() const;
+ uint64_t getDereferenceableBytes() const;
+ uint64_t getDereferenceableOrNullBytes() const;
+ std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
+ std::string getAsString(bool InAttrGrp) const;
+
+ using iterator = const Attribute *;
+
+ iterator begin() const { return getTrailingObjects<Attribute>(); }
+ iterator end() const { return begin() + NumAttrs; }
+
+ void Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, makeArrayRef(begin(), end()));
+ }
+
+ static void Profile(FoldingSetNodeID &ID, ArrayRef<Attribute> AttrList) {
+ for (const auto &Attr : AttrList)
+ Attr.Profile(ID);
+ }
+};
+
+using IndexAttrPair = std::pair<unsigned, AttributeSet>;
//===----------------------------------------------------------------------===//
/// \class
/// \brief This class represents a set of attributes that apply to the function,
/// return type, and parameters.
-class AttributeSetImpl final
+class AttributeListImpl final
: public FoldingSetNode,
- private TrailingObjects<AttributeSetImpl, IndexAttrPair> {
- friend class AttributeSet;
+ private TrailingObjects<AttributeListImpl, AttributeSet> {
+ friend class AttributeList;
friend TrailingObjects;
private:
- LLVMContext &Context;
- unsigned NumSlots; ///< Number of entries in this set.
/// Bitset with a bit for each available attribute Attribute::AttrKind.
uint64_t AvailableFunctionAttrs;
+ LLVMContext &Context;
+ unsigned NumAttrSets; ///< Number of entries in this set.
// Helper fn for TrailingObjects class.
- size_t numTrailingObjects(OverloadToken<IndexAttrPair>) { return NumSlots; }
-
- /// \brief Return a pointer to the IndexAttrPair for the specified slot.
- const IndexAttrPair *getNode(unsigned Slot) const {
- return getTrailingObjects<IndexAttrPair>() + Slot;
- }
+ size_t numTrailingObjects(OverloadToken<AttributeSet>) { return NumAttrSets; }
public:
- AttributeSetImpl(LLVMContext &C,
- ArrayRef<std::pair<unsigned, AttributeSetNode *>> Slots)
- : Context(C), NumSlots(Slots.size()), AvailableFunctionAttrs(0) {
- static_assert(Attribute::EndAttrKinds <=
- sizeof(AvailableFunctionAttrs) * CHAR_BIT,
- "Too many attributes");
-
-#ifndef NDEBUG
- if (Slots.size() >= 2) {
- for (const std::pair<unsigned, AttributeSetNode *> *i = Slots.begin() + 1,
- *e = Slots.end();
- i != e; ++i) {
- assert((i-1)->first <= i->first && "Attribute set not ordered!");
- }
- }
-#endif
- // There's memory after the node where we can store the entries in.
- std::copy(Slots.begin(), Slots.end(), getTrailingObjects<IndexAttrPair>());
-
- // Initialize AvailableFunctionAttrs summary bitset.
- if (NumSlots > 0) {
- static_assert(AttributeSet::FunctionIndex == ~0u,
- "FunctionIndex should be biggest possible index");
- const std::pair<unsigned, AttributeSetNode *> &Last = Slots.back();
- if (Last.first == AttributeSet::FunctionIndex) {
- const AttributeSetNode *Node = Last.second;
- for (Attribute I : *Node) {
- if (!I.isStringAttribute())
- AvailableFunctionAttrs |= ((uint64_t)1) << I.getKindAsEnum();
- }
- }
- }
- }
+ AttributeListImpl(LLVMContext &C, ArrayRef<AttributeSet> Sets);
// AttributesSetImpt is uniqued, these should not be available.
- AttributeSetImpl(const AttributeSetImpl &) = delete;
- AttributeSetImpl &operator=(const AttributeSetImpl &) = delete;
+ AttributeListImpl(const AttributeListImpl &) = delete;
+ AttributeListImpl &operator=(const AttributeListImpl &) = delete;
void operator delete(void *p) { ::operator delete(p); }
- /// \brief Get the context that created this AttributeSetImpl.
+ /// \brief Get the context that created this AttributeListImpl.
LLVMContext &getContext() { return Context; }
- /// \brief Return the number of slots used in this attribute list. This is
- /// the number of arguments that have an attribute set on them (including the
- /// function itself).
- unsigned getNumSlots() const { return NumSlots; }
-
- /// \brief Get the index of the given "slot" in the AttrNodes list. This index
- /// is the index of the return, parameter, or function object that the
- /// attributes are applied to, not the index into the AttrNodes list where the
- /// attributes reside.
- unsigned getSlotIndex(unsigned Slot) const {
- return getNode(Slot)->first;
- }
-
- /// \brief Retrieve the attributes for the given "slot" in the AttrNode list.
- /// \p Slot is an index into the AttrNodes list, not the index of the return /
- /// parameter/ function which the attributes apply to.
- AttributeSet getSlotAttributes(unsigned Slot) const {
- return AttributeSet::get(Context, *getNode(Slot));
- }
-
- /// \brief Retrieve the attribute set node for the given "slot" in the
- /// AttrNode list.
- AttributeSetNode *getSlotNode(unsigned Slot) const {
- return getNode(Slot)->second;
- }
-
- /// \brief Return true if the AttributeSetNode for the FunctionIndex has an
+ /// \brief Return true if the AttributeSet or the FunctionIndex has an
/// enum attribute of the given kind.
bool hasFnAttribute(Attribute::AttrKind Kind) const {
return AvailableFunctionAttrs & ((uint64_t)1) << Kind;
}
- typedef AttributeSetNode::iterator iterator;
- iterator begin(unsigned Slot) const { return getSlotNode(Slot)->begin(); }
- iterator end(unsigned Slot) const { return getSlotNode(Slot)->end(); }
+ using iterator = const AttributeSet *;
- void Profile(FoldingSetNodeID &ID) const {
- Profile(ID, makeArrayRef(getNode(0), getNumSlots()));
- }
- static void Profile(FoldingSetNodeID &ID,
- ArrayRef<std::pair<unsigned, AttributeSetNode*>> Nodes) {
- for (const auto &Node : Nodes) {
- ID.AddInteger(Node.first);
- ID.AddPointer(Node.second);
- }
- }
+ iterator begin() const { return getTrailingObjects<AttributeSet>(); }
+ iterator end() const { return begin() + NumAttrSets; }
+
+ void Profile(FoldingSetNodeID &ID) const;
+ static void Profile(FoldingSetNodeID &ID, ArrayRef<AttributeSet> Nodes);
void dump() const;
};
diff --git a/contrib/llvm/lib/IR/AttributeSetNode.h b/contrib/llvm/lib/IR/AttributeSetNode.h
deleted file mode 100644
index 23ce371..0000000
--- a/contrib/llvm/lib/IR/AttributeSetNode.h
+++ /dev/null
@@ -1,106 +0,0 @@
-//===-- AttributeSetNode.h - AttributeSet Internal Node ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// \brief This file defines the node class used internally by AttributeSet.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_IR_ATTRIBUTESETNODE_H
-#define LLVM_IR_ATTRIBUTESETNODE_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/Support/TrailingObjects.h"
-#include <algorithm>
-#include <climits>
-#include <cstdint>
-#include <string>
-#include <utility>
-
-namespace llvm {
-
-//===----------------------------------------------------------------------===//
-/// \class
-/// \brief This class represents a group of attributes that apply to one
-/// element: function, return type, or parameter.
-class AttributeSetNode final
- : public FoldingSetNode,
- private TrailingObjects<AttributeSetNode, Attribute> {
- friend TrailingObjects;
-
- unsigned NumAttrs; ///< Number of attributes in this node.
- /// Bitset with a bit for each available attribute Attribute::AttrKind.
- uint64_t AvailableAttrs;
-
- AttributeSetNode(ArrayRef<Attribute> Attrs)
- : NumAttrs(Attrs.size()), AvailableAttrs(0) {
- static_assert(Attribute::EndAttrKinds <= sizeof(AvailableAttrs) * CHAR_BIT,
- "Too many attributes for AvailableAttrs");
- // There's memory after the node where we can store the entries in.
- std::copy(Attrs.begin(), Attrs.end(), getTrailingObjects<Attribute>());
-
- for (Attribute I : *this) {
- if (!I.isStringAttribute()) {
- AvailableAttrs |= ((uint64_t)1) << I.getKindAsEnum();
- }
- }
- }
-
-public:
- // AttributesSetNode is uniqued, these should not be available.
- AttributeSetNode(const AttributeSetNode &) = delete;
- AttributeSetNode &operator=(const AttributeSetNode &) = delete;
-
- void operator delete(void *p) { ::operator delete(p); }
-
- static AttributeSetNode *get(LLVMContext &C, ArrayRef<Attribute> Attrs);
-
- static AttributeSetNode *get(AttributeSet AS, unsigned Index) {
- return AS.getAttributes(Index);
- }
-
- /// \brief Return the number of attributes this AttributeSet contains.
- unsigned getNumAttributes() const { return NumAttrs; }
-
- bool hasAttribute(Attribute::AttrKind Kind) const {
- return AvailableAttrs & ((uint64_t)1) << Kind;
- }
- bool hasAttribute(StringRef Kind) const;
- bool hasAttributes() const { return NumAttrs != 0; }
-
- Attribute getAttribute(Attribute::AttrKind Kind) const;
- Attribute getAttribute(StringRef Kind) const;
-
- unsigned getAlignment() const;
- unsigned getStackAlignment() const;
- uint64_t getDereferenceableBytes() const;
- uint64_t getDereferenceableOrNullBytes() const;
- std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
- std::string getAsString(bool InAttrGrp) const;
-
- typedef const Attribute *iterator;
- iterator begin() const { return getTrailingObjects<Attribute>(); }
- iterator end() const { return begin() + NumAttrs; }
-
- void Profile(FoldingSetNodeID &ID) const {
- Profile(ID, makeArrayRef(begin(), end()));
- }
- static void Profile(FoldingSetNodeID &ID, ArrayRef<Attribute> AttrList) {
- for (const auto &Attr : AttrList)
- Attr.Profile(ID);
- }
-};
-
-} // end namespace llvm
-
-#endif // LLVM_IR_ATTRIBUTESETNODE_H
diff --git a/contrib/llvm/lib/IR/Attributes.cpp b/contrib/llvm/lib/IR/Attributes.cpp
index 1ec53cf..8f2e641 100644
--- a/contrib/llvm/lib/IR/Attributes.cpp
+++ b/contrib/llvm/lib/IR/Attributes.cpp
@@ -1,4 +1,4 @@
-//===-- Attributes.cpp - Implement AttributesList -------------------------===//
+//===- Attributes.cpp - Implement AttributesList --------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,23 +9,40 @@
//
// \file
// \brief This file implements the Attribute, AttributeImpl, AttrBuilder,
-// AttributeSetImpl, and AttributeSet classes.
+// AttributeListImpl, and AttributeList classes.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/Function.h"
#include "AttributeImpl.h"
#include "LLVMContextImpl.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/Mutex.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <map>
+#include <string>
+#include <tuple>
+#include <utility>
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -300,6 +317,8 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
return "returns_twice";
if (hasAttribute(Attribute::SExt))
return "signext";
+ if (hasAttribute(Attribute::Speculatable))
+ return "speculatable";
if (hasAttribute(Attribute::StackProtect))
return "ssp";
if (hasAttribute(Attribute::StackProtectReq))
@@ -411,9 +430,12 @@ bool Attribute::operator<(Attribute A) const {
//===----------------------------------------------------------------------===//
// Pin the vtables to this file.
-AttributeImpl::~AttributeImpl() {}
+AttributeImpl::~AttributeImpl() = default;
+
void EnumAttributeImpl::anchor() {}
+
void IntAttributeImpl::anchor() {}
+
void StringAttributeImpl::anchor() {}
bool AttributeImpl::hasAttribute(Attribute::AttrKind A) const {
@@ -473,9 +495,152 @@ bool AttributeImpl::operator<(const AttributeImpl &AI) const {
}
//===----------------------------------------------------------------------===//
+// AttributeSet Definition
+//===----------------------------------------------------------------------===//
+
+AttributeSet AttributeSet::get(LLVMContext &C, const AttrBuilder &B) {
+ return AttributeSet(AttributeSetNode::get(C, B));
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<Attribute> Attrs) {
+ return AttributeSet(AttributeSetNode::get(C, Attrs));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C,
+ Attribute::AttrKind Kind) const {
+ if (hasAttribute(Kind)) return *this;
+ AttrBuilder B;
+ B.addAttribute(Kind);
+ return addAttributes(C, AttributeSet::get(C, B));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C, StringRef Kind,
+ StringRef Value) const {
+ AttrBuilder B;
+ B.addAttribute(Kind, Value);
+ return addAttributes(C, AttributeSet::get(C, B));
+}
+
+AttributeSet AttributeSet::addAttributes(LLVMContext &C,
+ const AttributeSet AS) const {
+ if (!hasAttributes())
+ return AS;
+
+ if (!AS.hasAttributes())
+ return *this;
+
+ AttrBuilder B(AS);
+ for (Attribute I : *this)
+ B.addAttribute(I);
+
+ return get(C, B);
+}
+
+AttributeSet AttributeSet::removeAttribute(LLVMContext &C,
+ Attribute::AttrKind Kind) const {
+ if (!hasAttribute(Kind)) return *this;
+ AttrBuilder B;
+ B.addAttribute(Kind);
+ return removeAttributes(C, B);
+}
+
+AttributeSet AttributeSet::removeAttribute(LLVMContext &C,
+ StringRef Kind) const {
+ if (!hasAttribute(Kind)) return *this;
+ AttrBuilder B;
+ B.addAttribute(Kind);
+ return removeAttributes(C, B);
+}
+
+AttributeSet AttributeSet::removeAttributes(LLVMContext &C,
+ const AttrBuilder &Attrs) const {
+
+ // FIXME it is not obvious how this should work for alignment.
+ // For now, say we can't pass in alignment, which no current use does.
+ assert(!Attrs.hasAlignmentAttr() && "Attempt to change alignment!");
+
+ AttrBuilder B(*this);
+ B.remove(Attrs);
+ return get(C, B);
+}
+
+unsigned AttributeSet::getNumAttributes() const {
+ return SetNode ? SetNode->getNumAttributes() : 0;
+}
+
+bool AttributeSet::hasAttribute(Attribute::AttrKind Kind) const {
+ return SetNode ? SetNode->hasAttribute(Kind) : false;
+}
+
+bool AttributeSet::hasAttribute(StringRef Kind) const {
+ return SetNode ? SetNode->hasAttribute(Kind) : false;
+}
+
+Attribute AttributeSet::getAttribute(Attribute::AttrKind Kind) const {
+ return SetNode ? SetNode->getAttribute(Kind) : Attribute();
+}
+
+Attribute AttributeSet::getAttribute(StringRef Kind) const {
+ return SetNode ? SetNode->getAttribute(Kind) : Attribute();
+}
+
+unsigned AttributeSet::getAlignment() const {
+ return SetNode ? SetNode->getAlignment() : 0;
+}
+
+unsigned AttributeSet::getStackAlignment() const {
+ return SetNode ? SetNode->getStackAlignment() : 0;
+}
+
+uint64_t AttributeSet::getDereferenceableBytes() const {
+ return SetNode ? SetNode->getDereferenceableBytes() : 0;
+}
+
+uint64_t AttributeSet::getDereferenceableOrNullBytes() const {
+ return SetNode ? SetNode->getDereferenceableOrNullBytes() : 0;
+}
+
+std::pair<unsigned, Optional<unsigned>> AttributeSet::getAllocSizeArgs() const {
+ return SetNode ? SetNode->getAllocSizeArgs()
+ : std::pair<unsigned, Optional<unsigned>>(0, 0);
+}
+
+std::string AttributeSet::getAsString(bool InAttrGrp) const {
+ return SetNode ? SetNode->getAsString(InAttrGrp) : "";
+}
+
+AttributeSet::iterator AttributeSet::begin() const {
+ return SetNode ? SetNode->begin() : nullptr;
+}
+
+AttributeSet::iterator AttributeSet::end() const {
+ return SetNode ? SetNode->end() : nullptr;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void AttributeSet::dump() const {
+ dbgs() << "AS =\n";
+ dbgs() << " { ";
+ dbgs() << getAsString(true) << " }\n";
+}
+#endif
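
A minimal usage sketch of the value-semantics AttributeSet API defined above, not part of the patch; Ctx stands for an LLVMContext the caller already has:

  AttrBuilder B;
  B.addAttribute(Attribute::SExt);
  B.addDereferenceableAttr(16);
  AttributeSet AS = AttributeSet::get(Ctx, B);   // uniqued through AttributeSetNode
  assert(AS.hasAttribute(Attribute::SExt));
  assert(AS.getDereferenceableBytes() == 16);
  AttributeSet Rest = AS.removeAttribute(Ctx, Attribute::SExt);
  assert(!Rest.hasAttribute(Attribute::SExt));   // AS itself is unchanged
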
+
+//===----------------------------------------------------------------------===//
// AttributeSetNode Definition
//===----------------------------------------------------------------------===//
+AttributeSetNode::AttributeSetNode(ArrayRef<Attribute> Attrs)
+ : AvailableAttrs(0), NumAttrs(Attrs.size()) {
+ // There's memory after the node where we can store the entries.
+ std::copy(Attrs.begin(), Attrs.end(), getTrailingObjects<Attribute>());
+
+ for (Attribute I : *this) {
+ if (!I.isStringAttribute()) {
+ AvailableAttrs |= ((uint64_t)1) << I.getKindAsEnum();
+ }
+ }
+}
+
AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
ArrayRef<Attribute> Attrs) {
if (Attrs.empty())
@@ -504,10 +669,52 @@ AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
pImpl->AttrsSetNodes.InsertNode(PA, InsertPoint);
}
- // Return the AttributesListNode that we found or created.
+ // Return the AttributeSetNode that we found or created.
return PA;
}
+AttributeSetNode *AttributeSetNode::get(LLVMContext &C, const AttrBuilder &B) {
+ // Add target-independent attributes.
+ SmallVector<Attribute, 8> Attrs;
+ for (Attribute::AttrKind Kind = Attribute::None;
+ Kind != Attribute::EndAttrKinds; Kind = Attribute::AttrKind(Kind + 1)) {
+ if (!B.contains(Kind))
+ continue;
+
+ Attribute Attr;
+ switch (Kind) {
+ case Attribute::Alignment:
+ Attr = Attribute::getWithAlignment(C, B.getAlignment());
+ break;
+ case Attribute::StackAlignment:
+ Attr = Attribute::getWithStackAlignment(C, B.getStackAlignment());
+ break;
+ case Attribute::Dereferenceable:
+ Attr = Attribute::getWithDereferenceableBytes(
+ C, B.getDereferenceableBytes());
+ break;
+ case Attribute::DereferenceableOrNull:
+ Attr = Attribute::getWithDereferenceableOrNullBytes(
+ C, B.getDereferenceableOrNullBytes());
+ break;
+ case Attribute::AllocSize: {
+ auto A = B.getAllocSizeArgs();
+ Attr = Attribute::getWithAllocSizeArgs(C, A.first, A.second);
+ break;
+ }
+ default:
+ Attr = Attribute::get(C, Kind);
+ }
+ Attrs.push_back(Attr);
+ }
+
+ // Add target-dependent (string) attributes.
+ for (const auto &TDA : B.td_attrs())
+ Attrs.emplace_back(Attribute::get(C, TDA.first, TDA.second));
+
+ return get(C, Attrs);
+}
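
The switch above is where the integer-valued kinds pick up their payloads. A short, hedged sketch of the round trip (Ctx is an assumed LLVMContext; allocsize(0) means the allocation size comes from argument 0):

  AttrBuilder B;
  B.addDereferenceableAttr(32);
  B.addAllocSizeAttr(/*ElemSizeArg=*/0, /*NumElemsArg=*/None);
  AttributeSetNode *N = AttributeSetNode::get(Ctx, B);
  assert(N->getDereferenceableBytes() == 32);
  assert(N->getAllocSizeArgs().first == 0);
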
+
bool AttributeSetNode::hasAttribute(StringRef Kind) const {
for (Attribute I : *this)
if (I.hasAttribute(Kind))
@@ -578,46 +785,91 @@ std::string AttributeSetNode::getAsString(bool InAttrGrp) const {
}
//===----------------------------------------------------------------------===//
-// AttributeSetImpl Definition
+// AttributeListImpl Definition
//===----------------------------------------------------------------------===//
-LLVM_DUMP_METHOD void AttributeSetImpl::dump() const {
- AttributeSet(const_cast<AttributeSetImpl *>(this)).dump();
+/// Map from AttributeList index to the internal array index. Adding one works:
+/// FunctionIndex: ~0U -> 0
+/// ReturnIndex: 0 -> 1
+/// FirstArgIndex: 1.. -> 2..
+static constexpr unsigned attrIdxToArrayIdx(unsigned Index) {
+ // MSVC warns about '~0U + 1' wrapping around when this is called on
+ // FunctionIndex, so cast to int first.
+ return static_cast<int>(Index) + 1;
+}
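
Spelled out for the three interesting index constants (the FunctionIndex case mirrors the static_assert in the constructor below):

  static_assert(attrIdxToArrayIdx(AttributeList::FunctionIndex) == 0, "fn -> 0");
  static_assert(attrIdxToArrayIdx(AttributeList::ReturnIndex) == 1, "ret -> 1");
  static_assert(attrIdxToArrayIdx(AttributeList::FirstArgIndex) == 2, "arg 0 -> 2");
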
+
+AttributeListImpl::AttributeListImpl(LLVMContext &C,
+ ArrayRef<AttributeSet> Sets)
+ : AvailableFunctionAttrs(0), Context(C), NumAttrSets(Sets.size()) {
+ assert(!Sets.empty() && "pointless AttributeListImpl");
+
+ // There's memory after the node where we can store the entries.
+ std::copy(Sets.begin(), Sets.end(), getTrailingObjects<AttributeSet>());
+
+ // Initialize AvailableFunctionAttrs summary bitset.
+ static_assert(Attribute::EndAttrKinds <=
+ sizeof(AvailableFunctionAttrs) * CHAR_BIT,
+ "Too many attributes");
+ static_assert(attrIdxToArrayIdx(AttributeList::FunctionIndex) == 0U,
+ "function should be stored in slot 0");
+ for (Attribute I : Sets[0]) {
+ if (!I.isStringAttribute())
+ AvailableFunctionAttrs |= 1ULL << I.getKindAsEnum();
+ }
}
+void AttributeListImpl::Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, makeArrayRef(begin(), end()));
+}
+
+void AttributeListImpl::Profile(FoldingSetNodeID &ID,
+ ArrayRef<AttributeSet> Sets) {
+ for (const auto &Set : Sets)
+ ID.AddPointer(Set.SetNode);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void AttributeListImpl::dump() const {
+ AttributeList(const_cast<AttributeListImpl *>(this)).dump();
+}
+#endif
+
//===----------------------------------------------------------------------===//
-// AttributeSet Construction and Mutation Methods
+// AttributeList Construction and Mutation Methods
//===----------------------------------------------------------------------===//
-AttributeSet
-AttributeSet::getImpl(LLVMContext &C,
- ArrayRef<std::pair<unsigned, AttributeSetNode*> > Attrs) {
+AttributeList AttributeList::getImpl(LLVMContext &C,
+ ArrayRef<AttributeSet> AttrSets) {
+ assert(!AttrSets.empty() && "pointless AttributeListImpl");
+
LLVMContextImpl *pImpl = C.pImpl;
FoldingSetNodeID ID;
- AttributeSetImpl::Profile(ID, Attrs);
+ AttributeListImpl::Profile(ID, AttrSets);
void *InsertPoint;
- AttributeSetImpl *PA = pImpl->AttrsLists.FindNodeOrInsertPos(ID, InsertPoint);
+ AttributeListImpl *PA =
+ pImpl->AttrsLists.FindNodeOrInsertPos(ID, InsertPoint);
// If we didn't find any existing attributes of the same shape then
// create a new one and insert it.
if (!PA) {
- // Coallocate entries after the AttributeSetImpl itself.
+ // Co-allocate entries after the AttributeListImpl itself.
void *Mem = ::operator new(
- AttributeSetImpl::totalSizeToAlloc<IndexAttrPair>(Attrs.size()));
- PA = new (Mem) AttributeSetImpl(C, Attrs);
+ AttributeListImpl::totalSizeToAlloc<AttributeSet>(AttrSets.size()));
+ PA = new (Mem) AttributeListImpl(C, AttrSets);
pImpl->AttrsLists.InsertNode(PA, InsertPoint);
}
// Return the AttributesList that we found or created.
- return AttributeSet(PA);
+ return AttributeList(PA);
}
-AttributeSet AttributeSet::get(LLVMContext &C,
- ArrayRef<std::pair<unsigned, Attribute> > Attrs){
+AttributeList
+AttributeList::get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, Attribute>> Attrs) {
// If there are no attributes then return a null AttributesList pointer.
if (Attrs.empty())
- return AttributeSet();
+ return AttributeList();
assert(std::is_sorted(Attrs.begin(), Attrs.end(),
[](const std::pair<unsigned, Attribute> &LHS,
@@ -632,8 +884,8 @@ AttributeSet AttributeSet::get(LLVMContext &C,
// Create a vector of (unsigned, AttributeSetNode*) pairs from the attributes
// list.
- SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrPairVec;
- for (ArrayRef<std::pair<unsigned, Attribute> >::iterator I = Attrs.begin(),
+ SmallVector<std::pair<unsigned, AttributeSet>, 8> AttrPairVec;
+ for (ArrayRef<std::pair<unsigned, Attribute>>::iterator I = Attrs.begin(),
E = Attrs.end(); I != E; ) {
unsigned Index = I->first;
SmallVector<Attribute, 4> AttrVec;
@@ -642,514 +894,427 @@ AttributeSet AttributeSet::get(LLVMContext &C,
++I;
}
- AttrPairVec.emplace_back(Index, AttributeSetNode::get(C, AttrVec));
+ AttrPairVec.emplace_back(Index, AttributeSet::get(C, AttrVec));
}
- return getImpl(C, AttrPairVec);
+ return get(C, AttrPairVec);
}
-AttributeSet AttributeSet::get(LLVMContext &C,
- ArrayRef<std::pair<unsigned,
- AttributeSetNode*> > Attrs) {
+AttributeList
+AttributeList::get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, AttributeSet>> Attrs) {
// If there are no attributes then return a null AttributesList pointer.
if (Attrs.empty())
- return AttributeSet();
+ return AttributeList();
- return getImpl(C, Attrs);
-}
+ assert(std::is_sorted(Attrs.begin(), Attrs.end(),
+ [](const std::pair<unsigned, AttributeSet> &LHS,
+ const std::pair<unsigned, AttributeSet> &RHS) {
+ return LHS.first < RHS.first;
+ }) &&
+ "Misordered Attributes list!");
+ assert(none_of(Attrs,
+ [](const std::pair<unsigned, AttributeSet> &Pair) {
+ return !Pair.second.hasAttributes();
+ }) &&
+ "Pointless attribute!");
-AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
- const AttrBuilder &B) {
- if (!B.hasAttributes())
- return AttributeSet();
+ unsigned MaxIndex = Attrs.back().first;
- // Add target-independent attributes.
- SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
- for (Attribute::AttrKind Kind = Attribute::None;
- Kind != Attribute::EndAttrKinds; Kind = Attribute::AttrKind(Kind + 1)) {
- if (!B.contains(Kind))
- continue;
+ SmallVector<AttributeSet, 4> AttrVec(attrIdxToArrayIdx(MaxIndex) + 1);
+ for (auto Pair : Attrs)
+ AttrVec[attrIdxToArrayIdx(Pair.first)] = Pair.second;
- Attribute Attr;
- switch (Kind) {
- case Attribute::Alignment:
- Attr = Attribute::getWithAlignment(C, B.getAlignment());
- break;
- case Attribute::StackAlignment:
- Attr = Attribute::getWithStackAlignment(C, B.getStackAlignment());
- break;
- case Attribute::Dereferenceable:
- Attr = Attribute::getWithDereferenceableBytes(
- C, B.getDereferenceableBytes());
- break;
- case Attribute::DereferenceableOrNull:
- Attr = Attribute::getWithDereferenceableOrNullBytes(
- C, B.getDereferenceableOrNullBytes());
- break;
- case Attribute::AllocSize: {
- auto A = B.getAllocSizeArgs();
- Attr = Attribute::getWithAllocSizeArgs(C, A.first, A.second);
+ return getImpl(C, AttrVec);
+}
+
+AttributeList AttributeList::get(LLVMContext &C, AttributeSet FnAttrs,
+ AttributeSet RetAttrs,
+ ArrayRef<AttributeSet> ArgAttrs) {
+ // Scan from the end to find the last argument with attributes. Most
+ // arguments don't have attributes, so it's nice if we can have fewer unique
+ // AttributeListImpls by dropping empty attribute sets at the end of the list.
+ unsigned NumSets = 0;
+ for (size_t I = ArgAttrs.size(); I != 0; --I) {
+ if (ArgAttrs[I - 1].hasAttributes()) {
+ NumSets = I + 2;
break;
}
- default:
- Attr = Attribute::get(C, Kind);
- }
- Attrs.emplace_back(Index, Attr);
+ }
+ if (NumSets == 0) {
+ // Check function and return attributes if we didn't have argument
+ // attributes.
+ if (RetAttrs.hasAttributes())
+ NumSets = 2;
+ else if (FnAttrs.hasAttributes())
+ NumSets = 1;
}
- // Add target-dependent (string) attributes.
- for (const auto &TDA : B.td_attrs())
- Attrs.emplace_back(Index, Attribute::get(C, TDA.first, TDA.second));
+ // If all attribute sets were empty, we can use the empty attribute list.
+ if (NumSets == 0)
+ return AttributeList();
+
+ SmallVector<AttributeSet, 8> AttrSets;
+ AttrSets.reserve(NumSets);
+ // If we have any attributes, we always have function attributes.
+ AttrSets.push_back(FnAttrs);
+ if (NumSets > 1)
+ AttrSets.push_back(RetAttrs);
+ if (NumSets > 2) {
+ // Drop the empty argument attribute sets at the end.
+ ArgAttrs = ArgAttrs.take_front(NumSets - 2);
+ AttrSets.insert(AttrSets.end(), ArgAttrs.begin(), ArgAttrs.end());
+ }
- return get(C, Attrs);
+ return getImpl(C, AttrSets);
}
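
A hedged sketch of the trailing-empty-set trimming above: with attributes only on argument 0, the two empty sets behind it are dropped and the list keeps three sets (function, return, arg 0). Ctx is an assumed LLVMContext.

  AttrBuilder ArgB;
  ArgB.addAttribute(Attribute::SExt);
  AttributeList AL = AttributeList::get(Ctx, /*FnAttrs=*/AttributeSet(),
                                        /*RetAttrs=*/AttributeSet(),
                                        {AttributeSet::get(Ctx, ArgB),
                                         AttributeSet(), AttributeSet()});
  assert(AL.getNumAttrSets() == 3);
  assert(AL.hasParamAttribute(0, Attribute::SExt));
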
-AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
- ArrayRef<Attribute::AttrKind> Kinds) {
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ const AttrBuilder &B) {
+ if (!B.hasAttributes())
+ return AttributeList();
+ Index = attrIdxToArrayIdx(Index);
+ SmallVector<AttributeSet, 8> AttrSets(Index + 1);
+ AttrSets[Index] = AttributeSet::get(C, B);
+ return getImpl(C, AttrSets);
+}
+
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ ArrayRef<Attribute::AttrKind> Kinds) {
SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
for (Attribute::AttrKind K : Kinds)
Attrs.emplace_back(Index, Attribute::get(C, K));
return get(C, Attrs);
}
-AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
- ArrayRef<StringRef> Kinds) {
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ ArrayRef<StringRef> Kinds) {
SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
for (StringRef K : Kinds)
Attrs.emplace_back(Index, Attribute::get(C, K));
return get(C, Attrs);
}
-AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<AttributeSet> Attrs) {
- if (Attrs.empty()) return AttributeSet();
- if (Attrs.size() == 1) return Attrs[0];
-
- SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrNodeVec;
- AttributeSetImpl *A0 = Attrs[0].pImpl;
- if (A0)
- AttrNodeVec.append(A0->getNode(0), A0->getNode(A0->getNumSlots()));
- // Copy all attributes from Attrs into AttrNodeVec while keeping AttrNodeVec
- // ordered by index. Because we know that each list in Attrs is ordered by
- // index we only need to merge each successive list in rather than doing a
- // full sort.
- for (unsigned I = 1, E = Attrs.size(); I != E; ++I) {
- AttributeSetImpl *AS = Attrs[I].pImpl;
- if (!AS) continue;
- SmallVector<std::pair<unsigned, AttributeSetNode *>, 8>::iterator
- ANVI = AttrNodeVec.begin(), ANVE;
- for (const IndexAttrPair *AI = AS->getNode(0),
- *AE = AS->getNode(AS->getNumSlots());
- AI != AE; ++AI) {
- ANVE = AttrNodeVec.end();
- while (ANVI != ANVE && ANVI->first <= AI->first)
- ++ANVI;
- ANVI = AttrNodeVec.insert(ANVI, *AI) + 1;
- }
+AttributeList AttributeList::get(LLVMContext &C,
+ ArrayRef<AttributeList> Attrs) {
+ if (Attrs.empty())
+ return AttributeList();
+ if (Attrs.size() == 1)
+ return Attrs[0];
+
+ unsigned MaxSize = 0;
+ for (AttributeList List : Attrs)
+ MaxSize = std::max(MaxSize, List.getNumAttrSets());
+
+ // If every list was empty, there is no point in merging the lists.
+ if (MaxSize == 0)
+ return AttributeList();
+
+ SmallVector<AttributeSet, 8> NewAttrSets(MaxSize);
+ for (unsigned I = 0; I < MaxSize; ++I) {
+ AttrBuilder CurBuilder;
+ for (AttributeList List : Attrs)
+ CurBuilder.merge(List.getAttributes(I - 1));
+ NewAttrSets[I] = AttributeSet::get(C, CurBuilder);
}
- return getImpl(C, AttrNodeVec);
+ return getImpl(C, NewAttrSets);
}
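
A short sketch of merging two lists with the overload above (Ctx assumed):

  AttrBuilder FnB, RetB;
  FnB.addAttribute(Attribute::StackProtect);
  RetB.addAttribute(Attribute::SExt);
  AttributeList LA = AttributeList::get(Ctx, AttributeList::FunctionIndex, FnB);
  AttributeList LB = AttributeList::get(Ctx, AttributeList::ReturnIndex, RetB);
  AttributeList Merged = AttributeList::get(Ctx, {LA, LB});
  assert(Merged.hasFnAttribute(Attribute::StackProtect));
  assert(Merged.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt));
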
-AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index,
- Attribute::AttrKind Kind) const {
+AttributeList AttributeList::addAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Kind) const {
if (hasAttribute(Index, Kind)) return *this;
- return addAttributes(C, Index, AttributeSet::get(C, Index, Kind));
+ AttrBuilder B;
+ B.addAttribute(Kind);
+ return addAttributes(C, Index, B);
}
-AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index,
- StringRef Kind, StringRef Value) const {
- llvm::AttrBuilder B;
+AttributeList AttributeList::addAttribute(LLVMContext &C, unsigned Index,
+ StringRef Kind,
+ StringRef Value) const {
+ AttrBuilder B;
B.addAttribute(Kind, Value);
- return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+ return addAttributes(C, Index, B);
}
-AttributeSet AttributeSet::addAttribute(LLVMContext &C,
- ArrayRef<unsigned> Indices,
- Attribute A) const {
- unsigned I = 0, E = pImpl ? pImpl->getNumSlots() : 0;
- auto IdxI = Indices.begin(), IdxE = Indices.end();
- SmallVector<AttributeSet, 4> AttrSet;
-
- while (I != E && IdxI != IdxE) {
- if (getSlotIndex(I) < *IdxI)
- AttrSet.emplace_back(getSlotAttributes(I++));
- else if (getSlotIndex(I) > *IdxI)
- AttrSet.emplace_back(AttributeSet::get(C, std::make_pair(*IdxI++, A)));
- else {
- AttrBuilder B(getSlotAttributes(I), *IdxI);
- B.addAttribute(A);
- AttrSet.emplace_back(AttributeSet::get(C, *IdxI, B));
- ++I;
- ++IdxI;
- }
- }
-
- while (I != E)
- AttrSet.emplace_back(getSlotAttributes(I++));
-
- while (IdxI != IdxE)
- AttrSet.emplace_back(AttributeSet::get(C, std::make_pair(*IdxI++, A)));
-
- return get(C, AttrSet);
+AttributeList AttributeList::addAttribute(LLVMContext &C, unsigned Index,
+ Attribute A) const {
+ AttrBuilder B;
+ B.addAttribute(A);
+ return addAttributes(C, Index, B);
}
-AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Index,
- AttributeSet Attrs) const {
- if (!pImpl) return Attrs;
- if (!Attrs.pImpl) return *this;
+AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index,
+ const AttrBuilder &B) const {
+ if (!B.hasAttributes())
+ return *this;
+
+ if (!pImpl)
+ return AttributeList::get(C, {{Index, AttributeSet::get(C, B)}});
#ifndef NDEBUG
// FIXME it is not obvious how this should work for alignment. For now, say
// we can't change a known alignment.
- unsigned OldAlign = getParamAlignment(Index);
- unsigned NewAlign = Attrs.getParamAlignment(Index);
+ unsigned OldAlign = getAttributes(Index).getAlignment();
+ unsigned NewAlign = B.getAlignment();
assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
"Attempt to change alignment!");
#endif
- // Add the attribute slots before the one we're trying to add.
- SmallVector<AttributeSet, 4> AttrSet;
- uint64_t NumAttrs = pImpl->getNumSlots();
- AttributeSet AS;
- uint64_t LastIndex = 0;
- for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
- if (getSlotIndex(I) >= Index) {
- if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
- break;
- }
- LastIndex = I + 1;
- AttrSet.push_back(getSlotAttributes(I));
- }
+ Index = attrIdxToArrayIdx(Index);
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ if (Index >= AttrSets.size())
+ AttrSets.resize(Index + 1);
- // Now add the attribute into the correct slot. There may already be an
- // AttributeSet there.
- AttrBuilder B(AS, Index);
+ AttrBuilder Merged(AttrSets[Index]);
+ Merged.merge(B);
+ AttrSets[Index] = AttributeSet::get(C, Merged);
- for (unsigned I = 0, E = Attrs.pImpl->getNumSlots(); I != E; ++I)
- if (Attrs.getSlotIndex(I) == Index) {
- for (AttributeSetImpl::iterator II = Attrs.pImpl->begin(I),
- IE = Attrs.pImpl->end(I); II != IE; ++II)
- B.addAttribute(*II);
- break;
- }
+ return getImpl(C, AttrSets);
+}
- AttrSet.push_back(AttributeSet::get(C, Index, B));
+AttributeList AttributeList::addParamAttribute(LLVMContext &C,
+ ArrayRef<unsigned> ArgNos,
+ Attribute A) const {
+ assert(std::is_sorted(ArgNos.begin(), ArgNos.end()));
- // Add the remaining attribute slots.
- for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
- AttrSet.push_back(getSlotAttributes(I));
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ unsigned MaxIndex = attrIdxToArrayIdx(ArgNos.back() + FirstArgIndex);
+ if (MaxIndex >= AttrSets.size())
+ AttrSets.resize(MaxIndex + 1);
- return get(C, AttrSet);
+ for (unsigned ArgNo : ArgNos) {
+ unsigned Index = attrIdxToArrayIdx(ArgNo + FirstArgIndex);
+ AttrBuilder B(AttrSets[Index]);
+ B.addAttribute(A);
+ AttrSets[Index] = AttributeSet::get(C, B);
+ }
+
+ return getImpl(C, AttrSets);
}
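
A sketch of the bulk parameter form above: tag arguments 0 and 2 with signext in one pass (ArgNos must be pre-sorted, as the assertion at the top of the function requires; Ctx assumed):

  AttributeList AL = AttributeList().addParamAttribute(
      Ctx, {0, 2}, Attribute::get(Ctx, Attribute::SExt));
  assert(AL.hasParamAttribute(0, Attribute::SExt));
  assert(!AL.hasParamAttribute(1, Attribute::SExt));
  assert(AL.hasParamAttribute(2, Attribute::SExt));
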
-AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Index,
- Attribute::AttrKind Kind) const {
+AttributeList AttributeList::removeAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Kind) const {
if (!hasAttribute(Index, Kind)) return *this;
- return removeAttributes(C, Index, AttributeSet::get(C, Index, Kind));
+ AttrBuilder B;
+ B.addAttribute(Kind);
+ return removeAttributes(C, Index, B);
}
-AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Index,
- StringRef Kind) const {
+AttributeList AttributeList::removeAttribute(LLVMContext &C, unsigned Index,
+ StringRef Kind) const {
if (!hasAttribute(Index, Kind)) return *this;
- return removeAttributes(C, Index, AttributeSet::get(C, Index, Kind));
+ AttrBuilder B;
+ B.addAttribute(Kind);
+ return removeAttributes(C, Index, B);
}
-AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index,
- AttributeSet Attrs) const {
- if (!pImpl) return AttributeSet();
- if (!Attrs.pImpl) return *this;
+AttributeList
+AttributeList::removeAttributes(LLVMContext &C, unsigned Index,
+ const AttrBuilder &AttrsToRemove) const {
+ if (!pImpl)
+ return AttributeList();
// FIXME it is not obvious how this should work for alignment.
// For now, say we can't pass in alignment, which no current use does.
- assert(!Attrs.hasAttribute(Index, Attribute::Alignment) &&
- "Attempt to change alignment!");
-
- // Add the attribute slots before the one we're trying to add.
- SmallVector<AttributeSet, 4> AttrSet;
- uint64_t NumAttrs = pImpl->getNumSlots();
- AttributeSet AS;
- uint64_t LastIndex = 0;
- for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
- if (getSlotIndex(I) >= Index) {
- if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
- break;
- }
- LastIndex = I + 1;
- AttrSet.push_back(getSlotAttributes(I));
- }
-
- // Now remove the attribute from the correct slot. There may already be an
- // AttributeSet there.
- AttrBuilder B(AS, Index);
-
- for (unsigned I = 0, E = Attrs.pImpl->getNumSlots(); I != E; ++I)
- if (Attrs.getSlotIndex(I) == Index) {
- B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Index);
- break;
- }
+ assert(!AttrsToRemove.hasAlignmentAttr() && "Attempt to change alignment!");
- AttrSet.push_back(AttributeSet::get(C, Index, B));
+ Index = attrIdxToArrayIdx(Index);
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ if (Index >= AttrSets.size())
+ AttrSets.resize(Index + 1);
- // Add the remaining attribute slots.
- for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
- AttrSet.push_back(getSlotAttributes(I));
+ AttrBuilder B(AttrSets[Index]);
+ B.remove(AttrsToRemove);
+ AttrSets[Index] = AttributeSet::get(C, B);
- return get(C, AttrSet);
+ return getImpl(C, AttrSets);
}
-AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index,
- const AttrBuilder &Attrs) const {
- if (!pImpl) return AttributeSet();
-
- // FIXME it is not obvious how this should work for alignment.
- // For now, say we can't pass in alignment, which no current use does.
- assert(!Attrs.hasAlignmentAttr() && "Attempt to change alignment!");
-
- // Add the attribute slots before the one we're trying to add.
- SmallVector<AttributeSet, 4> AttrSet;
- uint64_t NumAttrs = pImpl->getNumSlots();
- AttributeSet AS;
- uint64_t LastIndex = 0;
- for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
- if (getSlotIndex(I) >= Index) {
- if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
- break;
- }
- LastIndex = I + 1;
- AttrSet.push_back(getSlotAttributes(I));
- }
-
- // Now remove the attribute from the correct slot. There may already be an
- // AttributeSet there.
- AttrBuilder B(AS, Index);
- B.remove(Attrs);
-
- AttrSet.push_back(AttributeSet::get(C, Index, B));
-
- // Add the remaining attribute slots.
- for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
- AttrSet.push_back(getSlotAttributes(I));
-
- return get(C, AttrSet);
+AttributeList AttributeList::removeAttributes(LLVMContext &C,
+ unsigned WithoutIndex) const {
+ if (!pImpl)
+ return AttributeList();
+ WithoutIndex = attrIdxToArrayIdx(WithoutIndex);
+ if (WithoutIndex >= getNumAttrSets())
+ return *this;
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ AttrSets[WithoutIndex] = AttributeSet();
+ return getImpl(C, AttrSets);
}
-AttributeSet AttributeSet::addDereferenceableAttr(LLVMContext &C, unsigned Index,
- uint64_t Bytes) const {
- llvm::AttrBuilder B;
+AttributeList AttributeList::addDereferenceableAttr(LLVMContext &C,
+ unsigned Index,
+ uint64_t Bytes) const {
+ AttrBuilder B;
B.addDereferenceableAttr(Bytes);
- return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+ return addAttributes(C, Index, B);
}
-AttributeSet AttributeSet::addDereferenceableOrNullAttr(LLVMContext &C,
- unsigned Index,
- uint64_t Bytes) const {
- llvm::AttrBuilder B;
+AttributeList
+AttributeList::addDereferenceableOrNullAttr(LLVMContext &C, unsigned Index,
+ uint64_t Bytes) const {
+ AttrBuilder B;
B.addDereferenceableOrNullAttr(Bytes);
- return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+ return addAttributes(C, Index, B);
}
-AttributeSet
-AttributeSet::addAllocSizeAttr(LLVMContext &C, unsigned Index,
- unsigned ElemSizeArg,
- const Optional<unsigned> &NumElemsArg) {
- llvm::AttrBuilder B;
+AttributeList
+AttributeList::addAllocSizeAttr(LLVMContext &C, unsigned Index,
+ unsigned ElemSizeArg,
+ const Optional<unsigned> &NumElemsArg) {
+ AttrBuilder B;
B.addAllocSizeAttr(ElemSizeArg, NumElemsArg);
- return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+ return addAttributes(C, Index, B);
}
//===----------------------------------------------------------------------===//
-// AttributeSet Accessor Methods
+// AttributeList Accessor Methods
//===----------------------------------------------------------------------===//
-LLVMContext &AttributeSet::getContext() const {
- return pImpl->getContext();
-}
+LLVMContext &AttributeList::getContext() const { return pImpl->getContext(); }
-AttributeSet AttributeSet::getParamAttributes(unsigned Index) const {
- return pImpl && hasAttributes(Index) ?
- AttributeSet::get(pImpl->getContext(),
- ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
- std::make_pair(Index, getAttributes(Index)))) :
- AttributeSet();
+AttributeSet AttributeList::getParamAttributes(unsigned ArgNo) const {
+ return getAttributes(ArgNo + FirstArgIndex);
}
-AttributeSet AttributeSet::getRetAttributes() const {
- return pImpl && hasAttributes(ReturnIndex) ?
- AttributeSet::get(pImpl->getContext(),
- ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
- std::make_pair(ReturnIndex,
- getAttributes(ReturnIndex)))) :
- AttributeSet();
+AttributeSet AttributeList::getRetAttributes() const {
+ return getAttributes(ReturnIndex);
}
-AttributeSet AttributeSet::getFnAttributes() const {
- return pImpl && hasAttributes(FunctionIndex) ?
- AttributeSet::get(pImpl->getContext(),
- ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
- std::make_pair(FunctionIndex,
- getAttributes(FunctionIndex)))) :
- AttributeSet();
+AttributeSet AttributeList::getFnAttributes() const {
+ return getAttributes(FunctionIndex);
}
-bool AttributeSet::hasAttribute(unsigned Index, Attribute::AttrKind Kind) const{
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN && ASN->hasAttribute(Kind);
+bool AttributeList::hasAttribute(unsigned Index,
+ Attribute::AttrKind Kind) const {
+ return getAttributes(Index).hasAttribute(Kind);
}
-bool AttributeSet::hasAttribute(unsigned Index, StringRef Kind) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN && ASN->hasAttribute(Kind);
+bool AttributeList::hasAttribute(unsigned Index, StringRef Kind) const {
+ return getAttributes(Index).hasAttribute(Kind);
}
-bool AttributeSet::hasAttributes(unsigned Index) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN && ASN->hasAttributes();
+bool AttributeList::hasAttributes(unsigned Index) const {
+ return getAttributes(Index).hasAttributes();
}
-bool AttributeSet::hasFnAttribute(Attribute::AttrKind Kind) const {
+bool AttributeList::hasFnAttribute(Attribute::AttrKind Kind) const {
return pImpl && pImpl->hasFnAttribute(Kind);
}
-bool AttributeSet::hasFnAttribute(StringRef Kind) const {
- return hasAttribute(AttributeSet::FunctionIndex, Kind);
+bool AttributeList::hasFnAttribute(StringRef Kind) const {
+ return hasAttribute(AttributeList::FunctionIndex, Kind);
+}
+
+bool AttributeList::hasParamAttribute(unsigned ArgNo,
+ Attribute::AttrKind Kind) const {
+ return hasAttribute(ArgNo + FirstArgIndex, Kind);
}
-bool AttributeSet::hasAttrSomewhere(Attribute::AttrKind Attr,
- unsigned *Index) const {
+bool AttributeList::hasAttrSomewhere(Attribute::AttrKind Attr,
+ unsigned *Index) const {
if (!pImpl) return false;
- for (unsigned I = 0, E = pImpl->getNumSlots(); I != E; ++I)
- for (AttributeSetImpl::iterator II = pImpl->begin(I),
- IE = pImpl->end(I); II != IE; ++II)
- if (II->hasAttribute(Attr)) {
- if (Index) *Index = pImpl->getSlotIndex(I);
- return true;
- }
+ for (unsigned I = index_begin(), E = index_end(); I != E; ++I) {
+ if (hasAttribute(I, Attr)) {
+ if (Index)
+ *Index = I;
+ return true;
+ }
+ }
return false;
}
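
The index reported back through the out-parameter is an AttributeList index (FunctionIndex, ReturnIndex, or FirstArgIndex + n), not a slot in the internal array. A small sketch (Ctx assumed):

  AttrBuilder RB;
  RB.addAttribute(Attribute::SExt);
  AttributeList L = AttributeList::get(Ctx, AttributeList::ReturnIndex, RB);
  unsigned Where = 0;
  bool Found = L.hasAttrSomewhere(Attribute::SExt, &Where);
  assert(Found && Where == AttributeList::ReturnIndex);
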
-Attribute AttributeSet::getAttribute(unsigned Index,
- Attribute::AttrKind Kind) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getAttribute(Kind) : Attribute();
+Attribute AttributeList::getAttribute(unsigned Index,
+ Attribute::AttrKind Kind) const {
+ return getAttributes(Index).getAttribute(Kind);
}
-Attribute AttributeSet::getAttribute(unsigned Index,
- StringRef Kind) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getAttribute(Kind) : Attribute();
+Attribute AttributeList::getAttribute(unsigned Index, StringRef Kind) const {
+ return getAttributes(Index).getAttribute(Kind);
}
-unsigned AttributeSet::getParamAlignment(unsigned Index) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getAlignment() : 0;
+unsigned AttributeList::getRetAlignment() const {
+ return getAttributes(ReturnIndex).getAlignment();
}
-unsigned AttributeSet::getStackAlignment(unsigned Index) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getStackAlignment() : 0;
+unsigned AttributeList::getParamAlignment(unsigned ArgNo) const {
+ return getAttributes(ArgNo + FirstArgIndex).getAlignment();
}
-uint64_t AttributeSet::getDereferenceableBytes(unsigned Index) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getDereferenceableBytes() : 0;
+unsigned AttributeList::getStackAlignment(unsigned Index) const {
+ return getAttributes(Index).getStackAlignment();
}
-uint64_t AttributeSet::getDereferenceableOrNullBytes(unsigned Index) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getDereferenceableOrNullBytes() : 0;
+uint64_t AttributeList::getDereferenceableBytes(unsigned Index) const {
+ return getAttributes(Index).getDereferenceableBytes();
}
-std::pair<unsigned, Optional<unsigned>>
-AttributeSet::getAllocSizeArgs(unsigned Index) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getAllocSizeArgs() : std::make_pair(0u, Optional<unsigned>(0u));
+uint64_t AttributeList::getDereferenceableOrNullBytes(unsigned Index) const {
+ return getAttributes(Index).getDereferenceableOrNullBytes();
}
-std::string AttributeSet::getAsString(unsigned Index, bool InAttrGrp) const {
- AttributeSetNode *ASN = getAttributes(Index);
- return ASN ? ASN->getAsString(InAttrGrp) : std::string("");
+std::pair<unsigned, Optional<unsigned>>
+AttributeList::getAllocSizeArgs(unsigned Index) const {
+ return getAttributes(Index).getAllocSizeArgs();
}
-AttributeSetNode *AttributeSet::getAttributes(unsigned Index) const {
- if (!pImpl) return nullptr;
-
- // Loop through to find the attribute node we want.
- for (unsigned I = 0, E = pImpl->getNumSlots(); I != E; ++I)
- if (pImpl->getSlotIndex(I) == Index)
- return pImpl->getSlotNode(I);
+std::string AttributeList::getAsString(unsigned Index, bool InAttrGrp) const {
+ return getAttributes(Index).getAsString(InAttrGrp);
+}
- return nullptr;
+AttributeSet AttributeList::getAttributes(unsigned Index) const {
+ Index = attrIdxToArrayIdx(Index);
+ if (!pImpl || Index >= getNumAttrSets())
+ return AttributeSet();
+ return pImpl->begin()[Index];
}
-AttributeSet::iterator AttributeSet::begin(unsigned Slot) const {
- if (!pImpl)
- return ArrayRef<Attribute>().begin();
- return pImpl->begin(Slot);
+AttributeList::iterator AttributeList::begin() const {
+ return pImpl ? pImpl->begin() : nullptr;
}
-AttributeSet::iterator AttributeSet::end(unsigned Slot) const {
- if (!pImpl)
- return ArrayRef<Attribute>().end();
- return pImpl->end(Slot);
+AttributeList::iterator AttributeList::end() const {
+ return pImpl ? pImpl->end() : nullptr;
}
//===----------------------------------------------------------------------===//
-// AttributeSet Introspection Methods
+// AttributeList Introspection Methods
//===----------------------------------------------------------------------===//
-unsigned AttributeSet::getNumSlots() const {
- return pImpl ? pImpl->getNumSlots() : 0;
+unsigned AttributeList::getNumAttrSets() const {
+ return pImpl ? pImpl->NumAttrSets : 0;
}
-unsigned AttributeSet::getSlotIndex(unsigned Slot) const {
- assert(pImpl && Slot < pImpl->getNumSlots() &&
- "Slot # out of range!");
- return pImpl->getSlotIndex(Slot);
-}
-
-AttributeSet AttributeSet::getSlotAttributes(unsigned Slot) const {
- assert(pImpl && Slot < pImpl->getNumSlots() &&
- "Slot # out of range!");
- return pImpl->getSlotAttributes(Slot);
-}
-
-LLVM_DUMP_METHOD void AttributeSet::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void AttributeList::dump() const {
dbgs() << "PAL[\n";
- for (unsigned i = 0, e = getNumSlots(); i < e; ++i) {
- uint64_t Index = getSlotIndex(i);
- dbgs() << " { ";
- if (Index == ~0U)
- dbgs() << "~0U";
- else
- dbgs() << Index;
- dbgs() << " => " << getAsString(Index) << " }\n";
+ for (unsigned i = index_begin(), e = index_end(); i != e; ++i) {
+ if (getAttributes(i).hasAttributes())
+ dbgs() << " { " << i << " => " << getAsString(i) << " }\n";
}
dbgs() << "]\n";
}
+#endif
//===----------------------------------------------------------------------===//
// AttrBuilder Method Implementations
//===----------------------------------------------------------------------===//
-AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Index)
- : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
- DerefOrNullBytes(0), AllocSizeArgs(0) {
- AttributeSetImpl *pImpl = AS.pImpl;
- if (!pImpl) return;
-
- for (unsigned I = 0, E = pImpl->getNumSlots(); I != E; ++I) {
- if (pImpl->getSlotIndex(I) != Index) continue;
-
- for (AttributeSetImpl::iterator II = pImpl->begin(I),
- IE = pImpl->end(I); II != IE; ++II)
- addAttribute(*II);
+// FIXME: Remove this ctor, use AttributeSet.
+AttrBuilder::AttrBuilder(AttributeList AL, unsigned Index) {
+ AttributeSet AS = AL.getAttributes(Index);
+ for (const Attribute &A : AS)
+ addAttribute(A);
+}
- break;
- }
+AttrBuilder::AttrBuilder(AttributeSet AS) {
+ for (const Attribute &A : AS)
+ addAttribute(A);
}
void AttrBuilder::clear() {
@@ -1213,26 +1378,8 @@ AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
return *this;
}
-AttrBuilder &AttrBuilder::removeAttributes(AttributeSet A, uint64_t Index) {
- unsigned Slot = ~0U;
- for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I)
- if (A.getSlotIndex(I) == Index) {
- Slot = I;
- break;
- }
-
- assert(Slot != ~0U && "Couldn't find index in AttributeSet!");
-
- for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) {
- Attribute Attr = *I;
- if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
- removeAttribute(Attr.getKindAsEnum());
- } else {
- assert(Attr.isStringAttribute() && "Invalid attribute type!");
- removeAttribute(Attr.getKindAsString());
- }
- }
-
+AttrBuilder &AttrBuilder::removeAttributes(AttributeList A, uint64_t Index) {
+ remove(A.getAttributes(Index));
return *this;
}
@@ -1359,7 +1506,7 @@ bool AttrBuilder::overlaps(const AttrBuilder &B) const {
return true;
// Then check if any target dependent ones do.
- for (auto I : td_attrs())
+ for (const auto &I : td_attrs())
if (B.contains(I.first))
return true;
@@ -1374,24 +1521,16 @@ bool AttrBuilder::hasAttributes() const {
return !Attrs.none() || !TargetDepAttrs.empty();
}
-bool AttrBuilder::hasAttributes(AttributeSet A, uint64_t Index) const {
- unsigned Slot = ~0U;
- for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I)
- if (A.getSlotIndex(I) == Index) {
- Slot = I;
- break;
- }
+bool AttrBuilder::hasAttributes(AttributeList AL, uint64_t Index) const {
+ AttributeSet AS = AL.getAttributes(Index);
- assert(Slot != ~0U && "Couldn't find the index!");
-
- for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) {
- Attribute Attr = *I;
+ for (Attribute Attr : AS) {
if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
- if (Attrs[I->getKindAsEnum()])
+ if (contains(Attr.getKindAsEnum()))
return true;
} else {
assert(Attr.isStringAttribute() && "Invalid attribute kind!");
- return TargetDepAttrs.find(Attr.getKindAsString())!=TargetDepAttrs.end();
+ return contains(Attr.getKindAsString());
}
}
@@ -1481,20 +1620,17 @@ static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
// If upgrading the SSP attribute, clear out the old SSP Attributes first.
// Having multiple SSP attributes doesn't actually hurt, but it adds useless
// clutter to the IR.
- AttrBuilder B;
- B.addAttribute(Attribute::StackProtect)
- .addAttribute(Attribute::StackProtectStrong)
- .addAttribute(Attribute::StackProtectReq);
- AttributeSet OldSSPAttr = AttributeSet::get(Caller.getContext(),
- AttributeSet::FunctionIndex,
- B);
+ AttrBuilder OldSSPAttr;
+ OldSSPAttr.addAttribute(Attribute::StackProtect)
+ .addAttribute(Attribute::StackProtectStrong)
+ .addAttribute(Attribute::StackProtectReq);
if (Callee.hasFnAttribute(Attribute::StackProtectReq)) {
- Caller.removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+ Caller.removeAttributes(AttributeList::FunctionIndex, OldSSPAttr);
Caller.addFnAttr(Attribute::StackProtectReq);
} else if (Callee.hasFnAttribute(Attribute::StackProtectStrong) &&
!Caller.hasFnAttribute(Attribute::StackProtectReq)) {
- Caller.removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+ Caller.removeAttributes(AttributeList::FunctionIndex, OldSSPAttr);
Caller.addFnAttr(Attribute::StackProtectStrong);
} else if (Callee.hasFnAttribute(Attribute::StackProtect) &&
!Caller.hasFnAttribute(Attribute::StackProtectReq) &&
@@ -1502,6 +1638,39 @@ static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
Caller.addFnAttr(Attribute::StackProtect);
}
+/// \brief If the inlined function required stack probes, then ensure that
+/// the calling function has those too.
+static void adjustCallerStackProbes(Function &Caller, const Function &Callee) {
+ if (!Caller.hasFnAttribute("probe-stack") &&
+ Callee.hasFnAttribute("probe-stack")) {
+ Caller.addFnAttr(Callee.getFnAttribute("probe-stack"));
+ }
+}
+
+/// \brief If the inlined function defines the size of the guard region
+/// on the stack, then ensure that the calling function defines a guard region
+/// that is no larger.
+static void
+adjustCallerStackProbeSize(Function &Caller, const Function &Callee) {
+ if (Callee.hasFnAttribute("stack-probe-size")) {
+ uint64_t CalleeStackProbeSize;
+ Callee.getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, CalleeStackProbeSize);
+ if (Caller.hasFnAttribute("stack-probe-size")) {
+ uint64_t CallerStackProbeSize;
+ Caller.getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, CallerStackProbeSize);
+ if (CallerStackProbeSize > CalleeStackProbeSize) {
+ Caller.addFnAttr(Callee.getFnAttribute("stack-probe-size"));
+ }
+ } else {
+ Caller.addFnAttr(Callee.getFnAttribute("stack-probe-size"));
+ }
+ }
+}
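
The probe sizes live in string attributes, so each value is parsed with StringRef::getAsInteger before the comparison (the helper above quietly assumes the value parses). A standalone sketch of that step, with a made-up value:

  uint64_t Size = 0;
  bool Failed = StringRef("4096").getAsInteger(/*Radix=*/0, Size); // radix 0 auto-detects
  assert(!Failed && Size == 4096);
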
+
#define GET_ATTR_COMPAT_FUNC
#include "AttributesCompatFunc.inc"
@@ -1510,7 +1679,6 @@ bool AttributeFuncs::areInlineCompatible(const Function &Caller,
return hasCompatibleFnAttrs(Caller, Callee);
}
-
void AttributeFuncs::mergeAttributesForInlining(Function &Caller,
const Function &Callee) {
mergeFnAttrs(Caller, Callee);
diff --git a/contrib/llvm/lib/IR/AutoUpgrade.cpp b/contrib/llvm/lib/IR/AutoUpgrade.cpp
index e3a7bae..a501799 100644
--- a/contrib/llvm/lib/IR/AutoUpgrade.cpp
+++ b/contrib/llvm/lib/IR/AutoUpgrade.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
@@ -33,10 +34,10 @@ using namespace llvm;
static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
-// Upgrade the declarations of the SSE4.1 functions whose arguments have
+// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
-static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID,
- Function *&NewFn) {
+static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
+ Function *&NewFn) {
// Check whether this is an old version of the function, which received
// v4f32 arguments.
Type *Arg0Type = F->getFunctionType()->getParamType(0);
@@ -65,6 +66,270 @@ static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
return true;
}
+static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
+ // All of the intrinsic matches below should be marked with which LLVM
+ // version started autoupgrading them. At some point in the future we would
+ // like to use this information to remove upgrade code for some older
+ // intrinsics. It is currently undecided how we will determine that future
+ // point.
+ if (Name.startswith("sse2.pcmpeq.") || // Added in 3.1
+ Name.startswith("sse2.pcmpgt.") || // Added in 3.1
+ Name.startswith("avx2.pcmpeq.") || // Added in 3.1
+ Name.startswith("avx2.pcmpgt.") || // Added in 3.1
+ Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
+ Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
+ Name == "sse.add.ss" || // Added in 4.0
+ Name == "sse2.add.sd" || // Added in 4.0
+ Name == "sse.sub.ss" || // Added in 4.0
+ Name == "sse2.sub.sd" || // Added in 4.0
+ Name == "sse.mul.ss" || // Added in 4.0
+ Name == "sse2.mul.sd" || // Added in 4.0
+ Name == "sse.div.ss" || // Added in 4.0
+ Name == "sse2.div.sd" || // Added in 4.0
+ Name == "sse41.pmaxsb" || // Added in 3.9
+ Name == "sse2.pmaxs.w" || // Added in 3.9
+ Name == "sse41.pmaxsd" || // Added in 3.9
+ Name == "sse2.pmaxu.b" || // Added in 3.9
+ Name == "sse41.pmaxuw" || // Added in 3.9
+ Name == "sse41.pmaxud" || // Added in 3.9
+ Name == "sse41.pminsb" || // Added in 3.9
+ Name == "sse2.pmins.w" || // Added in 3.9
+ Name == "sse41.pminsd" || // Added in 3.9
+ Name == "sse2.pminu.b" || // Added in 3.9
+ Name == "sse41.pminuw" || // Added in 3.9
+ Name == "sse41.pminud" || // Added in 3.9
+ Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
+ Name.startswith("avx2.pmax") || // Added in 3.9
+ Name.startswith("avx2.pmin") || // Added in 3.9
+ Name.startswith("avx512.mask.pmax") || // Added in 4.0
+ Name.startswith("avx512.mask.pmin") || // Added in 4.0
+ Name.startswith("avx2.vbroadcast") || // Added in 3.8
+ Name.startswith("avx2.pbroadcast") || // Added in 3.8
+ Name.startswith("avx.vpermil.") || // Added in 3.1
+ Name.startswith("sse2.pshuf") || // Added in 3.9
+ Name.startswith("avx512.pbroadcast") || // Added in 3.9
+ Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
+ Name.startswith("avx512.mask.movddup") || // Added in 3.9
+ Name.startswith("avx512.mask.movshdup") || // Added in 3.9
+ Name.startswith("avx512.mask.movsldup") || // Added in 3.9
+ Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
+ Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
+ Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
+ Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
+ Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
+ Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
+ Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
+ Name.startswith("avx512.mask.punpckl") || // Added in 3.9
+ Name.startswith("avx512.mask.punpckh") || // Added in 3.9
+ Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
+ Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
+ Name.startswith("avx512.mask.pand.") || // Added in 3.9
+ Name.startswith("avx512.mask.pandn.") || // Added in 3.9
+ Name.startswith("avx512.mask.por.") || // Added in 3.9
+ Name.startswith("avx512.mask.pxor.") || // Added in 3.9
+ Name.startswith("avx512.mask.and.") || // Added in 3.9
+ Name.startswith("avx512.mask.andn.") || // Added in 3.9
+ Name.startswith("avx512.mask.or.") || // Added in 3.9
+ Name.startswith("avx512.mask.xor.") || // Added in 3.9
+ Name.startswith("avx512.mask.padd.") || // Added in 4.0
+ Name.startswith("avx512.mask.psub.") || // Added in 4.0
+ Name.startswith("avx512.mask.pmull.") || // Added in 4.0
+ Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
+ Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
+ Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
+ Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
+ Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
+ Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
+ Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
+ Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
+ Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
+ Name == "avx512.mask.add.pd.128" || // Added in 4.0
+ Name == "avx512.mask.add.pd.256" || // Added in 4.0
+ Name == "avx512.mask.add.ps.128" || // Added in 4.0
+ Name == "avx512.mask.add.ps.256" || // Added in 4.0
+ Name == "avx512.mask.div.pd.128" || // Added in 4.0
+ Name == "avx512.mask.div.pd.256" || // Added in 4.0
+ Name == "avx512.mask.div.ps.128" || // Added in 4.0
+ Name == "avx512.mask.div.ps.256" || // Added in 4.0
+ Name == "avx512.mask.mul.pd.128" || // Added in 4.0
+ Name == "avx512.mask.mul.pd.256" || // Added in 4.0
+ Name == "avx512.mask.mul.ps.128" || // Added in 4.0
+ Name == "avx512.mask.mul.ps.256" || // Added in 4.0
+ Name == "avx512.mask.sub.pd.128" || // Added in 4.0
+ Name == "avx512.mask.sub.pd.256" || // Added in 4.0
+ Name == "avx512.mask.sub.ps.128" || // Added in 4.0
+ Name == "avx512.mask.sub.ps.256" || // Added in 4.0
+ Name == "avx512.mask.max.pd.128" || // Added in 5.0
+ Name == "avx512.mask.max.pd.256" || // Added in 5.0
+ Name == "avx512.mask.max.ps.128" || // Added in 5.0
+ Name == "avx512.mask.max.ps.256" || // Added in 5.0
+ Name == "avx512.mask.min.pd.128" || // Added in 5.0
+ Name == "avx512.mask.min.pd.256" || // Added in 5.0
+ Name == "avx512.mask.min.ps.128" || // Added in 5.0
+ Name == "avx512.mask.min.ps.256" || // Added in 5.0
+ Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
+ Name.startswith("avx512.mask.psll.d") || // Added in 4.0
+ Name.startswith("avx512.mask.psll.q") || // Added in 4.0
+ Name.startswith("avx512.mask.psll.w") || // Added in 4.0
+ Name.startswith("avx512.mask.psra.d") || // Added in 4.0
+ Name.startswith("avx512.mask.psra.q") || // Added in 4.0
+ Name.startswith("avx512.mask.psra.w") || // Added in 4.0
+ Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
+ Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
+ Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
+ Name.startswith("avx512.mask.pslli") || // Added in 4.0
+ Name.startswith("avx512.mask.psrai") || // Added in 4.0
+ Name.startswith("avx512.mask.psrli") || // Added in 4.0
+ Name.startswith("avx512.mask.psllv") || // Added in 4.0
+ Name.startswith("avx512.mask.psrav") || // Added in 4.0
+ Name.startswith("avx512.mask.psrlv") || // Added in 4.0
+ Name.startswith("sse41.pmovsx") || // Added in 3.8
+ Name.startswith("sse41.pmovzx") || // Added in 3.9
+ Name.startswith("avx2.pmovsx") || // Added in 3.9
+ Name.startswith("avx2.pmovzx") || // Added in 3.9
+ Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
+ Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
+ Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
+ Name == "sse2.cvtdq2pd" || // Added in 3.9
+ Name == "sse2.cvtps2pd" || // Added in 3.9
+ Name == "avx.cvtdq2.pd.256" || // Added in 3.9
+ Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
+ Name.startswith("avx.vinsertf128.") || // Added in 3.7
+ Name == "avx2.vinserti128" || // Added in 3.7
+ Name.startswith("avx512.mask.insert") || // Added in 4.0
+ Name.startswith("avx.vextractf128.") || // Added in 3.7
+ Name == "avx2.vextracti128" || // Added in 3.7
+ Name.startswith("avx512.mask.vextract") || // Added in 4.0
+ Name.startswith("sse4a.movnt.") || // Added in 3.9
+ Name.startswith("avx.movnt.") || // Added in 3.2
+ Name.startswith("avx512.storent.") || // Added in 3.9
+ Name == "sse41.movntdqa" || // Added in 5.0
+ Name == "avx2.movntdqa" || // Added in 5.0
+ Name == "avx512.movntdqa" || // Added in 5.0
+ Name == "sse2.storel.dq" || // Added in 3.9
+ Name.startswith("sse.storeu.") || // Added in 3.9
+ Name.startswith("sse2.storeu.") || // Added in 3.9
+ Name.startswith("avx.storeu.") || // Added in 3.9
+ Name.startswith("avx512.mask.storeu.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.p") || // Added in 3.9
+ Name.startswith("avx512.mask.store.b.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.w.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.d.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.q.") || // Added in 3.9
+ Name.startswith("avx512.mask.loadu.") || // Added in 3.9
+ Name.startswith("avx512.mask.load.") || // Added in 3.9
+ Name == "sse42.crc32.64.8" || // Added in 3.4
+ Name.startswith("avx.vbroadcast.s") || // Added in 3.5
+ Name.startswith("avx512.mask.palignr.") || // Added in 3.9
+ Name.startswith("avx512.mask.valign.") || // Added in 4.0
+ Name.startswith("sse2.psll.dq") || // Added in 3.7
+ Name.startswith("sse2.psrl.dq") || // Added in 3.7
+ Name.startswith("avx2.psll.dq") || // Added in 3.7
+ Name.startswith("avx2.psrl.dq") || // Added in 3.7
+ Name.startswith("avx512.psll.dq") || // Added in 3.9
+ Name.startswith("avx512.psrl.dq") || // Added in 3.9
+ Name == "sse41.pblendw" || // Added in 3.7
+ Name.startswith("sse41.blendp") || // Added in 3.7
+ Name.startswith("avx.blend.p") || // Added in 3.7
+ Name == "avx2.pblendw" || // Added in 3.7
+ Name.startswith("avx2.pblendd.") || // Added in 3.7
+ Name.startswith("avx.vbroadcastf128") || // Added in 4.0
+ Name == "avx2.vbroadcasti128" || // Added in 3.7
+ Name == "xop.vpcmov" || // Added in 3.8
+ Name == "xop.vpcmov.256" || // Added in 5.0
+ Name.startswith("avx512.mask.move.s") || // Added in 4.0
+ Name.startswith("avx512.cvtmask2") || // Added in 5.0
+ (Name.startswith("xop.vpcom") && // Added in 3.2
+ F->arg_size() == 2))
+ return true;
+
+ return false;
+}
+
+static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
+ Function *&NewFn) {
+ // Only handle intrinsics that start with "x86.".
+ if (!Name.startswith("x86."))
+ return false;
+ // Remove "x86." prefix.
+ Name = Name.substr(4);
+
+ if (ShouldUpgradeX86Intrinsic(F, Name)) {
+ NewFn = nullptr;
+ return true;
+ }
+
+ // SSE4.1 ptest functions may have an old signature.
+ if (Name.startswith("sse41.ptest")) { // Added in 3.2
+ if (Name.substr(11) == "c")
+ return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
+ if (Name.substr(11) == "z")
+ return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
+ if (Name.substr(11) == "nzc")
+ return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
+ }
+ // Several blend and other instructions with masks used the wrong number of
+ // bits.
+ if (Name == "sse41.insertps") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
+ NewFn);
+ if (Name == "sse41.dppd") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
+ NewFn);
+ if (Name == "sse41.dpps") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
+ NewFn);
+ if (Name == "sse41.mpsadbw") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
+ NewFn);
+ if (Name == "avx.dp.ps.256") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
+ NewFn);
+ if (Name == "avx2.mpsadbw") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
+ NewFn);
+
+ // frcz.ss/sd may need to have an argument dropped. Added in 3.2
+ if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_ss);
+ return true;
+ }
+ if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_sd);
+ return true;
+ }
+ // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
+ if (Name.startswith("xop.vpermil2")) { // Added in 3.9
+ auto Idx = F->getFunctionType()->getParamType(2);
+ if (Idx->isFPOrFPVectorTy()) {
+ rename(F);
+ unsigned IdxSize = Idx->getPrimitiveSizeInBits();
+ unsigned EltSize = Idx->getScalarSizeInBits();
+ Intrinsic::ID Permil2ID;
+ if (EltSize == 64 && IdxSize == 128)
+ Permil2ID = Intrinsic::x86_xop_vpermil2pd;
+ else if (EltSize == 32 && IdxSize == 128)
+ Permil2ID = Intrinsic::x86_xop_vpermil2ps;
+ else if (EltSize == 64 && IdxSize == 256)
+ Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
+ else
+ Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
+ return true;
+ }
+ }
+
+ return false;
+}
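
A hedged sketch of how the file-local dispatcher above gets exercised for one legacy name (M is an assumed Module):

  Function *OldF = M.getFunction("llvm.x86.sse41.ptestc");
  Function *NewFn = nullptr;
  if (OldF && UpgradeX86IntrinsicFunction(OldF, "x86.sse41.ptestc", NewFn)) {
    // If the declaration still has the old v4f32 signature, NewFn points at
    // the retyped ptest intrinsic. For the names matched by
    // ShouldUpgradeX86Intrinsic, NewFn stays null and the call sites are
    // rewritten later in UpgradeIntrinsicCall.
  }
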
+
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
assert(F && "Illegal to upgrade a non-existent Function.");
@@ -155,26 +420,31 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
break;
}
- case 'i': {
- if (Name.startswith("invariant.start")) {
+ case 'i':
+ case 'l': {
+ bool IsLifetimeStart = Name.startswith("lifetime.start");
+ if (IsLifetimeStart || Name.startswith("invariant.start")) {
+ Intrinsic::ID ID = IsLifetimeStart ?
+ Intrinsic::lifetime_start : Intrinsic::invariant_start;
auto Args = F->getFunctionType()->params();
Type* ObjectPtr[1] = {Args[1]};
- if (F->getName() !=
- Intrinsic::getName(Intrinsic::invariant_start, ObjectPtr)) {
+ if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
rename(F);
- NewFn = Intrinsic::getDeclaration(
- F->getParent(), Intrinsic::invariant_start, ObjectPtr);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
return true;
}
}
- if (Name.startswith("invariant.end")) {
+
+ bool IsLifetimeEnd = Name.startswith("lifetime.end");
+ if (IsLifetimeEnd || Name.startswith("invariant.end")) {
+ Intrinsic::ID ID = IsLifetimeEnd ?
+ Intrinsic::lifetime_end : Intrinsic::invariant_end;
+
auto Args = F->getFunctionType()->params();
- Type* ObjectPtr[1] = {Args[2]};
- if (F->getName() !=
- Intrinsic::getName(Intrinsic::invariant_end, ObjectPtr)) {
+ Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
+ if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::invariant_end, ObjectPtr);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
return true;
}
}
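
For the lifetime case this amounts to re-declaring the intrinsic with an explicit pointer overload. A hedged sketch of the name it resolves to (M and Ctx assumed; the mangled name follows the usual scheme and is shown for illustration):

  Type *ObjPtr[] = {Type::getInt8PtrTy(Ctx)};
  Function *NewDecl =
      Intrinsic::getDeclaration(&M, Intrinsic::lifetime_start, ObjPtr);
  // NewDecl->getName() == "llvm.lifetime.start.p0i8"
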
@@ -202,18 +472,72 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
return true;
}
}
+ // Rename gather/scatter intrinsics that have no address space overloading
+ // to the new overload, which includes an address space.
+ if (Name.startswith("masked.gather.")) {
+ Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
+ if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_gather, Tys);
+ return true;
+ }
+ }
+ if (Name.startswith("masked.scatter.")) {
+ auto Args = F->getFunctionType()->params();
+ Type *Tys[] = {Args[0], Args[1]};
+ if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_scatter, Tys);
+ return true;
+ }
+ }
break;
}
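
The gather/scatter renames above follow the same pattern: the upgraded declaration carries the pointer vector, address space included, in its mangled suffix. A hedged sketch of the gather case (M and Ctx assumed; mangled names shown for illustration):

  Type *DataTy = VectorType::get(Type::getInt32Ty(Ctx), 4);
  Type *PtrsTy = VectorType::get(Type::getInt32PtrTy(Ctx), 4);
  Function *G = Intrinsic::getDeclaration(&M, Intrinsic::masked_gather,
                                          {DataTy, PtrsTy});
  // G->getName() == "llvm.masked.gather.v4i32.v4p0i32", replacing the old
  // single-type "llvm.masked.gather.v4i32".
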
+ case 'n': {
+ if (Name.startswith("nvvm.")) {
+ Name = Name.substr(5);
+
+ // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
+ Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
+ .Cases("brev32", "brev64", Intrinsic::bitreverse)
+ .Case("clz.i", Intrinsic::ctlz)
+ .Case("popc.i", Intrinsic::ctpop)
+ .Default(Intrinsic::not_intrinsic);
+ if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
+ {F->getReturnType()});
+ return true;
+ }
+ // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
+ // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
+ //
+ // TODO: We could add lohi.i2d.
+ bool Expand = StringSwitch<bool>(Name)
+ .Cases("abs.i", "abs.ll", true)
+ .Cases("clz.ll", "popc.ll", "h2f", true)
+ .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
+ .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
+ .Default(false);
+ if (Expand) {
+ NewFn = nullptr;
+ return true;
+ }
+ }
+ break;
+ }
case 'o':
// We only need to change the name to match the mangling including the
// address space.
- if (F->arg_size() == 2 && Name.startswith("objectsize.")) {
+ if (Name.startswith("objectsize.")) {
Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
- if (F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
+ if (F->arg_size() == 2 ||
+ F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::objectsize, Tys);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
+ Tys);
return true;
}
}
@@ -226,236 +550,15 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
break;
- case 'x': {
- bool IsX86 = Name.startswith("x86.");
- if (IsX86)
- Name = Name.substr(4);
-
- // All of the intrinsics matches below should be marked with which llvm
- // version started autoupgrading them. At some point in the future we would
- // like to use this information to remove upgrade code for some older
- // intrinsics. It is currently undecided how we will determine that future
- // point.
- if (IsX86 &&
- (Name.startswith("sse2.pcmpeq.") || // Added in 3.1
- Name.startswith("sse2.pcmpgt.") || // Added in 3.1
- Name.startswith("avx2.pcmpeq.") || // Added in 3.1
- Name.startswith("avx2.pcmpgt.") || // Added in 3.1
- Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
- Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
- Name == "sse.add.ss" || // Added in 4.0
- Name == "sse2.add.sd" || // Added in 4.0
- Name == "sse.sub.ss" || // Added in 4.0
- Name == "sse2.sub.sd" || // Added in 4.0
- Name == "sse.mul.ss" || // Added in 4.0
- Name == "sse2.mul.sd" || // Added in 4.0
- Name == "sse.div.ss" || // Added in 4.0
- Name == "sse2.div.sd" || // Added in 4.0
- Name == "sse41.pmaxsb" || // Added in 3.9
- Name == "sse2.pmaxs.w" || // Added in 3.9
- Name == "sse41.pmaxsd" || // Added in 3.9
- Name == "sse2.pmaxu.b" || // Added in 3.9
- Name == "sse41.pmaxuw" || // Added in 3.9
- Name == "sse41.pmaxud" || // Added in 3.9
- Name == "sse41.pminsb" || // Added in 3.9
- Name == "sse2.pmins.w" || // Added in 3.9
- Name == "sse41.pminsd" || // Added in 3.9
- Name == "sse2.pminu.b" || // Added in 3.9
- Name == "sse41.pminuw" || // Added in 3.9
- Name == "sse41.pminud" || // Added in 3.9
- Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
- Name.startswith("avx2.pmax") || // Added in 3.9
- Name.startswith("avx2.pmin") || // Added in 3.9
- Name.startswith("avx512.mask.pmax") || // Added in 4.0
- Name.startswith("avx512.mask.pmin") || // Added in 4.0
- Name.startswith("avx2.vbroadcast") || // Added in 3.8
- Name.startswith("avx2.pbroadcast") || // Added in 3.8
- Name.startswith("avx.vpermil.") || // Added in 3.1
- Name.startswith("sse2.pshuf") || // Added in 3.9
- Name.startswith("avx512.pbroadcast") || // Added in 3.9
- Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
- Name.startswith("avx512.mask.movddup") || // Added in 3.9
- Name.startswith("avx512.mask.movshdup") || // Added in 3.9
- Name.startswith("avx512.mask.movsldup") || // Added in 3.9
- Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
- Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
- Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
- Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
- Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
- Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
- Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
- Name.startswith("avx512.mask.punpckl") || // Added in 3.9
- Name.startswith("avx512.mask.punpckh") || // Added in 3.9
- Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
- Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
- Name.startswith("avx512.mask.pand.") || // Added in 3.9
- Name.startswith("avx512.mask.pandn.") || // Added in 3.9
- Name.startswith("avx512.mask.por.") || // Added in 3.9
- Name.startswith("avx512.mask.pxor.") || // Added in 3.9
- Name.startswith("avx512.mask.and.") || // Added in 3.9
- Name.startswith("avx512.mask.andn.") || // Added in 3.9
- Name.startswith("avx512.mask.or.") || // Added in 3.9
- Name.startswith("avx512.mask.xor.") || // Added in 3.9
- Name.startswith("avx512.mask.padd.") || // Added in 4.0
- Name.startswith("avx512.mask.psub.") || // Added in 4.0
- Name.startswith("avx512.mask.pmull.") || // Added in 4.0
- Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
- Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
- Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
- Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
- Name == "avx512.mask.add.pd.128" || // Added in 4.0
- Name == "avx512.mask.add.pd.256" || // Added in 4.0
- Name == "avx512.mask.add.ps.128" || // Added in 4.0
- Name == "avx512.mask.add.ps.256" || // Added in 4.0
- Name == "avx512.mask.div.pd.128" || // Added in 4.0
- Name == "avx512.mask.div.pd.256" || // Added in 4.0
- Name == "avx512.mask.div.ps.128" || // Added in 4.0
- Name == "avx512.mask.div.ps.256" || // Added in 4.0
- Name == "avx512.mask.mul.pd.128" || // Added in 4.0
- Name == "avx512.mask.mul.pd.256" || // Added in 4.0
- Name == "avx512.mask.mul.ps.128" || // Added in 4.0
- Name == "avx512.mask.mul.ps.256" || // Added in 4.0
- Name == "avx512.mask.sub.pd.128" || // Added in 4.0
- Name == "avx512.mask.sub.pd.256" || // Added in 4.0
- Name == "avx512.mask.sub.ps.128" || // Added in 4.0
- Name == "avx512.mask.sub.ps.256" || // Added in 4.0
- Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
- Name.startswith("avx512.mask.psll.d") || // Added in 4.0
- Name.startswith("avx512.mask.psll.q") || // Added in 4.0
- Name.startswith("avx512.mask.psll.w") || // Added in 4.0
- Name.startswith("avx512.mask.psra.d") || // Added in 4.0
- Name.startswith("avx512.mask.psra.q") || // Added in 4.0
- Name.startswith("avx512.mask.psra.w") || // Added in 4.0
- Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
- Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
- Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
- Name.startswith("avx512.mask.pslli") || // Added in 4.0
- Name.startswith("avx512.mask.psrai") || // Added in 4.0
- Name.startswith("avx512.mask.psrli") || // Added in 4.0
- Name.startswith("avx512.mask.psllv") || // Added in 4.0
- Name.startswith("avx512.mask.psrav") || // Added in 4.0
- Name.startswith("avx512.mask.psrlv") || // Added in 4.0
- Name.startswith("sse41.pmovsx") || // Added in 3.8
- Name.startswith("sse41.pmovzx") || // Added in 3.9
- Name.startswith("avx2.pmovsx") || // Added in 3.9
- Name.startswith("avx2.pmovzx") || // Added in 3.9
- Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
- Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
- Name == "sse2.cvtdq2pd" || // Added in 3.9
- Name == "sse2.cvtps2pd" || // Added in 3.9
- Name == "avx.cvtdq2.pd.256" || // Added in 3.9
- Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
- Name.startswith("avx.vinsertf128.") || // Added in 3.7
- Name == "avx2.vinserti128" || // Added in 3.7
- Name.startswith("avx512.mask.insert") || // Added in 4.0
- Name.startswith("avx.vextractf128.") || // Added in 3.7
- Name == "avx2.vextracti128" || // Added in 3.7
- Name.startswith("avx512.mask.vextract") || // Added in 4.0
- Name.startswith("sse4a.movnt.") || // Added in 3.9
- Name.startswith("avx.movnt.") || // Added in 3.2
- Name.startswith("avx512.storent.") || // Added in 3.9
- Name == "sse2.storel.dq" || // Added in 3.9
- Name.startswith("sse.storeu.") || // Added in 3.9
- Name.startswith("sse2.storeu.") || // Added in 3.9
- Name.startswith("avx.storeu.") || // Added in 3.9
- Name.startswith("avx512.mask.storeu.") || // Added in 3.9
- Name.startswith("avx512.mask.store.p") || // Added in 3.9
- Name.startswith("avx512.mask.store.b.") || // Added in 3.9
- Name.startswith("avx512.mask.store.w.") || // Added in 3.9
- Name.startswith("avx512.mask.store.d.") || // Added in 3.9
- Name.startswith("avx512.mask.store.q.") || // Added in 3.9
- Name.startswith("avx512.mask.loadu.") || // Added in 3.9
- Name.startswith("avx512.mask.load.") || // Added in 3.9
- Name == "sse42.crc32.64.8" || // Added in 3.4
- Name.startswith("avx.vbroadcast.s") || // Added in 3.5
- Name.startswith("avx512.mask.palignr.") || // Added in 3.9
- Name.startswith("avx512.mask.valign.") || // Added in 4.0
- Name.startswith("sse2.psll.dq") || // Added in 3.7
- Name.startswith("sse2.psrl.dq") || // Added in 3.7
- Name.startswith("avx2.psll.dq") || // Added in 3.7
- Name.startswith("avx2.psrl.dq") || // Added in 3.7
- Name.startswith("avx512.psll.dq") || // Added in 3.9
- Name.startswith("avx512.psrl.dq") || // Added in 3.9
- Name == "sse41.pblendw" || // Added in 3.7
- Name.startswith("sse41.blendp") || // Added in 3.7
- Name.startswith("avx.blend.p") || // Added in 3.7
- Name == "avx2.pblendw" || // Added in 3.7
- Name.startswith("avx2.pblendd.") || // Added in 3.7
- Name.startswith("avx.vbroadcastf128") || // Added in 4.0
- Name == "avx2.vbroadcasti128" || // Added in 3.7
- Name == "xop.vpcmov" || // Added in 3.8
- Name.startswith("avx512.mask.move.s") || // Added in 4.0
- (Name.startswith("xop.vpcom") && // Added in 3.2
- F->arg_size() == 2))) {
- NewFn = nullptr;
+ case 'x':
+ if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
return true;
- }
- // SSE4.1 ptest functions may have an old signature.
- if (IsX86 && Name.startswith("sse41.ptest")) { // Added in 3.2
- if (Name.substr(11) == "c")
- return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn);
- if (Name.substr(11) == "z")
- return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn);
- if (Name.substr(11) == "nzc")
- return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
- }
- // Several blend and other instructions with masks used the wrong number of
- // bits.
- if (IsX86 && Name == "sse41.insertps") // Added in 3.6
- return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
- NewFn);
- if (IsX86 && Name == "sse41.dppd") // Added in 3.6
- return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
- NewFn);
- if (IsX86 && Name == "sse41.dpps") // Added in 3.6
- return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
- NewFn);
- if (IsX86 && Name == "sse41.mpsadbw") // Added in 3.6
- return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
- NewFn);
- if (IsX86 && Name == "avx.dp.ps.256") // Added in 3.6
- return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
- NewFn);
- if (IsX86 && Name == "avx2.mpsadbw") // Added in 3.6
- return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
- NewFn);
-
- // frcz.ss/sd may need to have an argument dropped. Added in 3.2
- if (IsX86 && Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
- rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::x86_xop_vfrcz_ss);
- return true;
- }
- if (IsX86 && Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
- rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::x86_xop_vfrcz_sd);
- return true;
- }
- // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
- if (IsX86 && Name.startswith("xop.vpermil2")) { // Added in 3.9
- auto Params = F->getFunctionType()->params();
- auto Idx = Params[2];
- if (Idx->getScalarType()->isFloatingPointTy()) {
- rename(F);
- unsigned IdxSize = Idx->getPrimitiveSizeInBits();
- unsigned EltSize = Idx->getScalarSizeInBits();
- Intrinsic::ID Permil2ID;
- if (EltSize == 64 && IdxSize == 128)
- Permil2ID = Intrinsic::x86_xop_vpermil2pd;
- else if (EltSize == 32 && IdxSize == 128)
- Permil2ID = Intrinsic::x86_xop_vpermil2ps;
- else if (EltSize == 64 && IdxSize == 256)
- Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
- else
- Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
- NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
- return true;
- }
- }
- break;
}
+ // Remangle our intrinsic since we upgrade the mangling
+ auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
+ if (Result != None) {
+ NewFn = Result.getValue();
+ return true;
}
// This may not belong here. This function is effectively being overloaded
@@ -685,12 +788,30 @@ static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
}
static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
- ICmpInst::Predicate Pred) {
+ unsigned CC, bool Signed) {
Value *Op0 = CI.getArgOperand(0);
unsigned NumElts = Op0->getType()->getVectorNumElements();
- Value *Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
- Value *Mask = CI.getArgOperand(2);
+ Value *Cmp;
+ if (CC == 3) {
+ Cmp = Constant::getNullValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
+ } else if (CC == 7) {
+ Cmp = Constant::getAllOnesValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
+ } else {
+ ICmpInst::Predicate Pred;
+ switch (CC) {
+ default: llvm_unreachable("Unknown condition code");
+ case 0: Pred = ICmpInst::ICMP_EQ; break;
+ case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
+ case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
+ case 4: Pred = ICmpInst::ICMP_NE; break;
+ case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
+ case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
+ }
+ Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
+ }
+
+ Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);
const auto *C = dyn_cast<Constant>(Mask);
if (!C || !C->isAllOnesValue())
Cmp = Builder.CreateAnd(Cmp, getX86MaskVec(Builder, Mask, NumElts));
@@ -733,6 +854,15 @@ static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}
+
+static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
+ Value* Op = CI.getArgOperand(0);
+ Type* ReturnOp = CI.getType();
+ unsigned NumElts = CI.getType()->getVectorNumElements();
+ Value *Mask = getX86MaskVec(Builder, Op, NumElts);
+ return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
+}
+
/// Upgrade a call to an old intrinsic. All argument and return casting must be
/// provided to seamlessly integrate with existing context.
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
@@ -753,6 +883,9 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
bool IsX86 = Name.startswith("x86.");
if (IsX86)
Name = Name.substr(4);
+ bool IsNVVM = Name.startswith("nvvm.");
+ if (IsNVVM)
+ Name = Name.substr(5);
if (IsX86 && Name.startswith("sse4a.movnt.")) {
Module *M = F->getParent();
@@ -838,18 +971,11 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
return;
}
- if (IsX86 && (Name.startswith("avx512.mask.storeu."))) {
+ if (IsX86 && (Name.startswith("avx512.mask.store"))) {
+ // "avx512.mask.storeu." or "avx512.mask.store."
+ bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), /*Aligned*/false);
-
- // Remove intrinsic.
- CI->eraseFromParent();
- return;
- }
-
- if (IsX86 && (Name.startswith("avx512.mask.store."))) {
- UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), /*Aligned*/true);
+ CI->getArgOperand(2), Aligned);
// Remove intrinsic.
CI->eraseFromParent();
@@ -858,15 +984,12 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *Rep;
// Upgrade packed integer vector compare intrinsics to compare instructions.
- if (IsX86 && (Name.startswith("sse2.pcmpeq.") ||
- Name.startswith("avx2.pcmpeq."))) {
- Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1),
- "pcmpeq");
- Rep = Builder.CreateSExt(Rep, CI->getType(), "");
- } else if (IsX86 && (Name.startswith("sse2.pcmpgt.") ||
- Name.startswith("avx2.pcmpgt."))) {
- Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1),
- "pcmpgt");
+ if (IsX86 && (Name.startswith("sse2.pcmp") ||
+ Name.startswith("avx2.pcmp"))) {
+    // "sse2.pcmpeq.", "sse2.pcmpgt.", "avx2.pcmpeq." or "avx2.pcmpgt."
+ bool CmpEq = Name[9] == 'e';
+ Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
+ CI->getArgOperand(0), CI->getArgOperand(1));
Rep = Builder.CreateSExt(Rep, CI->getType(), "");
} else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) {
Type *I32Ty = Type::getInt32Ty(C);
@@ -904,10 +1027,16 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
Builder.CreateFDiv(Elt0, Elt1),
ConstantInt::get(I32Ty, 0));
- } else if (IsX86 && Name.startswith("avx512.mask.pcmpeq.")) {
- Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_EQ);
- } else if (IsX86 && Name.startswith("avx512.mask.pcmpgt.")) {
- Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_SGT);
+ } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
+ // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
+ bool CmpEq = Name[16] == 'e';
+ Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
+ } else if (IsX86 && Name.startswith("avx512.mask.cmp")) {
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
+ } else if (IsX86 && Name.startswith("avx512.mask.ucmp")) {
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
} else if (IsX86 && (Name == "sse41.pmaxsb" ||
Name == "sse2.pmaxs.w" ||
Name == "sse41.pmaxsd" ||
@@ -1019,15 +1148,11 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep =
Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
Builder.getInt8(Imm)});
- } else if (IsX86 && Name == "xop.vpcmov") {
- Value *Arg0 = CI->getArgOperand(0);
- Value *Arg1 = CI->getArgOperand(1);
+ } else if (IsX86 && Name.startswith("xop.vpcmov")) {
Value *Sel = CI->getArgOperand(2);
- unsigned NumElts = CI->getType()->getVectorNumElements();
- Constant *MinusOne = ConstantVector::getSplat(NumElts, Builder.getInt64(-1));
- Value *NotSel = Builder.CreateXor(Sel, MinusOne);
- Value *Sel0 = Builder.CreateAnd(Arg0, Sel);
- Value *Sel1 = Builder.CreateAnd(Arg1, NotSel);
+ Value *NotSel = Builder.CreateNot(Sel);
+ Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
+ Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
Rep = Builder.CreateOr(Sel0, Sel1);
} else if (IsX86 && Name == "sse42.crc32.64.8") {
Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
@@ -1461,6 +1586,43 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::ctlz,
+ CI->getType()),
+ { CI->getArgOperand(0), Builder.getInt1(false) });
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
+ Name.startswith("avx512.mask.min.p"))) {
+ bool IsMin = Name[13] == 'i';
+ VectorType *VecTy = cast<VectorType>(CI->getType());
+ unsigned VecWidth = VecTy->getPrimitiveSizeInBits();
+ unsigned EltWidth = VecTy->getScalarSizeInBits();
+ Intrinsic::ID IID;
+ if (!IsMin && VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_sse_max_ps;
+ else if (!IsMin && VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_sse2_max_pd;
+ else if (!IsMin && VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx_max_ps_256;
+ else if (!IsMin && VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx_max_pd_256;
+ else if (IsMin && VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_sse_min_ps;
+ else if (IsMin && VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_sse2_min_pd;
+ else if (IsMin && VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx_min_ps_256;
+ else if (IsMin && VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx_min_pd_256;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getArgOperand(0), CI->getArgOperand(1) });
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
} else if (IsX86 && Name.startswith("avx512.mask.pshuf.b.")) {
VectorType *VecTy = cast<VectorType>(CI->getType());
Intrinsic::ID IID;
@@ -1501,6 +1663,42 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
{ CI->getArgOperand(0), CI->getArgOperand(1) });
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.pack")) {
+ bool IsUnsigned = Name[16] == 'u';
+ bool IsDW = Name[18] == 'd';
+ VectorType *VecTy = cast<VectorType>(CI->getType());
+ Intrinsic::ID IID;
+ if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 128)
+ IID = Intrinsic::x86_sse2_packsswb_128;
+ else if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 256)
+ IID = Intrinsic::x86_avx2_packsswb;
+ else if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 512)
+ IID = Intrinsic::x86_avx512_packsswb_512;
+ else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 128)
+ IID = Intrinsic::x86_sse2_packssdw_128;
+ else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 256)
+ IID = Intrinsic::x86_avx2_packssdw;
+ else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 512)
+ IID = Intrinsic::x86_avx512_packssdw_512;
+ else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 128)
+ IID = Intrinsic::x86_sse2_packuswb_128;
+ else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 256)
+ IID = Intrinsic::x86_avx2_packuswb;
+ else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 512)
+ IID = Intrinsic::x86_avx512_packuswb_512;
+ else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 128)
+ IID = Intrinsic::x86_sse41_packusdw;
+ else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 256)
+ IID = Intrinsic::x86_avx2_packusdw;
+ else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 512)
+ IID = Intrinsic::x86_avx512_packusdw_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getArgOperand(0), CI->getArgOperand(1) });
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
} else if (IsX86 && Name.startswith("avx512.mask.psll")) {
bool IsImmediate = Name[16] == 'i' ||
(Name.size() > 18 && Name[18] == 'i');
@@ -1705,6 +1903,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
} else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
Rep = upgradeMaskedMove(Builder, *CI);
+ } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
+ Rep = UpgradeMaskToInt(Builder, *CI);
} else if (IsX86 && Name.startswith("avx512.mask.vpermilvar.")) {
Intrinsic::ID IID;
if (Name.endswith("ps.128"))
@@ -1727,6 +1927,64 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
{ CI->getArgOperand(0), CI->getArgOperand(1) });
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
+ } else if (IsX86 && Name.endswith(".movntdqa")) {
+ Module *M = F->getParent();
+ MDNode *Node = MDNode::get(
+ C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
+
+ Value *Ptr = CI->getArgOperand(0);
+ VectorType *VTy = cast<VectorType>(CI->getType());
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC =
+ Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
+ LoadInst *LI = Builder.CreateAlignedLoad(BC, VTy->getBitWidth() / 8);
+ LI->setMetadata(M->getMDKindID("nontemporal"), Node);
+ Rep = LI;
+ } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
+ Value *Arg = CI->getArgOperand(0);
+ Value *Neg = Builder.CreateNeg(Arg, "neg");
+ Value *Cmp = Builder.CreateICmpSGE(
+ Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
+ } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
+ Name == "max.ui" || Name == "max.ull")) {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+ Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
+ ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
+ : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
+ } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
+ Name == "min.ui" || Name == "min.ull")) {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+ Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
+ ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
+ : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
+ } else if (IsNVVM && Name == "clz.ll") {
+    // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
+ Value *Arg = CI->getArgOperand(0);
+ Value *Ctlz = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ {Arg->getType()}),
+ {Arg, Builder.getFalse()}, "ctlz");
+ Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
+ } else if (IsNVVM && Name == "popc.ll") {
+    // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
+    // i64.
+ Value *Arg = CI->getArgOperand(0);
+ Value *Popc = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
+ {Arg->getType()}),
+ Arg, "ctpop");
+ Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
+ } else if (IsNVVM && Name == "h2f") {
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::convert_from_fp16,
+ {Builder.getFloatTy()}),
+ CI->getArgOperand(0), "h2f");
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}
@@ -1737,13 +1995,16 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
return;
}
- std::string Name = CI->getName();
- if (!Name.empty())
- CI->setName(Name + ".old");
-
+ CallInst *NewCall = nullptr;
switch (NewFn->getIntrinsicID()) {
- default:
- llvm_unreachable("Unknown function for CallInst upgrade.");
+ default: {
+ // Handle generic mangling change, but nothing else
+ assert(
+ (CI->getCalledFunction()->getName() != NewFn->getName()) &&
+ "Unknown function for CallInst upgrade and isn't just a name change");
+ CI->setCalledFunction(NewFn);
+ return;
+ }
case Intrinsic::arm_neon_vld1:
case Intrinsic::arm_neon_vld2:
@@ -1761,43 +2022,43 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::arm_neon_vst4lane: {
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
CI->arg_operands().end());
- CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
}
case Intrinsic::bitreverse:
- CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {CI->getArgOperand(0)}));
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
+ break;
case Intrinsic::ctlz:
case Intrinsic::cttz:
assert(CI->getNumArgOperands() == 1 &&
"Mismatch between function args and call args");
- CI->replaceAllUsesWith(Builder.CreateCall(
- NewFn, {CI->getArgOperand(0), Builder.getFalse()}, Name));
- CI->eraseFromParent();
- return;
-
- case Intrinsic::objectsize:
- CI->replaceAllUsesWith(Builder.CreateCall(
- NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)}, Name));
- CI->eraseFromParent();
- return;
+ NewCall =
+ Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
+ break;
- case Intrinsic::ctpop: {
- CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {CI->getArgOperand(0)}));
- CI->eraseFromParent();
- return;
+ case Intrinsic::objectsize: {
+ Value *NullIsUnknownSize = CI->getNumArgOperands() == 2
+ ? Builder.getFalse()
+ : CI->getArgOperand(2);
+ NewCall = Builder.CreateCall(
+ NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize});
+ break;
}
+ case Intrinsic::ctpop:
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
+ break;
+
+ case Intrinsic::convert_from_fp16:
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
+ break;
+
case Intrinsic::x86_xop_vfrcz_ss:
case Intrinsic::x86_xop_vfrcz_sd:
- CI->replaceAllUsesWith(
- Builder.CreateCall(NewFn, {CI->getArgOperand(1)}, Name));
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
+ break;
case Intrinsic::x86_xop_vpermil2pd:
case Intrinsic::x86_xop_vpermil2ps:
@@ -1808,9 +2069,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
- CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args, Name));
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
}
case Intrinsic::x86_sse41_ptestc:
@@ -1832,10 +2092,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
- CallInst *NewCall = Builder.CreateCall(NewFn, {BC0, BC1}, Name);
- CI->replaceAllUsesWith(NewCall);
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
+ break;
}
case Intrinsic::x86_sse41_insertps:
@@ -1851,30 +2109,36 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Replace the last argument with a trunc.
Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
-
- CallInst *NewCall = Builder.CreateCall(NewFn, Args);
- CI->replaceAllUsesWith(NewCall);
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
}
case Intrinsic::thread_pointer: {
- CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {}));
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, {});
+ break;
}
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::masked_load:
- case Intrinsic::masked_store: {
+ case Intrinsic::masked_store:
+ case Intrinsic::masked_gather:
+ case Intrinsic::masked_scatter: {
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
CI->arg_operands().end());
- CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
- CI->eraseFromParent();
- return;
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
}
}
+ assert(NewCall && "Should have either set this variable or returned through "
+ "the default case");
+ std::string Name = CI->getName();
+ if (!Name.empty()) {
+ CI->setName(Name + ".old");
+ NewCall->setName(Name);
+ }
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
}
void llvm::UpgradeCallsToIntrinsic(Function *F) {
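Note on the hunk above: each upgraded case now only builds NewCall and breaks, and a single shared tail renames the old call and rewrites its uses. A minimal sketch of that shared tail in isolation, assuming CI is the old call and NewCall its already-built replacement (replaceUpgradedCall is a hypothetical helper name, not part of the patch):

#include "llvm/IR/Instructions.h"
#include <string>

// Sketch only: mirrors the common replacement tail added in the hunk above.
static void replaceUpgradedCall(llvm::CallInst *CI, llvm::CallInst *NewCall) {
  std::string Name = CI->getName();
  if (!Name.empty()) {
    CI->setName(Name + ".old");    // keep the old call nameable until it is erased
    NewCall->setName(Name);        // the replacement inherits the original name
  }
  CI->replaceAllUsesWith(NewCall); // rewrite every user to the new call
  CI->eraseFromParent();           // drop the now-dead old intrinsic call
}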
@@ -1975,14 +2239,14 @@ bool llvm::UpgradeDebugInfo(Module &M) {
}
bool llvm::UpgradeModuleFlags(Module &M) {
- const NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
+ NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
if (!ModFlags)
return false;
- bool HasObjCFlag = false, HasClassProperties = false;
+ bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
MDNode *Op = ModFlags->getOperand(I);
- if (Op->getNumOperands() < 2)
+ if (Op->getNumOperands() != 3)
continue;
MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
if (!ID)
@@ -1991,7 +2255,24 @@ bool llvm::UpgradeModuleFlags(Module &M) {
HasObjCFlag = true;
if (ID->getString() == "Objective-C Class Properties")
HasClassProperties = true;
+ // Upgrade PIC/PIE Module Flags. The module flag behavior for these two
+    // fields was Error and is now Max.
+ if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
+ if (auto *Behavior =
+ mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
+ if (Behavior->getLimitedValue() == Module::Error) {
+ Type *Int32Ty = Type::getInt32Ty(M.getContext());
+ Metadata *Ops[3] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
+ MDString::get(M.getContext(), ID->getString()),
+ Op->getOperand(2)};
+ ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
+ Changed = true;
+ }
+ }
+ }
}
+
// "Objective-C Class Properties" is recently added for Objective-C. We
// upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module
// flag of value 0, so we can correctly downgrade this flag when trying to
@@ -2000,9 +2281,10 @@ bool llvm::UpgradeModuleFlags(Module &M) {
if (HasObjCFlag && !HasClassProperties) {
M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
(uint32_t)0);
- return true;
+ Changed = true;
}
- return false;
+
+ return Changed;
}
static bool isOldLoopArgument(Metadata *MD) {
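The UpgradeModuleFlags change above rewrites the behavior operand of the "PIC Level" and "PIE Level" module flags from Error to Max, so linking modules that disagree on the level no longer hard-fails. A minimal sketch of what an emitted flag looks like after the upgrade (the wrapper name is illustrative, not part of the patch):

#include "llvm/IR/Module.h"

// Sketch only: older emitters used llvm::Module::Error as the behavior for
// this flag; the upgrade rewrites it to llvm::Module::Max as shown here.
void addPICLevelFlag(llvm::Module &M, unsigned Level) {
  M.addModuleFlag(llvm::Module::Max, "PIC Level", Level);
}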
diff --git a/contrib/llvm/lib/IR/BasicBlock.cpp b/contrib/llvm/lib/IR/BasicBlock.cpp
index 19e7849..2b780ad 100644
--- a/contrib/llvm/lib/IR/BasicBlock.cpp
+++ b/contrib/llvm/lib/IR/BasicBlock.cpp
@@ -117,28 +117,19 @@ const Module *BasicBlock::getModule() const {
return getParent()->getParent();
}
-Module *BasicBlock::getModule() {
- return getParent()->getParent();
-}
-
-TerminatorInst *BasicBlock::getTerminator() {
- if (InstList.empty()) return nullptr;
- return dyn_cast<TerminatorInst>(&InstList.back());
-}
-
const TerminatorInst *BasicBlock::getTerminator() const {
if (InstList.empty()) return nullptr;
return dyn_cast<TerminatorInst>(&InstList.back());
}
-CallInst *BasicBlock::getTerminatingMustTailCall() {
+const CallInst *BasicBlock::getTerminatingMustTailCall() const {
if (InstList.empty())
return nullptr;
- ReturnInst *RI = dyn_cast<ReturnInst>(&InstList.back());
+ const ReturnInst *RI = dyn_cast<ReturnInst>(&InstList.back());
if (!RI || RI == &InstList.front())
return nullptr;
- Instruction *Prev = RI->getPrevNode();
+ const Instruction *Prev = RI->getPrevNode();
if (!Prev)
return nullptr;
@@ -162,7 +153,7 @@ CallInst *BasicBlock::getTerminatingMustTailCall() {
return nullptr;
}
-CallInst *BasicBlock::getTerminatingDeoptimizeCall() {
+const CallInst *BasicBlock::getTerminatingDeoptimizeCall() const {
if (InstList.empty())
return nullptr;
auto *RI = dyn_cast<ReturnInst>(&InstList.back());
@@ -177,22 +168,22 @@ CallInst *BasicBlock::getTerminatingDeoptimizeCall() {
return nullptr;
}
-Instruction* BasicBlock::getFirstNonPHI() {
- for (Instruction &I : *this)
+const Instruction* BasicBlock::getFirstNonPHI() const {
+ for (const Instruction &I : *this)
if (!isa<PHINode>(I))
return &I;
return nullptr;
}
-Instruction* BasicBlock::getFirstNonPHIOrDbg() {
- for (Instruction &I : *this)
+const Instruction* BasicBlock::getFirstNonPHIOrDbg() const {
+ for (const Instruction &I : *this)
if (!isa<PHINode>(I) && !isa<DbgInfoIntrinsic>(I))
return &I;
return nullptr;
}
-Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() {
- for (Instruction &I : *this) {
+const Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() const {
+ for (const Instruction &I : *this) {
if (isa<PHINode>(I) || isa<DbgInfoIntrinsic>(I))
continue;
@@ -206,12 +197,12 @@ Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() {
return nullptr;
}
-BasicBlock::iterator BasicBlock::getFirstInsertionPt() {
- Instruction *FirstNonPHI = getFirstNonPHI();
+BasicBlock::const_iterator BasicBlock::getFirstInsertionPt() const {
+ const Instruction *FirstNonPHI = getFirstNonPHI();
if (!FirstNonPHI)
return end();
- iterator InsertPt = FirstNonPHI->getIterator();
+ const_iterator InsertPt = FirstNonPHI->getIterator();
if (InsertPt->isEHPad()) ++InsertPt;
return InsertPt;
}
@@ -223,10 +214,10 @@ void BasicBlock::dropAllReferences() {
/// If this basic block has a single predecessor block,
/// return the block, otherwise return a null pointer.
-BasicBlock *BasicBlock::getSinglePredecessor() {
- pred_iterator PI = pred_begin(this), E = pred_end(this);
+const BasicBlock *BasicBlock::getSinglePredecessor() const {
+ const_pred_iterator PI = pred_begin(this), E = pred_end(this);
if (PI == E) return nullptr; // No preds.
- BasicBlock *ThePred = *PI;
+ const BasicBlock *ThePred = *PI;
++PI;
return (PI == E) ? ThePred : nullptr /*multiple preds*/;
}
@@ -236,10 +227,10 @@ BasicBlock *BasicBlock::getSinglePredecessor() {
/// Note that unique predecessor doesn't mean single edge, there can be
/// multiple edges from the unique predecessor to this block (for example
/// a switch statement with multiple cases having the same destination).
-BasicBlock *BasicBlock::getUniquePredecessor() {
- pred_iterator PI = pred_begin(this), E = pred_end(this);
+const BasicBlock *BasicBlock::getUniquePredecessor() const {
+ const_pred_iterator PI = pred_begin(this), E = pred_end(this);
if (PI == E) return nullptr; // No preds.
- BasicBlock *PredBB = *PI;
+ const BasicBlock *PredBB = *PI;
++PI;
for (;PI != E; ++PI) {
if (*PI != PredBB)
@@ -250,18 +241,18 @@ BasicBlock *BasicBlock::getUniquePredecessor() {
return PredBB;
}
-BasicBlock *BasicBlock::getSingleSuccessor() {
- succ_iterator SI = succ_begin(this), E = succ_end(this);
+const BasicBlock *BasicBlock::getSingleSuccessor() const {
+ succ_const_iterator SI = succ_begin(this), E = succ_end(this);
if (SI == E) return nullptr; // no successors
- BasicBlock *TheSucc = *SI;
+ const BasicBlock *TheSucc = *SI;
++SI;
return (SI == E) ? TheSucc : nullptr /* multiple successors */;
}
-BasicBlock *BasicBlock::getUniqueSuccessor() {
- succ_iterator SI = succ_begin(this), E = succ_end(this);
+const BasicBlock *BasicBlock::getUniqueSuccessor() const {
+ succ_const_iterator SI = succ_begin(this), E = succ_end(this);
if (SI == E) return nullptr; // No successors
- BasicBlock *SuccBB = *SI;
+ const BasicBlock *SuccBB = *SI;
++SI;
for (;SI != E; ++SI) {
if (*SI != SuccBB)
@@ -272,6 +263,10 @@ BasicBlock *BasicBlock::getUniqueSuccessor() {
return SuccBB;
}
+iterator_range<BasicBlock::phi_iterator> BasicBlock::phis() {
+ return make_range<phi_iterator>(dyn_cast<PHINode>(&front()), nullptr);
+}
+
/// This method is used to notify a BasicBlock that the
/// specified Predecessor of the block is no longer able to reach it. This is
/// actually not used to update the Predecessor list, but is actually used to
@@ -360,6 +355,19 @@ bool BasicBlock::canSplitPredecessors() const {
return true;
}
+bool BasicBlock::isLegalToHoistInto() const {
+ auto *Term = getTerminator();
+ // No terminator means the block is under construction.
+ if (!Term)
+ return true;
+
+ // If the block has no successors, there can be no instructions to hoist.
+ assert(Term->getNumSuccessors() > 0);
+
+ // Instructions should not be hoisted across exception handling boundaries.
+ return !Term->isExceptional();
+}
+
/// This splits a basic block into two at the specified
/// instruction. Note that all instructions BEFORE the specified iterator stay
/// as part of the original basic block, an unconditional branch is added to
@@ -398,13 +406,11 @@ BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
// Loop over any phi nodes in the basic block, updating the BB field of
// incoming values...
BasicBlock *Successor = *I;
- PHINode *PN;
- for (BasicBlock::iterator II = Successor->begin();
- (PN = dyn_cast<PHINode>(II)); ++II) {
- int IDX = PN->getBasicBlockIndex(this);
- while (IDX != -1) {
- PN->setIncomingBlock((unsigned)IDX, New);
- IDX = PN->getBasicBlockIndex(this);
+ for (auto &PN : Successor->phis()) {
+ int Idx = PN.getBasicBlockIndex(this);
+ while (Idx != -1) {
+ PN.setIncomingBlock((unsigned)Idx, New);
+ Idx = PN.getBasicBlockIndex(this);
}
}
}
@@ -438,9 +444,6 @@ bool BasicBlock::isLandingPad() const {
}
/// Return the landingpad instruction associated with the landing pad.
-LandingPadInst *BasicBlock::getLandingPadInst() {
- return dyn_cast<LandingPadInst>(getFirstNonPHI());
-}
const LandingPadInst *BasicBlock::getLandingPadInst() const {
return dyn_cast<LandingPadInst>(getFirstNonPHI());
}
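The BasicBlock changes above add a phis() range and use it in splitBasicBlock in place of the manual dyn_cast loop. A minimal sketch of the same idiom outside the patch (function and parameter names are illustrative):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

// Sketch only: retarget PHI incoming blocks in a successor after an edge has
// been redirected from OldPred to NewPred, using the new phis() range.
void retargetPhiIncomingBlocks(llvm::BasicBlock &Succ, llvm::BasicBlock *OldPred,
                               llvm::BasicBlock *NewPred) {
  for (llvm::PHINode &PN : Succ.phis()) {
    int Idx = PN.getBasicBlockIndex(OldPred);
    while (Idx != -1) {              // a predecessor may appear more than once
      PN.setIncomingBlock((unsigned)Idx, NewPred);
      Idx = PN.getBasicBlockIndex(OldPred);
    }
  }
}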
diff --git a/contrib/llvm/lib/IR/Comdat.cpp b/contrib/llvm/lib/IR/Comdat.cpp
index fc1b48d..c735f9b 100644
--- a/contrib/llvm/lib/IR/Comdat.cpp
+++ b/contrib/llvm/lib/IR/Comdat.cpp
@@ -1,4 +1,4 @@
-//===-- Comdat.cpp - Implement Metadata classes --------------------------===//
+//===- Comdat.cpp - Implement Metadata classes ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,10 +13,12 @@
#include "llvm/IR/Comdat.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+
using namespace llvm;
Comdat::Comdat(Comdat &&C) : Name(C.Name), SK(C.SK) {}
-Comdat::Comdat() : Name(nullptr), SK(Comdat::Any) {}
+Comdat::Comdat() = default;
StringRef Comdat::getName() const { return Name->first(); }
diff --git a/contrib/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm/lib/IR/ConstantFold.cpp
index 098ff90..311b0a7 100644
--- a/contrib/llvm/lib/IR/ConstantFold.cpp
+++ b/contrib/llvm/lib/IR/ConstantFold.cpp
@@ -18,6 +18,7 @@
//===----------------------------------------------------------------------===//
#include "ConstantFold.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
@@ -222,7 +223,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
APInt V = CI->getValue();
if (ByteStart)
- V = V.lshr(ByteStart*8);
+ V.lshrInPlace(ByteStart*8);
V = V.trunc(ByteSize*8);
return ConstantInt::get(CI->getContext(), V);
}
@@ -241,7 +242,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
// X | -1 -> -1.
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
- if (RHSC->isAllOnesValue())
+ if (RHSC->isMinusOne())
return RHSC;
Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
@@ -347,8 +348,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
/// factors factored out. If Folded is false, return null if no factoring was
/// possible, to avoid endlessly bouncing an unfoldable expression back into the
/// top-level folder.
-static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy,
- bool Folded) {
+static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, bool Folded) {
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
@@ -403,8 +403,7 @@ static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy,
/// factors factored out. If Folded is false, return null if no factoring was
/// possible, to avoid endlessly bouncing an unfoldable expression back into the
/// top-level folder.
-static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy,
- bool Folded) {
+static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy, bool Folded) {
// The alignment of an array is equal to the alignment of the
// array element. Note that this is not always true for vectors.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
@@ -468,8 +467,7 @@ static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy,
/// any known factors factored out. If Folded is false, return null if no
/// factoring was possible, to avoid endlessly bouncing an unfoldable expression
/// back into the top-level folder.
-static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo,
- Type *DestTy,
+static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo, Type *DestTy,
bool Folded) {
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
@@ -606,17 +604,15 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
const APFloat &V = FPC->getValueAPF();
bool ignored;
- uint64_t x[2];
uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ APSInt IntVal(DestBitWidth, opc == Instruction::FPToUI);
if (APFloat::opInvalidOp ==
- V.convertToInteger(x, DestBitWidth, opc==Instruction::FPToSI,
- APFloat::rmTowardZero, &ignored)) {
+ V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored)) {
// Undefined behavior invoked - the destination type can't represent
// the input constant.
return UndefValue::get(DestTy);
}
- APInt Val(DestBitWidth, x);
- return ConstantInt::get(FPC->getContext(), Val);
+ return ConstantInt::get(FPC->getContext(), IntVal);
}
return nullptr; // Can't fold.
case Instruction::IntToPtr: //always treated as unsigned
@@ -1019,33 +1015,33 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
switch (Opcode) {
case Instruction::Add:
- if (CI2->equalsInt(0)) return C1; // X + 0 == X
+ if (CI2->isZero()) return C1; // X + 0 == X
break;
case Instruction::Sub:
- if (CI2->equalsInt(0)) return C1; // X - 0 == X
+ if (CI2->isZero()) return C1; // X - 0 == X
break;
case Instruction::Mul:
- if (CI2->equalsInt(0)) return C2; // X * 0 == 0
- if (CI2->equalsInt(1))
+ if (CI2->isZero()) return C2; // X * 0 == 0
+ if (CI2->isOne())
return C1; // X * 1 == X
break;
case Instruction::UDiv:
case Instruction::SDiv:
- if (CI2->equalsInt(1))
+ if (CI2->isOne())
return C1; // X / 1 == X
- if (CI2->equalsInt(0))
+ if (CI2->isZero())
return UndefValue::get(CI2->getType()); // X / 0 == undef
break;
case Instruction::URem:
case Instruction::SRem:
- if (CI2->equalsInt(1))
+ if (CI2->isOne())
return Constant::getNullValue(CI2->getType()); // X % 1 == 0
- if (CI2->equalsInt(0))
+ if (CI2->isZero())
return UndefValue::get(CI2->getType()); // X % 0 == undef
break;
case Instruction::And:
if (CI2->isZero()) return C2; // X & 0 == 0
- if (CI2->isAllOnesValue())
+ if (CI2->isMinusOne())
return C1; // X & -1 == X
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
@@ -1082,12 +1078,12 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
break;
case Instruction::Or:
- if (CI2->equalsInt(0)) return C1; // X | 0 == X
- if (CI2->isAllOnesValue())
+ if (CI2->isZero()) return C1; // X | 0 == X
+ if (CI2->isMinusOne())
return C2; // X | -1 == -1
break;
case Instruction::Xor:
- if (CI2->equalsInt(0)) return C1; // X ^ 0 == X
+ if (CI2->isZero()) return C1; // X ^ 0 == X
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
switch (CE1->getOpcode()) {
@@ -1095,7 +1091,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::ICmp:
case Instruction::FCmp:
// cmp pred ^ true -> cmp !pred
- assert(CI2->equalsInt(1));
+ assert(CI2->isOne());
CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate();
pred = CmpInst::getInversePredicate(pred);
return ConstantExpr::getCompare(pred, CE1->getOperand(0),
@@ -1130,18 +1126,18 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::Mul:
return ConstantInt::get(CI1->getContext(), C1V * C2V);
case Instruction::UDiv:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
case Instruction::SDiv:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
return UndefValue::get(CI1->getType()); // MIN_INT / -1 -> undef
return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
case Instruction::URem:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
case Instruction::SRem:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
return UndefValue::get(CI1->getType()); // MIN_INT % -1 -> undef
return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
@@ -1174,7 +1170,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::LShr:
case Instruction::AShr:
case Instruction::Shl:
- if (CI1->equalsInt(0)) return C1;
+ if (CI1->isZero()) return C1;
break;
default:
break;
@@ -1209,10 +1205,15 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
SmallVector<Constant*, 16> Result;
Type *Ty = IntegerType::get(VTy->getContext(), 32);
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- Constant *LHS =
- ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
- Constant *RHS =
- ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i));
+ Constant *ExtractIdx = ConstantInt::get(Ty, i);
+ Constant *LHS = ConstantExpr::getExtractElement(C1, ExtractIdx);
+ Constant *RHS = ConstantExpr::getExtractElement(C2, ExtractIdx);
+
+ // If any element of a divisor vector is zero, the whole op is undef.
+ if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
+ Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
+ RHS->isNullValue())
+ return UndefValue::get(VTy);
Result.push_back(ConstantExpr::get(Opcode, LHS, RHS));
}
@@ -2037,9 +2038,6 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
Optional<unsigned> InRangeIndex,
ArrayRef<Value *> Idxs) {
if (Idxs.empty()) return C;
- Constant *Idx0 = cast<Constant>(Idxs[0]);
- if ((Idxs.size() == 1 && Idx0->isNullValue()))
- return C;
if (isa<UndefValue>(C)) {
Type *GEPTy = GetElementPtrInst::getGEPReturnType(
@@ -2047,10 +2045,15 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
return UndefValue::get(GEPTy);
}
+ Constant *Idx0 = cast<Constant>(Idxs[0]);
+ if (Idxs.size() == 1 && (Idx0->isNullValue() || isa<UndefValue>(Idx0)))
+ return C;
+
if (C->isNullValue()) {
bool isNull = true;
for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
- if (!cast<Constant>(Idxs[i])->isNullValue()) {
+ if (!isa<UndefValue>(Idxs[i]) &&
+ !cast<Constant>(Idxs[i])->isNullValue()) {
isNull = false;
break;
}
@@ -2094,15 +2097,19 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
// Subsequent evaluation would get confused and produce erroneous results.
//
// The following prohibits such a GEP from being formed by checking to see
- // if the index is in-range with respect to an array or vector.
+ // if the index is in-range with respect to an array.
+ // TODO: This code may be extended to handle vectors as well.
bool PerformFold = false;
if (Idx0->isNullValue())
PerformFold = true;
else if (LastI.isSequential())
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
- PerformFold =
- !LastI.isBoundedSequential() ||
- isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI);
+ PerformFold = (!LastI.isBoundedSequential() ||
+ isIndexInRangeOfArrayType(
+ LastI.getSequentialNumElements(), CI)) &&
+ !CE->getOperand(CE->getNumOperands() - 1)
+ ->getType()
+ ->isVectorTy();
if (PerformFold) {
SmallVector<Value*, 16> NewIndices;
@@ -2231,7 +2238,8 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
ConstantInt *Factor = ConstantInt::get(CI->getType(), NumElements);
NewIdxs[i] = ConstantExpr::getSRem(CI, Factor);
- Constant *PrevIdx = cast<Constant>(Idxs[i - 1]);
+ Constant *PrevIdx = NewIdxs[i-1] ? NewIdxs[i-1] :
+ cast<Constant>(Idxs[i - 1]);
Constant *Div = ConstantExpr::getSDiv(CI, Factor);
unsigned CommonExtendedWidth =
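One behavioral change in the ConstantFold hunks above: when folding a vector division or remainder element-wise, a single zero element in the divisor now folds the whole expression to undef instead of folding each lane and hitting a division by zero. A small sketch of a constant expression that reaches this path (illustrative only; the folder may simplify further than shown):

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

// Sketch only: <8, 9> udiv <2, 0> folds to undef <2 x i32> under the new rule.
llvm::Constant *foldUDivWithZeroLane(llvm::LLVMContext &Ctx) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::Constant *NumElts[] = {llvm::ConstantInt::get(I32, 8),
                               llvm::ConstantInt::get(I32, 9)};
  llvm::Constant *DenElts[] = {llvm::ConstantInt::get(I32, 2),
                               llvm::ConstantInt::get(I32, 0)};
  llvm::Constant *LHS = llvm::ConstantVector::get(NumElts);
  llvm::Constant *RHS = llvm::ConstantVector::get(DenElts);
  return llvm::ConstantExpr::getUDiv(LHS, RHS);
}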
diff --git a/contrib/llvm/lib/IR/ConstantRange.cpp b/contrib/llvm/lib/IR/ConstantRange.cpp
index a85ad46..4bd1725 100644
--- a/contrib/llvm/lib/IR/ConstantRange.cpp
+++ b/contrib/llvm/lib/IR/ConstantRange.cpp
@@ -1,4 +1,4 @@
-//===-- ConstantRange.cpp - ConstantRange implementation ------------------===//
+//===- ConstantRange.cpp - ConstantRange implementation -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,29 +21,31 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/IR/Instruction.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
-#include "llvm/IR/ConstantRange.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+
using namespace llvm;
-/// Initialize a full (the default) or empty set for the specified type.
-///
-ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
- if (Full)
- Lower = Upper = APInt::getMaxValue(BitWidth);
- else
- Lower = Upper = APInt::getMinValue(BitWidth);
-}
+ConstantRange::ConstantRange(uint32_t BitWidth, bool Full)
+ : Lower(Full ? APInt::getMaxValue(BitWidth) : APInt::getMinValue(BitWidth)),
+ Upper(Lower) {}
-/// Initialize a range to hold the single specified value.
-///
-ConstantRange::ConstantRange(APIntMoveTy V)
+ConstantRange::ConstantRange(APInt V)
: Lower(std::move(V)), Upper(Lower + 1) {}
-ConstantRange::ConstantRange(APIntMoveTy L, APIntMoveTy U)
+ConstantRange::ConstantRange(APInt L, APInt U)
: Lower(std::move(L)), Upper(std::move(U)) {
assert(Lower.getBitWidth() == Upper.getBitWidth() &&
"ConstantRange with unequal bit widths");
@@ -70,49 +72,49 @@ ConstantRange ConstantRange::makeAllowedICmpRegion(CmpInst::Predicate Pred,
APInt UMax(CR.getUnsignedMax());
if (UMax.isMinValue())
return ConstantRange(W, /* empty */ false);
- return ConstantRange(APInt::getMinValue(W), UMax);
+ return ConstantRange(APInt::getMinValue(W), std::move(UMax));
}
case CmpInst::ICMP_SLT: {
APInt SMax(CR.getSignedMax());
if (SMax.isMinSignedValue())
return ConstantRange(W, /* empty */ false);
- return ConstantRange(APInt::getSignedMinValue(W), SMax);
+ return ConstantRange(APInt::getSignedMinValue(W), std::move(SMax));
}
case CmpInst::ICMP_ULE: {
APInt UMax(CR.getUnsignedMax());
if (UMax.isMaxValue())
return ConstantRange(W);
- return ConstantRange(APInt::getMinValue(W), UMax + 1);
+ return ConstantRange(APInt::getMinValue(W), std::move(UMax) + 1);
}
case CmpInst::ICMP_SLE: {
APInt SMax(CR.getSignedMax());
if (SMax.isMaxSignedValue())
return ConstantRange(W);
- return ConstantRange(APInt::getSignedMinValue(W), SMax + 1);
+ return ConstantRange(APInt::getSignedMinValue(W), std::move(SMax) + 1);
}
case CmpInst::ICMP_UGT: {
APInt UMin(CR.getUnsignedMin());
if (UMin.isMaxValue())
return ConstantRange(W, /* empty */ false);
- return ConstantRange(UMin + 1, APInt::getNullValue(W));
+ return ConstantRange(std::move(UMin) + 1, APInt::getNullValue(W));
}
case CmpInst::ICMP_SGT: {
APInt SMin(CR.getSignedMin());
if (SMin.isMaxSignedValue())
return ConstantRange(W, /* empty */ false);
- return ConstantRange(SMin + 1, APInt::getSignedMinValue(W));
+ return ConstantRange(std::move(SMin) + 1, APInt::getSignedMinValue(W));
}
case CmpInst::ICMP_UGE: {
APInt UMin(CR.getUnsignedMin());
if (UMin.isMinValue())
return ConstantRange(W);
- return ConstantRange(UMin, APInt::getNullValue(W));
+ return ConstantRange(std::move(UMin), APInt::getNullValue(W));
}
case CmpInst::ICMP_SGE: {
APInt SMin(CR.getSignedMin());
if (SMin.isMinSignedValue())
return ConstantRange(W);
- return ConstantRange(SMin, APInt::getSignedMinValue(W));
+ return ConstantRange(std::move(SMin), APInt::getSignedMinValue(W));
}
}
}
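For context on the makeAllowedICmpRegion cases touched above (the std::move changes are purely cosmetic): the function answers which values of X could satisfy "icmp pred X, Y" for some Y drawn from the given range. A brief usage sketch, with the wrapper name being illustrative:

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

// Sketch only: for "x u< y" with y drawn from CR, the allowed region for x is
// [0, CR.getUnsignedMax()), or the empty set when that maximum is zero.
llvm::ConstantRange allowedForULT(const llvm::ConstantRange &CR) {
  return llvm::ConstantRange::makeAllowedICmpRegion(llvm::CmpInst::ICMP_ULT, CR);
}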
@@ -177,7 +179,7 @@ ConstantRange
ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
const ConstantRange &Other,
unsigned NoWrapKind) {
- typedef OverflowingBinaryOperator OBO;
+ using OBO = OverflowingBinaryOperator;
// Computes the intersection of CR0 and CR1. It is different from
// intersectWith in that the ConstantRange returned will only contain elements
@@ -202,7 +204,7 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
return ConstantRange(BitWidth, false);
if (auto *C = Other.getSingleElement())
- if (C->isMinValue())
+ if (C->isNullValue())
// Full set: nothing signed / unsigned wraps when added to 0.
return ConstantRange(BitWidth);
@@ -214,8 +216,8 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
-Other.getUnsignedMax()));
if (NoWrapKind & OBO::NoSignedWrap) {
- APInt SignedMin = Other.getSignedMin();
- APInt SignedMax = Other.getSignedMax();
+ const APInt &SignedMin = Other.getSignedMin();
+ const APInt &SignedMax = Other.getSignedMax();
if (SignedMax.isStrictlyPositive())
Result = SubsetIntersect(
@@ -232,98 +234,76 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
return Result;
}
-/// isFullSet - Return true if this set contains all of the elements possible
-/// for this data-type
bool ConstantRange::isFullSet() const {
return Lower == Upper && Lower.isMaxValue();
}
-/// isEmptySet - Return true if this set contains no members.
-///
bool ConstantRange::isEmptySet() const {
return Lower == Upper && Lower.isMinValue();
}
-/// isWrappedSet - Return true if this set wraps around the top of the range,
-/// for example: [100, 8)
-///
bool ConstantRange::isWrappedSet() const {
return Lower.ugt(Upper);
}
-/// isSignWrappedSet - Return true if this set wraps around the INT_MIN of
-/// its bitwidth, for example: i8 [120, 140).
-///
bool ConstantRange::isSignWrappedSet() const {
return contains(APInt::getSignedMaxValue(getBitWidth())) &&
contains(APInt::getSignedMinValue(getBitWidth()));
}
-/// getSetSize - Return the number of elements in this set.
-///
APInt ConstantRange::getSetSize() const {
- if (isFullSet()) {
- APInt Size(getBitWidth()+1, 0);
- Size.setBit(getBitWidth());
- return Size;
- }
+ if (isFullSet())
+ return APInt::getOneBitSet(getBitWidth()+1, getBitWidth());
// This is also correct for wrapped sets.
return (Upper - Lower).zext(getBitWidth()+1);
}
-/// getUnsignedMax - Return the largest unsigned value contained in the
-/// ConstantRange.
-///
+bool
+ConstantRange::isSizeStrictlySmallerThan(const ConstantRange &Other) const {
+ assert(getBitWidth() == Other.getBitWidth());
+ if (isFullSet())
+ return false;
+ if (Other.isFullSet())
+ return true;
+ return (Upper - Lower).ult(Other.Upper - Other.Lower);
+}
+
+bool
+ConstantRange::isSizeLargerThan(uint64_t MaxSize) const {
+ assert(MaxSize && "MaxSize can't be 0.");
+ // If this is a full set, we need special handling to avoid needing an extra bit
+ // to represent the size.
+ if (isFullSet())
+ return APInt::getMaxValue(getBitWidth()).ugt(MaxSize - 1);
+
+ return (Upper - Lower).ugt(MaxSize);
+}
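The two new predicates replace getSetSize() comparisons without building a BitWidth+1 APInt. A minimal sketch of the intended equivalence, with made-up i8 ranges:

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  using namespace llvm;

  bool sizeDemo() {
    ConstantRange A(APInt(8, 10), APInt(8, 20));  // 10 elements
    ConstantRange B(APInt(8, 0), APInt(8, 100));  // 100 elements
    // Matches the old A.getSetSize().ult(B.getSetSize()) without widening.
    return A.isSizeStrictlySmallerThan(B);        // true
  }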
+
APInt ConstantRange::getUnsignedMax() const {
if (isFullSet() || isWrappedSet())
return APInt::getMaxValue(getBitWidth());
return getUpper() - 1;
}
-/// getUnsignedMin - Return the smallest unsigned value contained in the
-/// ConstantRange.
-///
APInt ConstantRange::getUnsignedMin() const {
- if (isFullSet() || (isWrappedSet() && getUpper() != 0))
+ if (isFullSet() || (isWrappedSet() && !getUpper().isNullValue()))
return APInt::getMinValue(getBitWidth());
return getLower();
}
-/// getSignedMax - Return the largest signed value contained in the
-/// ConstantRange.
-///
APInt ConstantRange::getSignedMax() const {
- APInt SignedMax(APInt::getSignedMaxValue(getBitWidth()));
- if (!isWrappedSet()) {
- if (getLower().sle(getUpper() - 1))
- return getUpper() - 1;
- return SignedMax;
- }
- if (getLower().isNegative() == getUpper().isNegative())
- return SignedMax;
+ if (isFullSet() || Lower.sgt(Upper))
+ return APInt::getSignedMaxValue(getBitWidth());
return getUpper() - 1;
}
-/// getSignedMin - Return the smallest signed value contained in the
-/// ConstantRange.
-///
APInt ConstantRange::getSignedMin() const {
- APInt SignedMin(APInt::getSignedMinValue(getBitWidth()));
- if (!isWrappedSet()) {
- if (getLower().sle(getUpper() - 1))
- return getLower();
- return SignedMin;
- }
- if ((getUpper() - 1).slt(getLower())) {
- if (getUpper() != SignedMin)
- return SignedMin;
- }
+ if (isFullSet() || (Lower.sgt(Upper) && !getUpper().isMinSignedValue()))
+ return APInt::getSignedMinValue(getBitWidth());
return getLower();
}
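A worked example of the rewritten signed bounds, reusing the sign-wrapped i8 set [120, 140) cited in the removed comment above; the values are illustrative only.

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  using namespace llvm;

  void signedBoundsDemo() {
    // {120..139} contains both 127 (SMAX) and 128 (-128 as signed i8),
    // so the signed bounds saturate even though the set is small.
    ConstantRange CR(APInt(8, 120), APInt(8, 140));
    APInt SMax = CR.getSignedMax();  // 127
    APInt SMin = CR.getSignedMin();  // -128
    (void)SMax; (void)SMin;
  }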
-/// contains - Return true if the specified value is in the set.
-///
bool ConstantRange::contains(const APInt &V) const {
if (Lower == Upper)
return isFullSet();
@@ -333,10 +313,6 @@ bool ConstantRange::contains(const APInt &V) const {
return Lower.ule(V) || V.ult(Upper);
}
-/// contains - Return true if the argument is a subset of this range.
-/// Two equal sets contain each other. The empty set contained by all other
-/// sets.
-///
bool ConstantRange::contains(const ConstantRange &Other) const {
if (isFullSet() || Other.isEmptySet()) return true;
if (isEmptySet() || Other.isFullSet()) return false;
@@ -355,8 +331,6 @@ bool ConstantRange::contains(const ConstantRange &Other) const {
return Other.getUpper().ule(Upper) && Lower.ule(Other.getLower());
}
-/// subtract - Subtract the specified constant from the endpoints of this
-/// constant range.
ConstantRange ConstantRange::subtract(const APInt &Val) const {
assert(Val.getBitWidth() == getBitWidth() && "Wrong bit width");
// If the set is empty or full, don't modify the endpoints.
@@ -365,17 +339,10 @@ ConstantRange ConstantRange::subtract(const APInt &Val) const {
return ConstantRange(Lower - Val, Upper - Val);
}
-/// \brief Subtract the specified range from this range (aka relative complement
-/// of the sets).
ConstantRange ConstantRange::difference(const ConstantRange &CR) const {
return intersectWith(CR.inverse());
}
-/// intersectWith - Return the range that results from the intersection of this
-/// range with another range. The resultant range is guaranteed to include all
-/// elements contained in both input ranges, and to have the smallest possible
-/// set size that does so. Because there may be two intersections with the
-/// same set size, A.intersectWith(B) might not be equal to B.intersectWith(A).
ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
assert(getBitWidth() == CR.getBitWidth() &&
"ConstantRange types don't agree!");
@@ -414,7 +381,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Upper.ule(Lower))
return ConstantRange(CR.Lower, Upper);
- if (getSetSize().ult(CR.getSetSize()))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -429,7 +396,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Upper.ult(Upper)) {
if (CR.Lower.ult(Upper)) {
- if (getSetSize().ult(CR.getSetSize()))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -445,18 +412,11 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
return ConstantRange(CR.Lower, Upper);
}
- if (getSetSize().ult(CR.getSetSize()))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
-
-/// unionWith - Return the range that results from the union of this range with
-/// another range. The resultant range is guaranteed to include the elements of
-/// both sets, but may contain more. For example, [3, 9) union [12,15) is
-/// [3, 15), which includes 9, 10, and 11, which were not included in either
-/// set before.
-///
ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
assert(getBitWidth() == CR.getBitWidth() &&
"ConstantRange types don't agree!");
@@ -475,16 +435,13 @@ ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
return ConstantRange(CR.Lower, Upper);
}
- APInt L = Lower, U = Upper;
- if (CR.Lower.ult(L))
- L = CR.Lower;
- if ((CR.Upper - 1).ugt(U - 1))
- U = CR.Upper;
+ APInt L = CR.Lower.ult(Lower) ? CR.Lower : Lower;
+ APInt U = (CR.Upper - 1).ugt(Upper - 1) ? CR.Upper : Upper;
- if (L == 0 && U == 0)
+ if (L.isNullValue() && U.isNullValue())
return ConstantRange(getBitWidth());
- return ConstantRange(L, U);
+ return ConstantRange(std::move(L), std::move(U));
}
if (!CR.isWrappedSet()) {
@@ -525,13 +482,10 @@ ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
if (CR.Lower.ule(Upper) || Lower.ule(CR.Upper))
return ConstantRange(getBitWidth());
- APInt L = Lower, U = Upper;
- if (CR.Upper.ugt(U))
- U = CR.Upper;
- if (CR.Lower.ult(L))
- L = CR.Lower;
+ APInt L = CR.Lower.ult(Lower) ? CR.Lower : Lower;
+ APInt U = CR.Upper.ugt(Upper) ? CR.Upper : Upper;
- return ConstantRange(L, U);
+ return ConstantRange(std::move(L), std::move(U));
}
ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
@@ -558,14 +512,14 @@ ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
auto BW = getBitWidth();
APInt Min = APInt::getMinValue(BW).zextOrSelf(ResultBitWidth);
APInt Max = APInt::getMaxValue(BW).zextOrSelf(ResultBitWidth);
- return ConstantRange(Min, Max);
+ return ConstantRange(std::move(Min), std::move(Max));
}
case Instruction::SIToFP: {
// TODO: use input range if available
auto BW = getBitWidth();
APInt SMin = APInt::getSignedMinValue(BW).sextOrSelf(ResultBitWidth);
APInt SMax = APInt::getSignedMaxValue(BW).sextOrSelf(ResultBitWidth);
- return ConstantRange(SMin, SMax);
+ return ConstantRange(std::move(SMin), std::move(SMax));
}
case Instruction::FPTrunc:
case Instruction::FPExt:
@@ -577,10 +531,6 @@ ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
};
}
-/// zeroExtend - Return a new range in the specified integer type, which must
-/// be strictly larger than the current type. The returned range will
-/// correspond to the possible range of values as if the source range had been
-/// zero extended.
ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
@@ -591,16 +541,13 @@ ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
APInt LowerExt(DstTySize, 0);
if (!Upper) // special case: [X, 0) -- not really wrapping around
LowerExt = Lower.zext(DstTySize);
- return ConstantRange(LowerExt, APInt::getOneBitSet(DstTySize, SrcTySize));
+ return ConstantRange(std::move(LowerExt),
+ APInt::getOneBitSet(DstTySize, SrcTySize));
}
return ConstantRange(Lower.zext(DstTySize), Upper.zext(DstTySize));
}
-/// signExtend - Return a new range in the specified integer type, which must
-/// be strictly larger than the current type. The returned range will
-/// correspond to the possible range of values as if the source range had been
-/// sign extended.
ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
if (isEmptySet()) return ConstantRange(DstTySize, /*isFullSet=*/false);
@@ -619,10 +566,6 @@ ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
return ConstantRange(Lower.sext(DstTySize), Upper.sext(DstTySize));
}
-/// truncate - Return a new range in the specified integer type, which must be
-/// strictly smaller than the current type. The returned range will
-/// correspond to the possible range of values as if the source range had been
-/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
assert(getBitWidth() > DstTySize && "Not a value truncation");
if (isEmptySet())
@@ -630,10 +573,6 @@ ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
if (isFullSet())
return ConstantRange(DstTySize, /*isFullSet=*/true);
- APInt MaxValue = APInt::getMaxValue(DstTySize).zext(getBitWidth());
- APInt MaxBitValue(getBitWidth(), 0);
- MaxBitValue.setBit(DstTySize);
-
APInt LowerDiv(Lower), UpperDiv(Upper);
ConstantRange Union(DstTySize, /*isFullSet=*/false);
@@ -641,41 +580,46 @@ ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
// We use the non-wrapped set code to analyze the [Lower, MaxValue) part, and
// then we do the union with [MaxValue, Upper)
if (isWrappedSet()) {
- // If Upper is greater than Max Value, it covers the whole truncated range.
- if (Upper.uge(MaxValue))
+ // If Upper is greater than or equal to MaxValue(DstTy), it covers the whole
+ // truncated range.
+ if (Upper.getActiveBits() > DstTySize ||
+ Upper.countTrailingOnes() == DstTySize)
return ConstantRange(DstTySize, /*isFullSet=*/true);
Union = ConstantRange(APInt::getMaxValue(DstTySize),Upper.trunc(DstTySize));
- UpperDiv = APInt::getMaxValue(getBitWidth());
+ UpperDiv.setAllBits();
// Union covers the MaxValue case, so return if the remaining range is just
- // MaxValue.
+ // MaxValue(DstTy).
if (LowerDiv == UpperDiv)
return Union;
}
// Chop off the most significant bits that are past the destination bitwidth.
- if (LowerDiv.uge(MaxValue)) {
- APInt Div(getBitWidth(), 0);
- APInt::udivrem(LowerDiv, MaxBitValue, Div, LowerDiv);
- UpperDiv = UpperDiv - MaxBitValue * Div;
+ if (LowerDiv.getActiveBits() > DstTySize) {
+ // Mask to just the significant bits and subtract from LowerDiv/UpperDiv.
+ APInt Adjust = LowerDiv & APInt::getBitsSetFrom(getBitWidth(), DstTySize);
+ LowerDiv -= Adjust;
+ UpperDiv -= Adjust;
}
- if (UpperDiv.ule(MaxValue))
+ unsigned UpperDivWidth = UpperDiv.getActiveBits();
+ if (UpperDivWidth <= DstTySize)
return ConstantRange(LowerDiv.trunc(DstTySize),
UpperDiv.trunc(DstTySize)).unionWith(Union);
// The truncated value wraps around. Check if we can do better than fullset.
- APInt UpperModulo = UpperDiv - MaxBitValue;
- if (UpperModulo.ult(LowerDiv))
- return ConstantRange(LowerDiv.trunc(DstTySize),
- UpperModulo.trunc(DstTySize)).unionWith(Union);
+ if (UpperDivWidth == DstTySize + 1) {
+ // Clear the MSB so that UpperDiv wraps around.
+ UpperDiv.clearBit(DstTySize);
+ if (UpperDiv.ult(LowerDiv))
+ return ConstantRange(LowerDiv.trunc(DstTySize),
+ UpperDiv.trunc(DstTySize)).unionWith(Union);
+ }
return ConstantRange(DstTySize, /*isFullSet=*/true);
}
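A hedged worked example of the new truncation path (values chosen for illustration): truncating the i16 range [100, 300) to i8 yields the wrapped range [100, 44), since 100..255 stays put and 256..299 folds to 0..43.

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  using namespace llvm;

  ConstantRange truncateDemo() {
    ConstantRange CR(APInt(16, 100), APInt(16, 300));
    // UpperDiv ends up with DstTySize+1 active bits, its MSB is cleared
    // (300 -> 44), and the truncated range wraps: i8 [100, 44).
    return CR.truncate(8);
  }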
-/// zextOrTrunc - make this range have the bit width given by \p DstTySize. The
-/// value is zero extended, truncated, or left alone to make it that width.
ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const {
unsigned SrcTySize = getBitWidth();
if (SrcTySize > DstTySize)
@@ -685,8 +629,6 @@ ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const {
return *this;
}
-/// sextOrTrunc - make this range have the bit width given by \p DstTySize. The
-/// value is sign extended, truncated, or left alone to make it that width.
ConstantRange ConstantRange::sextOrTrunc(uint32_t DstTySize) const {
unsigned SrcTySize = getBitWidth();
if (SrcTySize > DstTySize)
@@ -739,17 +681,16 @@ ConstantRange::add(const ConstantRange &Other) const {
if (isFullSet() || Other.isFullSet())
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- APInt Spread_X = getSetSize(), Spread_Y = Other.getSetSize();
APInt NewLower = getLower() + Other.getLower();
APInt NewUpper = getUpper() + Other.getUpper() - 1;
if (NewLower == NewUpper)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- ConstantRange X = ConstantRange(NewLower, NewUpper);
- if (X.getSetSize().ult(Spread_X) || X.getSetSize().ult(Spread_Y))
+ ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
// We've wrapped, therefore, full set.
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
-
return X;
}
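For add(), a small sketch of the non-wrapping case under the new size check, with made-up i8 inputs:

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  using namespace llvm;

  ConstantRange addDemo() {
    ConstantRange A(APInt(8, 0), APInt(8, 10));     // {0..9}
    ConstantRange B(APInt(8, 250), APInt(8, 255));  // {250..254}
    // NewLower = 250, NewUpper = 264 mod 256 = 8; [250, 8) is not strictly
    // smaller than either input, so the result is kept instead of full-set.
    return A.add(B);                                // i8 [250, 8)
  }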
@@ -773,17 +714,16 @@ ConstantRange::sub(const ConstantRange &Other) const {
if (isFullSet() || Other.isFullSet())
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- APInt Spread_X = getSetSize(), Spread_Y = Other.getSetSize();
APInt NewLower = getLower() - Other.getUpper() + 1;
APInt NewUpper = getUpper() - Other.getLower();
if (NewLower == NewUpper)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- ConstantRange X = ConstantRange(NewLower, NewUpper);
- if (X.getSetSize().ult(Spread_X) || X.getSetSize().ult(Spread_Y))
+ ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
// We've wrapped, therefore, full set.
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
-
return X;
}
@@ -817,7 +757,8 @@ ConstantRange::multiply(const ConstantRange &Other) const {
// from one positive number to another which is as good as we can generate.
// In this case, skip the extra work of generating signed ranges which aren't
// going to be better than this range.
- if (!UR.isWrappedSet() && UR.getLower().isNonNegative())
+ if (!UR.isWrappedSet() &&
+ (UR.getUpper().isNonNegative() || UR.getUpper().isMinSignedValue()))
return UR;
// Now the signed range. Because we could be dealing with negative numbers
@@ -837,7 +778,7 @@ ConstantRange::multiply(const ConstantRange &Other) const {
ConstantRange Result_sext(std::min(L, Compare), std::max(L, Compare) + 1);
ConstantRange SR = Result_sext.truncate(getBitWidth());
- return UR.getSetSize().ult(SR.getSetSize()) ? UR : SR;
+ return UR.isSizeStrictlySmallerThan(SR) ? UR : SR;
}
ConstantRange
@@ -850,7 +791,7 @@ ConstantRange::smax(const ConstantRange &Other) const {
APInt NewU = APIntOps::smax(getSignedMax(), Other.getSignedMax()) + 1;
if (NewU == NewL)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(NewL, NewU);
+ return ConstantRange(std::move(NewL), std::move(NewU));
}
ConstantRange
@@ -863,7 +804,7 @@ ConstantRange::umax(const ConstantRange &Other) const {
APInt NewU = APIntOps::umax(getUnsignedMax(), Other.getUnsignedMax()) + 1;
if (NewU == NewL)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(NewL, NewU);
+ return ConstantRange(std::move(NewL), std::move(NewU));
}
ConstantRange
@@ -876,7 +817,7 @@ ConstantRange::smin(const ConstantRange &Other) const {
APInt NewU = APIntOps::smin(getSignedMax(), Other.getSignedMax()) + 1;
if (NewU == NewL)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(NewL, NewU);
+ return ConstantRange(std::move(NewL), std::move(NewU));
}
ConstantRange
@@ -889,12 +830,12 @@ ConstantRange::umin(const ConstantRange &Other) const {
APInt NewU = APIntOps::umin(getUnsignedMax(), Other.getUnsignedMax()) + 1;
if (NewU == NewL)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(NewL, NewU);
+ return ConstantRange(std::move(NewL), std::move(NewU));
}
ConstantRange
ConstantRange::udiv(const ConstantRange &RHS) const {
- if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax() == 0)
+ if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax().isNullValue())
return ConstantRange(getBitWidth(), /*isFullSet=*/false);
if (RHS.isFullSet())
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
@@ -902,13 +843,13 @@ ConstantRange::udiv(const ConstantRange &RHS) const {
APInt Lower = getUnsignedMin().udiv(RHS.getUnsignedMax());
APInt RHS_umin = RHS.getUnsignedMin();
- if (RHS_umin == 0) {
+ if (RHS_umin.isNullValue()) {
// We want the lowest value in RHS excluding zero. Usually that would be 1
// except for a range in the form of [X, 1) in which case it would be X.
if (RHS.getUpper() == 1)
RHS_umin = RHS.getLower();
else
- RHS_umin = APInt(getBitWidth(), 1);
+ RHS_umin = 1;
}
APInt Upper = getUnsignedMax().udiv(RHS_umin) + 1;
@@ -918,7 +859,7 @@ ConstantRange::udiv(const ConstantRange &RHS) const {
if (Lower == Upper)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(Lower, Upper);
+ return ConstantRange(std::move(Lower), std::move(Upper));
}
ConstantRange
@@ -931,7 +872,7 @@ ConstantRange::binaryAnd(const ConstantRange &Other) const {
APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax());
if (umin.isAllOnesValue())
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(APInt::getNullValue(getBitWidth()), umin + 1);
+ return ConstantRange(APInt::getNullValue(getBitWidth()), std::move(umin) + 1);
}
ConstantRange
@@ -942,9 +883,9 @@ ConstantRange::binaryOr(const ConstantRange &Other) const {
// TODO: replace this with something less conservative
APInt umax = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());
- if (umax.isMinValue())
+ if (umax.isNullValue())
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(umax, APInt::getNullValue(getBitWidth()));
+ return ConstantRange(std::move(umax), APInt::getNullValue(getBitWidth()));
}
ConstantRange
@@ -952,29 +893,33 @@ ConstantRange::shl(const ConstantRange &Other) const {
if (isEmptySet() || Other.isEmptySet())
return ConstantRange(getBitWidth(), /*isFullSet=*/false);
- APInt min = getUnsignedMin().shl(Other.getUnsignedMin());
- APInt max = getUnsignedMax().shl(Other.getUnsignedMax());
+ APInt max = getUnsignedMax();
+ APInt Other_umax = Other.getUnsignedMax();
- // there's no overflow!
- APInt Zeros(getBitWidth(), getUnsignedMax().countLeadingZeros());
- if (Zeros.ugt(Other.getUnsignedMax()))
- return ConstantRange(min, max + 1);
+ // there's overflow!
+ if (Other_umax.uge(max.countLeadingZeros()))
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
// FIXME: implement the other tricky cases
- return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ APInt min = getUnsignedMin();
+ min <<= Other.getUnsignedMin();
+ max <<= Other_umax;
+
+ return ConstantRange(std::move(min), std::move(max) + 1);
}
ConstantRange
ConstantRange::lshr(const ConstantRange &Other) const {
if (isEmptySet() || Other.isEmptySet())
return ConstantRange(getBitWidth(), /*isFullSet=*/false);
-
- APInt max = getUnsignedMax().lshr(Other.getUnsignedMin());
+
+ APInt max = getUnsignedMax().lshr(Other.getUnsignedMin()) + 1;
APInt min = getUnsignedMin().lshr(Other.getUnsignedMax());
- if (min == max + 1)
+ if (min == max)
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- return ConstantRange(min, max + 1);
+ return ConstantRange(std::move(min), std::move(max));
}
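For the adjusted lshr() bounds, a sketch with illustrative i8 operands:

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  using namespace llvm;

  ConstantRange lshrDemo() {
    ConstantRange X(APInt(8, 8), APInt(8, 17));  // {8..16}
    ConstantRange S(APInt(8, 1), APInt(8, 2));   // shift by exactly 1
    // min = 8 >> 1 = 4, max = (16 >> 1) + 1 = 9, so the result is [4, 9).
    return X.lshr(S);
  }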
ConstantRange ConstantRange::inverse() const {
@@ -985,8 +930,6 @@ ConstantRange ConstantRange::inverse() const {
return ConstantRange(Upper, Lower);
}
-/// print - Print out the bounds to a stream...
-///
void ConstantRange::print(raw_ostream &OS) const {
if (isFullSet())
OS << "full-set";
@@ -996,11 +939,11 @@ void ConstantRange::print(raw_ostream &OS) const {
OS << "[" << Lower << "," << Upper << ")";
}
-/// dump - Allow printing from a debugger easily...
-///
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ConstantRange::dump() const {
print(dbgs());
}
+#endif
ConstantRange llvm::getConstantRangeFromMetadata(const MDNode &Ranges) {
const unsigned NumRanges = Ranges.getNumOperands() / 2;
diff --git a/contrib/llvm/lib/IR/Constants.cpp b/contrib/llvm/lib/IR/Constants.cpp
index 533b924..f56fe70 100644
--- a/contrib/llvm/lib/IR/Constants.cpp
+++ b/contrib/llvm/lib/IR/Constants.cpp
@@ -30,17 +30,13 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <cstdarg>
+
using namespace llvm;
//===----------------------------------------------------------------------===//
// Constant Class
//===----------------------------------------------------------------------===//
-void Constant::anchor() { }
-
-void ConstantData::anchor() {}
-
bool Constant::isNegativeZeroValue() const {
// Floating point values have an explicit -0.0 value.
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
@@ -48,8 +44,8 @@ bool Constant::isNegativeZeroValue() const {
// Equivalent for a vector of -0.0's.
if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
- if (ConstantFP *SplatCFP = dyn_cast_or_null<ConstantFP>(CV->getSplatValue()))
- if (SplatCFP && SplatCFP->isZero() && SplatCFP->isNegative())
+ if (CV->getElementType()->isFloatingPointTy() && CV->isSplat())
+ if (CV->getElementAsAPFloat(0).isNegZero())
return true;
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
@@ -74,8 +70,8 @@ bool Constant::isZeroValue() const {
// Equivalent for a vector of zeros.
if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
- if (ConstantFP *SplatCFP = dyn_cast_or_null<ConstantFP>(CV->getSplatValue()))
- if (SplatCFP && SplatCFP->isZero())
+ if (CV->getElementType()->isFloatingPointTy() && CV->isSplat())
+ if (CV->getElementAsAPFloat(0).isZero())
return true;
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
@@ -117,9 +113,13 @@ bool Constant::isAllOnesValue() const {
return Splat->isAllOnesValue();
// Check for constant vectors which are splats of -1 values.
- if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
- if (Constant *Splat = CV->getSplatValue())
- return Splat->isAllOnesValue();
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this)) {
+ if (CV->isSplat()) {
+ if (CV->getElementType()->isFloatingPointTy())
+ return CV->getElementAsAPFloat(0).bitcastToAPInt().isAllOnesValue();
+ return CV->getElementAsAPInt(0).isAllOnesValue();
+ }
+ }
return false;
}
@@ -131,7 +131,7 @@ bool Constant::isOneValue() const {
// Check for FP which are bitcasted from 1 integers
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
- return CFP->getValueAPF().bitcastToAPInt() == 1;
+ return CFP->getValueAPF().bitcastToAPInt().isOneValue();
// Check for constant vectors which are splats of 1 values.
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
@@ -139,9 +139,13 @@ bool Constant::isOneValue() const {
return Splat->isOneValue();
// Check for constant vectors which are splats of 1 values.
- if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
- if (Constant *Splat = CV->getSplatValue())
- return Splat->isOneValue();
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this)) {
+ if (CV->isSplat()) {
+ if (CV->getElementType()->isFloatingPointTy())
+ return CV->getElementAsAPFloat(0).bitcastToAPInt().isOneValue();
+ return CV->getElementAsAPInt(0).isOneValue();
+ }
+ }
return false;
}
@@ -161,9 +165,13 @@ bool Constant::isMinSignedValue() const {
return Splat->isMinSignedValue();
// Check for constant vectors which are splats of INT_MIN values.
- if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
- if (Constant *Splat = CV->getSplatValue())
- return Splat->isMinSignedValue();
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this)) {
+ if (CV->isSplat()) {
+ if (CV->getElementType()->isFloatingPointTy())
+ return CV->getElementAsAPFloat(0).bitcastToAPInt().isMinSignedValue();
+ return CV->getElementAsAPInt(0).isMinSignedValue();
+ }
+ }
return false;
}
@@ -183,9 +191,13 @@ bool Constant::isNotMinSignedValue() const {
return Splat->isNotMinSignedValue();
// Check for constant vectors which are splats of INT_MIN values.
- if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
- if (Constant *Splat = CV->getSplatValue())
- return Splat->isNotMinSignedValue();
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this)) {
+ if (CV->isSplat()) {
+ if (CV->getElementType()->isFloatingPointTy())
+ return !CV->getElementAsAPFloat(0).bitcastToAPInt().isMinSignedValue();
+ return !CV->getElementAsAPInt(0).isMinSignedValue();
+ }
+ }
// It *may* contain INT_MIN, we can't tell.
return false;
@@ -496,8 +508,6 @@ void Constant::removeDeadConstantUsers() const {
// ConstantInt
//===----------------------------------------------------------------------===//
-void ConstantInt::anchor() { }
-
ConstantInt::ConstantInt(IntegerType *Ty, const APInt &V)
: ConstantData(Ty, ConstantIntVal), Val(V) {
assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type");
@@ -518,27 +528,19 @@ ConstantInt *ConstantInt::getFalse(LLVMContext &Context) {
}
Constant *ConstantInt::getTrue(Type *Ty) {
- VectorType *VTy = dyn_cast<VectorType>(Ty);
- if (!VTy) {
- assert(Ty->isIntegerTy(1) && "True must be i1 or vector of i1.");
- return ConstantInt::getTrue(Ty->getContext());
- }
- assert(VTy->getElementType()->isIntegerTy(1) &&
- "True must be vector of i1 or i1.");
- return ConstantVector::getSplat(VTy->getNumElements(),
- ConstantInt::getTrue(Ty->getContext()));
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
+ ConstantInt *TrueC = ConstantInt::getTrue(Ty->getContext());
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), TrueC);
+ return TrueC;
}
Constant *ConstantInt::getFalse(Type *Ty) {
- VectorType *VTy = dyn_cast<VectorType>(Ty);
- if (!VTy) {
- assert(Ty->isIntegerTy(1) && "False must be i1 or vector of i1.");
- return ConstantInt::getFalse(Ty->getContext());
- }
- assert(VTy->getElementType()->isIntegerTy(1) &&
- "False must be vector of i1 or i1.");
- return ConstantVector::getSplat(VTy->getNumElements(),
- ConstantInt::getFalse(Ty->getContext()));
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
+ ConstantInt *FalseC = ConstantInt::getFalse(Ty->getContext());
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), FalseC);
+ return FalseC;
}
// Get a ConstantInt from an APInt.
@@ -618,8 +620,6 @@ static const fltSemantics *TypeToFloatSemantics(Type *Ty) {
return &APFloat::PPCDoubleDouble();
}
-void ConstantFP::anchor() { }
-
Constant *ConstantFP::get(Type *Ty, double V) {
LLVMContext &Context = Ty->getContext();
@@ -732,7 +732,7 @@ bool ConstantFP::isExactlyValue(const APFloat &V) const {
/// Remove the constant from the constant table.
void ConstantFP::destroyConstantImpl() {
- llvm_unreachable("You can't ConstantInt->destroyConstantImpl()!");
+ llvm_unreachable("You can't ConstantFP->destroyConstantImpl()!");
}
//===----------------------------------------------------------------------===//
@@ -974,16 +974,6 @@ Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) {
return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V);
}
-Constant *ConstantStruct::get(StructType *T, ...) {
- va_list ap;
- SmallVector<Constant*, 8> Values;
- va_start(ap, T);
- while (Constant *Val = va_arg(ap, llvm::Constant*))
- Values.push_back(Val);
- va_end(ap);
- return get(T, Values);
-}
-
ConstantVector::ConstantVector(VectorType *T, ArrayRef<Constant *> V)
: ConstantAggregate(T, ConstantVectorVal, V) {
assert(V.size() == T->getNumElements() &&
@@ -1027,7 +1017,7 @@ Constant *ConstantVector::getImpl(ArrayRef<Constant*> V) {
return getSequenceIfElementsMatch<ConstantDataVector>(C, V);
// Otherwise, the element type isn't compatible with ConstantDataVector, or
- // the operand list constants a ConstantExpr or something else strange.
+ // the operand list contains a ConstantExpr or something else strange.
return nullptr;
}
@@ -1183,21 +1173,14 @@ bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) {
unsigned NumBits = Ty->getIntegerBitWidth(); // assert okay
if (Ty->isIntegerTy(1))
return Val == 0 || Val == 1;
- if (NumBits >= 64)
- return true; // always true, has to fit in largest type
- uint64_t Max = (1ll << NumBits) - 1;
- return Val <= Max;
+ return isUIntN(NumBits, Val);
}
bool ConstantInt::isValueValidForType(Type *Ty, int64_t Val) {
unsigned NumBits = Ty->getIntegerBitWidth();
if (Ty->isIntegerTy(1))
return Val == 0 || Val == 1 || Val == -1;
- if (NumBits >= 64)
- return true; // always true, has to fit in largest type
- int64_t Min = -(1ll << (NumBits-1));
- int64_t Max = (1ll << (NumBits-1)) - 1;
- return (Val >= Min && Val <= Max);
+ return isIntN(NumBits, Val);
}
bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) {
@@ -1668,9 +1651,9 @@ Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced) {
Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
bool OnlyIfReduced) {
- assert(C->getType()->getScalarType()->isPointerTy() &&
+ assert(C->getType()->isPtrOrPtrVectorTy() &&
"PtrToInt source must be pointer or pointer vector");
- assert(DstTy->getScalarType()->isIntegerTy() &&
+ assert(DstTy->isIntOrIntVectorTy() &&
"PtrToInt destination must be integer or integer vector");
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
if (isa<VectorType>(C->getType()))
@@ -1681,9 +1664,9 @@ Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy,
bool OnlyIfReduced) {
- assert(C->getType()->getScalarType()->isIntegerTy() &&
+ assert(C->getType()->isIntOrIntVectorTy() &&
"IntToPtr source must be integer or integer vector");
- assert(DstTy->getScalarType()->isPointerTy() &&
+ assert(DstTy->isPtrOrPtrVectorTy() &&
"IntToPtr destination must be a pointer or pointer vector");
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
if (isa<VectorType>(C->getType()))
@@ -1818,8 +1801,7 @@ Constant *ConstantExpr::getSizeOf(Type* Ty) {
Constant *ConstantExpr::getAlignOf(Type* Ty) {
// alignof is implemented as: (i64) gep ({i1,Ty}*)null, 0, 1
// Note that a non-inbounds gep is used, as null isn't within any object.
- Type *AligningTy =
- StructType::get(Type::getInt1Ty(Ty->getContext()), Ty, nullptr);
+ Type *AligningTy = StructType::get(Type::getInt1Ty(Ty->getContext()), Ty);
Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo(0));
Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0);
Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
@@ -1948,8 +1930,8 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
assert(LHS->getType() == RHS->getType());
- assert(pred >= ICmpInst::FIRST_ICMP_PREDICATE &&
- pred <= ICmpInst::LAST_ICMP_PREDICATE && "Invalid ICmp Predicate");
+ assert(CmpInst::isIntPredicate((CmpInst::Predicate)pred) &&
+ "Invalid ICmp Predicate");
if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
return FC; // Fold a few common cases...
@@ -1973,7 +1955,8 @@ Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
assert(LHS->getType() == RHS->getType());
- assert(pred <= FCmpInst::LAST_FCMP_PREDICATE && "Invalid FCmp Predicate");
+ assert(CmpInst::isFPPredicate((CmpInst::Predicate)pred) &&
+ "Invalid FCmp Predicate");
if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
return FC; // Fold a few common cases...
@@ -2285,9 +2268,6 @@ Type *GetElementPtrConstantExpr::getResultElementType() const {
//===----------------------------------------------------------------------===//
// ConstantData* implementations
-void ConstantDataArray::anchor() {}
-void ConstantDataVector::anchor() {}
-
Type *ConstantDataSequential::getElementType() const {
return getType()->getElementType();
}
@@ -2416,32 +2396,32 @@ void ConstantDataSequential::destroyConstantImpl() {
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint8_t> Elts) {
Type *Ty = ArrayType::get(Type::getInt8Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 1), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = ArrayType::get(Type::getInt16Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = ArrayType::get(Type::getInt32Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = ArrayType::get(Type::getInt64Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
/// getFP() constructors - Return a constant with array type with an element
@@ -2453,27 +2433,26 @@ Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint16_t> Elts) {
Type *Ty = ArrayType::get(Type::getHalfTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint32_t> Elts) {
Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint64_t> Elts) {
Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataArray::getString(LLVMContext &Context,
StringRef Str, bool AddNull) {
if (!AddNull) {
const uint8_t *Data = reinterpret_cast<const uint8_t *>(Str.data());
- return get(Context, makeArrayRef(const_cast<uint8_t *>(Data),
- Str.size()));
+ return get(Context, makeArrayRef(Data, Str.size()));
}
SmallVector<uint8_t, 64> ElementVals;
@@ -2488,32 +2467,32 @@ Constant *ConstantDataArray::getString(LLVMContext &Context,
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){
Type *Ty = VectorType::get(Type::getInt8Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 1), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = VectorType::get(Type::getInt16Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = VectorType::get(Type::getInt32Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = VectorType::get(Type::getInt64Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
/// getFP() constructors - Return a constant with vector type with an element
@@ -2525,19 +2504,19 @@ Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint16_t> Elts) {
Type *Ty = VectorType::get(Type::getHalfTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint32_t> Elts) {
Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint64_t> Elts) {
Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) {
@@ -2592,13 +2571,41 @@ uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const {
switch (getElementType()->getIntegerBitWidth()) {
default: llvm_unreachable("Invalid bitwidth for CDS");
case 8:
- return *const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(EltPtr));
+ return *reinterpret_cast<const uint8_t *>(EltPtr);
case 16:
- return *const_cast<uint16_t *>(reinterpret_cast<const uint16_t *>(EltPtr));
+ return *reinterpret_cast<const uint16_t *>(EltPtr);
case 32:
- return *const_cast<uint32_t *>(reinterpret_cast<const uint32_t *>(EltPtr));
+ return *reinterpret_cast<const uint32_t *>(EltPtr);
case 64:
- return *const_cast<uint64_t *>(reinterpret_cast<const uint64_t *>(EltPtr));
+ return *reinterpret_cast<const uint64_t *>(EltPtr);
+ }
+}
+
+APInt ConstantDataSequential::getElementAsAPInt(unsigned Elt) const {
+ assert(isa<IntegerType>(getElementType()) &&
+ "Accessor can only be used when element is an integer");
+ const char *EltPtr = getElementPointer(Elt);
+
+ // The data is stored in host byte order; make sure to cast back to the right
+ // type to load with the right endianness.
+ switch (getElementType()->getIntegerBitWidth()) {
+ default: llvm_unreachable("Invalid bitwidth for CDS");
+ case 8: {
+ auto EltVal = *reinterpret_cast<const uint8_t *>(EltPtr);
+ return APInt(8, EltVal);
+ }
+ case 16: {
+ auto EltVal = *reinterpret_cast<const uint16_t *>(EltPtr);
+ return APInt(16, EltVal);
+ }
+ case 32: {
+ auto EltVal = *reinterpret_cast<const uint32_t *>(EltPtr);
+ return APInt(32, EltVal);
+ }
+ case 64: {
+ auto EltVal = *reinterpret_cast<const uint64_t *>(EltPtr);
+ return APInt(64, EltVal);
+ }
}
}
@@ -2626,16 +2633,13 @@ APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
assert(getElementType()->isFloatTy() &&
"Accessor can only be used when element is a 'float'");
- const float *EltPtr = reinterpret_cast<const float *>(getElementPointer(Elt));
- return *const_cast<float *>(EltPtr);
+ return *reinterpret_cast<const float *>(getElementPointer(Elt));
}
double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
assert(getElementType()->isDoubleTy() &&
"Accessor can only be used when element is a 'float'");
- const double *EltPtr =
- reinterpret_cast<const double *>(getElementPointer(Elt));
- return *const_cast<double *>(EltPtr);
+ return *reinterpret_cast<const double *>(getElementPointer(Elt));
}
Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
@@ -2646,8 +2650,8 @@ Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
return ConstantInt::get(getElementType(), getElementAsInteger(Elt));
}
-bool ConstantDataSequential::isString() const {
- return isa<ArrayType>(getType()) && getElementType()->isIntegerTy(8);
+bool ConstantDataSequential::isString(unsigned CharSize) const {
+ return isa<ArrayType>(getType()) && getElementType()->isIntegerTy(CharSize);
}
bool ConstantDataSequential::isCString() const {
@@ -2663,17 +2667,21 @@ bool ConstantDataSequential::isCString() const {
return Str.drop_back().find(0) == StringRef::npos;
}
-Constant *ConstantDataVector::getSplatValue() const {
+bool ConstantDataVector::isSplat() const {
const char *Base = getRawDataValues().data();
// Compare elements 1+ to the 0'th element.
unsigned EltSize = getElementByteSize();
for (unsigned i = 1, e = getNumElements(); i != e; ++i)
if (memcmp(Base, Base+i*EltSize, EltSize))
- return nullptr;
+ return false;
+ return true;
+}
+
+Constant *ConstantDataVector::getSplatValue() const {
// If they're all the same, return the 0th one as a representative.
- return getElementAsConstant(0);
+ return isSplat() ? getElementAsConstant(0) : nullptr;
}
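The new isSplat() answers the raw-data question directly, and getSplatValue() is now a thin wrapper over it. A short sketch with made-up element values:

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/LLVMContext.h"
  #include <cstdint>
  using namespace llvm;

  bool splatDemo(LLVMContext &Ctx) {
    uint32_t Elts[] = {7, 7, 7, 7};
    auto *CDV = cast<ConstantDataVector>(ConstantDataVector::get(Ctx, Elts));
    // memcmp-based check; getSplatValue() would hand back the i32 7 constant.
    return CDV->isSplat();
  }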
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/IR/ConstantsContext.h b/contrib/llvm/lib/IR/ConstantsContext.h
index eda751d..6585304 100644
--- a/contrib/llvm/lib/IR/ConstantsContext.h
+++ b/contrib/llvm/lib/IR/ConstantsContext.h
@@ -22,6 +22,7 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InlineAsm.h"
@@ -43,8 +44,6 @@ namespace llvm {
/// UnaryConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement unary constant exprs.
class UnaryConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
UnaryConstantExpr(unsigned Opcode, Constant *C, Type *Ty)
: ConstantExpr(Ty, Opcode, &Op<0>(), 1) {
@@ -56,16 +55,12 @@ public:
return User::operator new(s, 1);
}
- void *operator new(size_t, unsigned) = delete;
-
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
};
/// BinaryConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement binary constant exprs.
class BinaryConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
BinaryConstantExpr(unsigned Opcode, Constant *C1, Constant *C2,
unsigned Flags)
@@ -80,8 +75,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
};
@@ -89,8 +82,6 @@ public:
/// SelectConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement select constant exprs.
class SelectConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
SelectConstantExpr(Constant *C1, Constant *C2, Constant *C3)
: ConstantExpr(C2->getType(), Instruction::Select, &Op<0>(), 3) {
@@ -104,8 +95,6 @@ public:
return User::operator new(s, 3);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
};
@@ -114,8 +103,6 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// extractelement constant exprs.
class ExtractElementConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
ExtractElementConstantExpr(Constant *C1, Constant *C2)
: ConstantExpr(cast<VectorType>(C1->getType())->getElementType(),
@@ -129,8 +116,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
};
@@ -139,8 +124,6 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// insertelement constant exprs.
class InsertElementConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
InsertElementConstantExpr(Constant *C1, Constant *C2, Constant *C3)
: ConstantExpr(C1->getType(), Instruction::InsertElement,
@@ -155,8 +138,6 @@ public:
return User::operator new(s, 3);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
};
@@ -165,8 +146,6 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// shufflevector constant exprs.
class ShuffleVectorConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
ShuffleVectorConstantExpr(Constant *C1, Constant *C2, Constant *C3)
: ConstantExpr(VectorType::get(
@@ -184,8 +163,6 @@ public:
return User::operator new(s, 3);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
};
@@ -194,8 +171,6 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// extractvalue constant exprs.
class ExtractValueConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
ExtractValueConstantExpr(Constant *Agg, ArrayRef<unsigned> IdxList,
Type *DestTy)
@@ -209,8 +184,6 @@ public:
return User::operator new(s, 1);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Indices - These identify which value to extract.
const SmallVector<unsigned, 4> Indices;
@@ -229,8 +202,6 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// insertvalue constant exprs.
class InsertValueConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
InsertValueConstantExpr(Constant *Agg, Constant *Val,
ArrayRef<unsigned> IdxList, Type *DestTy)
@@ -245,8 +216,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Indices - These identify the position for the insertion.
const SmallVector<unsigned, 4> Indices;
@@ -270,8 +239,6 @@ class GetElementPtrConstantExpr : public ConstantExpr {
GetElementPtrConstantExpr(Type *SrcElementTy, Constant *C,
ArrayRef<Constant *> IdxList, Type *DestTy);
- void anchor() override;
-
public:
static GetElementPtrConstantExpr *Create(Type *SrcElementTy, Constant *C,
ArrayRef<Constant *> IdxList,
@@ -300,8 +267,6 @@ public:
// behind the scenes to implement ICmp and FCmp constant expressions. This is
// needed in order to store the predicate value for these instructions.
class CompareConstantExpr : public ConstantExpr {
- void anchor() override;
-
public:
unsigned short predicate;
CompareConstantExpr(Type *ty, Instruction::OtherOps opc,
@@ -316,8 +281,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -387,31 +350,34 @@ struct ConstantExprKeyType;
template <class ConstantClass> struct ConstantInfo;
template <> struct ConstantInfo<ConstantExpr> {
- typedef ConstantExprKeyType ValType;
- typedef Type TypeClass;
+ using ValType = ConstantExprKeyType;
+ using TypeClass = Type;
};
template <> struct ConstantInfo<InlineAsm> {
- typedef InlineAsmKeyType ValType;
- typedef PointerType TypeClass;
+ using ValType = InlineAsmKeyType;
+ using TypeClass = PointerType;
};
template <> struct ConstantInfo<ConstantArray> {
- typedef ConstantAggrKeyType<ConstantArray> ValType;
- typedef ArrayType TypeClass;
+ using ValType = ConstantAggrKeyType<ConstantArray>;
+ using TypeClass = ArrayType;
};
template <> struct ConstantInfo<ConstantStruct> {
- typedef ConstantAggrKeyType<ConstantStruct> ValType;
- typedef StructType TypeClass;
+ using ValType = ConstantAggrKeyType<ConstantStruct>;
+ using TypeClass = StructType;
};
template <> struct ConstantInfo<ConstantVector> {
- typedef ConstantAggrKeyType<ConstantVector> ValType;
- typedef VectorType TypeClass;
+ using ValType = ConstantAggrKeyType<ConstantVector>;
+ using TypeClass = VectorType;
};
template <class ConstantClass> struct ConstantAggrKeyType {
ArrayRef<Constant *> Operands;
+
ConstantAggrKeyType(ArrayRef<Constant *> Operands) : Operands(Operands) {}
+
ConstantAggrKeyType(ArrayRef<Constant *> Operands, const ConstantClass *)
: Operands(Operands) {}
+
ConstantAggrKeyType(const ConstantClass *C,
SmallVectorImpl<Constant *> &Storage) {
assert(Storage.empty() && "Expected empty storage");
@@ -437,7 +403,8 @@ template <class ConstantClass> struct ConstantAggrKeyType {
return hash_combine_range(Operands.begin(), Operands.end());
}
- typedef typename ConstantInfo<ConstantClass>::TypeClass TypeClass;
+ using TypeClass = typename ConstantInfo<ConstantClass>::TypeClass;
+
ConstantClass *create(TypeClass *Ty) const {
return new (Operands.size()) ConstantClass(Ty, Operands);
}
@@ -457,6 +424,7 @@ struct InlineAsmKeyType {
: AsmString(AsmString), Constraints(Constraints), FTy(FTy),
HasSideEffects(HasSideEffects), IsAlignStack(IsAlignStack),
AsmDialect(AsmDialect) {}
+
InlineAsmKeyType(const InlineAsm *Asm, SmallVectorImpl<Constant *> &)
: AsmString(Asm->getAsmString()), Constraints(Asm->getConstraintString()),
FTy(Asm->getFunctionType()), HasSideEffects(Asm->hasSideEffects()),
@@ -483,7 +451,8 @@ struct InlineAsmKeyType {
AsmDialect, FTy);
}
- typedef ConstantInfo<InlineAsm>::TypeClass TypeClass;
+ using TypeClass = ConstantInfo<InlineAsm>::TypeClass;
+
InlineAsm *create(TypeClass *Ty) const {
assert(PointerType::getUnqual(FTy) == Ty);
return new InlineAsm(FTy, AsmString, Constraints, HasSideEffects,
@@ -507,11 +476,13 @@ struct ConstantExprKeyType {
: Opcode(Opcode), SubclassOptionalData(SubclassOptionalData),
SubclassData(SubclassData), Ops(Ops), Indexes(Indexes),
ExplicitTy(ExplicitTy) {}
+
ConstantExprKeyType(ArrayRef<Constant *> Operands, const ConstantExpr *CE)
: Opcode(CE->getOpcode()),
SubclassOptionalData(CE->getRawSubclassOptionalData()),
SubclassData(CE->isCompare() ? CE->getPredicate() : 0), Ops(Operands),
Indexes(CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()) {}
+
ConstantExprKeyType(const ConstantExpr *CE,
SmallVectorImpl<Constant *> &Storage)
: Opcode(CE->getOpcode()),
@@ -553,7 +524,8 @@ struct ConstantExprKeyType {
hash_combine_range(Indexes.begin(), Indexes.end()));
}
- typedef ConstantInfo<ConstantExpr>::TypeClass TypeClass;
+ using TypeClass = ConstantInfo<ConstantExpr>::TypeClass;
+
ConstantExpr *create(TypeClass *Ty) const {
switch (Opcode) {
default:
@@ -594,16 +566,17 @@ struct ConstantExprKeyType {
template <class ConstantClass> class ConstantUniqueMap {
public:
- typedef typename ConstantInfo<ConstantClass>::ValType ValType;
- typedef typename ConstantInfo<ConstantClass>::TypeClass TypeClass;
- typedef std::pair<TypeClass *, ValType> LookupKey;
+ using ValType = typename ConstantInfo<ConstantClass>::ValType;
+ using TypeClass = typename ConstantInfo<ConstantClass>::TypeClass;
+ using LookupKey = std::pair<TypeClass *, ValType>;
/// Key and hash together, so that we compute the hash only once and reuse it.
- typedef std::pair<unsigned, LookupKey> LookupKeyHashed;
+ using LookupKeyHashed = std::pair<unsigned, LookupKey>;
private:
struct MapInfo {
- typedef DenseMapInfo<ConstantClass *> ConstantClassInfo;
+ using ConstantClassInfo = DenseMapInfo<ConstantClass *>;
+
static inline ConstantClass *getEmptyKey() {
return ConstantClassInfo::getEmptyKey();
}
@@ -643,7 +616,7 @@ private:
};
public:
- typedef DenseSet<ConstantClass *, MapInfo> MapTy;
+ using MapTy = DenseSet<ConstantClass *, MapInfo>;
private:
MapTy Map;
diff --git a/contrib/llvm/lib/IR/Core.cpp b/contrib/llvm/lib/IR/Core.cpp
index 00bb476..aba7704 100644
--- a/contrib/llvm/lib/IR/Core.cpp
+++ b/contrib/llvm/lib/IR/Core.cpp
@@ -14,9 +14,7 @@
#include "llvm-c/Core.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/Attributes.h"
-#include "AttributeSetNode.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
@@ -51,6 +49,7 @@ void llvm::initializeCore(PassRegistry &Registry) {
initializePrintModulePassWrapperPass(Registry);
initializePrintFunctionPassWrapperPass(Registry);
initializePrintBasicBlockPassPass(Registry);
+ initializeSafepointIRVerifierPass(Registry);
initializeVerifierLegacyPassPass(Registry);
}
@@ -259,7 +258,8 @@ void LLVMSetTarget(LLVMModuleRef M, const char *Triple) {
}
void LLVMDumpModule(LLVMModuleRef M) {
- unwrap(M)->dump();
+ unwrap(M)->print(errs(), nullptr,
+ /*ShouldPreserveUseListOrder=*/false, /*IsForDebug=*/true);
}
LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
@@ -358,9 +358,11 @@ LLVMContextRef LLVMGetTypeContext(LLVMTypeRef Ty) {
return wrap(&unwrap(Ty)->getContext());
}
-void LLVMDumpType(LLVMTypeRef Ty) {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void LLVMDumpType(LLVMTypeRef Ty) {
return unwrap(Ty)->dump();
}
+#endif
char *LLVMPrintTypeToString(LLVMTypeRef Ty) {
std::string buf;
@@ -566,6 +568,14 @@ LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name) {
/*--.. Operations on array, pointer, and vector types (sequence types) .....--*/
+void LLVMGetSubtypes(LLVMTypeRef Tp, LLVMTypeRef *Arr) {
+ int i = 0;
+ for (auto *T : unwrap(Tp)->subtypes()) {
+ Arr[i] = wrap(T);
+ i++;
+ }
+}
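The two new C-API helpers are intended to be used together: query the count, then let LLVMGetSubtypes fill a caller-allocated array. A minimal sketch; the printing is illustrative and assumes the matching declarations in llvm-c/Core.h:

  #include "llvm-c/Core.h"
  #include <cstdio>
  #include <vector>

  static void printSubtypes(LLVMTypeRef Ty) {
    unsigned N = LLVMGetNumContainedTypes(Ty);
    std::vector<LLVMTypeRef> Subs(N);
    LLVMGetSubtypes(Ty, Subs.data());  // fills Subs[0..N-1]
    for (unsigned i = 0; i < N; ++i) {
      char *S = LLVMPrintTypeToString(Subs[i]);
      std::printf("  subtype %u: %s\n", i, S);
      LLVMDisposeMessage(S);
    }
  }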
+
LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount) {
return wrap(ArrayType::get(unwrap(ElementType), ElementCount));
}
@@ -585,6 +595,10 @@ LLVMTypeRef LLVMGetElementType(LLVMTypeRef WrappedTy) {
return wrap(cast<SequentialType>(Ty)->getElementType());
}
+unsigned LLVMGetNumContainedTypes(LLVMTypeRef Tp) {
+ return unwrap(Tp)->getNumContainedTypes();
+}
+
unsigned LLVMGetArrayLength(LLVMTypeRef ArrayTy) {
return unwrap<ArrayType>(ArrayTy)->getNumElements();
}
@@ -640,8 +654,8 @@ void LLVMSetValueName(LLVMValueRef Val, const char *Name) {
unwrap(Val)->setName(Name);
}
-void LLVMDumpValue(LLVMValueRef Val) {
- unwrap(Val)->dump();
+LLVM_DUMP_METHOD void LLVMDumpValue(LLVMValueRef Val) {
+ unwrap(Val)->print(errs(), /*IsForDebug=*/true);
}
char* LLVMPrintValueToString(LLVMValueRef Val) {
@@ -861,6 +875,19 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count) {
return LLVMMDNodeInContext(LLVMGetGlobalContext(), Vals, Count);
}
+LLVMValueRef LLVMMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD) {
+ return wrap(MetadataAsValue::get(*unwrap(C), unwrap(MD)));
+}
+
+LLVMMetadataRef LLVMValueAsMetadata(LLVMValueRef Val) {
+ auto *V = unwrap(Val);
+ if (auto *C = dyn_cast<Constant>(V))
+ return wrap(ConstantAsMetadata::get(C));
+ if (auto *MAV = dyn_cast<MetadataAsValue>(V))
+ return wrap(MAV->getMetadata());
+ return wrap(ValueAsMetadata::get(V));
+}
+
const char *LLVMGetMDString(LLVMValueRef V, unsigned *Length) {
if (const auto *MD = dyn_cast<MetadataAsValue>(unwrap(V)))
if (const MDString *S = dyn_cast<MDString>(MD->getMetadata())) {
@@ -1844,18 +1871,14 @@ void LLVMAddAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
}
unsigned LLVMGetAttributeCountAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx) {
- auto *ASN = AttributeSetNode::get(unwrap<Function>(F)->getAttributes(), Idx);
- if (!ASN)
- return 0;
- return ASN->getNumAttributes();
+ auto AS = unwrap<Function>(F)->getAttributes().getAttributes(Idx);
+ return AS.getNumAttributes();
}
void LLVMGetAttributesAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
LLVMAttributeRef *Attrs) {
- auto *ASN = AttributeSetNode::get(unwrap<Function>(F)->getAttributes(), Idx);
- if (!ASN)
- return;
- for (auto A: make_range(ASN->begin(), ASN->end()))
+ auto AS = unwrap<Function>(F)->getAttributes().getAttributes(Idx);
+ for (auto A : AS)
*Attrs++ = wrap(A);
}
@@ -1885,13 +1908,8 @@ void LLVMRemoveStringAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A,
const char *V) {
Function *Func = unwrap<Function>(Fn);
- AttributeSet::AttrIndex Idx =
- AttributeSet::AttrIndex(AttributeSet::FunctionIndex);
- AttrBuilder B;
-
- B.addAttribute(A, V);
- AttributeSet Set = AttributeSet::get(Func->getContext(), Idx, B);
- Func->addAttributes(Idx, Set);
+ Attribute Attr = Attribute::get(Func->getContext(), A, V);
+ Func->addAttribute(AttributeList::FunctionIndex, Attr);
}
/*--.. Operations on parameters ............................................--*/
@@ -1910,10 +1928,8 @@ void LLVMGetParams(LLVMValueRef FnRef, LLVMValueRef *ParamRefs) {
}
LLVMValueRef LLVMGetParam(LLVMValueRef FnRef, unsigned index) {
- Function::arg_iterator AI = unwrap<Function>(FnRef)->arg_begin();
- while (index --> 0)
- AI++;
- return wrap(&*AI);
+ Function *Fn = unwrap<Function>(FnRef);
+ return wrap(&Fn->arg_begin()[index]);
}
LLVMValueRef LLVMGetParamParent(LLVMValueRef V) {
@@ -1938,25 +1954,22 @@ LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn) {
LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg) {
Argument *A = unwrap<Argument>(Arg);
- Function::arg_iterator I(A);
- if (++I == A->getParent()->arg_end())
+ Function *Fn = A->getParent();
+ if (A->getArgNo() + 1 >= Fn->arg_size())
return nullptr;
- return wrap(&*I);
+ return wrap(&Fn->arg_begin()[A->getArgNo() + 1]);
}
LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
Argument *A = unwrap<Argument>(Arg);
- Function::arg_iterator I(A);
- if (I == A->getParent()->arg_begin())
+ if (A->getArgNo() == 0)
return nullptr;
- return wrap(&*--I);
+ return wrap(&A->getParent()->arg_begin()[A->getArgNo() - 1]);
}
void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) {
Argument *A = unwrap<Argument>(Arg);
- AttrBuilder B;
- B.addAlignmentAttr(align);
- A->addAttr(AttributeSet::get(A->getContext(),A->getArgNo() + 1, B));
+ A->addAttr(Attribute::getWithAlignment(A->getContext(), align));
}
/*--.. Operations on basic blocks ..........................................--*/
@@ -2163,12 +2176,8 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
unsigned align) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
- AttrBuilder B;
- B.addAlignmentAttr(align);
- Call.setAttributes(Call.getAttributes()
- .addAttributes(Call->getContext(), index,
- AttributeSet::get(Call->getContext(),
- index, B)));
+ Attribute AlignAttr = Attribute::getWithAlignment(Call->getContext(), align);
+ Call.addAttribute(index, AlignAttr);
}
void LLVMAddCallSiteAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
@@ -2179,19 +2188,15 @@ void LLVMAddCallSiteAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
unsigned LLVMGetCallSiteAttributeCount(LLVMValueRef C,
LLVMAttributeIndex Idx) {
auto CS = CallSite(unwrap<Instruction>(C));
- auto *ASN = AttributeSetNode::get(CS.getAttributes(), Idx);
- if (!ASN)
- return 0;
- return ASN->getNumAttributes();
+ auto AS = CS.getAttributes().getAttributes(Idx);
+ return AS.getNumAttributes();
}
void LLVMGetCallSiteAttributes(LLVMValueRef C, LLVMAttributeIndex Idx,
LLVMAttributeRef *Attrs) {
auto CS = CallSite(unwrap<Instruction>(C));
- auto *ASN = AttributeSetNode::get(CS.getAttributes(), Idx);
- if (!ASN)
- return;
- for (auto A: make_range(ASN->begin(), ASN->end()))
+ auto AS = CS.getAttributes().getAttributes(Idx);
+ for (auto A : AS)
*Attrs++ = wrap(A);
}
@@ -2750,11 +2755,14 @@ static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
llvm_unreachable("Invalid AtomicOrdering value!");
}
+// TODO: Should this and other atomic instructions support building with
+// "syncscope"?
LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
LLVMBool isSingleThread, const char *Name) {
return wrap(
unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering),
- isSingleThread ? SingleThread : CrossThread,
+ isSingleThread ? SyncScope::SingleThread
+ : SyncScope::System,
Name));
}
@@ -3036,7 +3044,8 @@ LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break;
}
return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val),
- mapFromLLVMOrdering(ordering), singleThread ? SingleThread : CrossThread));
+ mapFromLLVMOrdering(ordering), singleThread ? SyncScope::SingleThread
+ : SyncScope::System));
}
LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
@@ -3048,7 +3057,7 @@ LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
return wrap(unwrap(B)->CreateAtomicCmpXchg(unwrap(Ptr), unwrap(Cmp),
unwrap(New), mapFromLLVMOrdering(SuccessOrdering),
mapFromLLVMOrdering(FailureOrdering),
- singleThread ? SingleThread : CrossThread));
+ singleThread ? SyncScope::SingleThread : SyncScope::System));
}
@@ -3056,17 +3065,18 @@ LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst) {
Value *P = unwrap<Value>(AtomicInst);
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->getSynchScope() == SingleThread;
- return cast<AtomicCmpXchgInst>(P)->getSynchScope() == SingleThread;
+ return I->getSyncScopeID() == SyncScope::SingleThread;
+ return cast<AtomicCmpXchgInst>(P)->getSyncScopeID() ==
+ SyncScope::SingleThread;
}
void LLVMSetAtomicSingleThread(LLVMValueRef AtomicInst, LLVMBool NewValue) {
Value *P = unwrap<Value>(AtomicInst);
- SynchronizationScope Sync = NewValue ? SingleThread : CrossThread;
+ SyncScope::ID SSID = NewValue ? SyncScope::SingleThread : SyncScope::System;
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->setSynchScope(Sync);
- return cast<AtomicCmpXchgInst>(P)->setSynchScope(Sync);
+ return I->setSyncScopeID(SSID);
+ return cast<AtomicCmpXchgInst>(P)->setSyncScopeID(SSID);
}
LLVMAtomicOrdering LLVMGetCmpXchgSuccessOrdering(LLVMValueRef CmpXchgInst) {
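The Core.cpp hunks above migrate the C API's atomic helpers from the removed SynchronizationScope enum (SingleThread/CrossThread) to SyncScope::ID, with SyncScope::System taking over the cross-thread role. A minimal sketch of that mapping, assuming only llvm/IR/LLVMContext.h; the helper name toSyncScopeID is illustrative and not part of the patch:

    #include "llvm/IR/LLVMContext.h"

    // Sketch: the flag-to-scope mapping the bindings above now perform.
    // SyncScope::System replaces the old CrossThread value.
    static llvm::SyncScope::ID toSyncScopeID(bool IsSingleThread) {
      return IsSingleThread ? llvm::SyncScope::SingleThread
                            : llvm::SyncScope::System;
    }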
diff --git a/contrib/llvm/lib/IR/DIBuilder.cpp b/contrib/llvm/lib/IR/DIBuilder.cpp
index d061610..bce28ba 100644
--- a/contrib/llvm/lib/IR/DIBuilder.cpp
+++ b/contrib/llvm/lib/IR/DIBuilder.cpp
@@ -12,14 +12,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/DIBuilder.h"
+#include "LLVMContextImpl.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/Dwarf.h"
-#include "LLVMContextImpl.h"
using namespace llvm;
using namespace llvm::dwarf;
@@ -39,6 +39,21 @@ void DIBuilder::trackIfUnresolved(MDNode *N) {
UnresolvedNodes.emplace_back(N);
}
+void DIBuilder::finalizeSubprogram(DISubprogram *SP) {
+ MDTuple *Temp = SP->getVariables().get();
+ if (!Temp || !Temp->isTemporary())
+ return;
+
+ SmallVector<Metadata *, 4> Variables;
+
+ auto PV = PreservedVariables.find(SP);
+ if (PV != PreservedVariables.end())
+ Variables.append(PV->second.begin(), PV->second.end());
+
+ DINodeArray AV = getOrCreateArray(Variables);
+ TempMDTuple(Temp)->replaceAllUsesWith(AV.get());
+}
+
void DIBuilder::finalize() {
if (!CUNode) {
assert(!AllowUnresolvedNodes &&
@@ -62,25 +77,11 @@ void DIBuilder::finalize() {
CUNode->replaceRetainedTypes(MDTuple::get(VMContext, RetainValues));
DISubprogramArray SPs = MDTuple::get(VMContext, AllSubprograms);
- auto resolveVariables = [&](DISubprogram *SP) {
- MDTuple *Temp = SP->getVariables().get();
- if (!Temp)
- return;
-
- SmallVector<Metadata *, 4> Variables;
-
- auto PV = PreservedVariables.find(SP);
- if (PV != PreservedVariables.end())
- Variables.append(PV->second.begin(), PV->second.end());
-
- DINodeArray AV = getOrCreateArray(Variables);
- TempMDTuple(Temp)->replaceAllUsesWith(AV.get());
- };
for (auto *SP : SPs)
- resolveVariables(SP);
+ finalizeSubprogram(SP);
for (auto *N : RetainValues)
if (auto *SP = dyn_cast<DISubprogram>(N))
- resolveVariables(SP);
+ finalizeSubprogram(SP);
if (!AllGVs.empty())
CUNode->replaceGlobalVariables(MDTuple::get(VMContext, AllGVs));
@@ -126,7 +127,7 @@ DICompileUnit *DIBuilder::createCompileUnit(
unsigned Lang, DIFile *File, StringRef Producer, bool isOptimized,
StringRef Flags, unsigned RunTimeVer, StringRef SplitName,
DICompileUnit::DebugEmissionKind Kind, uint64_t DWOId,
- bool SplitDebugInlining) {
+ bool SplitDebugInlining, bool DebugInfoForProfiling) {
assert(((Lang <= dwarf::DW_LANG_Fortran08 && Lang >= dwarf::DW_LANG_C89) ||
(Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) &&
@@ -136,7 +137,7 @@ DICompileUnit *DIBuilder::createCompileUnit(
CUNode = DICompileUnit::getDistinct(
VMContext, Lang, File, Producer, isOptimized, Flags, RunTimeVer,
SplitName, Kind, nullptr, nullptr, nullptr, nullptr, nullptr, DWOId,
- SplitDebugInlining);
+ SplitDebugInlining, DebugInfoForProfiling);
// Create a named metadata so that it is easier to find cu in a module.
NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu");
@@ -147,10 +148,13 @@ DICompileUnit *DIBuilder::createCompileUnit(
static DIImportedEntity *
createImportedModule(LLVMContext &C, dwarf::Tag Tag, DIScope *Context,
- Metadata *NS, unsigned Line, StringRef Name,
+ Metadata *NS, DIFile *File, unsigned Line, StringRef Name,
SmallVectorImpl<TrackingMDNodeRef> &AllImportedModules) {
+ if (Line)
+ assert(File && "Source location has line number but no file");
unsigned EntitiesCount = C.pImpl->DIImportedEntitys.size();
- auto *M = DIImportedEntity::get(C, Tag, Context, DINodeRef(NS), Line, Name);
+ auto *M =
+ DIImportedEntity::get(C, Tag, Context, DINodeRef(NS), File, Line, Name);
if (EntitiesCount < C.pImpl->DIImportedEntitys.size())
// A new Imported Entity was just added to the context.
// Add it to the Imported Modules list.
@@ -159,33 +163,38 @@ createImportedModule(LLVMContext &C, dwarf::Tag Tag, DIScope *Context,
}
DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context,
- DINamespace *NS,
+ DINamespace *NS, DIFile *File,
unsigned Line) {
return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
- Context, NS, Line, StringRef(), AllImportedModules);
+ Context, NS, File, Line, StringRef(),
+ AllImportedModules);
}
DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context,
DIImportedEntity *NS,
- unsigned Line) {
+ DIFile *File, unsigned Line) {
return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
- Context, NS, Line, StringRef(), AllImportedModules);
+ Context, NS, File, Line, StringRef(),
+ AllImportedModules);
}
DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context, DIModule *M,
- unsigned Line) {
+ DIFile *File, unsigned Line) {
return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
- Context, M, Line, StringRef(), AllImportedModules);
+ Context, M, File, Line, StringRef(),
+ AllImportedModules);
}
DIImportedEntity *DIBuilder::createImportedDeclaration(DIScope *Context,
DINode *Decl,
+ DIFile *File,
unsigned Line,
StringRef Name) {
// Make sure to use the unique identifier based metadata reference for
// types that have one.
return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_declaration,
- Context, Decl, Line, Name, AllImportedModules);
+ Context, Decl, File, Line, Name,
+ AllImportedModules);
}
DIFile *DIBuilder::createFile(StringRef Filename, StringRef Directory,
@@ -241,17 +250,20 @@ DIBasicType *DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits,
DIDerivedType *DIBuilder::createQualifiedType(unsigned Tag, DIType *FromTy) {
return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, FromTy, 0,
- 0, 0, DINode::FlagZero);
+ 0, 0, None, DINode::FlagZero);
}
-DIDerivedType *DIBuilder::createPointerType(DIType *PointeeTy,
- uint64_t SizeInBits,
- uint32_t AlignInBits,
- StringRef Name) {
+DIDerivedType *DIBuilder::createPointerType(
+ DIType *PointeeTy,
+ uint64_t SizeInBits,
+ uint32_t AlignInBits,
+ Optional<unsigned> DWARFAddressSpace,
+ StringRef Name) {
// FIXME: Why is there a name here?
return DIDerivedType::get(VMContext, dwarf::DW_TAG_pointer_type, Name,
nullptr, 0, nullptr, PointeeTy, SizeInBits,
- AlignInBits, 0, DINode::FlagZero);
+ AlignInBits, 0, DWARFAddressSpace,
+ DINode::FlagZero);
}
DIDerivedType *DIBuilder::createMemberPointerType(DIType *PointeeTy,
@@ -261,15 +273,18 @@ DIDerivedType *DIBuilder::createMemberPointerType(DIType *PointeeTy,
DINode::DIFlags Flags) {
return DIDerivedType::get(VMContext, dwarf::DW_TAG_ptr_to_member_type, "",
nullptr, 0, nullptr, PointeeTy, SizeInBits,
- AlignInBits, 0, Flags, Base);
+ AlignInBits, 0, None, Flags, Base);
}
-DIDerivedType *DIBuilder::createReferenceType(unsigned Tag, DIType *RTy,
- uint64_t SizeInBits,
- uint32_t AlignInBits) {
+DIDerivedType *DIBuilder::createReferenceType(
+ unsigned Tag, DIType *RTy,
+ uint64_t SizeInBits,
+ uint32_t AlignInBits,
+ Optional<unsigned> DWARFAddressSpace) {
assert(RTy && "Unable to create reference type");
return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, RTy,
- SizeInBits, AlignInBits, 0, DINode::FlagZero);
+ SizeInBits, AlignInBits, 0, DWARFAddressSpace,
+ DINode::FlagZero);
}
DIDerivedType *DIBuilder::createTypedef(DIType *Ty, StringRef Name,
@@ -277,14 +292,14 @@ DIDerivedType *DIBuilder::createTypedef(DIType *Ty, StringRef Name,
DIScope *Context) {
return DIDerivedType::get(VMContext, dwarf::DW_TAG_typedef, Name, File,
LineNo, getNonCompileUnitScope(Context), Ty, 0, 0,
- 0, DINode::FlagZero);
+ 0, None, DINode::FlagZero);
}
DIDerivedType *DIBuilder::createFriend(DIType *Ty, DIType *FriendTy) {
assert(Ty && "Invalid type!");
assert(FriendTy && "Invalid friend type!");
return DIDerivedType::get(VMContext, dwarf::DW_TAG_friend, "", nullptr, 0, Ty,
- FriendTy, 0, 0, 0, DINode::FlagZero);
+ FriendTy, 0, 0, 0, None, DINode::FlagZero);
}
DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy,
@@ -292,7 +307,7 @@ DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy,
DINode::DIFlags Flags) {
assert(Ty && "Unable to create inheritance");
return DIDerivedType::get(VMContext, dwarf::DW_TAG_inheritance, "", nullptr,
- 0, Ty, BaseTy, 0, 0, BaseOffset, Flags);
+ 0, Ty, BaseTy, 0, 0, BaseOffset, None, Flags);
}
DIDerivedType *DIBuilder::createMemberType(DIScope *Scope, StringRef Name,
@@ -303,7 +318,7 @@ DIDerivedType *DIBuilder::createMemberType(DIScope *Scope, StringRef Name,
DINode::DIFlags Flags, DIType *Ty) {
return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
LineNumber, getNonCompileUnitScope(Scope), Ty,
- SizeInBits, AlignInBits, OffsetInBits, Flags);
+ SizeInBits, AlignInBits, OffsetInBits, None, Flags);
}
static ConstantAsMetadata *getConstantOrNull(Constant *C) {
@@ -320,7 +335,7 @@ DIDerivedType *DIBuilder::createBitFieldMemberType(
return DIDerivedType::get(
VMContext, dwarf::DW_TAG_member, Name, File, LineNumber,
getNonCompileUnitScope(Scope), Ty, SizeInBits, /* AlignInBits */ 0,
- OffsetInBits, Flags,
+ OffsetInBits, None, Flags,
ConstantAsMetadata::get(ConstantInt::get(IntegerType::get(VMContext, 64),
StorageOffsetInBits)));
}
@@ -333,7 +348,8 @@ DIBuilder::createStaticMemberType(DIScope *Scope, StringRef Name, DIFile *File,
Flags |= DINode::FlagStaticMember;
return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
LineNumber, getNonCompileUnitScope(Scope), Ty, 0,
- AlignInBits, 0, Flags, getConstantOrNull(Val));
+ AlignInBits, 0, None, Flags,
+ getConstantOrNull(Val));
}
DIDerivedType *
@@ -343,7 +359,7 @@ DIBuilder::createObjCIVar(StringRef Name, DIFile *File, unsigned LineNumber,
DIType *Ty, MDNode *PropertyNode) {
return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
LineNumber, getNonCompileUnitScope(File), Ty,
- SizeInBits, AlignInBits, OffsetInBits, Flags,
+ SizeInBits, AlignInBits, OffsetInBits, None, Flags,
PropertyNode);
}
@@ -442,14 +458,6 @@ DISubroutineType *DIBuilder::createSubroutineType(DITypeRefArray ParameterTypes,
return DISubroutineType::get(VMContext, Flags, CC, ParameterTypes);
}
-DICompositeType *DIBuilder::createExternalTypeRef(unsigned Tag, DIFile *File,
- StringRef UniqueIdentifier) {
- assert(!UniqueIdentifier.empty() && "external type ref without uid");
- return DICompositeType::get(VMContext, Tag, "", nullptr, 0, nullptr, nullptr,
- 0, 0, 0, DINode::FlagExternalTypeRef, nullptr, 0,
- nullptr, nullptr, UniqueIdentifier);
-}
-
DICompositeType *DIBuilder::createEnumerationType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
@@ -677,13 +685,14 @@ DISubprogram *DIBuilder::createFunction(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
bool isDefinition, unsigned ScopeLine, DINode::DIFlags Flags,
- bool isOptimized, DITemplateParameterArray TParams, DISubprogram *Decl) {
+ bool isOptimized, DITemplateParameterArray TParams, DISubprogram *Decl,
+ DITypeArray ThrownTypes) {
auto *Node = getSubprogram(
/* IsDistinct = */ isDefinition, VMContext,
getNonCompileUnitScope(Context), Name, LinkageName, File, LineNo, Ty,
isLocalToUnit, isDefinition, ScopeLine, nullptr, 0, 0, 0, Flags,
isOptimized, isDefinition ? CUNode : nullptr, TParams, Decl,
- MDTuple::getTemporary(VMContext, None).release());
+ MDTuple::getTemporary(VMContext, None).release(), ThrownTypes);
if (isDefinition)
AllSubprograms.push_back(Node);
@@ -695,23 +704,22 @@ DISubprogram *DIBuilder::createTempFunctionFwdDecl(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
bool isDefinition, unsigned ScopeLine, DINode::DIFlags Flags,
- bool isOptimized, DITemplateParameterArray TParams, DISubprogram *Decl) {
+ bool isOptimized, DITemplateParameterArray TParams, DISubprogram *Decl,
+ DITypeArray ThrownTypes) {
return DISubprogram::getTemporary(
VMContext, getNonCompileUnitScope(Context), Name, LinkageName,
File, LineNo, Ty, isLocalToUnit, isDefinition, ScopeLine, nullptr,
0, 0, 0, Flags, isOptimized, isDefinition ? CUNode : nullptr,
- TParams, Decl, nullptr)
+ TParams, Decl, nullptr, ThrownTypes)
.release();
}
-DISubprogram *DIBuilder::createMethod(DIScope *Context, StringRef Name,
- StringRef LinkageName, DIFile *F,
- unsigned LineNo, DISubroutineType *Ty,
- bool isLocalToUnit, bool isDefinition,
- unsigned VK, unsigned VIndex,
- int ThisAdjustment, DIType *VTableHolder,
- DINode::DIFlags Flags, bool isOptimized,
- DITemplateParameterArray TParams) {
+DISubprogram *DIBuilder::createMethod(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
+ unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+ bool isDefinition, unsigned VK, unsigned VIndex, int ThisAdjustment,
+ DIType *VTableHolder, DINode::DIFlags Flags, bool isOptimized,
+ DITemplateParameterArray TParams, DITypeArray ThrownTypes) {
assert(getNonCompileUnitScope(Context) &&
"Methods should have both a Context and a context that isn't "
"the compile unit.");
@@ -720,7 +728,7 @@ DISubprogram *DIBuilder::createMethod(DIScope *Context, StringRef Name,
/* IsDistinct = */ isDefinition, VMContext, cast<DIScope>(Context), Name,
LinkageName, F, LineNo, Ty, isLocalToUnit, isDefinition, LineNo,
VTableHolder, VK, VIndex, ThisAdjustment, Flags, isOptimized,
- isDefinition ? CUNode : nullptr, TParams, nullptr, nullptr);
+ isDefinition ? CUNode : nullptr, TParams, nullptr, nullptr, ThrownTypes);
if (isDefinition)
AllSubprograms.push_back(SP);
@@ -729,10 +737,15 @@ DISubprogram *DIBuilder::createMethod(DIScope *Context, StringRef Name,
}
DINamespace *DIBuilder::createNameSpace(DIScope *Scope, StringRef Name,
- DIFile *File, unsigned LineNo,
bool ExportSymbols) {
- return DINamespace::get(VMContext, getNonCompileUnitScope(Scope), File, Name,
- LineNo, ExportSymbols);
+
+ // It is okay to *not* make anonymous top-level namespaces distinct, because
+ // all nodes that have an anonymous namespace as their parent scope are
+ // guaranteed to be unique and/or are linked to their containing
+ // DICompileUnit. This decision is an explicit tradeoff of link time versus
+ // memory usage versus code simplicity and may get revisited in the future.
+ return DINamespace::get(VMContext, getNonCompileUnitScope(Scope), Name,
+ ExportSymbols);
}
DIModule *DIBuilder::createModule(DIScope *Scope, StringRef Name,
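The DIBuilder hunks above thread an Optional<unsigned> DWARF address space through createPointerType/createReferenceType and add DIFile and ThrownTypes parameters to the import and subprogram creators. A minimal caller-side sketch of the new createPointerType signature; DIB, IntTy, the 64-bit size, and address space 1 are assumed/illustrative values, not taken from the patch:

    #include "llvm/IR/DIBuilder.h"

    // Sketch: passing the new optional DWARF address space argument.
    // Callers that don't care can pass llvm::None to keep the old behaviour.
    static llvm::DIDerivedType *makeAddrSpacePtr(llvm::DIBuilder &DIB,
                                                 llvm::DIType *IntTy) {
      return DIB.createPointerType(IntTy, /*SizeInBits=*/64,
                                   /*AlignInBits=*/0,
                                   /*DWARFAddressSpace=*/1, "int_ptr");
    }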
diff --git a/contrib/llvm/lib/IR/DataLayout.cpp b/contrib/llvm/lib/IR/DataLayout.cpp
index d15a34c..5de281a 100644
--- a/contrib/llvm/lib/IR/DataLayout.cpp
+++ b/contrib/llvm/lib/IR/DataLayout.cpp
@@ -1,4 +1,4 @@
-//===-- DataLayout.cpp - Data size & alignment routines --------------------==//
+//===- DataLayout.cpp - Data size & alignment routines ---------------------==//
//
// The LLVM Compiler Infrastructure
//
@@ -18,19 +18,25 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Mutex.h"
-#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
+#include <cstdint>
#include <cstdlib>
+#include <tuple>
+#include <utility>
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -73,7 +79,6 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
}
}
-
/// getElementContainingOffset - Given a valid offset into the structure,
/// return the structure index that contains it.
unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
@@ -118,9 +123,6 @@ LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
&& TypeBitWidth == rhs.TypeBitWidth);
}
-const LayoutAlignElem
-DataLayout::InvalidAlignmentElem = { INVALID_ALIGN, 0, 0, 0 };
-
//===----------------------------------------------------------------------===//
// PointerAlignElem, PointerAlign support
//===----------------------------------------------------------------------===//
@@ -145,9 +147,6 @@ PointerAlignElem::operator==(const PointerAlignElem &rhs) const {
&& TypeByteWidth == rhs.TypeByteWidth);
}
-const PointerAlignElem
-DataLayout::InvalidPointerElem = { 0U, 0U, 0U, ~0U };
-
//===----------------------------------------------------------------------===//
// DataLayout Class Implementation
//===----------------------------------------------------------------------===//
@@ -180,6 +179,7 @@ void DataLayout::reset(StringRef Desc) {
LayoutMap = nullptr;
BigEndian = false;
+ AllocaAddrSpace = 0;
StackNaturalAlign = 0;
ManglingMode = MM_None;
NonIntegralAddressSpaces.clear();
@@ -307,7 +307,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
case 'a': {
AlignTypeEnum AlignType;
switch (Specifier) {
- default:
+ default: llvm_unreachable("Unexpected specifier!");
case 'i': AlignType = INTEGER_ALIGN; break;
case 'v': AlignType = VECTOR_ALIGN; break;
case 'f': AlignType = FLOAT_ALIGN; break;
@@ -343,7 +343,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
break;
}
case 'n': // Native integer types.
- for (;;) {
+ while (true) {
unsigned Width = getInt(Tok);
if (Width == 0)
report_fatal_error(
@@ -358,6 +358,12 @@ void DataLayout::parseSpecifier(StringRef Desc) {
StackNaturalAlign = inBytes(getInt(Tok));
break;
}
+ case 'A': { // Default stack/alloca address space.
+ AllocaAddrSpace = getInt(Tok);
+ if (!isUInt<24>(AllocaAddrSpace))
+ report_fatal_error("Invalid address space, must be a 24bit integer");
+ break;
+ }
case 'm':
if (!Tok.empty())
report_fatal_error("Unexpected trailing characters after mangling specifier in datalayout string");
@@ -392,7 +398,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
}
}
-DataLayout::DataLayout(const Module *M) : LayoutMap(nullptr) {
+DataLayout::DataLayout(const Module *M) {
init(M);
}
@@ -400,6 +406,7 @@ void DataLayout::init(const Module *M) { *this = M->getDataLayout(); }
bool DataLayout::operator==(const DataLayout &Other) const {
bool Ret = BigEndian == Other.BigEndian &&
+ AllocaAddrSpace == Other.AllocaAddrSpace &&
StackNaturalAlign == Other.StackNaturalAlign &&
ManglingMode == Other.ManglingMode &&
LegalIntWidths == Other.LegalIntWidths &&
@@ -408,6 +415,18 @@ bool DataLayout::operator==(const DataLayout &Other) const {
return Ret;
}
+DataLayout::AlignmentsTy::iterator
+DataLayout::findAlignmentLowerBound(AlignTypeEnum AlignType,
+ uint32_t BitWidth) {
+ auto Pair = std::make_pair((unsigned)AlignType, BitWidth);
+ return std::lower_bound(Alignments.begin(), Alignments.end(), Pair,
+ [](const LayoutAlignElem &LHS,
+ const std::pair<unsigned, uint32_t> &RHS) {
+ return std::tie(LHS.AlignType, LHS.TypeBitWidth) <
+ std::tie(RHS.first, RHS.second);
+ });
+}
+
void
DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width) {
@@ -426,18 +445,17 @@ DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
report_fatal_error(
"Preferred alignment cannot be less than the ABI alignment");
- for (LayoutAlignElem &Elem : Alignments) {
- if (Elem.AlignType == (unsigned)align_type &&
- Elem.TypeBitWidth == bit_width) {
- // Update the abi, preferred alignments.
- Elem.ABIAlign = abi_align;
- Elem.PrefAlign = pref_align;
- return;
- }
+ AlignmentsTy::iterator I = findAlignmentLowerBound(align_type, bit_width);
+ if (I != Alignments.end() &&
+ I->AlignType == (unsigned)align_type && I->TypeBitWidth == bit_width) {
+ // Update the abi, preferred alignments.
+ I->ABIAlign = abi_align;
+ I->PrefAlign = pref_align;
+ } else {
+ // Insert before I to keep the vector sorted.
+ Alignments.insert(I, LayoutAlignElem::get(align_type, abi_align,
+ pref_align, bit_width));
}
-
- Alignments.push_back(LayoutAlignElem::get(align_type, abi_align,
- pref_align, bit_width));
}
DataLayout::PointersTy::iterator
@@ -471,45 +489,29 @@ void DataLayout::setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
uint32_t BitWidth, bool ABIInfo,
Type *Ty) const {
- // Check to see if we have an exact match and remember the best match we see.
- int BestMatchIdx = -1;
- int LargestInt = -1;
- for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
- if (Alignments[i].AlignType == (unsigned)AlignType &&
- Alignments[i].TypeBitWidth == BitWidth)
- return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
-
- // The best match so far depends on what we're looking for.
- if (AlignType == INTEGER_ALIGN &&
- Alignments[i].AlignType == INTEGER_ALIGN) {
- // The "best match" for integers is the smallest size that is larger than
- // the BitWidth requested.
- if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
- Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth))
- BestMatchIdx = i;
- // However, if there isn't one that's larger, then we must use the
- // largest one we have (see below)
- if (LargestInt == -1 ||
- Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth)
- LargestInt = i;
+ AlignmentsTy::const_iterator I = findAlignmentLowerBound(AlignType, BitWidth);
+  // See if we found an exact match. Or if we are looking for an integer type,
+ // but don't have an exact match take the next largest integer. This is where
+ // the lower_bound will point to when it fails an exact match.
+ if (I != Alignments.end() && I->AlignType == (unsigned)AlignType &&
+ (I->TypeBitWidth == BitWidth || AlignType == INTEGER_ALIGN))
+ return ABIInfo ? I->ABIAlign : I->PrefAlign;
+
+ if (AlignType == INTEGER_ALIGN) {
+ // If we didn't have a larger value try the largest value we have.
+ if (I != Alignments.begin()) {
+ --I; // Go to the previous entry and see if its an integer.
+ if (I->AlignType == INTEGER_ALIGN)
+ return ABIInfo ? I->ABIAlign : I->PrefAlign;
}
- }
-
- // Okay, we didn't find an exact solution. Fall back here depending on what
- // is being looked for.
- if (BestMatchIdx == -1) {
- // If we didn't find an integer alignment, fall back on most conservative.
- if (AlignType == INTEGER_ALIGN) {
- BestMatchIdx = LargestInt;
- } else if (AlignType == VECTOR_ALIGN) {
- // By default, use natural alignment for vector types. This is consistent
- // with what clang and llvm-gcc do.
- unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
- Align *= cast<VectorType>(Ty)->getNumElements();
- Align = PowerOf2Ceil(Align);
- return Align;
- }
- }
+ } else if (AlignType == VECTOR_ALIGN) {
+ // By default, use natural alignment for vector types. This is consistent
+ // with what clang and llvm-gcc do.
+ unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+ Align *= cast<VectorType>(Ty)->getNumElements();
+ Align = PowerOf2Ceil(Align);
+ return Align;
+ }
// If we still couldn't find a reasonable default alignment, fall back
// to a simple heuristic that the alignment is the first power of two
@@ -517,21 +519,15 @@ unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
// approximation of reality, and if the user wanted something less
// less conservative, they should have specified it explicitly in the data
// layout.
- if (BestMatchIdx == -1) {
- unsigned Align = getTypeStoreSize(Ty);
- Align = PowerOf2Ceil(Align);
- return Align;
- }
-
- // Since we got a "best match" index, just return it.
- return ABIInfo ? Alignments[BestMatchIdx].ABIAlign
- : Alignments[BestMatchIdx].PrefAlign;
+ unsigned Align = getTypeStoreSize(Ty);
+ Align = PowerOf2Ceil(Align);
+ return Align;
}
namespace {
class StructLayoutMap {
- typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy;
+ using LayoutInfoTy = DenseMap<StructType*, StructLayout*>;
LayoutInfoTy LayoutInfo;
public:
@@ -586,7 +582,6 @@ const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
return L;
}
-
unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
@@ -617,11 +612,8 @@ unsigned DataLayout::getPointerSize(unsigned AS) const {
unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
assert(Ty->isPtrOrPtrVectorTy() &&
"This should only be called with a pointer or pointer vector type");
-
- if (Ty->isPointerTy())
- return getTypeSizeInBits(Ty);
-
- return getTypeSizeInBits(Ty->getScalarType());
+ Ty = Ty->getScalarType();
+ return getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace());
}
/*!
@@ -633,7 +625,7 @@ unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
== false) for the requested type \a Ty.
*/
unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
- int AlignType = -1;
+ AlignTypeEnum AlignType;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
@@ -682,8 +674,7 @@ unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
llvm_unreachable("Bad type for getAlignment!!!");
}
- return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
- abi_or_pref, Ty);
+ return getAlignmentInfo(AlignType, getTypeSizeInBits(Ty), abi_or_pref, Ty);
}
unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
@@ -791,4 +782,3 @@ unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
return Log2_32(getPreferredAlignment(GV));
}
-
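The DataLayout hunks above add an 'A<n>' specifier for the default alloca address space, keep the alignment table sorted so lookups can use lower_bound, and reject address spaces wider than 24 bits. A minimal sketch of a layout string using the new specifier; the string and address space 5 are illustrative, and the accessor getAllocaAddrSpace() for the new AllocaAddrSpace field is assumed:

    #include "llvm/IR/DataLayout.h"

    // Sketch: "A5" selects alloca address space 5; the rest is an ordinary
    // little-endian, ELF-mangling layout with 64-bit-aligned i64.
    static unsigned allocaAddrSpaceExample() {
      llvm::DataLayout DL("e-m:e-i64:64-A5");
      return DL.getAllocaAddrSpace(); // expected to be 5 after parsing
    }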
diff --git a/contrib/llvm/lib/IR/DebugInfo.cpp b/contrib/llvm/lib/IR/DebugInfo.cpp
index 6b9bc68..56cec57 100644
--- a/contrib/llvm/lib/IR/DebugInfo.cpp
+++ b/contrib/llvm/lib/IR/DebugInfo.cpp
@@ -1,4 +1,4 @@
-//===--- DebugInfo.cpp - Debug Information Helper Classes -----------------===//
+//===- DebugInfo.cpp - Debug Information Helper Classes -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,21 +13,28 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/DebugInfo.h"
-#include "LLVMContextImpl.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DIBuilder.h"
-#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/GVMaterializer.h"
-#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/ValueHandle.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <utility>
+
using namespace llvm;
using namespace llvm::dwarf;
@@ -79,9 +86,19 @@ void DebugInfoFinder::processModule(const Module &M) {
processScope(M->getScope());
}
}
- for (auto &F : M.functions())
+ for (auto &F : M.functions()) {
if (auto *SP = cast_or_null<DISubprogram>(F.getSubprogram()))
processSubprogram(SP);
+ // There could be subprograms from inlined functions referenced from
+ // instructions only. Walk the function to find them.
+ for (const BasicBlock &BB : F) {
+ for (const Instruction &I : BB) {
+ if (!I.getDebugLoc())
+ continue;
+ processLocation(M, I.getDebugLoc().get());
+ }
+ }
+ }
}
void DebugInfoFinder::processLocation(const Module &M, const DILocation *Loc) {
@@ -239,6 +256,38 @@ bool DebugInfoFinder::addScope(DIScope *Scope) {
return true;
}
+static MDNode *stripDebugLocFromLoopID(MDNode *N) {
+ assert(N->op_begin() != N->op_end() && "Missing self reference?");
+
+ // if there is no debug location, we do not have to rewrite this MDNode.
+ if (std::none_of(N->op_begin() + 1, N->op_end(), [](const MDOperand &Op) {
+ return isa<DILocation>(Op.get());
+ }))
+ return N;
+
+ // If there is only the debug location without any actual loop metadata, we
+ // can remove the metadata.
+ if (std::none_of(N->op_begin() + 1, N->op_end(), [](const MDOperand &Op) {
+ return !isa<DILocation>(Op.get());
+ }))
+ return nullptr;
+
+ SmallVector<Metadata *, 4> Args;
+ // Reserve operand 0 for loop id self reference.
+ auto TempNode = MDNode::getTemporary(N->getContext(), None);
+ Args.push_back(TempNode.get());
+ // Add all non-debug location operands back.
+ for (auto Op = N->op_begin() + 1; Op != N->op_end(); Op++) {
+ if (!isa<DILocation>(*Op))
+ Args.push_back(*Op);
+ }
+
+ // Set the first operand to itself.
+ MDNode *LoopID = MDNode::get(N->getContext(), Args);
+ LoopID->replaceOperandWith(0, LoopID);
+ return LoopID;
+}
+
bool llvm::stripDebugInfo(Function &F) {
bool Changed = false;
if (F.getSubprogram()) {
@@ -246,6 +295,7 @@ bool llvm::stripDebugInfo(Function &F) {
F.setSubprogram(nullptr);
}
+ DenseMap<MDNode*, MDNode*> LoopIDsMap;
for (BasicBlock &BB : F) {
for (auto II = BB.begin(), End = BB.end(); II != End;) {
Instruction &I = *II++; // We may delete the instruction, increment now.
@@ -259,6 +309,15 @@ bool llvm::stripDebugInfo(Function &F) {
I.setDebugLoc(DebugLoc());
}
}
+
+ auto *TermInst = BB.getTerminator();
+ if (auto *LoopID = TermInst->getMetadata(LLVMContext::MD_loop)) {
+ auto *NewLoopID = LoopIDsMap.lookup(LoopID);
+ if (!NewLoopID)
+ NewLoopID = LoopIDsMap[LoopID] = stripDebugLocFromLoopID(LoopID);
+ if (NewLoopID != LoopID)
+ TermInst->setMetadata(LLVMContext::MD_loop, NewLoopID);
+ }
}
return Changed;
}
@@ -410,7 +469,8 @@ private:
CU->isOptimized(), CU->getFlags(), CU->getRuntimeVersion(),
CU->getSplitDebugFilename(), DICompileUnit::LineTablesOnly, EnumTypes,
RetainedTypes, GlobalVariables, ImportedEntities, CU->getMacros(),
- CU->getDWOId(), CU->getSplitDebugInlining());
+ CU->getDWOId(), CU->getSplitDebugInlining(),
+ CU->getDebugInfoForProfiling());
}
DILocation *getReplacementMDLocation(DILocation *MLD) {
@@ -472,7 +532,7 @@ private:
void traverse(MDNode *);
};
-} // Anonymous namespace.
+} // end anonymous namespace
void DebugTypeInfoRemoval::traverse(MDNode *N) {
if (!N || Replacements.count(N))
@@ -537,7 +597,7 @@ bool llvm::stripNonLineTableDebugInfo(Module &M) {
GV.eraseMetadata(LLVMContext::MD_dbg);
DebugTypeInfoRemoval Mapper(M.getContext());
- auto remap = [&](llvm::MDNode *Node) -> llvm::MDNode * {
+ auto remap = [&](MDNode *Node) -> MDNode * {
if (!Node)
return nullptr;
Mapper.traverseAndRemap(Node);
@@ -558,17 +618,26 @@ bool llvm::stripNonLineTableDebugInfo(Module &M) {
}
for (auto &BB : F) {
for (auto &I : BB) {
- if (I.getDebugLoc() == DebugLoc())
- continue;
-
- // Make a replacement.
- auto &DL = I.getDebugLoc();
- auto *Scope = DL.getScope();
- MDNode *InlinedAt = DL.getInlinedAt();
- Scope = remap(Scope);
- InlinedAt = remap(InlinedAt);
- I.setDebugLoc(
- DebugLoc::get(DL.getLine(), DL.getCol(), Scope, InlinedAt));
+ auto remapDebugLoc = [&](DebugLoc DL) -> DebugLoc {
+ auto *Scope = DL.getScope();
+ MDNode *InlinedAt = DL.getInlinedAt();
+ Scope = remap(Scope);
+ InlinedAt = remap(InlinedAt);
+ return DebugLoc::get(DL.getLine(), DL.getCol(), Scope, InlinedAt);
+ };
+
+ if (I.getDebugLoc() != DebugLoc())
+ I.setDebugLoc(remapDebugLoc(I.getDebugLoc()));
+
+ // Remap DILocations in untyped MDNodes (e.g., llvm.loop).
+ SmallVector<std::pair<unsigned, MDNode *>, 2> MDs;
+ I.getAllMetadata(MDs);
+ for (auto Attachment : MDs)
+ if (auto *T = dyn_cast_or_null<MDTuple>(Attachment.second))
+ for (unsigned N = 0; N < T->getNumOperands(); ++N)
+ if (auto *Loc = dyn_cast_or_null<DILocation>(T->getOperand(N)))
+ if (Loc != DebugLoc())
+ T->replaceOperandWith(N, remapDebugLoc(Loc));
}
}
}
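The DebugInfo.cpp hunks above make stripDebugInfo also rewrite !llvm.loop attachments (dropping DILocation operands, or the whole node if nothing else remains) and teach DebugInfoFinder to visit subprograms reachable only through instruction debug locations. A minimal caller-side sketch of the public entry point whose behaviour changes, assuming nothing beyond llvm/IR/DebugInfo.h and Module.h:

    #include "llvm/IR/DebugInfo.h"
    #include "llvm/IR/Module.h"

    // Sketch: per-function stripping; with this patch it also cleans the
    // loop metadata on each block's terminator, not just debug locations.
    static bool stripAllFunctionDebugInfo(llvm::Module &M) {
      bool Changed = false;
      for (llvm::Function &F : M)
        Changed |= llvm::stripDebugInfo(F);
      return Changed;
    }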
diff --git a/contrib/llvm/lib/IR/DebugInfoMetadata.cpp b/contrib/llvm/lib/IR/DebugInfoMetadata.cpp
index 8e21a90..c14940b 100644
--- a/contrib/llvm/lib/IR/DebugInfoMetadata.cpp
+++ b/contrib/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -15,6 +15,7 @@
#include "LLVMContextImpl.h"
#include "MetadataImpl.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Function.h"
using namespace llvm;
@@ -214,6 +215,10 @@ void GenericDINode::recalculateHash() {
#define DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(CLASS, OPS) \
return storeImpl(new (array_lengthof(OPS)) CLASS(Context, Storage, OPS), \
Storage, Context.pImpl->CLASS##s)
+#define DEFINE_GETIMPL_STORE_N(CLASS, ARGS, OPS, NUM_OPS) \
+ return storeImpl(new (NUM_OPS) \
+ CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \
+ Storage, Context.pImpl->CLASS##s)
DISubrange *DISubrange::getImpl(LLVMContext &Context, int64_t Count, int64_t Lo,
StorageType Storage, bool ShouldCreate) {
@@ -245,16 +250,18 @@ DIBasicType *DIBasicType::getImpl(LLVMContext &Context, unsigned Tag,
DIDerivedType *DIDerivedType::getImpl(
LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
- uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
- Metadata *ExtraData, StorageType Storage, bool ShouldCreate) {
+ uint32_t AlignInBits, uint64_t OffsetInBits,
+ Optional<unsigned> DWARFAddressSpace, DIFlags Flags, Metadata *ExtraData,
+ StorageType Storage, bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
DEFINE_GETIMPL_LOOKUP(DIDerivedType,
(Tag, Name, File, Line, Scope, BaseType, SizeInBits,
- AlignInBits, OffsetInBits, Flags, ExtraData));
+ AlignInBits, OffsetInBits, DWARFAddressSpace, Flags,
+ ExtraData));
Metadata *Ops[] = {File, Scope, Name, BaseType, ExtraData};
DEFINE_GETIMPL_STORE(
- DIDerivedType, (Tag, Line, SizeInBits, AlignInBits, OffsetInBits, Flags),
- Ops);
+ DIDerivedType, (Tag, Line, SizeInBits, AlignInBits, OffsetInBits,
+ DWARFAddressSpace, Flags), Ops);
}
DICompositeType *DICompositeType::getImpl(
@@ -383,8 +390,8 @@ DICompileUnit *DICompileUnit::getImpl(
unsigned RuntimeVersion, MDString *SplitDebugFilename,
unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
Metadata *GlobalVariables, Metadata *ImportedEntities, Metadata *Macros,
- uint64_t DWOId, bool SplitDebugInlining, StorageType Storage,
- bool ShouldCreate) {
+ uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
+ StorageType Storage, bool ShouldCreate) {
assert(Storage != Uniqued && "Cannot unique DICompileUnit");
assert(isCanonical(Producer) && "Expected canonical MDString");
assert(isCanonical(Flags) && "Expected canonical MDString");
@@ -397,7 +404,8 @@ DICompileUnit *DICompileUnit::getImpl(
return storeImpl(new (array_lengthof(Ops))
DICompileUnit(Context, Storage, SourceLanguage,
IsOptimized, RuntimeVersion, EmissionKind,
- DWOId, SplitDebugInlining, Ops),
+ DWOId, SplitDebugInlining,
+ DebugInfoForProfiling, Ops),
Storage);
}
@@ -438,21 +446,30 @@ DISubprogram *DISubprogram::getImpl(
Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
int ThisAdjustment, DIFlags Flags, bool IsOptimized, Metadata *Unit,
Metadata *TemplateParams, Metadata *Declaration, Metadata *Variables,
- StorageType Storage, bool ShouldCreate) {
+ Metadata *ThrownTypes, StorageType Storage, bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
assert(isCanonical(LinkageName) && "Expected canonical MDString");
DEFINE_GETIMPL_LOOKUP(
- DISubprogram,
- (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
- ScopeLine, ContainingType, Virtuality, VirtualIndex, ThisAdjustment,
- Flags, IsOptimized, Unit, TemplateParams, Declaration, Variables));
- Metadata *Ops[] = {File, Scope, Name, Name,
- LinkageName, Type, ContainingType, Unit,
- TemplateParams, Declaration, Variables};
- DEFINE_GETIMPL_STORE(DISubprogram, (Line, ScopeLine, Virtuality, VirtualIndex,
- ThisAdjustment, Flags, IsLocalToUnit,
- IsDefinition, IsOptimized),
- Ops);
+ DISubprogram, (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+ IsDefinition, ScopeLine, ContainingType, Virtuality,
+ VirtualIndex, ThisAdjustment, Flags, IsOptimized, Unit,
+ TemplateParams, Declaration, Variables, ThrownTypes));
+ SmallVector<Metadata *, 11> Ops = {
+ File, Scope, Name, LinkageName, Type, Unit,
+ Declaration, Variables, ContainingType, TemplateParams, ThrownTypes};
+ if (!ThrownTypes) {
+ Ops.pop_back();
+ if (!TemplateParams) {
+ Ops.pop_back();
+ if (!ContainingType)
+ Ops.pop_back();
+ }
+ }
+ DEFINE_GETIMPL_STORE_N(DISubprogram,
+ (Line, ScopeLine, Virtuality, VirtualIndex,
+ ThisAdjustment, Flags, IsLocalToUnit, IsDefinition,
+ IsOptimized),
+ Ops, Ops.size());
}
bool DISubprogram::describes(const Function *F) const {
@@ -490,13 +507,13 @@ DILexicalBlockFile *DILexicalBlockFile::getImpl(LLVMContext &Context,
}
DINamespace *DINamespace::getImpl(LLVMContext &Context, Metadata *Scope,
- Metadata *File, MDString *Name, unsigned Line,
- bool ExportSymbols, StorageType Storage,
- bool ShouldCreate) {
+ MDString *Name, bool ExportSymbols,
+ StorageType Storage, bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
- DEFINE_GETIMPL_LOOKUP(DINamespace, (Scope, File, Name, Line, ExportSymbols));
- Metadata *Ops[] = {File, Scope, Name};
- DEFINE_GETIMPL_STORE(DINamespace, (Line, ExportSymbols), Ops);
+ DEFINE_GETIMPL_LOOKUP(DINamespace, (Scope, Name, ExportSymbols));
+ // The nullptr is for DIScope's File operand. This should be refactored.
+ Metadata *Ops[] = {nullptr, Scope, Name};
+ DEFINE_GETIMPL_STORE(DINamespace, (ExportSymbols), Ops);
}
DIModule *DIModule::getImpl(LLVMContext &Context, Metadata *Scope,
@@ -581,8 +598,7 @@ unsigned DIExpression::ExprOperand::getSize() const {
case dwarf::DW_OP_LLVM_fragment:
return 3;
case dwarf::DW_OP_constu:
- case dwarf::DW_OP_plus:
- case dwarf::DW_OP_minus:
+ case dwarf::DW_OP_plus_uconst:
return 2;
default:
return 1;
@@ -611,10 +627,24 @@ bool DIExpression::isValid() const {
return false;
break;
}
+ case dwarf::DW_OP_swap: {
+ // Must be more than one implicit element on the stack.
+
+ // FIXME: A better way to implement this would be to add a local variable
+ // that keeps track of the stack depth and introduce something like a
+ // DW_LLVM_OP_implicit_location as a placeholder for the location this
+ // DIExpression is attached to, or else pass the number of implicit stack
+ // elements into isValid.
+ if (getNumElements() == 1)
+ return false;
+ break;
+ }
case dwarf::DW_OP_constu:
+ case dwarf::DW_OP_plus_uconst:
case dwarf::DW_OP_plus:
case dwarf::DW_OP_minus:
case dwarf::DW_OP_deref:
+ case dwarf::DW_OP_xderef:
break;
}
}
@@ -631,6 +661,69 @@ DIExpression::getFragmentInfo(expr_op_iterator Start, expr_op_iterator End) {
return None;
}
+void DIExpression::appendOffset(SmallVectorImpl<uint64_t> &Ops,
+ int64_t Offset) {
+ if (Offset > 0) {
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
+ Ops.push_back(Offset);
+ } else if (Offset < 0) {
+ Ops.push_back(dwarf::DW_OP_constu);
+ Ops.push_back(-Offset);
+ Ops.push_back(dwarf::DW_OP_minus);
+ }
+}
+
+bool DIExpression::extractIfOffset(int64_t &Offset) const {
+ if (getNumElements() == 0) {
+ Offset = 0;
+ return true;
+ }
+
+ if (getNumElements() == 2 && Elements[0] == dwarf::DW_OP_plus_uconst) {
+ Offset = Elements[1];
+ return true;
+ }
+
+ if (getNumElements() == 3 && Elements[0] == dwarf::DW_OP_constu) {
+ if (Elements[2] == dwarf::DW_OP_plus) {
+ Offset = Elements[1];
+ return true;
+ }
+ if (Elements[2] == dwarf::DW_OP_minus) {
+ Offset = -Elements[1];
+ return true;
+ }
+ }
+
+ return false;
+}
+
+DIExpression *DIExpression::prepend(const DIExpression *Expr, bool Deref,
+ int64_t Offset, bool StackValue) {
+ SmallVector<uint64_t, 8> Ops;
+ appendOffset(Ops, Offset);
+ if (Deref)
+ Ops.push_back(dwarf::DW_OP_deref);
+ if (Expr)
+ for (auto Op : Expr->expr_ops()) {
+ // A DW_OP_stack_value comes at the end, but before a DW_OP_LLVM_fragment.
+ if (StackValue) {
+ if (Op.getOp() == dwarf::DW_OP_stack_value)
+ StackValue = false;
+ else if (Op.getOp() == dwarf::DW_OP_LLVM_fragment) {
+ Ops.push_back(dwarf::DW_OP_stack_value);
+ StackValue = false;
+ }
+ }
+ Ops.push_back(Op.getOp());
+ for (unsigned I = 0; I < Op.getNumArgs(); ++I)
+ Ops.push_back(Op.getArg(I));
+ }
+ if (StackValue)
+ Ops.push_back(dwarf::DW_OP_stack_value);
+ return DIExpression::get(Expr->getContext(), Ops);
+}
+
bool DIExpression::isConstant() const {
// Recognize DW_OP_constu C DW_OP_stack_value (DW_OP_LLVM_fragment Len Ofs)?.
if (getNumElements() != 3 && getNumElements() != 6)
@@ -667,12 +760,13 @@ DIObjCProperty *DIObjCProperty::getImpl(
DIImportedEntity *DIImportedEntity::getImpl(LLVMContext &Context, unsigned Tag,
Metadata *Scope, Metadata *Entity,
- unsigned Line, MDString *Name,
- StorageType Storage,
+ Metadata *File, unsigned Line,
+ MDString *Name, StorageType Storage,
bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
- DEFINE_GETIMPL_LOOKUP(DIImportedEntity, (Tag, Scope, Entity, Line, Name));
- Metadata *Ops[] = {Scope, Entity, Name};
+ DEFINE_GETIMPL_LOOKUP(DIImportedEntity,
+ (Tag, Scope, Entity, File, Line, Name));
+ Metadata *Ops[] = {Scope, Entity, Name, File};
DEFINE_GETIMPL_STORE(DIImportedEntity, (Tag, Line), Ops);
}
diff --git a/contrib/llvm/lib/IR/DebugLoc.cpp b/contrib/llvm/lib/IR/DebugLoc.cpp
index ffa7a6b..6297395 100644
--- a/contrib/llvm/lib/IR/DebugLoc.cpp
+++ b/contrib/llvm/lib/IR/DebugLoc.cpp
@@ -10,6 +10,7 @@
#include "llvm/IR/DebugLoc.h"
#include "LLVMContextImpl.h"
#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -66,8 +67,40 @@ DebugLoc DebugLoc::get(unsigned Line, unsigned Col, const MDNode *Scope,
const_cast<MDNode *>(InlinedAt));
}
+DebugLoc DebugLoc::appendInlinedAt(DebugLoc DL, DILocation *InlinedAt,
+ LLVMContext &Ctx,
+ DenseMap<const MDNode *, MDNode *> &Cache,
+ bool ReplaceLast) {
+ SmallVector<DILocation *, 3> InlinedAtLocations;
+ DILocation *Last = InlinedAt;
+ DILocation *CurInlinedAt = DL;
+
+ // Gather all the inlined-at nodes.
+ while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
+ // Skip any we've already built nodes for.
+ if (auto *Found = Cache[IA]) {
+ Last = cast<DILocation>(Found);
+ break;
+ }
+
+ if (ReplaceLast && !IA->getInlinedAt())
+ break;
+ InlinedAtLocations.push_back(IA);
+ CurInlinedAt = IA;
+ }
+
+ // Starting from the top, rebuild the nodes to point to the new inlined-at
+ // location (then rebuilding the rest of the chain behind it) and update the
+ // map of already-constructed inlined-at nodes.
+ for (const DILocation *MD : reverse(InlinedAtLocations))
+ Cache[MD] = Last = DILocation::getDistinct(
+ Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
+
+ return Last;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void DebugLoc::dump() const {
-#ifndef NDEBUG
if (!Loc)
return;
@@ -79,8 +112,8 @@ LLVM_DUMP_METHOD void DebugLoc::dump() const {
InlinedAtDL.dump();
} else
dbgs() << "\n";
-#endif
}
+#endif
void DebugLoc::print(raw_ostream &OS) const {
if (!Loc)
diff --git a/contrib/llvm/lib/IR/DiagnosticInfo.cpp b/contrib/llvm/lib/IR/DiagnosticInfo.cpp
index ea71fde..5129d6b 100644
--- a/contrib/llvm/lib/IR/DiagnosticInfo.cpp
+++ b/contrib/llvm/lib/IR/DiagnosticInfo.cpp
@@ -13,19 +13,30 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/DiagnosticInfo.h"
-#include "LLVMContextImpl.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
#include <atomic>
+#include <cassert>
+#include <memory>
#include <string>
using namespace llvm;
@@ -53,6 +64,8 @@ struct PassRemarksOpt {
}
};
+} // end anonymous namespace
+
static PassRemarksOpt PassRemarksOptLoc;
static PassRemarksOpt PassRemarksMissedOptLoc;
static PassRemarksOpt PassRemarksAnalysisOptLoc;
@@ -85,7 +98,6 @@ PassRemarksAnalysis(
"the given regular expression"),
cl::Hidden, cl::location(PassRemarksAnalysisOptLoc), cl::ValueRequired,
cl::ZeroOrMore);
-}
int llvm::getNextAvailablePluginDiagnosticKind() {
static std::atomic<int> PluginKindID(DK_FirstPluginKind);
@@ -97,8 +109,7 @@ const char *OptimizationRemarkAnalysis::AlwaysPrint = "";
DiagnosticInfoInlineAsm::DiagnosticInfoInlineAsm(const Instruction &I,
const Twine &MsgStr,
DiagnosticSeverity Severity)
- : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(0), MsgStr(MsgStr),
- Instr(&I) {
+ : DiagnosticInfo(DK_InlineAsm, Severity), MsgStr(MsgStr), Instr(&I) {
if (const MDNode *SrcLoc = I.getMetadata("srcloc")) {
if (SrcLoc->getNumOperands() != 0)
if (const auto *CI =
@@ -148,21 +159,31 @@ void DiagnosticInfoPGOProfile::print(DiagnosticPrinter &DP) const {
DP << getMsg();
}
-bool DiagnosticInfoWithDebugLocBase::isLocationAvailable() const {
- return getDebugLoc();
+DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) {
+ if (!DL)
+ return;
+ Filename = DL->getFilename();
+ Line = DL->getLine();
+ Column = DL->getColumn();
+}
+
+DiagnosticLocation::DiagnosticLocation(const DISubprogram *SP) {
+ if (!SP)
+ return;
+ Filename = SP->getFilename();
+ Line = SP->getScopeLine();
+ Column = 0;
}
-void DiagnosticInfoWithDebugLocBase::getLocation(StringRef *Filename,
+void DiagnosticInfoWithLocationBase::getLocation(StringRef *Filename,
unsigned *Line,
unsigned *Column) const {
- DILocation *L = getDebugLoc();
- assert(L != nullptr && "debug location is invalid");
- *Filename = L->getFilename();
- *Line = L->getLine();
- *Column = L->getColumn();
+ *Filename = Loc.getFilename();
+ *Line = Loc.getLine();
+ *Column = Loc.getColumn();
}
-const std::string DiagnosticInfoWithDebugLocBase::getLocationStr() const {
+const std::string DiagnosticInfoWithLocationBase::getLocationStr() const {
StringRef Filename("<unknown>");
unsigned Line = 0;
unsigned Column = 0;
@@ -171,19 +192,19 @@ const std::string DiagnosticInfoWithDebugLocBase::getLocationStr() const {
return (Filename + ":" + Twine(Line) + ":" + Twine(Column)).str();
}
-DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, Value *V)
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, const Value *V)
: Key(Key) {
if (auto *F = dyn_cast<Function>(V)) {
if (DISubprogram *SP = F->getSubprogram())
- DLoc = DebugLoc::get(SP->getScopeLine(), 0, SP);
+ Loc = SP;
}
else if (auto *I = dyn_cast<Instruction>(V))
- DLoc = I->getDebugLoc();
+ Loc = I->getDebugLoc();
// Only include names that correspond to user variables. FIXME: we should use
// debug info if available to get the name of the user variable.
if (isa<llvm::Argument>(V) || isa<GlobalValue>(V))
- Val = GlobalValue::getRealLinkageName(V->getName());
+ Val = GlobalValue::dropLLVMManglingEscape(V->getName());
else if (isa<Constant>(V)) {
raw_string_ostream OS(Val);
V->printAsOperand(OS, /*PrintType=*/false);
@@ -191,7 +212,7 @@ DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, Value *V)
Val = I->getOpcodeName();
}
-DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, Type *T)
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, const Type *T)
: Key(Key) {
raw_string_ostream OS(Val);
OS << *T;
@@ -211,73 +232,83 @@ void DiagnosticInfoOptimizationBase::print(DiagnosticPrinter &DP) const {
OptimizationRemark::OptimizationRemark(const char *PassName,
StringRef RemarkName,
- const DebugLoc &DLoc, Value *CodeRegion)
- : DiagnosticInfoOptimizationBase(
+ const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
DK_OptimizationRemark, DS_Remark, PassName, RemarkName,
- *cast<BasicBlock>(CodeRegion)->getParent(), DLoc, CodeRegion) {}
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
OptimizationRemark::OptimizationRemark(const char *PassName,
- StringRef RemarkName, Instruction *Inst)
- : DiagnosticInfoOptimizationBase(DK_OptimizationRemark, DS_Remark, PassName,
- RemarkName,
- *Inst->getParent()->getParent(),
- Inst->getDebugLoc(), Inst->getParent()) {}
+ StringRef RemarkName,
+ const Instruction *Inst)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemark, DS_Remark, PassName,
+ RemarkName, *Inst->getParent()->getParent(),
+ Inst->getDebugLoc(), Inst->getParent()) {}
-bool OptimizationRemark::isEnabled() const {
+// Helper to allow for an assert before attempting to return an invalid
+// reference.
+static const BasicBlock &getFirstFunctionBlock(const Function *Func) {
+ assert(!Func->empty() && "Function does not have a body");
+ return Func->front();
+}
+
+OptimizationRemark::OptimizationRemark(const char *PassName,
+ StringRef RemarkName,
+ const Function *Func)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemark, DS_Remark, PassName,
+ RemarkName, *Func, Func->getSubprogram(),
+ &getFirstFunctionBlock(Func)) {}
+
+bool OptimizationRemark::isEnabled(StringRef PassName) {
return PassRemarksOptLoc.Pattern &&
- PassRemarksOptLoc.Pattern->match(getPassName());
+ PassRemarksOptLoc.Pattern->match(PassName);
}
-OptimizationRemarkMissed::OptimizationRemarkMissed(const char *PassName,
- StringRef RemarkName,
- const DebugLoc &DLoc,
- Value *CodeRegion)
- : DiagnosticInfoOptimizationBase(
+OptimizationRemarkMissed::OptimizationRemarkMissed(
+ const char *PassName, StringRef RemarkName, const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
DK_OptimizationRemarkMissed, DS_Remark, PassName, RemarkName,
- *cast<BasicBlock>(CodeRegion)->getParent(), DLoc, CodeRegion) {}
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
OptimizationRemarkMissed::OptimizationRemarkMissed(const char *PassName,
StringRef RemarkName,
- Instruction *Inst)
- : DiagnosticInfoOptimizationBase(DK_OptimizationRemarkMissed, DS_Remark,
- PassName, RemarkName,
- *Inst->getParent()->getParent(),
- Inst->getDebugLoc(), Inst->getParent()) {}
+ const Instruction *Inst)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemarkMissed, DS_Remark,
+ PassName, RemarkName,
+ *Inst->getParent()->getParent(),
+ Inst->getDebugLoc(), Inst->getParent()) {}
-bool OptimizationRemarkMissed::isEnabled() const {
+bool OptimizationRemarkMissed::isEnabled(StringRef PassName) {
return PassRemarksMissedOptLoc.Pattern &&
- PassRemarksMissedOptLoc.Pattern->match(getPassName());
+ PassRemarksMissedOptLoc.Pattern->match(PassName);
}
-OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(const char *PassName,
- StringRef RemarkName,
- const DebugLoc &DLoc,
- Value *CodeRegion)
- : DiagnosticInfoOptimizationBase(
+OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(
+ const char *PassName, StringRef RemarkName, const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
DK_OptimizationRemarkAnalysis, DS_Remark, PassName, RemarkName,
- *cast<BasicBlock>(CodeRegion)->getParent(), DLoc, CodeRegion) {}
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(const char *PassName,
StringRef RemarkName,
- Instruction *Inst)
- : DiagnosticInfoOptimizationBase(DK_OptimizationRemarkAnalysis, DS_Remark,
- PassName, RemarkName,
- *Inst->getParent()->getParent(),
- Inst->getDebugLoc(), Inst->getParent()) {}
-
-OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(enum DiagnosticKind Kind,
- const char *PassName,
- StringRef RemarkName,
- const DebugLoc &DLoc,
- Value *CodeRegion)
- : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, RemarkName,
- *cast<BasicBlock>(CodeRegion)->getParent(),
- DLoc, CodeRegion) {}
+ const Instruction *Inst)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemarkAnalysis, DS_Remark,
+ PassName, RemarkName,
+ *Inst->getParent()->getParent(),
+ Inst->getDebugLoc(), Inst->getParent()) {}
+
+OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(
+ enum DiagnosticKind Kind, const char *PassName, StringRef RemarkName,
+ const DiagnosticLocation &Loc, const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(Kind, DS_Remark, PassName, RemarkName,
+ *cast<BasicBlock>(CodeRegion)->getParent(),
+ Loc, CodeRegion) {}
-bool OptimizationRemarkAnalysis::isEnabled() const {
- return shouldAlwaysPrint() ||
- (PassRemarksAnalysisOptLoc.Pattern &&
- PassRemarksAnalysisOptLoc.Pattern->match(getPassName()));
+bool OptimizationRemarkAnalysis::isEnabled(StringRef PassName) {
+ return PassRemarksAnalysisOptLoc.Pattern &&
+ PassRemarksAnalysisOptLoc.Pattern->match(PassName);
}
void DiagnosticInfoMIRParser::print(DiagnosticPrinter &DP) const {
@@ -285,42 +316,48 @@ void DiagnosticInfoMIRParser::print(DiagnosticPrinter &DP) const {
}
void llvm::emitOptimizationRemark(LLVMContext &Ctx, const char *PassName,
- const Function &Fn, const DebugLoc &DLoc,
+ const Function &Fn,
+ const DiagnosticLocation &Loc,
const Twine &Msg) {
- Ctx.diagnose(OptimizationRemark(PassName, Fn, DLoc, Msg));
+ Ctx.diagnose(OptimizationRemark(PassName, Fn, Loc, Msg));
}
void llvm::emitOptimizationRemarkMissed(LLVMContext &Ctx, const char *PassName,
const Function &Fn,
- const DebugLoc &DLoc,
+ const DiagnosticLocation &Loc,
const Twine &Msg) {
- Ctx.diagnose(OptimizationRemarkMissed(PassName, Fn, DLoc, Msg));
+ Ctx.diagnose(OptimizationRemarkMissed(PassName, Fn, Loc, Msg));
}
void llvm::emitOptimizationRemarkAnalysis(LLVMContext &Ctx,
const char *PassName,
const Function &Fn,
- const DebugLoc &DLoc,
+ const DiagnosticLocation &Loc,
const Twine &Msg) {
- Ctx.diagnose(OptimizationRemarkAnalysis(PassName, Fn, DLoc, Msg));
+ Ctx.diagnose(OptimizationRemarkAnalysis(PassName, Fn, Loc, Msg));
}
-void llvm::emitOptimizationRemarkAnalysisFPCommute(LLVMContext &Ctx,
- const char *PassName,
- const Function &Fn,
- const DebugLoc &DLoc,
- const Twine &Msg) {
- Ctx.diagnose(OptimizationRemarkAnalysisFPCommute(PassName, Fn, DLoc, Msg));
+void llvm::emitOptimizationRemarkAnalysisFPCommute(
+ LLVMContext &Ctx, const char *PassName, const Function &Fn,
+ const DiagnosticLocation &Loc, const Twine &Msg) {
+ Ctx.diagnose(OptimizationRemarkAnalysisFPCommute(PassName, Fn, Loc, Msg));
}
void llvm::emitOptimizationRemarkAnalysisAliasing(LLVMContext &Ctx,
const char *PassName,
const Function &Fn,
- const DebugLoc &DLoc,
+ const DiagnosticLocation &Loc,
const Twine &Msg) {
- Ctx.diagnose(OptimizationRemarkAnalysisAliasing(PassName, Fn, DLoc, Msg));
+ Ctx.diagnose(OptimizationRemarkAnalysisAliasing(PassName, Fn, Loc, Msg));
}
+DiagnosticInfoOptimizationFailure::DiagnosticInfoOptimizationFailure(
+ const char *PassName, StringRef RemarkName, const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
+ DK_OptimizationFailure, DS_Warning, PassName, RemarkName,
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
+
bool DiagnosticInfoOptimizationFailure::isEnabled() const {
// Only print warnings.
return getSeverity() == DS_Warning;
@@ -336,18 +373,6 @@ void DiagnosticInfoUnsupported::print(DiagnosticPrinter &DP) const {
DP << Str;
}
-void llvm::emitLoopVectorizeWarning(LLVMContext &Ctx, const Function &Fn,
- const DebugLoc &DLoc, const Twine &Msg) {
- Ctx.diagnose(DiagnosticInfoOptimizationFailure(
- Fn, DLoc, Twine("loop not vectorized: " + Msg)));
-}
-
-void llvm::emitLoopInterleaveWarning(LLVMContext &Ctx, const Function &Fn,
- const DebugLoc &DLoc, const Twine &Msg) {
- Ctx.diagnose(DiagnosticInfoOptimizationFailure(
- Fn, DLoc, Twine("loop not interleaved: " + Msg)));
-}
-
void DiagnosticInfoISelFallback::print(DiagnosticPrinter &DP) const {
DP << "Instruction selection used fallback path for " << getFunction();
}
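The hunks above retire DiagnosticInfoWithDebugLocBase in favor of DiagnosticLocation, which converts implicitly from a DebugLoc or a DISubprogram, and constify the remark constructors. A minimal sketch of emitting a remark through the updated helper signature shown in this diff; the pass name "my-pass" and the wrapper function are illustrative, not part of the change:

#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static void remarkOnInst(const Function &F, const Instruction &I) {
  LLVMContext &Ctx = F.getContext();
  // DiagnosticLocation(const DebugLoc &) picks up filename/line/column when
  // the instruction carries debug info; otherwise the location stays empty.
  emitOptimizationRemark(Ctx, "my-pass", F, I.getDebugLoc(),
                         "transformed the surrounding loop");
}

The remark is typically only printed when -pass-remarks matches the pass name, per the PassRemarksOptLoc pattern check above.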
diff --git a/contrib/llvm/lib/IR/DiagnosticPrinter.cpp b/contrib/llvm/lib/IR/DiagnosticPrinter.cpp
index 659ff49..ee2df9e 100644
--- a/contrib/llvm/lib/IR/DiagnosticPrinter.cpp
+++ b/contrib/llvm/lib/IR/DiagnosticPrinter.cpp
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/Twine.h"
#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/IR/Dominators.cpp b/contrib/llvm/lib/IR/Dominators.cpp
index 1880807..4d7e304 100644
--- a/contrib/llvm/lib/IR/Dominators.cpp
+++ b/contrib/llvm/lib/IR/Dominators.cpp
@@ -29,9 +29,9 @@ using namespace llvm;
// Always verify dominfo if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
-static bool VerifyDomInfo = true;
+bool llvm::VerifyDomInfo = true;
#else
-static bool VerifyDomInfo = false;
+bool llvm::VerifyDomInfo = false;
#endif
static cl::opt<bool,true>
VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo),
@@ -61,17 +61,39 @@ bool BasicBlockEdge::isSingleEdge() const {
//===----------------------------------------------------------------------===//
template class llvm::DomTreeNodeBase<BasicBlock>;
-template class llvm::DominatorTreeBase<BasicBlock>;
-
-template void llvm::Calculate<Function, BasicBlock *>(
- DominatorTreeBase<
- typename std::remove_pointer<GraphTraits<BasicBlock *>::NodeRef>::type>
- &DT,
- Function &F);
-template void llvm::Calculate<Function, Inverse<BasicBlock *>>(
- DominatorTreeBase<typename std::remove_pointer<
- GraphTraits<Inverse<BasicBlock *>>::NodeRef>::type> &DT,
- Function &F);
+template class llvm::DominatorTreeBase<BasicBlock, false>; // DomTreeBase
+template class llvm::DominatorTreeBase<BasicBlock, true>; // PostDomTreeBase
+
+template void
+llvm::DomTreeBuilder::Calculate<DomTreeBuilder::BBDomTree, Function>(
+ DomTreeBuilder::BBDomTree &DT, Function &F);
+template void
+llvm::DomTreeBuilder::Calculate<DomTreeBuilder::BBPostDomTree, Function>(
+ DomTreeBuilder::BBPostDomTree &DT, Function &F);
+
+template void llvm::DomTreeBuilder::InsertEdge<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT, BasicBlock *From, BasicBlock *To);
+template void llvm::DomTreeBuilder::InsertEdge<DomTreeBuilder::BBPostDomTree>(
+ DomTreeBuilder::BBPostDomTree &DT, BasicBlock *From, BasicBlock *To);
+
+template void llvm::DomTreeBuilder::DeleteEdge<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT, BasicBlock *From, BasicBlock *To);
+template void llvm::DomTreeBuilder::DeleteEdge<DomTreeBuilder::BBPostDomTree>(
+ DomTreeBuilder::BBPostDomTree &DT, BasicBlock *From, BasicBlock *To);
+
+template bool llvm::DomTreeBuilder::Verify<DomTreeBuilder::BBDomTree>(
+ const DomTreeBuilder::BBDomTree &DT);
+template bool llvm::DomTreeBuilder::Verify<DomTreeBuilder::BBPostDomTree>(
+ const DomTreeBuilder::BBPostDomTree &DT);
+
+bool DominatorTree::invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &) {
+ // Check whether the analysis, all analyses on functions, or the function's
+ // CFG have been preserved.
+ auto PAC = PA.getChecker<DominatorTreeAnalysis>();
+ return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>() ||
+ PAC.preservedSet<CFGAnalyses>());
+}
// dominates - Return true if Def dominates a use in User. This performs
// the special checks necessary if Def and User are in the same basic block.
@@ -141,12 +163,6 @@ bool DominatorTree::dominates(const Instruction *Def,
bool DominatorTree::dominates(const BasicBlockEdge &BBE,
const BasicBlock *UseBB) const {
- // Assert that we have a single edge. We could handle them by simply
- // returning false, but since isSingleEdge is linear on the number of
- // edges, the callers can normally handle them more efficiently.
- assert(BBE.isSingleEdge() &&
- "This function is not efficient in handling multiple edges");
-
// If the BB the edge ends in doesn't dominate the use BB, then the
// edge also doesn't.
const BasicBlock *Start = BBE.getStart();
@@ -179,11 +195,17 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE,
// trivially dominates itself, so we only have to find if it dominates the
// other predecessors. Since the only way out of X is via NormalDest, X can
// only properly dominate a node if NormalDest dominates that node too.
+ int IsDuplicateEdge = 0;
for (const_pred_iterator PI = pred_begin(End), E = pred_end(End);
PI != E; ++PI) {
const BasicBlock *BB = *PI;
- if (BB == Start)
+ if (BB == Start) {
+ // If there are multiple edges between Start and End, by definition they
+ // can't dominate anything.
+ if (IsDuplicateEdge++)
+ return false;
continue;
+ }
if (!dominates(End, BB))
return false;
@@ -192,12 +214,6 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE,
}
bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const {
- // Assert that we have a single edge. We could handle them by simply
- // returning false, but since isSingleEdge is linear on the number of
- // edges, the callers can normally handle them more efficiently.
- assert(BBE.isSingleEdge() &&
- "This function is not efficient in handling multiple edges");
-
Instruction *UserInst = cast<Instruction>(U.getUser());
// A PHI in the end of the edge is dominated by it.
PHINode *PN = dyn_cast<PHINode>(UserInst);
@@ -282,6 +298,13 @@ bool DominatorTree::isReachableFromEntry(const Use &U) const {
}
void DominatorTree::verifyDomTree() const {
+ // Perform the expensive checks only when VerifyDomInfo is set.
+ if (VerifyDomInfo && !verify()) {
+ errs() << "\n~~~~~~~~~~~\n\t\tDomTree verification failed!\n~~~~~~~~~~~\n";
+ print(errs());
+ abort();
+ }
+
Function &F = *getRoot()->getParent();
DominatorTree OtherDT;
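The new DominatorTree::invalidate hook above means a new-pass-manager pass only keeps the tree alive if it preserves the analysis itself, all function analyses, or the CFGAnalyses set. A sketch of the usual pattern, assuming a made-up HypotheticalPass that rewrites instructions without touching the CFG:

#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct HypotheticalPass : PassInfoMixin<HypotheticalPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
    (void)DT; // ... use DT; transform instructions, leave the CFG alone ...

    PreservedAnalyses PA;
    // CFG-only preservation is enough: invalidate() above then returns false.
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
};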
diff --git a/contrib/llvm/lib/IR/Function.cpp b/contrib/llvm/lib/IR/Function.cpp
index 05419aa..85a0198 100644
--- a/contrib/llvm/lib/IR/Function.cpp
+++ b/contrib/llvm/lib/IR/Function.cpp
@@ -1,4 +1,4 @@
-//===-- Function.cpp - Implement the Global object classes ----------------===//
+//===- Function.cpp - Implement the Global object classes -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,37 +14,60 @@
#include "llvm/IR/Function.h"
#include "LLVMContextImpl.h"
#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <string>
+
using namespace llvm;
// Explicit instantiations of SymbolTableListTraits since some of the methods
// are not in the public header file...
-template class llvm::SymbolTableListTraits<Argument>;
template class llvm::SymbolTableListTraits<BasicBlock>;
//===----------------------------------------------------------------------===//
// Argument Implementation
//===----------------------------------------------------------------------===//
-void Argument::anchor() { }
-
-Argument::Argument(Type *Ty, const Twine &Name, Function *Par)
- : Value(Ty, Value::ArgumentVal) {
- Parent = nullptr;
-
- if (Par)
- Par->getArgumentList().push_back(this);
+Argument::Argument(Type *Ty, const Twine &Name, Function *Par, unsigned ArgNo)
+ : Value(Ty, Value::ArgumentVal), Parent(Par), ArgNo(ArgNo) {
setName(Name);
}
@@ -52,27 +75,9 @@ void Argument::setParent(Function *parent) {
Parent = parent;
}
-/// getArgNo - Return the index of this formal argument in its containing
-/// function. For example in "void foo(int a, float b)" a is 0 and b is 1.
-unsigned Argument::getArgNo() const {
- const Function *F = getParent();
- assert(F && "Argument is not in a function");
-
- Function::const_arg_iterator AI = F->arg_begin();
- unsigned ArgIdx = 0;
- for (; &*AI != this; ++AI)
- ++ArgIdx;
-
- return ArgIdx;
-}
-
-/// hasNonNullAttr - Return true if this argument has the nonnull attribute on
-/// it in its containing function. Also returns true if at least one byte is
-/// known to be dereferenceable and the pointer is in addrspace(0).
bool Argument::hasNonNullAttr() const {
if (!getType()->isPointerTy()) return false;
- if (getParent()->getAttributes().
- hasAttribute(getArgNo()+1, Attribute::NonNull))
+ if (getParent()->hasParamAttribute(getArgNo(), Attribute::NonNull))
return true;
else if (getDereferenceableBytes() > 0 &&
getType()->getPointerAddressSpace() == 0)
@@ -80,25 +85,19 @@ bool Argument::hasNonNullAttr() const {
return false;
}
-/// hasByValAttr - Return true if this argument has the byval attribute on it
-/// in its containing function.
bool Argument::hasByValAttr() const {
if (!getType()->isPointerTy()) return false;
return hasAttribute(Attribute::ByVal);
}
bool Argument::hasSwiftSelfAttr() const {
- return getParent()->getAttributes().
- hasAttribute(getArgNo()+1, Attribute::SwiftSelf);
+ return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftSelf);
}
bool Argument::hasSwiftErrorAttr() const {
- return getParent()->getAttributes().
- hasAttribute(getArgNo()+1, Attribute::SwiftError);
+ return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftError);
}
-/// \brief Return true if this argument has the inalloca attribute on it in
-/// its containing function.
bool Argument::hasInAllocaAttr() const {
if (!getType()->isPointerTy()) return false;
return hasAttribute(Attribute::InAlloca);
@@ -106,139 +105,96 @@ bool Argument::hasInAllocaAttr() const {
bool Argument::hasByValOrInAllocaAttr() const {
if (!getType()->isPointerTy()) return false;
- AttributeSet Attrs = getParent()->getAttributes();
- return Attrs.hasAttribute(getArgNo() + 1, Attribute::ByVal) ||
- Attrs.hasAttribute(getArgNo() + 1, Attribute::InAlloca);
+ AttributeList Attrs = getParent()->getAttributes();
+ return Attrs.hasParamAttribute(getArgNo(), Attribute::ByVal) ||
+ Attrs.hasParamAttribute(getArgNo(), Attribute::InAlloca);
}
unsigned Argument::getParamAlignment() const {
assert(getType()->isPointerTy() && "Only pointers have alignments");
- return getParent()->getParamAlignment(getArgNo()+1);
-
+ return getParent()->getParamAlignment(getArgNo());
}
uint64_t Argument::getDereferenceableBytes() const {
assert(getType()->isPointerTy() &&
"Only pointers have dereferenceable bytes");
- return getParent()->getDereferenceableBytes(getArgNo()+1);
+ return getParent()->getParamDereferenceableBytes(getArgNo());
}
uint64_t Argument::getDereferenceableOrNullBytes() const {
assert(getType()->isPointerTy() &&
"Only pointers have dereferenceable bytes");
- return getParent()->getDereferenceableOrNullBytes(getArgNo()+1);
+ return getParent()->getParamDereferenceableOrNullBytes(getArgNo());
}
-/// hasNestAttr - Return true if this argument has the nest attribute on
-/// it in its containing function.
bool Argument::hasNestAttr() const {
if (!getType()->isPointerTy()) return false;
return hasAttribute(Attribute::Nest);
}
-/// hasNoAliasAttr - Return true if this argument has the noalias attribute on
-/// it in its containing function.
bool Argument::hasNoAliasAttr() const {
if (!getType()->isPointerTy()) return false;
return hasAttribute(Attribute::NoAlias);
}
-/// hasNoCaptureAttr - Return true if this argument has the nocapture attribute
-/// on it in its containing function.
bool Argument::hasNoCaptureAttr() const {
if (!getType()->isPointerTy()) return false;
return hasAttribute(Attribute::NoCapture);
}
-/// hasSRetAttr - Return true if this argument has the sret attribute on
-/// it in its containing function.
bool Argument::hasStructRetAttr() const {
if (!getType()->isPointerTy()) return false;
return hasAttribute(Attribute::StructRet);
}
-/// hasReturnedAttr - Return true if this argument has the returned attribute on
-/// it in its containing function.
bool Argument::hasReturnedAttr() const {
return hasAttribute(Attribute::Returned);
}
-/// hasZExtAttr - Return true if this argument has the zext attribute on it in
-/// its containing function.
bool Argument::hasZExtAttr() const {
return hasAttribute(Attribute::ZExt);
}
-/// hasSExtAttr Return true if this argument has the sext attribute on it in its
-/// containing function.
bool Argument::hasSExtAttr() const {
return hasAttribute(Attribute::SExt);
}
-/// Return true if this argument has the readonly or readnone attribute on it
-/// in its containing function.
bool Argument::onlyReadsMemory() const {
- return getParent()->getAttributes().
- hasAttribute(getArgNo()+1, Attribute::ReadOnly) ||
- getParent()->getAttributes().
- hasAttribute(getArgNo()+1, Attribute::ReadNone);
+ AttributeList Attrs = getParent()->getAttributes();
+ return Attrs.hasParamAttribute(getArgNo(), Attribute::ReadOnly) ||
+ Attrs.hasParamAttribute(getArgNo(), Attribute::ReadNone);
+}
+
+void Argument::addAttrs(AttrBuilder &B) {
+ AttributeList AL = getParent()->getAttributes();
+ AL = AL.addParamAttributes(Parent->getContext(), getArgNo(), B);
+ getParent()->setAttributes(AL);
+}
+
+void Argument::addAttr(Attribute::AttrKind Kind) {
+ getParent()->addParamAttr(getArgNo(), Kind);
}
-/// addAttr - Add attributes to an argument.
-void Argument::addAttr(AttributeSet AS) {
- assert(AS.getNumSlots() <= 1 &&
- "Trying to add more than one attribute set to an argument!");
- AttrBuilder B(AS, AS.getSlotIndex(0));
- getParent()->addAttributes(getArgNo() + 1,
- AttributeSet::get(Parent->getContext(),
- getArgNo() + 1, B));
+void Argument::addAttr(Attribute Attr) {
+ getParent()->addParamAttr(getArgNo(), Attr);
}
-/// removeAttr - Remove attributes from an argument.
-void Argument::removeAttr(AttributeSet AS) {
- assert(AS.getNumSlots() <= 1 &&
- "Trying to remove more than one attribute set from an argument!");
- AttrBuilder B(AS, AS.getSlotIndex(0));
- getParent()->removeAttributes(getArgNo() + 1,
- AttributeSet::get(Parent->getContext(),
- getArgNo() + 1, B));
+void Argument::removeAttr(Attribute::AttrKind Kind) {
+ getParent()->removeParamAttr(getArgNo(), Kind);
}
-/// hasAttribute - Checks if an argument has a given attribute.
bool Argument::hasAttribute(Attribute::AttrKind Kind) const {
- return getParent()->hasAttribute(getArgNo() + 1, Kind);
+ return getParent()->hasParamAttribute(getArgNo(), Kind);
}
//===----------------------------------------------------------------------===//
// Helper Methods in Function
//===----------------------------------------------------------------------===//
-bool Function::isMaterializable() const {
- return getGlobalObjectSubClassData() & (1 << IsMaterializableBit);
-}
-
-void Function::setIsMaterializable(bool V) {
- unsigned Mask = 1 << IsMaterializableBit;
- setGlobalObjectSubClassData((~Mask & getGlobalObjectSubClassData()) |
- (V ? Mask : 0u));
-}
-
LLVMContext &Function::getContext() const {
return getType()->getContext();
}
-FunctionType *Function::getFunctionType() const {
- return cast<FunctionType>(getValueType());
-}
-
-bool Function::isVarArg() const {
- return getFunctionType()->isVarArg();
-}
-
-Type *Function::getReturnType() const {
- return getFunctionType()->getReturnType();
-}
-
void Function::removeFromParent() {
getParent()->getFunctionList().remove(getIterator());
}
@@ -254,7 +210,8 @@ void Function::eraseFromParent() {
Function::Function(FunctionType *Ty, LinkageTypes Linkage, const Twine &name,
Module *ParentModule)
: GlobalObject(Ty, Value::FunctionVal,
- OperandTraits<Function>::op_begin(this), 0, Linkage, name) {
+ OperandTraits<Function>::op_begin(this), 0, Linkage, name),
+ NumArgs(Ty->getNumParams()) {
assert(FunctionType::isValidReturnType(getReturnType()) &&
"invalid return type");
setGlobalObjectSubClassData(0);
@@ -282,7 +239,8 @@ Function::~Function() {
dropAllReferences(); // After this it is safe to delete instructions.
// Delete all of the method arguments and unlink from symbol table...
- ArgumentList.clear();
+ if (Arguments)
+ clearArguments();
// Remove the function from the on-the-side GC table.
clearGC();
@@ -290,16 +248,33 @@ Function::~Function() {
void Function::BuildLazyArguments() const {
// Create the arguments vector, all arguments start out unnamed.
- FunctionType *FT = getFunctionType();
- for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
- assert(!FT->getParamType(i)->isVoidTy() &&
- "Cannot have void typed arguments!");
- ArgumentList.push_back(new Argument(FT->getParamType(i)));
+ auto *FT = getFunctionType();
+ if (NumArgs > 0) {
+ Arguments = std::allocator<Argument>().allocate(NumArgs);
+ for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+ Type *ArgTy = FT->getParamType(i);
+ assert(!ArgTy->isVoidTy() && "Cannot have void typed arguments!");
+ new (Arguments + i) Argument(ArgTy, "", const_cast<Function *>(this), i);
+ }
}
// Clear the lazy arguments bit.
unsigned SDC = getSubclassDataFromValue();
const_cast<Function*>(this)->setValueSubclassData(SDC &= ~(1<<0));
+ assert(!hasLazyArguments());
+}
+
+static MutableArrayRef<Argument> makeArgArray(Argument *Args, size_t Count) {
+ return MutableArrayRef<Argument>(Args, Count);
+}
+
+void Function::clearArguments() {
+ for (Argument &A : makeArgArray(Arguments, NumArgs)) {
+ A.setName("");
+ A.~Argument();
+ }
+ std::allocator<Argument>().deallocate(Arguments, NumArgs);
+ Arguments = nullptr;
}
void Function::stealArgumentListFrom(Function &Src) {
@@ -307,10 +282,10 @@ void Function::stealArgumentListFrom(Function &Src) {
// Drop the current arguments, if any, and set the lazy argument bit.
if (!hasLazyArguments()) {
- assert(llvm::all_of(ArgumentList,
+ assert(llvm::all_of(makeArgArray(Arguments, NumArgs),
[](const Argument &A) { return A.use_empty(); }) &&
"Expected arguments to be unused in declaration");
- ArgumentList.clear();
+ clearArguments();
setValueSubclassData(getSubclassDataFromValue() | (1 << 0));
}
@@ -319,18 +294,26 @@ void Function::stealArgumentListFrom(Function &Src) {
return;
// Steal arguments from Src, and fix the lazy argument bits.
- ArgumentList.splice(ArgumentList.end(), Src.ArgumentList);
+ assert(arg_size() == Src.arg_size());
+ Arguments = Src.Arguments;
+ Src.Arguments = nullptr;
+ for (Argument &A : makeArgArray(Arguments, NumArgs)) {
+ // FIXME: This does the work of transferNodesFromList inefficiently.
+ SmallString<128> Name;
+ if (A.hasName())
+ Name = A.getName();
+ if (!Name.empty())
+ A.setName("");
+ A.setParent(this);
+ if (!Name.empty())
+ A.setName(Name);
+ }
+
setValueSubclassData(getSubclassDataFromValue() & ~(1 << 0));
+ assert(!hasLazyArguments());
Src.setValueSubclassData(Src.getSubclassDataFromValue() | (1 << 0));
}
-size_t Function::arg_size() const {
- return getFunctionType()->getNumParams();
-}
-bool Function::arg_empty() const {
- return getFunctionType()->getNumParams() == 0;
-}
-
// dropAllReferences() - This function causes all the subinstructions to "let
// go" of all references that they are maintaining. This allows one to
// 'delete' a whole class at a time, even though there may be circular
@@ -362,53 +345,102 @@ void Function::dropAllReferences() {
}
void Function::addAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
void Function::addAttribute(unsigned i, Attribute Attr) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addAttribute(getContext(), i, Attr);
setAttributes(PAL);
}
-void Function::addAttributes(unsigned i, AttributeSet Attrs) {
- AttributeSet PAL = getAttributes();
+void Function::addAttributes(unsigned i, const AttrBuilder &Attrs) {
+ AttributeList PAL = getAttributes();
PAL = PAL.addAttributes(getContext(), i, Attrs);
setAttributes(PAL);
}
+void Function::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
+void Function::addParamAttr(unsigned ArgNo, Attribute Attr) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
+ setAttributes(PAL);
+}
+
+void Function::addParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttributes(getContext(), ArgNo, Attrs);
+ setAttributes(PAL);
+}
+
void Function::removeAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.removeAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
void Function::removeAttribute(unsigned i, StringRef Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.removeAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
-void Function::removeAttributes(unsigned i, AttributeSet Attrs) {
- AttributeSet PAL = getAttributes();
+void Function::removeAttributes(unsigned i, const AttrBuilder &Attrs) {
+ AttributeList PAL = getAttributes();
PAL = PAL.removeAttributes(getContext(), i, Attrs);
setAttributes(PAL);
}
+void Function::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
+void Function::removeParamAttr(unsigned ArgNo, StringRef Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
+void Function::removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttributes(getContext(), ArgNo, Attrs);
+ setAttributes(PAL);
+}
+
void Function::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
setAttributes(PAL);
}
+void Function::addDereferenceableParamAttr(unsigned ArgNo, uint64_t Bytes) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addDereferenceableParamAttr(getContext(), ArgNo, Bytes);
+ setAttributes(PAL);
+}
+
void Function::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
setAttributes(PAL);
}
+void Function::addDereferenceableOrNullParamAttr(unsigned ArgNo,
+ uint64_t Bytes) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addDereferenceableOrNullParamAttr(getContext(), ArgNo, Bytes);
+ setAttributes(PAL);
+}
+
const std::string &Function::getGC() const {
assert(hasGC() && "Function has no collector");
return getContext().getGC(*this);
@@ -428,24 +460,20 @@ void Function::clearGC() {
/// Copy all additional attributes (those not needed to create a Function) from
/// the Function Src to this one.
-void Function::copyAttributesFrom(const GlobalValue *Src) {
+void Function::copyAttributesFrom(const Function *Src) {
GlobalObject::copyAttributesFrom(Src);
- const Function *SrcF = dyn_cast<Function>(Src);
- if (!SrcF)
- return;
-
- setCallingConv(SrcF->getCallingConv());
- setAttributes(SrcF->getAttributes());
- if (SrcF->hasGC())
- setGC(SrcF->getGC());
+ setCallingConv(Src->getCallingConv());
+ setAttributes(Src->getAttributes());
+ if (Src->hasGC())
+ setGC(Src->getGC());
else
clearGC();
- if (SrcF->hasPersonalityFn())
- setPersonalityFn(SrcF->getPersonalityFn());
- if (SrcF->hasPrefixData())
- setPrefixData(SrcF->getPrefixData());
- if (SrcF->hasPrologueData())
- setPrologueData(SrcF->getPrologueData());
+ if (Src->hasPersonalityFn())
+ setPersonalityFn(Src->getPersonalityFn());
+ if (Src->hasPrefixData())
+ setPrefixData(Src->getPrefixData());
+ if (Src->hasPrologueData())
+ setPrologueData(Src->getPrologueData());
}
/// Table of string intrinsic names indexed by enum value.
@@ -528,15 +556,23 @@ void Function::recalculateIntrinsicID() {
static std::string getMangledTypeStr(Type* Ty) {
std::string Result;
if (PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
- Result += "p" + llvm::utostr(PTyp->getAddressSpace()) +
+ Result += "p" + utostr(PTyp->getAddressSpace()) +
getMangledTypeStr(PTyp->getElementType());
} else if (ArrayType* ATyp = dyn_cast<ArrayType>(Ty)) {
- Result += "a" + llvm::utostr(ATyp->getNumElements()) +
+ Result += "a" + utostr(ATyp->getNumElements()) +
getMangledTypeStr(ATyp->getElementType());
- } else if (StructType* STyp = dyn_cast<StructType>(Ty)) {
- assert(!STyp->isLiteral() && "TODO: implement literal types");
- Result += STyp->getName();
- } else if (FunctionType* FT = dyn_cast<FunctionType>(Ty)) {
+ } else if (StructType *STyp = dyn_cast<StructType>(Ty)) {
+ if (!STyp->isLiteral()) {
+ Result += "s_";
+ Result += STyp->getName();
+ } else {
+ Result += "sl_";
+ for (auto Elem : STyp->elements())
+ Result += getMangledTypeStr(Elem);
+ }
+ // Ensure nested structs are distinguishable.
+ Result += "s";
+ } else if (FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
Result += "f_" + getMangledTypeStr(FT->getReturnType());
for (size_t i = 0; i < FT->getNumParams(); i++)
Result += getMangledTypeStr(FT->getParamType(i));
@@ -568,7 +604,6 @@ std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) {
return Result;
}
-
/// IIT_Info - These are enumerators that describe the entries returned by the
/// getIntrinsicInfoTableEntries function.
///
@@ -611,18 +646,18 @@ enum IIT_Info {
IIT_SAME_VEC_WIDTH_ARG = 31,
IIT_PTR_TO_ARG = 32,
IIT_PTR_TO_ELT = 33,
- IIT_VEC_OF_PTRS_TO_ELT = 34,
+ IIT_VEC_OF_ANYPTRS_TO_ELT = 34,
IIT_I128 = 35,
IIT_V512 = 36,
IIT_V1024 = 37
};
-
static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
+ using namespace Intrinsic;
+
IIT_Info Info = IIT_Info(Infos[NextElt++]);
unsigned StructElts = 2;
- using namespace Intrinsic;
switch (Info) {
case IIT_Done:
@@ -753,10 +788,11 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToElt, ArgInfo));
return;
}
- case IIT_VEC_OF_PTRS_TO_ELT: {
- unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
- OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecOfPtrsToElt,
- ArgInfo));
+ case IIT_VEC_OF_ANYPTRS_TO_ELT: {
+ unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ unsigned short RefNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(
+ IITDescriptor::get(IITDescriptor::VecOfAnyPtrsToElt, ArgNo, RefNo));
return;
}
case IIT_EMPTYSTRUCT:
@@ -776,7 +812,6 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
llvm_unreachable("unhandled");
}
-
#define GET_INTRINSIC_GENERATOR_GLOBAL
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_GENERATOR_GLOBAL
@@ -814,10 +849,10 @@ void Intrinsic::getIntrinsicInfoTableEntries(ID id,
DecodeIITType(NextElt, IITEntries, T);
}
-
static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
ArrayRef<Type*> Tys, LLVMContext &Context) {
using namespace Intrinsic;
+
IITDescriptor D = Infos.front();
Infos = Infos.slice(1);
@@ -845,7 +880,6 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
Elts[i] = DecodeFixedType(Infos, Tys, Context);
return StructType::get(Context, makeArrayRef(Elts,D.Struct_NumElements));
}
-
case IITDescriptor::Argument:
return Tys[D.getArgumentNumber()];
case IITDescriptor::ExtendArgument: {
@@ -887,21 +921,13 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
Type *EltTy = VTy->getVectorElementType();
return PointerType::getUnqual(EltTy);
}
- case IITDescriptor::VecOfPtrsToElt: {
- Type *Ty = Tys[D.getArgumentNumber()];
- VectorType *VTy = dyn_cast<VectorType>(Ty);
- if (!VTy)
- llvm_unreachable("Expected an argument of Vector Type");
- Type *EltTy = VTy->getVectorElementType();
- return VectorType::get(PointerType::getUnqual(EltTy),
- VTy->getNumElements());
+ case IITDescriptor::VecOfAnyPtrsToElt:
+ // Return the overloaded type (which determines the pointer's address space)
+ return Tys[D.getOverloadArgNumber()];
}
- }
llvm_unreachable("unhandled");
}
-
-
FunctionType *Intrinsic::getType(LLVMContext &Context,
ID id, ArrayRef<Type*> Tys) {
SmallVector<IITDescriptor, 8> Table;
@@ -1091,11 +1117,22 @@ bool Intrinsic::matchIntrinsicType(Type *Ty, ArrayRef<Intrinsic::IITDescriptor>
return (!ThisArgType || !ReferenceType ||
ThisArgType->getElementType() != ReferenceType->getElementType());
}
- case IITDescriptor::VecOfPtrsToElt: {
- if (D.getArgumentNumber() >= ArgTys.size())
+ case IITDescriptor::VecOfAnyPtrsToElt: {
+ unsigned RefArgNumber = D.getRefArgNumber();
+
+ // This may only be used when referring to a previous argument.
+ if (RefArgNumber >= ArgTys.size())
return true;
- VectorType * ReferenceType =
- dyn_cast<VectorType> (ArgTys[D.getArgumentNumber()]);
+
+ // Record the overloaded type
+ assert(D.getOverloadArgNumber() == ArgTys.size() &&
+ "Table consistency error");
+ ArgTys.push_back(Ty);
+
+ // Verify that the overloaded type "matches" the Ref type, i.e. Ty is a
+ // vector with the same width as Ref, composed of pointers to the same
+ // element type as Ref.
+ VectorType *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
VectorType *ThisArgVecTy = dyn_cast<VectorType>(Ty);
if (!ThisArgVecTy || !ReferenceType ||
(ReferenceType->getVectorNumElements() !=
@@ -1279,9 +1316,10 @@ void Function::setValueSubclassDataBit(unsigned Bit, bool On) {
setValueSubclassData(getSubclassDataFromValue() & ~(1 << Bit));
}
-void Function::setEntryCount(uint64_t Count) {
+void Function::setEntryCount(uint64_t Count,
+ const DenseSet<GlobalValue::GUID> *S) {
MDBuilder MDB(getContext());
- setMetadata(LLVMContext::MD_prof, MDB.createFunctionEntryCount(Count));
+ setMetadata(LLVMContext::MD_prof, MDB.createFunctionEntryCount(Count, S));
}
Optional<uint64_t> Function::getEntryCount() const {
@@ -1298,6 +1336,18 @@ Optional<uint64_t> Function::getEntryCount() const {
return None;
}
+DenseSet<GlobalValue::GUID> Function::getImportGUIDs() const {
+ DenseSet<GlobalValue::GUID> R;
+ if (MDNode *MD = getMetadata(LLVMContext::MD_prof))
+ if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0)))
+ if (MDS->getString().equals("function_entry_count"))
+ for (unsigned i = 2; i < MD->getNumOperands(); i++)
+ R.insert(mdconst::extract<ConstantInt>(MD->getOperand(i))
+ ->getValue()
+ .getZExtValue());
+ return R;
+}
+
void Function::setSectionPrefix(StringRef Prefix) {
MDBuilder MDB(getContext());
setMetadata(LLVMContext::MD_section_prefix,
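Most of the Function.cpp churn above comes from the AttributeSet-to-AttributeList migration: parameter attributes are now addressed by a zero-based argument index through addParamAttr/hasParamAttribute instead of the old ArgNo + 1 slot convention, and Argument grew direct addAttr/removeAttr overloads. A small sketch under those new signatures; the helper itself is hypothetical:

#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include <cassert>
using namespace llvm;

static void markFirstParamNonNull(Function &F) {
  if (F.arg_empty())
    return;
  Argument &A = *F.arg_begin();
  if (!A.getType()->isPointerTy())
    return;
  // Either form works; both end up rewriting the function's AttributeList.
  A.addAttr(Attribute::NonNull);                      // via the Argument
  assert(F.hasParamAttribute(0, Attribute::NonNull)); // zero-based index
}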
diff --git a/contrib/llvm/lib/IR/GCOV.cpp b/contrib/llvm/lib/IR/GCOV.cpp
index 3bbcf78..d4b4552 100644
--- a/contrib/llvm/lib/IR/GCOV.cpp
+++ b/contrib/llvm/lib/IR/GCOV.cpp
@@ -103,11 +103,17 @@ bool GCOVFile::readGCDA(GCOVBuffer &Buffer) {
return true;
}
+void GCOVFile::print(raw_ostream &OS) const {
+ for (const auto &FPtr : Functions)
+ FPtr->print(OS);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Dump GCOVFile content to dbgs() for debugging purposes.
LLVM_DUMP_METHOD void GCOVFile::dump() const {
- for (const auto &FPtr : Functions)
- FPtr->dump();
+ print(dbgs());
}
+#endif
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
@@ -343,13 +349,19 @@ uint64_t GCOVFunction::getExitCount() const {
return Blocks.back()->getCount();
}
+void GCOVFunction::print(raw_ostream &OS) const {
+ OS << "===== " << Name << " (" << Ident << ") @ " << Filename << ":"
+ << LineNumber << "\n";
+ for (const auto &Block : Blocks)
+ Block->print(OS);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Dump GCOVFunction content to dbgs() for debugging purposes.
LLVM_DUMP_METHOD void GCOVFunction::dump() const {
- dbgs() << "===== " << Name << " (" << Ident << ") @ " << Filename << ":"
- << LineNumber << "\n";
- for (const auto &Block : Blocks)
- Block->dump();
+ print(dbgs());
}
+#endif
/// collectLineCounts - Collect line counts. This must be used after
/// reading .gcno and .gcda files.
@@ -400,29 +412,35 @@ void GCOVBlock::collectLineCounts(FileInfo &FI) {
FI.addBlockLine(Parent.getFilename(), N, this);
}
-/// dump - Dump GCOVBlock content to dbgs() for debugging purposes.
-LLVM_DUMP_METHOD void GCOVBlock::dump() const {
- dbgs() << "Block : " << Number << " Counter : " << Counter << "\n";
+void GCOVBlock::print(raw_ostream &OS) const {
+ OS << "Block : " << Number << " Counter : " << Counter << "\n";
if (!SrcEdges.empty()) {
- dbgs() << "\tSource Edges : ";
+ OS << "\tSource Edges : ";
for (const GCOVEdge *Edge : SrcEdges)
- dbgs() << Edge->Src.Number << " (" << Edge->Count << "), ";
- dbgs() << "\n";
+ OS << Edge->Src.Number << " (" << Edge->Count << "), ";
+ OS << "\n";
}
if (!DstEdges.empty()) {
- dbgs() << "\tDestination Edges : ";
+ OS << "\tDestination Edges : ";
for (const GCOVEdge *Edge : DstEdges)
- dbgs() << Edge->Dst.Number << " (" << Edge->Count << "), ";
- dbgs() << "\n";
+ OS << Edge->Dst.Number << " (" << Edge->Count << "), ";
+ OS << "\n";
}
if (!Lines.empty()) {
- dbgs() << "\tLines : ";
+ OS << "\tLines : ";
for (uint32_t N : Lines)
- dbgs() << (N) << ",";
- dbgs() << "\n";
+ OS << (N) << ",";
+ OS << "\n";
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+/// dump - Dump GCOVBlock content to dbgs() for debugging purposes.
+LLVM_DUMP_METHOD void GCOVBlock::dump() const {
+ print(dbgs());
+}
+#endif
+
//===----------------------------------------------------------------------===//
// FileInfo implementation.
@@ -571,8 +589,12 @@ FileInfo::openCoveragePath(StringRef CoveragePath) {
/// print - Print source files with collected line count information.
void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
StringRef GCNOFile, StringRef GCDAFile) {
- for (const auto &LI : LineInfo) {
- StringRef Filename = LI.first();
+ SmallVector<StringRef, 4> Filenames;
+ for (const auto &LI : LineInfo)
+ Filenames.push_back(LI.first());
+ std::sort(Filenames.begin(), Filenames.end());
+
+ for (StringRef Filename : Filenames) {
auto AllLines = LineConsumer(Filename);
std::string CoveragePath = getCoveragePath(Filename, MainFilename);
@@ -585,7 +607,7 @@ void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
CovOS << " -: 0:Runs:" << RunCount << "\n";
CovOS << " -: 0:Programs:" << ProgramCount << "\n";
- const LineData &Line = LI.second;
+ const LineData &Line = LineInfo[Filename];
GCOVCoverage FileCoverage(Filename);
for (uint32_t LineIndex = 0; LineIndex < Line.LastLine || !AllLines.empty();
++LineIndex) {
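GCOVFile, GCOVFunction, and GCOVBlock now expose print(raw_ostream &), with dump() reduced to a debug-build wrapper, and FileInfo::print emits files in sorted order. A sketch of printing a .gcno file to stdout through the new entry point; the path handling around it is illustrative:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/GCOV.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static bool printGCNO(StringRef Path) {
  auto BufOrErr = MemoryBuffer::getFileOrSTDIN(Path);
  if (!BufOrErr)
    return false;
  GCOVBuffer GCNOBuf(BufOrErr.get().get());
  GCOVFile GF;
  if (!GF.readGCNO(GCNOBuf))
    return false;
  GF.print(outs()); // previously only reachable as dump() to dbgs()
  return true;
}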
diff --git a/contrib/llvm/lib/IR/Globals.cpp b/contrib/llvm/lib/IR/Globals.cpp
index 6f73565..afd4a36 100644
--- a/contrib/llvm/lib/IR/Globals.cpp
+++ b/contrib/llvm/lib/IR/Globals.cpp
@@ -12,10 +12,11 @@
//
//===----------------------------------------------------------------------===//
+#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/IR/Constants.h"
#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
@@ -24,7 +25,6 @@
#include "llvm/IR/Operator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
-#include "LLVMContextImpl.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -69,6 +69,30 @@ void GlobalValue::copyAttributesFrom(const GlobalValue *Src) {
setDLLStorageClass(Src->getDLLStorageClass());
}
+void GlobalValue::removeFromParent() {
+ switch (getValueID()) {
+#define HANDLE_GLOBAL_VALUE(NAME) \
+ case Value::NAME##Val: \
+ return static_cast<NAME *>(this)->removeFromParent();
+#include "llvm/IR/Value.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a global");
+}
+
+void GlobalValue::eraseFromParent() {
+ switch (getValueID()) {
+#define HANDLE_GLOBAL_VALUE(NAME) \
+ case Value::NAME##Val: \
+ return static_cast<NAME *>(this)->eraseFromParent();
+#include "llvm/IR/Value.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a global");
+}
+
unsigned GlobalValue::getAlignment() const {
if (auto *GA = dyn_cast<GlobalAlias>(this)) {
// In general we cannot compute this at the IR level, but we try.
@@ -93,24 +117,10 @@ void GlobalObject::setAlignment(unsigned Align) {
assert(getAlignment() == Align && "Alignment representation error!");
}
-unsigned GlobalObject::getGlobalObjectSubClassData() const {
- unsigned ValueData = getGlobalValueSubClassData();
- return ValueData >> GlobalObjectBits;
-}
-
-void GlobalObject::setGlobalObjectSubClassData(unsigned Val) {
- unsigned OldData = getGlobalValueSubClassData();
- setGlobalValueSubClassData((OldData & GlobalObjectMask) |
- (Val << GlobalObjectBits));
- assert(getGlobalObjectSubClassData() == Val && "representation error");
-}
-
-void GlobalObject::copyAttributesFrom(const GlobalValue *Src) {
+void GlobalObject::copyAttributesFrom(const GlobalObject *Src) {
GlobalValue::copyAttributesFrom(Src);
- if (const auto *GV = dyn_cast<GlobalObject>(Src)) {
- setAlignment(GV->getAlignment());
- setSection(GV->getSection());
- }
+ setAlignment(Src->getAlignment());
+ setSection(Src->getSection());
}
std::string GlobalValue::getGlobalIdentifier(StringRef Name,
@@ -152,7 +162,7 @@ StringRef GlobalValue::getSection() const {
return cast<GlobalObject>(this)->getSection();
}
-Comdat *GlobalValue::getComdat() {
+const Comdat *GlobalValue::getComdat() const {
if (auto *GA = dyn_cast<GlobalAlias>(this)) {
// In general we cannot compute this at the IR level, but we try.
if (const GlobalObject *GO = GA->getBaseObject())
@@ -177,7 +187,9 @@ void GlobalObject::setSection(StringRef S) {
// Get or create a stable section name string and put it in the table in the
// context.
- S = getContext().pImpl->SectionStrings.insert(S).first->first();
+ if (!S.empty()) {
+ S = getContext().pImpl->SectionStrings.insert(S).first->first();
+ }
getContext().pImpl->GlobalObjectSections[this] = S;
// Update the HasSectionHashEntryBit. Setting the section to the empty string
@@ -240,10 +252,10 @@ bool GlobalValue::canIncreaseAlignment() const {
return true;
}
-GlobalObject *GlobalValue::getBaseObject() {
+const GlobalObject *GlobalValue::getBaseObject() const {
if (auto *GO = dyn_cast<GlobalObject>(this))
return GO;
- if (auto *GA = dyn_cast<GlobalAlias>(this))
+ if (auto *GA = dyn_cast<GlobalIndirectSymbol>(this))
return GA->getBaseObject();
return nullptr;
}
@@ -281,6 +293,8 @@ GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
InitVal != nullptr, Link, Name, AddressSpace),
isConstantGlobal(constant),
isExternallyInitializedConstant(isExternallyInitialized) {
+ assert(!Ty->isFunctionTy() && PointerType::isValidElementType(Ty) &&
+ "invalid type for global variable");
setThreadLocalMode(TLMode);
if (InitVal) {
assert(InitVal->getType() == Ty &&
@@ -299,6 +313,8 @@ GlobalVariable::GlobalVariable(Module &M, Type *Ty, bool constant,
InitVal != nullptr, Link, Name, AddressSpace),
isConstantGlobal(constant),
isExternallyInitializedConstant(isExternallyInitialized) {
+ assert(!Ty->isFunctionTy() && PointerType::isValidElementType(Ty) &&
+ "invalid type for global variable");
setThreadLocalMode(TLMode);
if (InitVal) {
assert(InitVal->getType() == Ty &&
@@ -343,12 +359,11 @@ void GlobalVariable::setInitializer(Constant *InitVal) {
/// Copy all additional attributes (those not needed to create a GlobalVariable)
/// from the GlobalVariable Src to this one.
-void GlobalVariable::copyAttributesFrom(const GlobalValue *Src) {
+void GlobalVariable::copyAttributesFrom(const GlobalVariable *Src) {
GlobalObject::copyAttributesFrom(Src);
- if (const GlobalVariable *SrcVar = dyn_cast<GlobalVariable>(Src)) {
- setThreadLocalMode(SrcVar->getThreadLocalMode());
- setExternallyInitialized(SrcVar->isExternallyInitialized());
- }
+ setThreadLocalMode(Src->getThreadLocalMode());
+ setExternallyInitialized(Src->isExternallyInitialized());
+ setAttributes(Src->getAttributes());
}
void GlobalVariable::dropAllReferences() {
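With removeFromParent/eraseFromParent now dispatched through Value.def, a global of any concrete kind can be dropped through the GlobalValue interface, while copyAttributesFrom requires the matching derived type at the call site. A minimal sketch; the helper is hypothetical:

#include "llvm/IR/GlobalValue.h"
using namespace llvm;

static bool dropIfUnused(GlobalValue &GV) {
  if (!GV.use_empty() || !GV.isDiscardableIfUnused())
    return false;
  // Dispatches to the Function, GlobalVariable, or alias implementation.
  GV.eraseFromParent();
  return true;
}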
diff --git a/contrib/llvm/lib/IR/IRBuilder.cpp b/contrib/llvm/lib/IR/IRBuilder.cpp
index d3e410d..b7fa07c 100644
--- a/contrib/llvm/lib/IR/IRBuilder.cpp
+++ b/contrib/llvm/lib/IR/IRBuilder.cpp
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Statepoint.h"
@@ -134,6 +134,37 @@ CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
return CI;
}
+CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
+ Value *Dst, Value *Src, Value *Size, uint32_t ElementSize, MDNode *TBAATag,
+ MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
+ Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
+ Module *M = BB->getParent()->getParent();
+ Value *TheFn = Intrinsic::getDeclaration(
+ M, Intrinsic::memcpy_element_unordered_atomic, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ // Set the TBAA Struct info if present.
+ if (TBAAStructTag)
+ CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
CallInst *IRBuilderBase::
CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
@@ -161,6 +192,94 @@ CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
return CI;
}
+static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
+ Value *Src) {
+ Module *M = Builder->GetInsertBlock()->getParent()->getParent();
+ Value *Ops[] = {Src};
+ Type *Tys[] = { Src->getType()->getVectorElementType(), Src->getType() };
+ auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
+ return createCallHelper(Decl, Ops, Builder);
+}
+
+CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
+ Module *M = GetInsertBlock()->getParent()->getParent();
+ Value *Ops[] = {Acc, Src};
+ Type *Tys[] = {Src->getType()->getVectorElementType(), Acc->getType(),
+ Src->getType()};
+ auto Decl = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_vector_reduce_fadd, Tys);
+ return createCallHelper(Decl, Ops, this);
+}
+
+CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
+ Module *M = GetInsertBlock()->getParent()->getParent();
+ Value *Ops[] = {Acc, Src};
+ Type *Tys[] = {Src->getType()->getVectorElementType(), Acc->getType(),
+ Src->getType()};
+ auto Decl = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_vector_reduce_fmul, Tys);
+ return createCallHelper(Decl, Ops, this);
+}
+
+CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_add,
+ Src);
+}
+
+CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_mul,
+ Src);
+}
+
+CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_and,
+ Src);
+}
+
+CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_or,
+ Src);
+}
+
+CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_xor,
+ Src);
+}
+
+CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
+ auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smax
+ : Intrinsic::experimental_vector_reduce_umax;
+ return getReductionIntrinsic(this, ID, Src);
+}
+
+CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
+ auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smin
+ : Intrinsic::experimental_vector_reduce_umin;
+ return getReductionIntrinsic(this, ID, Src);
+}
+
+CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src, bool NoNaN) {
+ auto Rdx = getReductionIntrinsic(
+ this, Intrinsic::experimental_vector_reduce_fmax, Src);
+ if (NoNaN) {
+ FastMathFlags FMF;
+ FMF.setNoNaNs();
+ Rdx->setFastMathFlags(FMF);
+ }
+ return Rdx;
+}
+
+CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src, bool NoNaN) {
+ auto Rdx = getReductionIntrinsic(
+ this, Intrinsic::experimental_vector_reduce_fmin, Src);
+ if (NoNaN) {
+ FastMathFlags FMF;
+ FMF.setNoNaNs();
+ Rdx->setFastMathFlags(FMF);
+ }
+ return Rdx;
+}
+
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
assert(isa<PointerType>(Ptr->getType()) &&
"lifetime.start only applies to pointers.");
@@ -172,7 +291,8 @@ CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
"lifetime.start requires the size to be an i64");
Value *Ops[] = { Size, Ptr };
Module *M = BB->getParent()->getParent();
- Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_start);
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_start,
+ { Ptr->getType() });
return createCallHelper(TheFn, Ops, this);
}
@@ -187,7 +307,8 @@ CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
"lifetime.end requires the size to be an i64");
Value *Ops[] = { Size, Ptr };
Module *M = BB->getParent()->getParent();
- Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_end);
+ Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::lifetime_end,
+ { Ptr->getType() });
return createCallHelper(TheFn, Ops, this);
}
@@ -291,11 +412,16 @@ CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, unsigned Align,
Mask = Constant::getAllOnesValue(VectorType::get(Type::getInt1Ty(Context),
NumElts));
- Value * Ops[] = {Ptrs, getInt32(Align), Mask, UndefValue::get(DataTy)};
+ if (!PassThru)
+ PassThru = UndefValue::get(DataTy);
+
+ Type *OverloadedTypes[] = {DataTy, PtrsTy};
+ Value * Ops[] = {Ptrs, getInt32(Align), Mask, PassThru};
// We specify only one type when we create this intrinsic. Types of other
// arguments are derived from this type.
- return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, { DataTy }, Name);
+ return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
+ Name);
}
/// \brief Create a call to a Masked Scatter intrinsic.
@@ -321,11 +447,13 @@ CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
if (!Mask)
Mask = Constant::getAllOnesValue(VectorType::get(Type::getInt1Ty(Context),
NumElts));
+
+ Type *OverloadedTypes[] = {DataTy, PtrsTy};
Value * Ops[] = {Data, Ptrs, getInt32(Align), Mask};
// We specify only one type when we create this intrinsic. Types of other
// arguments are derived from this type.
- return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, { DataTy });
+ return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
template <typename T0, typename T1, typename T2, typename T3>
@@ -482,3 +610,11 @@ CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
getInt32(DerivedOffset)};
return createCallHelper(FnGCRelocate, Args, this, Name);
}
+
+CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID,
+ Value *LHS, Value *RHS,
+ const Twine &Name) {
+ Module *M = BB->getParent()->getParent();
+ Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
+ return createCallHelper(Fn, { LHS, RHS }, this, Name);
+}
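The IRBuilder additions above cover the element-wise unordered-atomic memcpy and the experimental vector-reduction intrinsics. A short sketch of the reduction helpers; function and value names are illustrative:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *sumLanes(IRBuilder<> &B, Value *Vec) {
  // Emits a call to llvm.experimental.vector.reduce.add.* on Vec.
  return B.CreateAddReduce(Vec);
}

static Value *maxLane(IRBuilder<> &B, Value *Vec) {
  // Signed lane-wise maximum: llvm.experimental.vector.reduce.smax.*.
  return B.CreateIntMaxReduce(Vec, /*IsSigned=*/true);
}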
diff --git a/contrib/llvm/lib/IR/IRPrintingPasses.cpp b/contrib/llvm/lib/IR/IRPrintingPasses.cpp
index 05e206c..955fdc7 100644
--- a/contrib/llvm/lib/IR/IRPrintingPasses.cpp
+++ b/contrib/llvm/lib/IR/IRPrintingPasses.cpp
@@ -70,6 +70,8 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
+
+ StringRef getPassName() const override { return "Print Module IR"; }
};
class PrintFunctionPassWrapper : public FunctionPass {
@@ -91,6 +93,8 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
+
+ StringRef getPassName() const override { return "Print Function IR"; }
};
class PrintBasicBlockPass : public BasicBlockPass {
@@ -111,6 +115,8 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
+
+ StringRef getPassName() const override { return "Print BasicBlock IR"; }
};
}
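The printing passes only gain getPassName overrides, so tooling such as -time-passes and -debug-pass=Structure reports a readable name instead of a mangled type. The same convention for a made-up legacy pass (the class name is purely illustrative):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/Pass.h"
  using namespace llvm;

  namespace {
  struct HypotheticalPass : public ModulePass {
    static char ID;
    HypotheticalPass() : ModulePass(ID) {}
    bool runOnModule(Module &) override { return false; }
    // Reported by the legacy pass manager's diagnostics and timers.
    StringRef getPassName() const override { return "Hypothetical Pass"; }
  };
  char HypotheticalPass::ID = 0;
  } // end anonymous namespace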
diff --git a/contrib/llvm/lib/IR/InlineAsm.cpp b/contrib/llvm/lib/IR/InlineAsm.cpp
index 5a91185..ad22efd 100644
--- a/contrib/llvm/lib/IR/InlineAsm.cpp
+++ b/contrib/llvm/lib/IR/InlineAsm.cpp
@@ -1,4 +1,4 @@
-//===-- InlineAsm.cpp - Implement the InlineAsm class ---------------------===//
+//===- InlineAsm.cpp - Implement the InlineAsm class ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,24 +14,19 @@
#include "llvm/IR/InlineAsm.h"
#include "ConstantsContext.h"
#include "LLVMContextImpl.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include <algorithm>
+#include <cassert>
#include <cctype>
-using namespace llvm;
-
-// Implement the first virtual method in this class in this file so the
-// InlineAsm vtable is emitted here.
-InlineAsm::~InlineAsm() {
-}
+#include <cstddef>
+#include <cstdlib>
-InlineAsm *InlineAsm::get(FunctionType *FTy, StringRef AsmString,
- StringRef Constraints, bool hasSideEffects,
- bool isAlignStack, AsmDialect asmDialect) {
- InlineAsmKeyType Key(AsmString, Constraints, FTy, hasSideEffects,
- isAlignStack, asmDialect);
- LLVMContextImpl *pImpl = FTy->getContext().pImpl;
- return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(FTy), Key);
-}
+using namespace llvm;
InlineAsm::InlineAsm(FunctionType *FTy, const std::string &asmString,
const std::string &constraints, bool hasSideEffects,
@@ -40,12 +35,20 @@ InlineAsm::InlineAsm(FunctionType *FTy, const std::string &asmString,
AsmString(asmString), Constraints(constraints), FTy(FTy),
HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack),
Dialect(asmDialect) {
-
// Do various checks on the constraint string and type.
assert(Verify(getFunctionType(), constraints) &&
"Function type not legal for constraints!");
}
+InlineAsm *InlineAsm::get(FunctionType *FTy, StringRef AsmString,
+ StringRef Constraints, bool hasSideEffects,
+ bool isAlignStack, AsmDialect asmDialect) {
+ InlineAsmKeyType Key(AsmString, Constraints, FTy, hasSideEffects,
+ isAlignStack, asmDialect);
+ LLVMContextImpl *pImpl = FTy->getContext().pImpl;
+ return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(FTy), Key);
+}
+
void InlineAsm::destroyConstant() {
getType()->getContext().pImpl->InlineAsms.remove(this);
delete this;
@@ -55,14 +58,6 @@ FunctionType *InlineAsm::getFunctionType() const {
return FTy;
}
-///Default constructor.
-InlineAsm::ConstraintInfo::ConstraintInfo() :
- Type(isInput), isEarlyClobber(false),
- MatchingInput(-1), isCommutative(false),
- isIndirect(false), isMultipleAlternative(false),
- currentAlternativeIndex(0) {
-}
-
/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
diff --git a/contrib/llvm/lib/IR/Instruction.cpp b/contrib/llvm/lib/IR/Instruction.cpp
index 2fa0348..365cb01 100644
--- a/contrib/llvm/lib/IR/Instruction.cpp
+++ b/contrib/llvm/lib/IR/Instruction.cpp
@@ -11,11 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Instruction.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
@@ -42,8 +43,6 @@ Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
InsertAtEnd->getInstList().push_back(this);
}
-
-// Out of line virtual method, so the vtable, etc has a home.
Instruction::~Instruction() {
assert(!Parent && "Instruction still linked in the program!");
if (hasMetadataHashEntry())
@@ -59,12 +58,6 @@ const Module *Instruction::getModule() const {
return getParent()->getModule();
}
-Module *Instruction::getModule() {
- return getParent()->getModule();
-}
-
-Function *Instruction::getFunction() { return getParent()->getParent(); }
-
const Function *Instruction::getFunction() const {
return getParent()->getParent();
}
@@ -122,6 +115,29 @@ bool Instruction::hasNoSignedWrap() const {
return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}
+void Instruction::dropPoisonGeneratingFlags() {
+ switch (getOpcode()) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::Shl:
+ cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
+ cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
+ break;
+
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::AShr:
+ case Instruction::LShr:
+ cast<PossiblyExactOperator>(this)->setIsExact(false);
+ break;
+
+ case Instruction::GetElementPtr:
+ cast<GetElementPtrInst>(this)->setIsInBounds(false);
+ break;
+ }
+}
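dropPoisonGeneratingFlags strips the flags whose violation yields poison: nuw/nsw on add/sub/mul/shl, exact on the divisions and right shifts, and inbounds on GEPs. A typical caller is a transform that speculates an instruction to a point where those guarantees no longer hold; a sketch under that assumption:

  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // Hoists I to InsertPt; since the guarding condition is gone, the
  // poison-generating flags can no longer be trusted and are dropped.
  static void speculateAbove(Instruction *I, Instruction *InsertPt) {
    I->dropPoisonGeneratingFlags();
    I->moveBefore(InsertPt);
  }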
+
bool Instruction::isExact() const {
return cast<PossiblyExactOperator>(this)->isExact();
}
@@ -186,6 +202,11 @@ bool Instruction::hasAllowReciprocal() const {
return cast<FPMathOperator>(this)->hasAllowReciprocal();
}
+bool Instruction::hasAllowContract() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasAllowContract();
+}
+
FastMathFlags Instruction::getFastMathFlags() const {
assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
return cast<FPMathOperator>(this)->getFastMathFlags();
@@ -195,10 +216,10 @@ void Instruction::copyFastMathFlags(const Instruction *I) {
copyFastMathFlags(I->getFastMathFlags());
}
-void Instruction::copyIRFlags(const Value *V) {
+void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
// Copy the wrapping flags.
- if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
- if (isa<OverflowingBinaryOperator>(this)) {
+ if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
+ if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
setHasNoSignedWrap(OB->hasNoSignedWrap());
setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
}
@@ -341,13 +362,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
(LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
- LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
+ LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
(SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
- SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
+ SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(I1))
@@ -365,7 +386,7 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
- FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
+ FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
@@ -373,12 +394,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
CXI->getFailureOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
- CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
+ CXI->getSyncScopeID() ==
+ cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
- RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();
+ RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
return true;
}
@@ -511,6 +533,30 @@ bool Instruction::isAtomic() const {
}
}
+bool Instruction::hasAtomicLoad() const {
+ assert(isAtomic());
+ switch (getOpcode()) {
+ default:
+ return false;
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::Load:
+ return true;
+ }
+}
+
+bool Instruction::hasAtomicStore() const {
+ assert(isAtomic());
+ switch (getOpcode()) {
+ default:
+ return false;
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::Store:
+ return true;
+ }
+}
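hasAtomicLoad and hasAtomicStore classify an already-atomic instruction by whether it reads or writes memory; cmpxchg and atomicrmw count as both. Note the assert on entry: callers must check isAtomic() first, as in this sketch:

  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // True for atomic loads, cmpxchg and atomicrmw; false for non-atomic
  // instructions and for atomic stores or fences.
  static bool ordersAnAtomicLoad(const Instruction &I) {
    return I.isAtomic() && I.hasAtomicLoad();
  }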
+
bool Instruction::mayThrow() const {
if (const CallInst *CI = dyn_cast<CallInst>(this))
return !CI->doesNotThrow();
@@ -521,17 +567,6 @@ bool Instruction::mayThrow() const {
return isa<ResumeInst>(this);
}
-/// Return true if the instruction is associative:
-///
-/// Associative operators satisfy: x op (y op z) === (x op y) op z
-///
-/// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
-///
-bool Instruction::isAssociative(unsigned Opcode) {
- return Opcode == And || Opcode == Or || Opcode == Xor ||
- Opcode == Add || Opcode == Mul;
-}
-
bool Instruction::isAssociative() const {
unsigned Opcode = getOpcode();
if (isAssociative(Opcode))
@@ -546,51 +581,6 @@ bool Instruction::isAssociative() const {
}
}
-/// Return true if the instruction is commutative:
-///
-/// Commutative operators satisfy: (x op y) === (y op x)
-///
-/// In LLVM, these are the associative operators, plus SetEQ and SetNE, when
-/// applied to any type.
-///
-bool Instruction::isCommutative(unsigned op) {
- switch (op) {
- case Add:
- case FAdd:
- case Mul:
- case FMul:
- case And:
- case Or:
- case Xor:
- return true;
- default:
- return false;
- }
-}
-
-/// Return true if the instruction is idempotent:
-///
-/// Idempotent operators satisfy: x op x === x
-///
-/// In LLVM, the And and Or operators are idempotent.
-///
-bool Instruction::isIdempotent(unsigned Opcode) {
- return Opcode == And || Opcode == Or;
-}
-
-/// Return true if the instruction is nilpotent:
-///
-/// Nilpotent operators satisfy: x op x === Id,
-///
-/// where Id is the identity for the operator, i.e. a constant such that
-/// x op Id === x and Id op x === x for all x.
-///
-/// In LLVM, the Xor operator is nilpotent.
-///
-bool Instruction::isNilpotent(unsigned Opcode) {
- return Opcode == Xor;
-}
-
Instruction *Instruction::cloneImpl() const {
llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
@@ -651,3 +641,55 @@ Instruction *Instruction::clone() const {
New->copyMetadata(*this);
return New;
}
+
+void Instruction::updateProfWeight(uint64_t S, uint64_t T) {
+ auto *ProfileData = getMetadata(LLVMContext::MD_prof);
+ if (ProfileData == nullptr)
+ return;
+
+ auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
+ if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
+ !ProfDataName->getString().equals("VP")))
+ return;
+
+ MDBuilder MDB(getContext());
+ SmallVector<Metadata *, 3> Vals;
+ Vals.push_back(ProfileData->getOperand(0));
+ APInt APS(128, S), APT(128, T);
+ if (ProfDataName->getString().equals("branch_weights"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128,
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt64Ty(getContext()),
+ Val.udiv(APT).getLimitedValue())));
+ }
+ else if (ProfDataName->getString().equals("VP"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
+ // The first value is the key of the value profile, which will not change.
+ Vals.push_back(ProfileData->getOperand(i));
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128,
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt64Ty(getContext()),
+ Val.udiv(APT).getLimitedValue())));
+ }
+ setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
+}
+
+void Instruction::setProfWeight(uint64_t W) {
+ assert((isa<CallInst>(this) || isa<InvokeInst>(this)) &&
+ "Can only set weights for call and invoke instrucitons");
+         "Can only set weights for call and invoke instructions");
+ SmallVector<uint32_t, 1> Weights;
+ Weights.push_back(W);
+ MDBuilder MDB(getContext());
+ setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
+}
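updateProfWeight rescales every branch_weights or value-profile ("VP") count by S/T using 128-bit intermediate arithmetic, and setProfWeight stamps a single weight on a call or invoke. A sketch of how a transform that splits a call site's execution count might use it; the names and the split are illustrative only, and OrigCount is assumed nonzero:

  #include "llvm/IR/Instructions.h"
  #include <cstdint>
  using namespace llvm;

  // Orig originally carried OrigCount executions; Clone now takes NewCount of
  // them, so the !prof metadata on both copies is rescaled proportionally.
  static void splitProfWeights(CallInst *Orig, CallInst *Clone,
                               uint64_t OrigCount, uint64_t NewCount) {
    Clone->updateProfWeight(NewCount, OrigCount);
    Orig->updateProfWeight(OrigCount - NewCount, OrigCount);
  }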
diff --git a/contrib/llvm/lib/IR/Instructions.cpp b/contrib/llvm/lib/IR/Instructions.cpp
index b679269..2c49564 100644
--- a/contrib/llvm/lib/IR/Instructions.cpp
+++ b/contrib/llvm/lib/IR/Instructions.cpp
@@ -1,4 +1,4 @@
-//===-- Instructions.cpp - Implement the LLVM instructions ----------------===//
+//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,16 +14,34 @@
#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
-#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -41,16 +59,40 @@ User::op_iterator CallSite::getCallee() const {
// TerminatorInst Class
//===----------------------------------------------------------------------===//
-// Out of line virtual method, so the vtable, etc has a home.
-TerminatorInst::~TerminatorInst() {
+unsigned TerminatorInst::getNumSuccessors() const {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<const CLASS *>(this)->getNumSuccessors();
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
}
-//===----------------------------------------------------------------------===//
-// UnaryInstruction Class
-//===----------------------------------------------------------------------===//
+BasicBlock *TerminatorInst::getSuccessor(unsigned idx) const {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<const CLASS *>(this)->getSuccessor(idx);
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
+}
-// Out of line virtual method, so the vtable, etc has a home.
-UnaryInstruction::~UnaryInstruction() {
+void TerminatorInst::setSuccessor(unsigned idx, BasicBlock *B) {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<CLASS *>(this)->setSuccessor(idx, B);
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
}
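The per-subclass getSuccessorV/setSuccessorV/getNumSuccessorsV virtuals are gone; TerminatorInst now dispatches through a switch generated from Instruction.def. Callers are unchanged, as in this sketch:

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/InstrTypes.h"
  using namespace llvm;

  // Retargets every From -> To edge of the terminator; the successor
  // accessors now resolve via an opcode switch rather than a virtual call.
  static void redirectSuccessors(TerminatorInst *TI, BasicBlock *From,
                                 BasicBlock *To) {
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      if (TI->getSuccessor(i) == From)
        TI->setSuccessor(i, To);
  }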
//===----------------------------------------------------------------------===//
@@ -82,13 +124,10 @@ const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
return nullptr;
}
-
//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//
-void PHINode::anchor() {}
-
PHINode::PHINode(const PHINode &PN)
: Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
ReservedSpace(PN.getNumOperands()) {
@@ -242,9 +281,6 @@ void LandingPadInst::addClause(Constant *Val) {
// CallInst Implementation
//===----------------------------------------------------------------------===//
-CallInst::~CallInst() {
-}
-
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
this->FTy = FTy;
@@ -307,7 +343,7 @@ CallInst::CallInst(const CallInst &CI)
: Instruction(CI.getType(), Instruction::Call,
OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(),
CI.getNumOperands()),
- AttributeList(CI.AttributeList), FTy(CI.FTy) {
+ Attrs(CI.Attrs), FTy(CI.FTy) {
setTailCallKind(CI.getTailCallKind());
setCallingConv(CI.getCallingConv());
@@ -334,59 +370,97 @@ CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
Value *CallInst::getReturnedArgOperand() const {
unsigned Index;
- if (AttributeList.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
- return getArgOperand(Index-1);
+ if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
if (const Function *F = getCalledFunction())
if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
Index)
- return getArgOperand(Index-1);
-
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
+
return nullptr;
}
void CallInst::addAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
void CallInst::addAttribute(unsigned i, Attribute Attr) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addAttribute(getContext(), i, Attr);
setAttributes(PAL);
}
+void CallInst::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
+void CallInst::addParamAttr(unsigned ArgNo, Attribute Attr) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
+ setAttributes(PAL);
+}
+
void CallInst::removeAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.removeAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
void CallInst::removeAttribute(unsigned i, StringRef Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.removeAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
+void CallInst::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
+void CallInst::removeParamAttr(unsigned ArgNo, StringRef Kind) {
+ assert(ArgNo < getNumArgOperands() && "Out of bounds");
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
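The new addParamAttr/removeParamAttr helpers take a zero-based argument number and translate to the AttributeList slot internally, which is also what paramHasAttr now expects. A sketch (the NonNull attribute is just an example choice):

  #include "llvm/IR/Attributes.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Marks the first call argument nonnull; ArgNo 0 means the first argument,
  // not the return value.
  static void markFirstArgNonNull(CallInst *CI) {
    if (CI->getNumArgOperands() > 0)
      CI->addParamAttr(0, Attribute::NonNull);
  }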
void CallInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
setAttributes(PAL);
}
void CallInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
setAttributes(PAL);
}
+bool CallInst::hasRetAttr(Attribute::AttrKind Kind) const {
+ if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
+ return true;
+
+ // Look at the callee, if available.
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
+ return false;
+}
+
bool CallInst::paramHasAttr(unsigned i, Attribute::AttrKind Kind) const {
- assert(i < (getNumArgOperands() + 1) && "Param index out of bounds!");
+ assert(i < getNumArgOperands() && "Param index out of bounds!");
- if (AttributeList.hasAttribute(i, Kind))
+ if (Attrs.hasParamAttribute(i, Kind))
return true;
if (const Function *F = getCalledFunction())
- return F->getAttributes().hasAttribute(i, Kind);
+ return F->getAttributes().hasParamAttribute(i, Kind);
return false;
}
@@ -400,8 +474,13 @@ bool CallInst::dataOperandHasImpliedAttr(unsigned i,
// question is a call argument; or be indirectly implied by the kind of its
// containing operand bundle, if the operand is a bundle operand.
+ if (i == AttributeList::ReturnIndex)
+ return hasRetAttr(Kind);
+
+ // FIXME: Avoid these i - 1 calculations and update the API to use zero-based
+ // indices.
if (i < (getNumArgOperands() + 1))
- return paramHasAttr(i, Kind);
+ return paramHasAttr(i - 1, Kind);
assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
"Must be either a call argument or an operand bundle!");
@@ -466,7 +545,7 @@ static Instruction *createMalloc(Instruction *InsertBefore,
Value *MallocFunc = MallocF;
if (!MallocFunc)
// prototype malloc as "void *malloc(size_t)"
- MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, nullptr);
+ MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
CallInst *MCall = nullptr;
Instruction *Result = nullptr;
@@ -489,7 +568,8 @@ static Instruction *createMalloc(Instruction *InsertBefore,
MCall->setTailCall();
if (Function *F = dyn_cast<Function>(MallocFunc)) {
MCall->setCallingConv(F->getCallingConv());
- if (!F->doesNotAlias(0)) F->setDoesNotAlias(0);
+ if (!F->returnDoesNotAlias())
+ F->setReturnDoesNotAlias();
}
assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
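Module::getOrInsertFunction no longer takes a trailing nullptr sentinel, which is why the malloc and free prototypes here drop it. A sketch of declaring malloc the same way; IntPtrTy is assumed to come from the caller's DataLayout:

  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Declares (or finds) i8* malloc(size_t) without the old nullptr terminator.
  static Constant *declareMalloc(Module &M, Type *IntPtrTy) {
    Type *BytePtrTy = Type::getInt8PtrTy(M.getContext());
    return M.getOrInsertFunction("malloc", BytePtrTy, IntPtrTy);
  }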
@@ -520,7 +600,6 @@ Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
ArraySize, OpB, MallocF, Name);
}
-
/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
/// possibly multiplied by the array size if the array size is not
@@ -560,7 +639,7 @@ static Instruction *createFree(Value *Source,
Type *VoidTy = Type::getVoidTy(M->getContext());
Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
// prototype free as "void free(void*)"
- Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, nullptr);
+ Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
CallInst *Result = nullptr;
Value *PtrCast = Source;
if (InsertBefore) {
@@ -646,7 +725,7 @@ InvokeInst::InvokeInst(const InvokeInst &II)
OperandTraits<InvokeInst>::op_end(this) -
II.getNumOperands(),
II.getNumOperands()),
- AttributeList(II.AttributeList), FTy(II.FTy) {
+ Attrs(II.Attrs), FTy(II.FTy) {
setCallingConv(II.getCallingConv());
std::copy(II.op_begin(), II.op_end(), op_begin());
std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
@@ -668,36 +747,36 @@ InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
return NewII;
}
-BasicBlock *InvokeInst::getSuccessorV(unsigned idx) const {
- return getSuccessor(idx);
-}
-unsigned InvokeInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) {
- return setSuccessor(idx, B);
-}
-
Value *InvokeInst::getReturnedArgOperand() const {
unsigned Index;
- if (AttributeList.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
- return getArgOperand(Index-1);
+ if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
if (const Function *F = getCalledFunction())
if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
Index)
- return getArgOperand(Index-1);
-
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
+
return nullptr;
}
+bool InvokeInst::hasRetAttr(Attribute::AttrKind Kind) const {
+ if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
+ return true;
+
+ // Look at the callee, if available.
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
+ return false;
+}
+
bool InvokeInst::paramHasAttr(unsigned i, Attribute::AttrKind Kind) const {
- assert(i < (getNumArgOperands() + 1) && "Param index out of bounds!");
+ assert(i < getNumArgOperands() && "Param index out of bounds!");
- if (AttributeList.hasAttribute(i, Kind))
+ if (Attrs.hasParamAttribute(i, Kind))
return true;
if (const Function *F = getCalledFunction())
- return F->getAttributes().hasAttribute(i, Kind);
+ return F->getAttributes().hasParamAttribute(i, Kind);
return false;
}
@@ -711,8 +790,13 @@ bool InvokeInst::dataOperandHasImpliedAttr(unsigned i,
// question is an invoke argument; or be indirectly implied by the kind of its
// containing operand bundle, if the operand is a bundle operand.
+ if (i == AttributeList::ReturnIndex)
+ return hasRetAttr(Kind);
+
+ // FIXME: Avoid these i - 1 calculations and update the API to use zero-based
+ // indices.
if (i < (getNumArgOperands() + 1))
- return paramHasAttr(i, Kind);
+ return paramHasAttr(i - 1, Kind);
assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
"Must be either an invoke argument or an operand bundle!");
@@ -720,37 +804,49 @@ bool InvokeInst::dataOperandHasImpliedAttr(unsigned i,
}
void InvokeInst::addAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
void InvokeInst::addAttribute(unsigned i, Attribute Attr) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addAttribute(getContext(), i, Attr);
setAttributes(PAL);
}
+void InvokeInst::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
void InvokeInst::removeAttribute(unsigned i, Attribute::AttrKind Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.removeAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
void InvokeInst::removeAttribute(unsigned i, StringRef Kind) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.removeAttribute(getContext(), i, Kind);
setAttributes(PAL);
}
+void InvokeInst::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ AttributeList PAL = getAttributes();
+ PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
+ setAttributes(PAL);
+}
+
void InvokeInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
setAttributes(PAL);
}
void InvokeInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
- AttributeSet PAL = getAttributes();
+ AttributeList PAL = getAttributes();
PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
setAttributes(PAL);
}
@@ -780,6 +876,7 @@ ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
if (retVal)
Op<0>() = retVal;
}
+
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
@@ -787,28 +884,12 @@ ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
if (retVal)
Op<0>() = retVal;
}
+
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
: TerminatorInst(Type::getVoidTy(Context), Instruction::Ret,
OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {
}
-unsigned ReturnInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-
-/// Out-of-line ReturnInst method, put here so the C++ compiler can choose to
-/// emit the vtable for the class in this translation unit.
-void ReturnInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
- llvm_unreachable("ReturnInst has no successors!");
-}
-
-BasicBlock *ReturnInst::getSuccessorV(unsigned idx) const {
- llvm_unreachable("ReturnInst has no successors!");
-}
-
-ReturnInst::~ReturnInst() {
-}
-
//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//
@@ -831,18 +912,6 @@ ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
Op<0>() = Exn;
}
-unsigned ResumeInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-
-void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
- llvm_unreachable("ResumeInst has no successors!");
-}
-
-BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
- llvm_unreachable("ResumeInst has no successors!");
-}
-
//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//
@@ -885,18 +954,6 @@ CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
init(CleanupPad, UnwindBB);
}
-BasicBlock *CleanupReturnInst::getSuccessorV(unsigned Idx) const {
- assert(Idx == 0);
- return getUnwindDest();
-}
-unsigned CleanupReturnInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-void CleanupReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
- assert(Idx == 0);
- setUnwindDest(B);
-}
-
//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
@@ -928,18 +985,6 @@ CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
init(CatchPad, BB);
}
-BasicBlock *CatchReturnInst::getSuccessorV(unsigned Idx) const {
- assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
- return getSuccessor();
-}
-unsigned CatchReturnInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-void CatchReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
- assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
- setSuccessor(B);
-}
-
//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//
@@ -1023,16 +1068,6 @@ void CatchSwitchInst::removeHandler(handler_iterator HI) {
setNumHungOffUseOperands(getNumOperands() - 1);
}
-BasicBlock *CatchSwitchInst::getSuccessorV(unsigned idx) const {
- return getSuccessor(idx);
-}
-unsigned CatchSwitchInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-void CatchSwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
- setSuccessor(idx, B);
-}
-
//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
@@ -1085,18 +1120,6 @@ UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
nullptr, 0, InsertAtEnd) {
}
-unsigned UnreachableInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-
-void UnreachableInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
- llvm_unreachable("UnreachableInst has no successors!");
-}
-
-BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const {
- llvm_unreachable("UnreachableInst has no successors!");
-}
-
//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//
@@ -1114,6 +1137,7 @@ BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
assert(IfTrue && "Branch destination may not be null!");
Op<-1>() = IfTrue;
}
+
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
@@ -1148,7 +1172,6 @@ BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
#endif
}
-
BranchInst::BranchInst(const BranchInst &BI) :
TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br,
OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
@@ -1172,17 +1195,6 @@ void BranchInst::swapSuccessors() {
swapProfMetadata();
}
-BasicBlock *BranchInst::getSuccessorV(unsigned idx) const {
- return getSuccessor(idx);
-}
-unsigned BranchInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-void BranchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
- setSuccessor(idx, B);
-}
-
-
//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//
@@ -1199,44 +1211,44 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
return Amt;
}
-AllocaInst::AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore)
- : AllocaInst(Ty, /*ArraySize=*/nullptr, Name, InsertBefore) {}
-
-AllocaInst::AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
- : AllocaInst(Ty, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
-
-AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, const Twine &Name,
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
Instruction *InsertBefore)
- : AllocaInst(Ty, ArraySize, /*Align=*/0, Name, InsertBefore) {}
+ : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
-AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, const Twine &Name,
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
BasicBlock *InsertAtEnd)
- : AllocaInst(Ty, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}
+ : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
-AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
const Twine &Name, Instruction *InsertBefore)
- : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
- getAISize(Ty->getContext(), ArraySize), InsertBefore),
- AllocatedType(Ty) {
+ : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ const Twine &Name, BasicBlock *InsertAtEnd)
+ : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ unsigned Align, const Twine &Name,
+ Instruction *InsertBefore)
+ : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
+ getAISize(Ty->getContext(), ArraySize), InsertBefore),
+ AllocatedType(Ty) {
setAlignment(Align);
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
}
-AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
- const Twine &Name, BasicBlock *InsertAtEnd)
- : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
- getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ unsigned Align, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
+ getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
AllocatedType(Ty) {
setAlignment(Align);
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
}
-// Out of line virtual method, so the vtable, etc has a home.
-AllocaInst::~AllocaInst() {
-}
-
void AllocaInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
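The AllocaInst constructors now take an explicit address space instead of hard-coding address space 0, to support targets whose stack does not live in AS0. A sketch; in-tree callers typically feed it the DataLayout's alloca address space:

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Creates a scalar (non-array) alloca of Ty in the target's alloca address
  // space, inserted before InsertBefore.
  static AllocaInst *createStackSlot(Type *Ty, const DataLayout &DL,
                                     Instruction *InsertBefore) {
    return new AllocaInst(Ty, DL.getAllocaAddrSpace(), /*ArraySize=*/nullptr,
                          "slot", InsertBefore);
  }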
@@ -1292,34 +1304,34 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBef) {}
+ SyncScope::System, InsertBef) {}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, BasicBlock *InsertAE)
: LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAE) {}
+ SyncScope::System, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope, Instruction *InsertBef)
+ SyncScope::ID SSID, Instruction *InsertBef)
: UnaryInstruction(Ty, Load, Ptr, InsertBef) {
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
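Throughout the file the SynchronizationScope enum (SingleThread/CrossThread) is replaced by SyncScope::ID, with SyncScope::System as the cross-thread default. Creating an atomic load now looks like this sketch (the alignment and ordering are illustrative):

  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/Support/AtomicOrdering.h"
  using namespace llvm;

  // An acquire load synchronized with every other thread in the system.
  static LoadInst *loadAcquire(Value *Ptr, Instruction *InsertBefore) {
    return new LoadInst(Ptr, "val", /*isVolatile=*/false, /*Align=*/8,
                        AtomicOrdering::Acquire, SyncScope::System,
                        InsertBefore);
  }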
@@ -1407,16 +1419,16 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
Instruction *InsertBefore)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBefore) {}
+ SyncScope::System, InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
BasicBlock *InsertAtEnd)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAtEnd) {}
+ SyncScope::System, InsertAtEnd) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1426,13 +1438,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1442,7 +1454,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
@@ -1462,13 +1474,13 @@ void StoreInst::setAlignment(unsigned Align) {
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
setSuccessOrdering(SuccessOrdering);
setFailureOrdering(FailureOrdering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
"All operands must be non-null!");
@@ -1495,27 +1507,25 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(
- StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
- nullptr),
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(
- StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
- nullptr),
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
//===----------------------------------------------------------------------===//
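The same sentinel cleanup applies to StructType::get, which is variadic on element types, so the cmpxchg result type is built without the trailing nullptr. Sketch:

  #include "llvm/IR/DerivedTypes.h"
  using namespace llvm;

  // { ValTy, i1 } -- the loaded value plus the success bit, as cmpxchg returns.
  static StructType *cmpXchgResultTy(Type *ValTy) {
    return StructType::get(ValTy, Type::getInt1Ty(ValTy->getContext()));
  }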
@@ -1524,12 +1534,12 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Val;
setOperation(Operation);
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) &&
"All operands must be non-null!");
@@ -1544,24 +1554,24 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertBefore) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertAtEnd) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1569,27 +1579,25 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
//===----------------------------------------------------------------------===//
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
//===----------------------------------------------------------------------===//
// GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//
-void GetElementPtrInst::anchor() {}
-
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &Name) {
assert(getNumOperands() == 1 + IdxList.size() &&
@@ -1726,14 +1734,12 @@ ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
setName(Name);
}
-
bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
return false;
return true;
}
-
//===----------------------------------------------------------------------===//
// InsertElementInst Implementation
//===----------------------------------------------------------------------===//
@@ -1780,7 +1786,6 @@ bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
return true;
}
-
//===----------------------------------------------------------------------===//
// ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//
@@ -1827,7 +1832,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
return false;
// Mask must be vector of i32.
- VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
+ auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
return false;
@@ -1835,10 +1840,10 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
return true;
- if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
+ if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
for (Value *Op : MV->operands()) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ if (auto *CI = dyn_cast<ConstantInt>(Op)) {
if (CI->uge(V1Size*2))
return false;
} else if (!isa<UndefValue>(Op)) {
@@ -1848,8 +1853,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
return true;
}
- if (const ConstantDataSequential *CDS =
- dyn_cast<ConstantDataSequential>(Mask)) {
+ if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
if (CDS->getElementAsInteger(i) >= V1Size*2)
@@ -1861,7 +1865,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
// used as the shuffle mask. When this occurs, the shuffle mask will
// fall into this case and fail. To avoid this error, do this bit of
// ugliness to allow such a mask pass.
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask))
+ if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
if (CE->getOpcode() == Instruction::UserOp1)
return true;
@@ -1870,7 +1874,7 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) {
assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
- if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask))
+ if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
return CDS->getElementAsInteger(i);
Constant *C = Mask->getAggregateElement(i);
if (isa<UndefValue>(C))
@@ -1882,7 +1886,7 @@ void ShuffleVectorInst::getShuffleMask(Constant *Mask,
SmallVectorImpl<int> &Result) {
unsigned NumElts = Mask->getType()->getVectorNumElements();
- if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) {
+ if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
for (unsigned i = 0; i != NumElts; ++i)
Result.push_back(CDS->getElementAsInteger(i));
return;
@@ -1894,7 +1898,6 @@ void ShuffleVectorInst::getShuffleMask(Constant *Mask,
}
}
-
//===----------------------------------------------------------------------===//
// InsertValueInst Class
//===----------------------------------------------------------------------===//
@@ -1907,7 +1910,7 @@ void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
// (other than weirdness with &*IdxBegin being invalid; see
// getelementptr's init routine for example). But there's no
// present need to support it.
- assert(Idxs.size() > 0 && "InsertValueInst must have at least one index");
+ assert(!Idxs.empty() && "InsertValueInst must have at least one index");
assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
Val->getType() && "Inserted value must match indexed type!");
@@ -1936,7 +1939,7 @@ void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
// There's no fundamental reason why we require at least one index.
// But there's no present need to support it.
- assert(Idxs.size() > 0 && "ExtractValueInst must have at least one index");
+ assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
Indices.append(Idxs.begin(), Idxs.end());
setName(Name);
@@ -1992,8 +1995,8 @@ BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
InsertBefore) {
Op<0>() = S1;
Op<1>() = S2;
- init(iType);
setName(Name);
+ AssertOK();
}
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
@@ -2005,18 +2008,17 @@ BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
InsertAtEnd) {
Op<0>() = S1;
Op<1>() = S2;
- init(iType);
setName(Name);
+ AssertOK();
}
-
-void BinaryOperator::init(BinaryOps iType) {
+void BinaryOperator::AssertOK() {
Value *LHS = getOperand(0), *RHS = getOperand(1);
(void)LHS; (void)RHS; // Silence warnings.
assert(LHS->getType() == RHS->getType() &&
"Binary operator operand types must match!");
#ifndef NDEBUG
- switch (iType) {
+ switch (getOpcode()) {
case Add: case Sub:
case Mul:
assert(getType() == LHS->getType() &&
@@ -2036,8 +2038,7 @@ void BinaryOperator::init(BinaryOps iType) {
case SDiv:
assert(getType() == LHS->getType() &&
"Arithmetic operation should return same type as operands!");
- assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
- cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ assert(getType()->isIntOrIntVectorTy() &&
"Incorrect operand type (not integer) for S/UDIV");
break;
case FDiv:
@@ -2050,8 +2051,7 @@ void BinaryOperator::init(BinaryOps iType) {
case SRem:
assert(getType() == LHS->getType() &&
"Arithmetic operation should return same type as operands!");
- assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
- cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ assert(getType()->isIntOrIntVectorTy() &&
"Incorrect operand type (not integer) for S/UREM");
break;
case FRem:
@@ -2065,22 +2065,17 @@ void BinaryOperator::init(BinaryOps iType) {
case AShr:
assert(getType() == LHS->getType() &&
"Shift operation should return same type as operands!");
- assert((getType()->isIntegerTy() ||
- (getType()->isVectorTy() &&
- cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ assert(getType()->isIntOrIntVectorTy() &&
"Tried to create a shift operation on a non-integral type!");
break;
case And: case Or:
case Xor:
assert(getType() == LHS->getType() &&
"Logical operation should return same type as operands!");
- assert((getType()->isIntegerTy() ||
- (getType()->isVectorTy() &&
- cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
+ assert(getType()->isIntOrIntVectorTy() &&
"Tried to create a logical operation on a non-integral type!");
break;
- default:
- break;
+ default: llvm_unreachable("Invalid opcode provided");
}
#endif
}
@@ -2169,7 +2164,6 @@ BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
Op->getType(), Name, InsertAtEnd);
}
-
// isConstantAllOnes - Helper function for several functions below
static inline bool isConstantAllOnes(const Value *V) {
if (const Constant *C = dyn_cast<Constant>(V))
@@ -2235,7 +2229,6 @@ const Value *BinaryOperator::getNotArgument(const Value *BinOp) {
return getNotArgument(const_cast<Value*>(BinOp));
}
-
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
@@ -2247,7 +2240,6 @@ bool BinaryOperator::swapOperands() {
return false;
}
-
//===----------------------------------------------------------------------===//
// FPMathOperator Class
//===----------------------------------------------------------------------===//
@@ -2261,13 +2253,10 @@ float FPMathOperator::getFPAccuracy() const {
return Accuracy->getValueAPF().convertToFloat();
}
-
//===----------------------------------------------------------------------===//
// CastInst Class
//===----------------------------------------------------------------------===//
-void CastInst::anchor() {}
-
// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
switch (getOpcode()) {
@@ -2523,13 +2512,12 @@ unsigned CastInst::isEliminableCastPair(
return Instruction::BitCast;
return 0;
}
- case 12: {
+ case 12:
// addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
// addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
return Instruction::AddrSpaceCast;
return Instruction::BitCast;
- }
case 13:
// FIXME: this state can be merged with (1), but the following assert
// is useful to check the correctness of the sequence due to semantic
@@ -2550,7 +2538,6 @@ unsigned CastInst::isEliminableCastPair(
DstTy->getScalarType()->getPointerElementType())
return Instruction::AddrSpaceCast;
return 0;
-
case 15:
// FIXME: this state can be merged with (1), but the following assert
// is useful to check the correctness of the sequence due to semantic
@@ -3026,7 +3013,6 @@ CastInst::getCastOpcode(
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
-
// Check for type sanity on the arguments
Type *SrcTy = S->getType();
@@ -3078,16 +3064,14 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
return false;
- return SrcTy->getScalarType()->isPointerTy() &&
- DstTy->getScalarType()->isIntegerTy();
+ return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
case Instruction::IntToPtr:
if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
return false;
if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
return false;
- return SrcTy->getScalarType()->isIntegerTy() &&
- DstTy->getScalarType()->isPointerTy();
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
case Instruction::BitCast: {
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
@@ -3299,8 +3283,6 @@ AddrSpaceCastInst::AddrSpaceCastInst(
// CmpInst Classes
//===----------------------------------------------------------------------===//
-void CmpInst::anchor() {}
-
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
Value *RHS, const Twine &Name, Instruction *InsertBefore)
: Instruction(ty, op,
@@ -3375,7 +3357,6 @@ bool CmpInst::isEquality() const {
return cast<FCmpInst>(this)->isEquality();
}
-
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
switch (pred) {
default: llvm_unreachable("Unknown cmp predicate!");
@@ -3441,8 +3422,6 @@ StringRef CmpInst::getPredicateName(Predicate Pred) {
}
}
-void ICmpInst::anchor() {}
-
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
switch (pred) {
default: llvm_unreachable("Unknown icmp predicate!");
@@ -3655,16 +3634,16 @@ void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
// Initialize some new operands.
assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
setNumHungOffUseOperands(OpNo+2);
- CaseIt Case(this, NewCaseIdx);
+ CaseHandle Case(this, NewCaseIdx);
Case.setValue(OnVal);
Case.setSuccessor(Dest);
}
/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
-void SwitchInst::removeCase(CaseIt i) {
- unsigned idx = i.getCaseIndex();
-
+SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
+ unsigned idx = I->getCaseIndex();
+
assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
unsigned NumOps = getNumOperands();
@@ -3680,6 +3659,8 @@ void SwitchInst::removeCase(CaseIt i) {
OL[NumOps-2].set(nullptr);
OL[NumOps-2+1].set(nullptr);
setNumHungOffUseOperands(NumOps-2);
+
+ return CaseIt(this, idx);
}
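removeCase now returns an iterator to the case following the one erased, so callers can delete while iterating without juggling indices. A sketch of the erase-style loop this enables:

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Removes every case that branches to Dead; case_end() is refreshed because
  // removing a case shrinks the case list.
  static void dropCasesTargeting(SwitchInst *SI, BasicBlock *Dead) {
    for (auto I = SI->case_begin(), E = SI->case_end(); I != E;) {
      if (I->getCaseSuccessor() == Dead) {
        I = SI->removeCase(I);
        E = SI->case_end();
      } else {
        ++I;
      }
    }
  }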
/// growOperands - grow operands - This grows the operand list in response
@@ -3693,17 +3674,6 @@ void SwitchInst::growOperands() {
growHungoffUses(ReservedSpace);
}
-
-BasicBlock *SwitchInst::getSuccessorV(unsigned idx) const {
- return getSuccessor(idx);
-}
-unsigned SwitchInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-void SwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
- setSuccessor(idx, B);
-}
-
//===----------------------------------------------------------------------===//
// IndirectBrInst Implementation
//===----------------------------------------------------------------------===//
@@ -3783,16 +3753,6 @@ void IndirectBrInst::removeDestination(unsigned idx) {
setNumHungOffUseOperands(NumOps-1);
}
-BasicBlock *IndirectBrInst::getSuccessorV(unsigned idx) const {
- return getSuccessor(idx);
-}
-unsigned IndirectBrInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-void IndirectBrInst::setSuccessorV(unsigned idx, BasicBlock *B) {
- setSuccessor(idx, B);
-}
-
//===----------------------------------------------------------------------===//
// cloneImpl() implementations
//===----------------------------------------------------------------------===//
@@ -3826,6 +3786,7 @@ InsertValueInst *InsertValueInst::cloneImpl() const {
AllocaInst *AllocaInst::cloneImpl() const {
AllocaInst *Result = new AllocaInst(getAllocatedType(),
+ getType()->getAddressSpace(),
(Value *)getOperand(0), getAlignment());
Result->setUsedWithInAlloca(isUsedWithInAlloca());
Result->setSwiftError(isSwiftError());
@@ -3834,12 +3795,12 @@ AllocaInst *AllocaInst::cloneImpl() const {
LoadInst *LoadInst::cloneImpl() const {
return new LoadInst(getOperand(0), Twine(), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
@@ -3847,7 +3808,7 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
getSuccessOrdering(), getFailureOrdering(),
- getSynchScope());
+ getSyncScopeID());
Result->setVolatile(isVolatile());
Result->setWeak(isWeak());
return Result;
@@ -3855,14 +3816,14 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
AtomicRMWInst *Result =
- new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
- getOrdering(), getSynchScope());
+ new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+ getOrdering(), getSyncScopeID());
Result->setVolatile(isVolatile());
return Result;
}
FenceInst *FenceInst::cloneImpl() const {
- return new FenceInst(getContext(), getOrdering(), getSynchScope());
+ return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}
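
  The getSynchScope() -> getSyncScopeID() renames in these cloneImpl() bodies reflect
  the move from the old SynchronizationScope enum to interned SyncScope::ID values.
  A minimal sketch of the renamed accessor in use (signature assumed from the calls
  above):

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // True for atomic loads restricted to the current thread of execution.
    static bool isSingleThreadAtomicLoad(const LoadInst *LI) {
      return LI->isAtomic() && LI->getSyncScopeID() == SyncScope::SingleThread;
    }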
TruncInst *TruncInst::cloneImpl() const {
diff --git a/contrib/llvm/lib/IR/IntrinsicInst.cpp b/contrib/llvm/lib/IR/IntrinsicInst.cpp
index 2402506..8b12c55 100644
--- a/contrib/llvm/lib/IR/IntrinsicInst.cpp
+++ b/contrib/llvm/lib/IR/IntrinsicInst.cpp
@@ -22,6 +22,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
@@ -93,3 +94,56 @@ Value *InstrProfIncrementInst::getStep() const {
LLVMContext &Context = M->getContext();
return ConstantInt::get(Type::getInt64Ty(Context), 1);
}
+
+ConstrainedFPIntrinsic::RoundingMode
+ConstrainedFPIntrinsic::getRoundingMode() const {
+ unsigned NumOperands = getNumArgOperands();
+ Metadata *MD =
+ dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2))->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return rmInvalid;
+ StringRef RoundingArg = cast<MDString>(MD)->getString();
+
+ // For dynamic rounding mode, we use round to nearest but we will set the
+ // 'exact' SDNodeFlag so that the value will not be rounded.
+ return StringSwitch<RoundingMode>(RoundingArg)
+ .Case("round.dynamic", rmDynamic)
+ .Case("round.tonearest", rmToNearest)
+ .Case("round.downward", rmDownward)
+ .Case("round.upward", rmUpward)
+ .Case("round.towardzero", rmTowardZero)
+ .Default(rmInvalid);
+}
+
+ConstrainedFPIntrinsic::ExceptionBehavior
+ConstrainedFPIntrinsic::getExceptionBehavior() const {
+ unsigned NumOperands = getNumArgOperands();
+ Metadata *MD =
+ dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1))->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return ebInvalid;
+ StringRef ExceptionArg = cast<MDString>(MD)->getString();
+ return StringSwitch<ExceptionBehavior>(ExceptionArg)
+ .Case("fpexcept.ignore", ebIgnore)
+ .Case("fpexcept.maytrap", ebMayTrap)
+ .Case("fpexcept.strict", ebStrict)
+ .Default(ebInvalid);
+}
+
+bool ConstrainedFPIntrinsic::isUnaryOp() const {
+ switch (getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::experimental_constrained_sqrt:
+ case Intrinsic::experimental_constrained_sin:
+ case Intrinsic::experimental_constrained_cos:
+ case Intrinsic::experimental_constrained_exp:
+ case Intrinsic::experimental_constrained_exp2:
+ case Intrinsic::experimental_constrained_log:
+ case Intrinsic::experimental_constrained_log10:
+ case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_rint:
+ case Intrinsic::experimental_constrained_nearbyint:
+ return true;
+ }
+}
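
  The two accessors added above decode the trailing metadata arguments of the
  experimental constrained FP intrinsics. A minimal sketch of a caller, relying only
  on the enum values spelled out in the StringSwitch tables (not part of the patch):

    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // Conservative check: the call must not be moved or folded if it reads the
    // dynamic rounding mode or requires strict exception semantics.
    static bool mustPreserveFPEnv(const IntrinsicInst *II) {
      const auto *CFPI = dyn_cast<ConstrainedFPIntrinsic>(II);
      if (!CFPI)
        return false;
      return CFPI->getRoundingMode() == ConstrainedFPIntrinsic::rmDynamic ||
             CFPI->getExceptionBehavior() == ConstrainedFPIntrinsic::ebStrict;
    }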
diff --git a/contrib/llvm/lib/IR/LLVMContext.cpp b/contrib/llvm/lib/IR/LLVMContext.cpp
index dd66f14..c58459d 100644
--- a/contrib/llvm/lib/IR/LLVMContext.cpp
+++ b/contrib/llvm/lib/IR/LLVMContext.cpp
@@ -13,11 +13,11 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/LLVMContext.h"
+#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
-#include "LLVMContextImpl.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Metadata.h"
@@ -58,6 +58,7 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
{MD_type, "type"},
{MD_section_prefix, "section_prefix"},
{MD_absolute_symbol, "absolute_symbol"},
+ {MD_associated, "associated"},
};
for (auto &MDKind : MDKinds) {
@@ -80,6 +81,18 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
assert(GCTransitionEntry->second == LLVMContext::OB_gc_transition &&
"gc-transition operand bundle id drifted!");
(void)GCTransitionEntry;
+
+ SyncScope::ID SingleThreadSSID =
+ pImpl->getOrInsertSyncScopeID("singlethread");
+ assert(SingleThreadSSID == SyncScope::SingleThread &&
+ "singlethread synchronization scope ID drifted!");
+ (void)SingleThreadSSID;
+
+ SyncScope::ID SystemSSID =
+ pImpl->getOrInsertSyncScopeID("");
+ assert(SystemSSID == SyncScope::System &&
+ "system synchronization scope ID drifted!");
+ (void)SystemSSID;
}
LLVMContext::~LLVMContext() { delete pImpl; }
@@ -124,11 +137,18 @@ void LLVMContext::setDiagnosticHandler(DiagnosticHandlerTy DiagnosticHandler,
pImpl->RespectDiagnosticFilters = RespectFilters;
}
-void LLVMContext::setDiagnosticHotnessRequested(bool Requested) {
- pImpl->DiagnosticHotnessRequested = Requested;
+void LLVMContext::setDiagnosticsHotnessRequested(bool Requested) {
+ pImpl->DiagnosticsHotnessRequested = Requested;
}
-bool LLVMContext::getDiagnosticHotnessRequested() const {
- return pImpl->DiagnosticHotnessRequested;
+bool LLVMContext::getDiagnosticsHotnessRequested() const {
+ return pImpl->DiagnosticsHotnessRequested;
+}
+
+void LLVMContext::setDiagnosticsHotnessThreshold(uint64_t Threshold) {
+ pImpl->DiagnosticsHotnessThreshold = Threshold;
+}
+uint64_t LLVMContext::getDiagnosticsHotnessThreshold() const {
+ return pImpl->DiagnosticsHotnessThreshold;
}
yaml::Output *LLVMContext::getDiagnosticsOutputFile() {
@@ -247,6 +267,14 @@ uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
return pImpl->getOperandBundleTagID(Tag);
}
+SyncScope::ID LLVMContext::getOrInsertSyncScopeID(StringRef SSN) {
+ return pImpl->getOrInsertSyncScopeID(SSN);
+}
+
+void LLVMContext::getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const {
+ pImpl->getSyncScopeNames(SSNs);
+}
+
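  The new public API above lets frontends and targets intern their own synchronization
  scope names. A minimal sketch, assuming the IRBuilder::CreateFence overload that
  accepts a SyncScope::ID; the scope name "agent" is made up for illustration:

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    static FenceInst *emitAgentFence(IRBuilder<> &Builder) {
      LLVMContext &Ctx = Builder.getContext();
      SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");
      return Builder.CreateFence(AtomicOrdering::SequentiallyConsistent, AgentSSID);
    }
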
void LLVMContext::setGC(const Function &Fn, std::string GCName) {
auto It = pImpl->GCNames.find(&Fn);
diff --git a/contrib/llvm/lib/IR/LLVMContextImpl.cpp b/contrib/llvm/lib/IR/LLVMContextImpl.cpp
index c43356c..57dd08b 100644
--- a/contrib/llvm/lib/IR/LLVMContextImpl.cpp
+++ b/contrib/llvm/lib/IR/LLVMContextImpl.cpp
@@ -1,4 +1,4 @@
-//===-- LLVMContextImpl.cpp - Implement LLVMContextImpl -------------------===//
+//===- LLVMContextImpl.cpp - Implement LLVMContextImpl --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,18 +12,17 @@
//===----------------------------------------------------------------------===//
#include "LLVMContextImpl.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OptBisect.h"
+#include "llvm/IR/Type.h"
#include "llvm/Support/ManagedStatic.h"
-#include <algorithm>
+#include <cassert>
+#include <utility>
+
using namespace llvm;
LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
- : TheTrueVal(nullptr), TheFalseVal(nullptr),
- VoidTy(C, Type::VoidTyID),
+ : VoidTy(C, Type::VoidTyID),
LabelTy(C, Type::LabelTyID),
HalfTy(C, Type::HalfTyID),
FloatTy(C, Type::FloatTyID),
@@ -39,17 +38,7 @@ LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
Int16Ty(C, 16),
Int32Ty(C, 32),
Int64Ty(C, 64),
- Int128Ty(C, 128) {
- InlineAsmDiagHandler = nullptr;
- InlineAsmDiagContext = nullptr;
- DiagnosticHandler = nullptr;
- DiagnosticContext = nullptr;
- RespectDiagnosticFilters = false;
- DiagnosticHotnessRequested = false;
- YieldCallback = nullptr;
- YieldOpaqueHandle = nullptr;
- NamedStructTypesUniqueID = 0;
-}
+ Int128Ty(C, 128) {}
LLVMContextImpl::~LLVMContextImpl() {
// NOTE: We need to delete the contents of OwnedModules, but Module's dtor
@@ -114,9 +103,10 @@ LLVMContextImpl::~LLVMContextImpl() {
}
// Destroy attribute lists.
- for (FoldingSetIterator<AttributeSetImpl> I = AttrsLists.begin(),
- E = AttrsLists.end(); I != E; ) {
- FoldingSetIterator<AttributeSetImpl> Elem = I++;
+ for (FoldingSetIterator<AttributeListImpl> I = AttrsLists.begin(),
+ E = AttrsLists.end();
+ I != E;) {
+ FoldingSetIterator<AttributeListImpl> Elem = I++;
delete &*Elem;
}
@@ -155,7 +145,6 @@ void LLVMContextImpl::dropTriviallyDeadConstantArrays() {
C->destroyConstant();
}
}
-
} while (Changed);
}
@@ -164,6 +153,7 @@ void Module::dropTriviallyDeadConstantArrays() {
}
namespace llvm {
+
/// \brief Make MDOperand transparent for hashing.
///
/// This overload of an implementation detail of the hashing library makes
@@ -178,7 +168,8 @@ namespace llvm {
/// does not cause MDOperand to be transparent. In particular, a bare pointer
/// doesn't get hashed before it's combined, whereas \a MDOperand would.
static const Metadata *get_hashable_data(const MDOperand &X) { return X.get(); }
-}
+
+} // end namespace llvm
unsigned MDNodeOpsKey::calculateHash(MDNode *N, unsigned Offset) {
unsigned Hash = hash_combine_range(N->op_begin() + Offset, N->op_end());
@@ -214,26 +205,19 @@ uint32_t LLVMContextImpl::getOperandBundleTagID(StringRef Tag) const {
return I->second;
}
-// ConstantsContext anchors
-void UnaryConstantExpr::anchor() { }
-
-void BinaryConstantExpr::anchor() { }
-
-void SelectConstantExpr::anchor() { }
-
-void ExtractElementConstantExpr::anchor() { }
-
-void InsertElementConstantExpr::anchor() { }
-
-void ShuffleVectorConstantExpr::anchor() { }
-
-void ExtractValueConstantExpr::anchor() { }
-
-void InsertValueConstantExpr::anchor() { }
-
-void GetElementPtrConstantExpr::anchor() { }
+SyncScope::ID LLVMContextImpl::getOrInsertSyncScopeID(StringRef SSN) {
+ auto NewSSID = SSC.size();
+ assert(NewSSID < std::numeric_limits<SyncScope::ID>::max() &&
+ "Hit the maximum number of synchronization scopes allowed!");
+ return SSC.insert(std::make_pair(SSN, SyncScope::ID(NewSSID))).first->second;
+}
-void CompareConstantExpr::anchor() { }
+void LLVMContextImpl::getSyncScopeNames(
+ SmallVectorImpl<StringRef> &SSNs) const {
+ SSNs.resize(SSC.size());
+ for (const auto &SSE : SSC)
+ SSNs[SSE.second] = SSE.first();
+}
/// Singleton instance of the OptBisect class.
///
diff --git a/contrib/llvm/lib/IR/LLVMContextImpl.h b/contrib/llvm/lib/IR/LLVMContextImpl.h
index 850c81c..bea2c7a 100644
--- a/contrib/llvm/lib/IR/LLVMContextImpl.h
+++ b/contrib/llvm/lib/IR/LLVMContextImpl.h
@@ -1,4 +1,4 @@
-//===-- LLVMContextImpl.h - The LLVMContextImpl opaque class ----*- C++ -*-===//
+//===- LLVMContextImpl.h - The LLVMContextImpl opaque class -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,48 +21,61 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/IR/ValueHandle.h"
-#include "llvm/Support/Dwarf.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/YAMLTraits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
#include <vector>
namespace llvm {
-class ConstantInt;
class ConstantFP;
-class DiagnosticInfoOptimizationRemark;
-class DiagnosticInfoOptimizationRemarkMissed;
-class DiagnosticInfoOptimizationRemarkAnalysis;
-class GCStrategy;
-class LLVMContext;
+class ConstantInt;
class Type;
class Value;
+class ValueHandleBase;
struct DenseMapAPIntKeyInfo {
static inline APInt getEmptyKey() {
APInt V(nullptr, 0);
- V.VAL = 0;
+ V.U.VAL = 0;
return V;
}
+
static inline APInt getTombstoneKey() {
APInt V(nullptr, 0);
- V.VAL = 1;
+ V.U.VAL = 1;
return V;
}
+
static unsigned getHashValue(const APInt &Key) {
return static_cast<unsigned>(hash_value(Key));
}
+
static bool isEqual(const APInt &LHS, const APInt &RHS) {
return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
}
@@ -71,9 +84,11 @@ struct DenseMapAPIntKeyInfo {
struct DenseMapAPFloatKeyInfo {
static inline APFloat getEmptyKey() { return APFloat(APFloat::Bogus(), 1); }
static inline APFloat getTombstoneKey() { return APFloat(APFloat::Bogus(), 2); }
+
static unsigned getHashValue(const APFloat &Key) {
return static_cast<unsigned>(hash_value(Key));
}
+
static bool isEqual(const APFloat &LHS, const APFloat &RHS) {
return LHS.bitwiseIsEqual(RHS);
}
@@ -83,10 +98,13 @@ struct AnonStructTypeKeyInfo {
struct KeyTy {
ArrayRef<Type*> ETypes;
bool isPacked;
+
KeyTy(const ArrayRef<Type*>& E, bool P) :
ETypes(E), isPacked(P) {}
+
KeyTy(const StructType *ST)
: ETypes(ST->elements()), isPacked(ST->isPacked()) {}
+
bool operator==(const KeyTy& that) const {
if (isPacked != that.isPacked)
return false;
@@ -98,25 +116,31 @@ struct AnonStructTypeKeyInfo {
return !this->operator==(that);
}
};
+
static inline StructType* getEmptyKey() {
return DenseMapInfo<StructType*>::getEmptyKey();
}
+
static inline StructType* getTombstoneKey() {
return DenseMapInfo<StructType*>::getTombstoneKey();
}
+
static unsigned getHashValue(const KeyTy& Key) {
return hash_combine(hash_combine_range(Key.ETypes.begin(),
Key.ETypes.end()),
Key.isPacked);
}
+
static unsigned getHashValue(const StructType *ST) {
return getHashValue(KeyTy(ST));
}
+
static bool isEqual(const KeyTy& LHS, const StructType *RHS) {
if (RHS == getEmptyKey() || RHS == getTombstoneKey())
return false;
return LHS == KeyTy(RHS);
}
+
static bool isEqual(const StructType *LHS, const StructType *RHS) {
return LHS == RHS;
}
@@ -127,11 +151,13 @@ struct FunctionTypeKeyInfo {
const Type *ReturnType;
ArrayRef<Type*> Params;
bool isVarArg;
+
KeyTy(const Type* R, const ArrayRef<Type*>& P, bool V) :
ReturnType(R), Params(P), isVarArg(V) {}
KeyTy(const FunctionType *FT)
: ReturnType(FT->getReturnType()), Params(FT->params()),
isVarArg(FT->isVarArg()) {}
+
bool operator==(const KeyTy& that) const {
if (ReturnType != that.ReturnType)
return false;
@@ -145,26 +171,32 @@ struct FunctionTypeKeyInfo {
return !this->operator==(that);
}
};
+
static inline FunctionType* getEmptyKey() {
return DenseMapInfo<FunctionType*>::getEmptyKey();
}
+
static inline FunctionType* getTombstoneKey() {
return DenseMapInfo<FunctionType*>::getTombstoneKey();
}
+
static unsigned getHashValue(const KeyTy& Key) {
return hash_combine(Key.ReturnType,
hash_combine_range(Key.Params.begin(),
Key.Params.end()),
Key.isVarArg);
}
+
static unsigned getHashValue(const FunctionType *FT) {
return getHashValue(KeyTy(FT));
}
+
static bool isEqual(const KeyTy& LHS, const FunctionType *RHS) {
if (RHS == getEmptyKey() || RHS == getTombstoneKey())
return false;
return LHS == KeyTy(RHS);
}
+
static bool isEqual(const FunctionType *LHS, const FunctionType *RHS) {
return LHS == RHS;
}
@@ -174,7 +206,6 @@ struct FunctionTypeKeyInfo {
class MDNodeOpsKey {
ArrayRef<Metadata *> RawOps;
ArrayRef<MDOperand> Ops;
-
unsigned Hash;
protected:
@@ -212,14 +243,15 @@ public:
};
template <class NodeTy> struct MDNodeKeyImpl;
-template <class NodeTy> struct MDNodeInfo;
/// Configuration point for MDNodeInfo::isEqual().
template <class NodeTy> struct MDNodeSubsetEqualImpl {
- typedef MDNodeKeyImpl<NodeTy> KeyTy;
+ using KeyTy = MDNodeKeyImpl<NodeTy>;
+
static bool isSubsetEqual(const KeyTy &LHS, const NodeTy *RHS) {
return false;
}
+
static bool isSubsetEqual(const NodeTy *LHS, const NodeTy *RHS) {
return false;
}
@@ -252,7 +284,6 @@ template <> struct MDNodeKeyImpl<DILocation> {
MDNodeKeyImpl(unsigned Line, unsigned Column, Metadata *Scope,
Metadata *InlinedAt)
: Line(Line), Column(Column), Scope(Scope), InlinedAt(InlinedAt) {}
-
MDNodeKeyImpl(const DILocation *L)
: Line(L->getLine()), Column(L->getColumn()), Scope(L->getRawScope()),
InlinedAt(L->getRawInlinedAt()) {}
@@ -261,6 +292,7 @@ template <> struct MDNodeKeyImpl<DILocation> {
return Line == RHS->getLine() && Column == RHS->getColumn() &&
Scope == RHS->getRawScope() && InlinedAt == RHS->getRawInlinedAt();
}
+
unsigned getHashValue() const {
return hash_combine(Line, Column, Scope, InlinedAt);
}
@@ -270,6 +302,7 @@ template <> struct MDNodeKeyImpl<DILocation> {
template <> struct MDNodeKeyImpl<GenericDINode> : MDNodeOpsKey {
unsigned Tag;
MDString *Header;
+
MDNodeKeyImpl(unsigned Tag, MDString *Header, ArrayRef<Metadata *> DwarfOps)
: MDNodeOpsKey(DwarfOps), Tag(Tag), Header(Header) {}
MDNodeKeyImpl(const GenericDINode *N)
@@ -299,6 +332,7 @@ template <> struct MDNodeKeyImpl<DISubrange> {
bool isKeyOf(const DISubrange *RHS) const {
return Count == RHS->getCount() && LowerBound == RHS->getLowerBound();
}
+
unsigned getHashValue() const { return hash_combine(Count, LowerBound); }
};
@@ -313,6 +347,7 @@ template <> struct MDNodeKeyImpl<DIEnumerator> {
bool isKeyOf(const DIEnumerator *RHS) const {
return Value == RHS->getValue() && Name == RHS->getRawName();
}
+
unsigned getHashValue() const { return hash_combine(Value, Name); }
};
@@ -337,6 +372,7 @@ template <> struct MDNodeKeyImpl<DIBasicType> {
AlignInBits == RHS->getAlignInBits() &&
Encoding == RHS->getEncoding();
}
+
unsigned getHashValue() const {
return hash_combine(Tag, Name, SizeInBits, AlignInBits, Encoding);
}
@@ -352,22 +388,26 @@ template <> struct MDNodeKeyImpl<DIDerivedType> {
uint64_t SizeInBits;
uint64_t OffsetInBits;
uint32_t AlignInBits;
+ Optional<unsigned> DWARFAddressSpace;
unsigned Flags;
Metadata *ExtraData;
MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
- uint32_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ uint32_t AlignInBits, uint64_t OffsetInBits,
+ Optional<unsigned> DWARFAddressSpace, unsigned Flags,
Metadata *ExtraData)
: Tag(Tag), Name(Name), File(File), Line(Line), Scope(Scope),
BaseType(BaseType), SizeInBits(SizeInBits), OffsetInBits(OffsetInBits),
- AlignInBits(AlignInBits), Flags(Flags), ExtraData(ExtraData) {}
+ AlignInBits(AlignInBits), DWARFAddressSpace(DWARFAddressSpace),
+ Flags(Flags), ExtraData(ExtraData) {}
MDNodeKeyImpl(const DIDerivedType *N)
: Tag(N->getTag()), Name(N->getRawName()), File(N->getRawFile()),
Line(N->getLine()), Scope(N->getRawScope()),
BaseType(N->getRawBaseType()), SizeInBits(N->getSizeInBits()),
OffsetInBits(N->getOffsetInBits()), AlignInBits(N->getAlignInBits()),
- Flags(N->getFlags()), ExtraData(N->getRawExtraData()) {}
+ DWARFAddressSpace(N->getDWARFAddressSpace()), Flags(N->getFlags()),
+ ExtraData(N->getRawExtraData()) {}
bool isKeyOf(const DIDerivedType *RHS) const {
return Tag == RHS->getTag() && Name == RHS->getRawName() &&
@@ -375,9 +415,12 @@ template <> struct MDNodeKeyImpl<DIDerivedType> {
Scope == RHS->getRawScope() && BaseType == RHS->getRawBaseType() &&
SizeInBits == RHS->getSizeInBits() &&
AlignInBits == RHS->getAlignInBits() &&
- OffsetInBits == RHS->getOffsetInBits() && Flags == RHS->getFlags() &&
+ OffsetInBits == RHS->getOffsetInBits() &&
+ DWARFAddressSpace == RHS->getDWARFAddressSpace() &&
+ Flags == RHS->getFlags() &&
ExtraData == RHS->getRawExtraData();
}
+
unsigned getHashValue() const {
// If this is a member inside an ODR type, only hash the type and the name.
// Otherwise the hash will be stronger than
@@ -396,10 +439,12 @@ template <> struct MDNodeKeyImpl<DIDerivedType> {
};
template <> struct MDNodeSubsetEqualImpl<DIDerivedType> {
- typedef MDNodeKeyImpl<DIDerivedType> KeyTy;
+ using KeyTy = MDNodeKeyImpl<DIDerivedType>;
+
static bool isSubsetEqual(const KeyTy &LHS, const DIDerivedType *RHS) {
return isODRMember(LHS.Tag, LHS.Scope, LHS.Name, RHS);
}
+
static bool isSubsetEqual(const DIDerivedType *LHS, const DIDerivedType *RHS) {
return isODRMember(LHS->getTag(), LHS->getRawScope(), LHS->getRawName(),
RHS);
@@ -474,6 +519,7 @@ template <> struct MDNodeKeyImpl<DICompositeType> {
TemplateParams == RHS->getRawTemplateParams() &&
Identifier == RHS->getRawIdentifier();
}
+
unsigned getHashValue() const {
// Intentionally computes the hash on a subset of the operands for
// performance reason. The subset has to be significant enough to avoid
@@ -498,6 +544,7 @@ template <> struct MDNodeKeyImpl<DISubroutineType> {
return Flags == RHS->getFlags() && CC == RHS->getCC() &&
TypeArray == RHS->getRawTypeArray();
}
+
unsigned getHashValue() const { return hash_combine(Flags, CC, TypeArray); }
};
@@ -521,6 +568,7 @@ template <> struct MDNodeKeyImpl<DIFile> {
CSKind == RHS->getChecksumKind() &&
Checksum == RHS->getRawChecksum();
}
+
unsigned getHashValue() const {
return hash_combine(Filename, Directory, CSKind, Checksum);
}
@@ -546,6 +594,7 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
Metadata *TemplateParams;
Metadata *Declaration;
Metadata *Variables;
+ Metadata *ThrownTypes;
MDNodeKeyImpl(Metadata *Scope, MDString *Name, MDString *LinkageName,
Metadata *File, unsigned Line, Metadata *Type,
@@ -553,7 +602,8 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
Metadata *ContainingType, unsigned Virtuality,
unsigned VirtualIndex, int ThisAdjustment, unsigned Flags,
bool IsOptimized, Metadata *Unit, Metadata *TemplateParams,
- Metadata *Declaration, Metadata *Variables)
+ Metadata *Declaration, Metadata *Variables,
+ Metadata *ThrownTypes)
: Scope(Scope), Name(Name), LinkageName(LinkageName), File(File),
Line(Line), Type(Type), IsLocalToUnit(IsLocalToUnit),
IsDefinition(IsDefinition), ScopeLine(ScopeLine),
@@ -561,7 +611,7 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
VirtualIndex(VirtualIndex), ThisAdjustment(ThisAdjustment),
Flags(Flags), IsOptimized(IsOptimized), Unit(Unit),
TemplateParams(TemplateParams), Declaration(Declaration),
- Variables(Variables) {}
+ Variables(Variables), ThrownTypes(ThrownTypes) {}
MDNodeKeyImpl(const DISubprogram *N)
: Scope(N->getRawScope()), Name(N->getRawName()),
LinkageName(N->getRawLinkageName()), File(N->getRawFile()),
@@ -572,7 +622,8 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
ThisAdjustment(N->getThisAdjustment()), Flags(N->getFlags()),
IsOptimized(N->isOptimized()), Unit(N->getRawUnit()),
TemplateParams(N->getRawTemplateParams()),
- Declaration(N->getRawDeclaration()), Variables(N->getRawVariables()) {}
+ Declaration(N->getRawDeclaration()), Variables(N->getRawVariables()),
+ ThrownTypes(N->getRawThrownTypes()) {}
bool isKeyOf(const DISubprogram *RHS) const {
return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
@@ -589,8 +640,10 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
Unit == RHS->getUnit() &&
TemplateParams == RHS->getRawTemplateParams() &&
Declaration == RHS->getRawDeclaration() &&
- Variables == RHS->getRawVariables();
+ Variables == RHS->getRawVariables() &&
+ ThrownTypes == RHS->getRawThrownTypes();
}
+
unsigned getHashValue() const {
// If this is a declaration inside an ODR type, only hash the type and the
// name. Otherwise the hash will be stronger than
@@ -609,20 +662,24 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
};
template <> struct MDNodeSubsetEqualImpl<DISubprogram> {
- typedef MDNodeKeyImpl<DISubprogram> KeyTy;
+ using KeyTy = MDNodeKeyImpl<DISubprogram>;
+
static bool isSubsetEqual(const KeyTy &LHS, const DISubprogram *RHS) {
return isDeclarationOfODRMember(LHS.IsDefinition, LHS.Scope,
- LHS.LinkageName, RHS);
+ LHS.LinkageName, LHS.TemplateParams, RHS);
}
+
static bool isSubsetEqual(const DISubprogram *LHS, const DISubprogram *RHS) {
return isDeclarationOfODRMember(LHS->isDefinition(), LHS->getRawScope(),
- LHS->getRawLinkageName(), RHS);
+ LHS->getRawLinkageName(),
+ LHS->getRawTemplateParams(), RHS);
}
/// Subprograms compare equal if they declare the same function in an ODR
/// type.
static bool isDeclarationOfODRMember(bool IsDefinition, const Metadata *Scope,
const MDString *LinkageName,
+ const Metadata *TemplateParams,
const DISubprogram *RHS) {
// Check whether the LHS is eligible.
if (IsDefinition || !Scope || !LinkageName)
@@ -633,8 +690,14 @@ template <> struct MDNodeSubsetEqualImpl<DISubprogram> {
return false;
// Compare to the RHS.
+ // FIXME: We need to compare template parameters here to avoid incorrect
+ // collisions in mapMetadata when RF_MoveDistinctMDs is set and an ODR-DISubprogram
+ // has a non-ODR template parameter (i.e., a DICompositeType that does not
+ // have an identifier). Eventually we should decouple ODR logic from
+ // uniquing logic.
return IsDefinition == RHS->isDefinition() && Scope == RHS->getRawScope() &&
- LinkageName == RHS->getRawLinkageName();
+ LinkageName == RHS->getRawLinkageName() &&
+ TemplateParams == RHS->getRawTemplateParams();
}
};
@@ -654,6 +717,7 @@ template <> struct MDNodeKeyImpl<DILexicalBlock> {
return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
Line == RHS->getLine() && Column == RHS->getColumn();
}
+
unsigned getHashValue() const {
return hash_combine(Scope, File, Line, Column);
}
@@ -674,6 +738,7 @@ template <> struct MDNodeKeyImpl<DILexicalBlockFile> {
return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
Discriminator == RHS->getDiscriminator();
}
+
unsigned getHashValue() const {
return hash_combine(Scope, File, Discriminator);
}
@@ -681,26 +746,22 @@ template <> struct MDNodeKeyImpl<DILexicalBlockFile> {
template <> struct MDNodeKeyImpl<DINamespace> {
Metadata *Scope;
- Metadata *File;
MDString *Name;
- unsigned Line;
bool ExportSymbols;
- MDNodeKeyImpl(Metadata *Scope, Metadata *File, MDString *Name, unsigned Line,
- bool ExportSymbols)
- : Scope(Scope), File(File), Name(Name), Line(Line),
- ExportSymbols(ExportSymbols) {}
+ MDNodeKeyImpl(Metadata *Scope, MDString *Name, bool ExportSymbols)
+ : Scope(Scope), Name(Name), ExportSymbols(ExportSymbols) {}
MDNodeKeyImpl(const DINamespace *N)
- : Scope(N->getRawScope()), File(N->getRawFile()), Name(N->getRawName()),
- Line(N->getLine()), ExportSymbols(N->getExportSymbols()) {}
+ : Scope(N->getRawScope()), Name(N->getRawName()),
+ ExportSymbols(N->getExportSymbols()) {}
bool isKeyOf(const DINamespace *RHS) const {
- return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
- Name == RHS->getRawName() && Line == RHS->getLine() &&
+ return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
ExportSymbols == RHS->getExportSymbols();
}
+
unsigned getHashValue() const {
- return hash_combine(Scope, File, Name, Line);
+ return hash_combine(Scope, Name);
}
};
@@ -710,6 +771,7 @@ template <> struct MDNodeKeyImpl<DIModule> {
MDString *ConfigurationMacros;
MDString *IncludePath;
MDString *ISysRoot;
+
MDNodeKeyImpl(Metadata *Scope, MDString *Name, MDString *ConfigurationMacros,
MDString *IncludePath, MDString *ISysRoot)
: Scope(Scope), Name(Name), ConfigurationMacros(ConfigurationMacros),
@@ -725,6 +787,7 @@ template <> struct MDNodeKeyImpl<DIModule> {
IncludePath == RHS->getRawIncludePath() &&
ISysRoot == RHS->getRawISysRoot();
}
+
unsigned getHashValue() const {
return hash_combine(Scope, Name,
ConfigurationMacros, IncludePath, ISysRoot);
@@ -742,6 +805,7 @@ template <> struct MDNodeKeyImpl<DITemplateTypeParameter> {
bool isKeyOf(const DITemplateTypeParameter *RHS) const {
return Name == RHS->getRawName() && Type == RHS->getRawType();
}
+
unsigned getHashValue() const { return hash_combine(Name, Type); }
};
@@ -761,6 +825,7 @@ template <> struct MDNodeKeyImpl<DITemplateValueParameter> {
return Tag == RHS->getTag() && Name == RHS->getRawName() &&
Type == RHS->getRawType() && Value == RHS->getValue();
}
+
unsigned getHashValue() const { return hash_combine(Tag, Name, Type, Value); }
};
@@ -803,6 +868,7 @@ template <> struct MDNodeKeyImpl<DIGlobalVariable> {
RHS->getRawStaticDataMemberDeclaration() &&
AlignInBits == RHS->getAlignInBits();
}
+
unsigned getHashValue() const {
// We do not use AlignInBits in hashing function here on purpose:
// in most cases this param for local variable is zero (for function param
@@ -843,6 +909,7 @@ template <> struct MDNodeKeyImpl<DILocalVariable> {
Type == RHS->getRawType() && Arg == RHS->getArg() &&
Flags == RHS->getFlags() && AlignInBits == RHS->getAlignInBits();
}
+
unsigned getHashValue() const {
// We do not use AlignInBits in hashing function here on purpose:
// in most cases this param for local variable is zero (for function param
@@ -864,6 +931,7 @@ template <> struct MDNodeKeyImpl<DIExpression> {
bool isKeyOf(const DIExpression *RHS) const {
return Elements == RHS->getElements();
}
+
unsigned getHashValue() const {
return hash_combine_range(Elements.begin(), Elements.end());
}
@@ -882,6 +950,7 @@ template <> struct MDNodeKeyImpl<DIGlobalVariableExpression> {
return Variable == RHS->getRawVariable() &&
Expression == RHS->getRawExpression();
}
+
unsigned getHashValue() const { return hash_combine(Variable, Expression); }
};
@@ -910,6 +979,7 @@ template <> struct MDNodeKeyImpl<DIObjCProperty> {
SetterName == RHS->getRawSetterName() &&
Attributes == RHS->getAttributes() && Type == RHS->getRawType();
}
+
unsigned getHashValue() const {
return hash_combine(Name, File, Line, GetterName, SetterName, Attributes,
Type);
@@ -920,23 +990,26 @@ template <> struct MDNodeKeyImpl<DIImportedEntity> {
unsigned Tag;
Metadata *Scope;
Metadata *Entity;
+ Metadata *File;
unsigned Line;
MDString *Name;
- MDNodeKeyImpl(unsigned Tag, Metadata *Scope, Metadata *Entity, unsigned Line,
- MDString *Name)
- : Tag(Tag), Scope(Scope), Entity(Entity), Line(Line), Name(Name) {}
+ MDNodeKeyImpl(unsigned Tag, Metadata *Scope, Metadata *Entity, Metadata *File,
+ unsigned Line, MDString *Name)
+ : Tag(Tag), Scope(Scope), Entity(Entity), File(File), Line(Line),
+ Name(Name) {}
MDNodeKeyImpl(const DIImportedEntity *N)
: Tag(N->getTag()), Scope(N->getRawScope()), Entity(N->getRawEntity()),
- Line(N->getLine()), Name(N->getRawName()) {}
+ File(N->getRawFile()), Line(N->getLine()), Name(N->getRawName()) {}
bool isKeyOf(const DIImportedEntity *RHS) const {
return Tag == RHS->getTag() && Scope == RHS->getRawScope() &&
- Entity == RHS->getRawEntity() && Line == RHS->getLine() &&
- Name == RHS->getRawName();
+ Entity == RHS->getRawEntity() && File == RHS->getFile() &&
+ Line == RHS->getLine() && Name == RHS->getRawName();
}
+
unsigned getHashValue() const {
- return hash_combine(Tag, Scope, Entity, Line, Name);
+ return hash_combine(Tag, Scope, Entity, File, Line, Name);
}
};
@@ -956,6 +1029,7 @@ template <> struct MDNodeKeyImpl<DIMacro> {
return MIType == RHS->getMacinfoType() && Line == RHS->getLine() &&
Name == RHS->getRawName() && Value == RHS->getRawValue();
}
+
unsigned getHashValue() const {
return hash_combine(MIType, Line, Name, Value);
}
@@ -978,6 +1052,7 @@ template <> struct MDNodeKeyImpl<DIMacroFile> {
return MIType == RHS->getMacinfoType() && Line == RHS->getLine() &&
File == RHS->getRawFile() && Elements == RHS->getRawElements();
}
+
unsigned getHashValue() const {
return hash_combine(MIType, Line, File, Elements);
}
@@ -985,23 +1060,29 @@ template <> struct MDNodeKeyImpl<DIMacroFile> {
/// \brief DenseMapInfo for MDNode subclasses.
template <class NodeTy> struct MDNodeInfo {
- typedef MDNodeKeyImpl<NodeTy> KeyTy;
- typedef MDNodeSubsetEqualImpl<NodeTy> SubsetEqualTy;
+ using KeyTy = MDNodeKeyImpl<NodeTy>;
+ using SubsetEqualTy = MDNodeSubsetEqualImpl<NodeTy>;
+
static inline NodeTy *getEmptyKey() {
return DenseMapInfo<NodeTy *>::getEmptyKey();
}
+
static inline NodeTy *getTombstoneKey() {
return DenseMapInfo<NodeTy *>::getTombstoneKey();
}
+
static unsigned getHashValue(const KeyTy &Key) { return Key.getHashValue(); }
+
static unsigned getHashValue(const NodeTy *N) {
return KeyTy(N).getHashValue();
}
+
static bool isEqual(const KeyTy &LHS, const NodeTy *RHS) {
if (RHS == getEmptyKey() || RHS == getTombstoneKey())
return false;
return SubsetEqualTy::isSubsetEqual(LHS, RHS) || LHS.isKeyOf(RHS);
}
+
static bool isEqual(const NodeTy *LHS, const NodeTy *RHS) {
if (LHS == RHS)
return true;
@@ -1011,7 +1092,7 @@ template <class NodeTy> struct MDNodeInfo {
}
};
-#define HANDLE_MDNODE_LEAF(CLASS) typedef MDNodeInfo<CLASS> CLASS##Info;
+#define HANDLE_MDNODE_LEAF(CLASS) using CLASS##Info = MDNodeInfo<CLASS>;
#include "llvm/IR/Metadata.def"
/// \brief Map-like storage for metadata attachments.
@@ -1084,28 +1165,29 @@ public:
/// will be automatically deleted if this context is deleted.
SmallPtrSet<Module*, 4> OwnedModules;
- LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler;
- void *InlineAsmDiagContext;
-
- LLVMContext::DiagnosticHandlerTy DiagnosticHandler;
- void *DiagnosticContext;
- bool RespectDiagnosticFilters;
- bool DiagnosticHotnessRequested;
+ LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler = nullptr;
+ void *InlineAsmDiagContext = nullptr;
+
+ LLVMContext::DiagnosticHandlerTy DiagnosticHandler = nullptr;
+ void *DiagnosticContext = nullptr;
+ bool RespectDiagnosticFilters = false;
+ bool DiagnosticsHotnessRequested = false;
+ uint64_t DiagnosticsHotnessThreshold = 0;
std::unique_ptr<yaml::Output> DiagnosticsOutputFile;
- LLVMContext::YieldCallbackTy YieldCallback;
- void *YieldOpaqueHandle;
+ LLVMContext::YieldCallbackTy YieldCallback = nullptr;
+ void *YieldOpaqueHandle = nullptr;
- typedef DenseMap<APInt, std::unique_ptr<ConstantInt>, DenseMapAPIntKeyInfo>
- IntMapTy;
+ using IntMapTy =
+ DenseMap<APInt, std::unique_ptr<ConstantInt>, DenseMapAPIntKeyInfo>;
IntMapTy IntConstants;
- typedef DenseMap<APFloat, std::unique_ptr<ConstantFP>, DenseMapAPFloatKeyInfo>
- FPMapTy;
+ using FPMapTy =
+ DenseMap<APFloat, std::unique_ptr<ConstantFP>, DenseMapAPFloatKeyInfo>;
FPMapTy FPConstants;
FoldingSet<AttributeImpl> AttrsSet;
- FoldingSet<AttributeSetImpl> AttrsLists;
+ FoldingSet<AttributeListImpl> AttrsLists;
FoldingSet<AttributeSetNode> AttrsSetNodes;
StringMap<MDString, BumpPtrAllocator> MDStringCache;
@@ -1129,13 +1211,13 @@ public:
DenseMap<Type *, std::unique_ptr<ConstantAggregateZero>> CAZConstants;
- typedef ConstantUniqueMap<ConstantArray> ArrayConstantsTy;
+ using ArrayConstantsTy = ConstantUniqueMap<ConstantArray>;
ArrayConstantsTy ArrayConstants;
- typedef ConstantUniqueMap<ConstantStruct> StructConstantsTy;
+ using StructConstantsTy = ConstantUniqueMap<ConstantStruct>;
StructConstantsTy StructConstants;
- typedef ConstantUniqueMap<ConstantVector> VectorConstantsTy;
+ using VectorConstantsTy = ConstantUniqueMap<ConstantVector>;
VectorConstantsTy VectorConstants;
DenseMap<PointerType *, std::unique_ptr<ConstantPointerNull>> CPNConstants;
@@ -1150,8 +1232,8 @@ public:
ConstantUniqueMap<InlineAsm> InlineAsms;
- ConstantInt *TheTrueVal;
- ConstantInt *TheFalseVal;
+ ConstantInt *TheTrueVal = nullptr;
+ ConstantInt *TheFalseVal = nullptr;
std::unique_ptr<ConstantTokenNone> TheNoneToken;
@@ -1159,7 +1241,6 @@ public:
Type VoidTy, LabelTy, HalfTy, FloatTy, DoubleTy, MetadataTy, TokenTy;
Type X86_FP80Ty, FP128Ty, PPC_FP128Ty, X86_MMXTy;
IntegerType Int1Ty, Int8Ty, Int16Ty, Int32Ty, Int64Ty, Int128Ty;
-
/// TypeAllocator - All dynamically allocated types are allocated from this.
/// They live forever until the context is torn down.
@@ -1167,23 +1248,22 @@ public:
DenseMap<unsigned, IntegerType*> IntegerTypes;
- typedef DenseSet<FunctionType *, FunctionTypeKeyInfo> FunctionTypeSet;
+ using FunctionTypeSet = DenseSet<FunctionType *, FunctionTypeKeyInfo>;
FunctionTypeSet FunctionTypes;
- typedef DenseSet<StructType *, AnonStructTypeKeyInfo> StructTypeSet;
+ using StructTypeSet = DenseSet<StructType *, AnonStructTypeKeyInfo>;
StructTypeSet AnonStructTypes;
StringMap<StructType*> NamedStructTypes;
- unsigned NamedStructTypesUniqueID;
+ unsigned NamedStructTypesUniqueID = 0;
DenseMap<std::pair<Type *, uint64_t>, ArrayType*> ArrayTypes;
DenseMap<std::pair<Type *, unsigned>, VectorType*> VectorTypes;
DenseMap<Type*, PointerType*> PointerTypes; // Pointers in AddrSpace = 0
DenseMap<std::pair<Type*, unsigned>, PointerType*> ASPointerTypes;
-
/// ValueHandles - This map keeps track of all of the value handles that are
/// watching a Value*. The Value::HasValueHandle bit is used to know
/// whether or not a value has an entry in this map.
- typedef DenseMap<Value*, ValueHandleBase*> ValueHandlesTy;
+ using ValueHandlesTy = DenseMap<Value *, ValueHandleBase *>;
ValueHandlesTy ValueHandles;
/// CustomMDKindNames - Map to hold the metadata string to ID mapping.
@@ -1219,6 +1299,20 @@ public:
void getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const;
uint32_t getOperandBundleTagID(StringRef Tag) const;
+ /// A set of interned synchronization scopes. The StringMap maps
+ /// synchronization scope names to their respective synchronization scope IDs.
+ StringMap<SyncScope::ID> SSC;
+
+ /// getOrInsertSyncScopeID - Maps synchronization scope name to
+ /// synchronization scope ID. Every synchronization scope registered with
+ /// LLVMContext has a unique ID, except for the pre-defined ones.
+ SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
+
+ /// getSyncScopeNames - Populates client supplied SmallVector with
+ /// synchronization scope names registered with LLVMContext. Synchronization
+ /// scope names are ordered by increasing synchronization scope IDs.
+ void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
/// Maintain the GC name for each function.
///
/// This saves allocating an additional word in Function for programs which
@@ -1241,6 +1335,6 @@ public:
OptBisect &getOptBisect();
};
-}
+} // end namespace llvm
-#endif
+#endif // LLVM_LIB_IR_LLVMCONTEXTIMPL_H
diff --git a/contrib/llvm/lib/IR/LegacyPassManager.cpp b/contrib/llvm/lib/IR/LegacyPassManager.cpp
index 628a67bd..995e1e5 100644
--- a/contrib/llvm/lib/IR/LegacyPassManager.cpp
+++ b/contrib/llvm/lib/IR/LegacyPassManager.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManagers.h"
@@ -465,6 +466,11 @@ public:
// null. It may be called multiple times.
static void createTheTimeInfo();
+ // print - Prints out timing information and then resets the timers.
+ void print() {
+ TG.print(*CreateInfoOutputFile());
+ }
+
/// getPassTimer - Return the timer for the specified pass if it exists.
Timer *getPassTimer(Pass *P) {
if (P->getAsPMDataManager())
@@ -587,7 +593,7 @@ AnalysisUsage *PMTopLevelManager::findAnalysisUsage(Pass *P) {
assert(Node && "cached analysis usage must be non null");
AnUsageMap[P] = &Node->AU;
- AnUsage = &Node->AU;;
+ AnUsage = &Node->AU;
}
return AnUsage;
}
@@ -619,21 +625,21 @@ void PMTopLevelManager::schedulePass(Pass *P) {
checkAnalysis = false;
const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
- for (AnalysisUsage::VectorType::const_iterator I = RequiredSet.begin(),
- E = RequiredSet.end(); I != E; ++I) {
+ for (const AnalysisID ID : RequiredSet) {
- Pass *AnalysisPass = findAnalysisPass(*I);
+ Pass *AnalysisPass = findAnalysisPass(ID);
if (!AnalysisPass) {
- const PassInfo *PI = findAnalysisPassInfo(*I);
+ const PassInfo *PI = findAnalysisPassInfo(ID);
if (!PI) {
// Pass P is not in the global PassRegistry
dbgs() << "Pass '" << P->getPassName() << "' is not initialized." << "\n";
dbgs() << "Verify if there is a pass dependency cycle." << "\n";
dbgs() << "Required Passes:" << "\n";
- for (AnalysisUsage::VectorType::const_iterator I2 = RequiredSet.begin(),
- E = RequiredSet.end(); I2 != E && I2 != I; ++I2) {
- Pass *AnalysisPass2 = findAnalysisPass(*I2);
+ for (const AnalysisID ID2 : RequiredSet) {
+ if (ID == ID2)
+ break;
+ Pass *AnalysisPass2 = findAnalysisPass(ID2);
if (AnalysisPass2) {
dbgs() << "\t" << AnalysisPass2->getPassName() << "\n";
} else {
@@ -1064,17 +1070,15 @@ void PMDataManager::collectRequiredAndUsedAnalyses(
void PMDataManager::initializeAnalysisImpl(Pass *P) {
AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
- for (AnalysisUsage::VectorType::const_iterator
- I = AnUsage->getRequiredSet().begin(),
- E = AnUsage->getRequiredSet().end(); I != E; ++I) {
- Pass *Impl = findAnalysisPass(*I, true);
+ for (const AnalysisID ID : AnUsage->getRequiredSet()) {
+ Pass *Impl = findAnalysisPass(ID, true);
if (!Impl)
// This may be an analysis pass that is initialized on the fly.
// If that is not the case then it will raise an assert when it is used.
continue;
AnalysisResolver *AR = P->getResolver();
assert(AR && "Analysis Resolver is not set");
- AR->addAnalysisImplsPair(*I, Impl);
+ AR->addAnalysisImplsPair(ID, Impl);
}
}
@@ -1106,21 +1110,19 @@ void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{
TPM->collectLastUses(LUses, P);
- for (SmallVectorImpl<Pass *>::iterator I = LUses.begin(),
- E = LUses.end(); I != E; ++I) {
+ for (Pass *P : LUses) {
dbgs() << "--" << std::string(Offset*2, ' ');
- (*I)->dumpPassStructure(0);
+ P->dumpPassStructure(0);
}
}
void PMDataManager::dumpPassArguments() const {
- for (SmallVectorImpl<Pass *>::const_iterator I = PassVector.begin(),
- E = PassVector.end(); I != E; ++I) {
- if (PMDataManager *PMD = (*I)->getAsPMDataManager())
+ for (Pass *P : PassVector) {
+ if (PMDataManager *PMD = P->getAsPMDataManager())
PMD->dumpPassArguments();
else
if (const PassInfo *PI =
- TPM->findAnalysisPassInfo((*I)->getPassID()))
+ TPM->findAnalysisPassInfo(P->getPassID()))
if (!PI->isAnalysisGroup())
dbgs() << " -" << PI->getPassArgument();
}
@@ -1249,9 +1251,8 @@ Pass *PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F) {
// Destructor
PMDataManager::~PMDataManager() {
- for (SmallVectorImpl<Pass *>::iterator I = PassVector.begin(),
- E = PassVector.end(); I != E; ++I)
- delete *I;
+ for (Pass *P : PassVector)
+ delete P;
}
//===----------------------------------------------------------------------===//
@@ -1278,35 +1279,35 @@ bool BBPassManager::runOnFunction(Function &F) {
bool Changed = doInitialization(F);
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
+ for (BasicBlock &BB : F)
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
BasicBlockPass *BP = getContainedPass(Index);
bool LocalChanged = false;
- dumpPassInfo(BP, EXECUTION_MSG, ON_BASICBLOCK_MSG, I->getName());
+ dumpPassInfo(BP, EXECUTION_MSG, ON_BASICBLOCK_MSG, BB.getName());
dumpRequiredSet(BP);
initializeAnalysisImpl(BP);
{
// If the pass crashes, remember this.
- PassManagerPrettyStackEntry X(BP, *I);
+ PassManagerPrettyStackEntry X(BP, BB);
TimeRegion PassTimer(getPassTimer(BP));
- LocalChanged |= BP->runOnBasicBlock(*I);
+ LocalChanged |= BP->runOnBasicBlock(BB);
}
Changed |= LocalChanged;
if (LocalChanged)
dumpPassInfo(BP, MODIFICATION_MSG, ON_BASICBLOCK_MSG,
- I->getName());
+ BB.getName());
dumpPreservedSet(BP);
dumpUsedSet(BP);
verifyPreservedAnalysis(BP);
removeNotPreservedAnalysis(BP);
recordAvailableAnalysis(BP);
- removeDeadPasses(BP, I->getName(), ON_BASICBLOCK_MSG);
+ removeDeadPasses(BP, BB.getName(), ON_BASICBLOCK_MSG);
}
return doFinalization(F) || Changed;
@@ -1752,6 +1753,13 @@ Timer *llvm::getPassTimer(Pass *P) {
return nullptr;
}
+/// If timing is enabled, report the times collected up to now and then reset
+/// them.
+void llvm::reportAndResetTimings() {
+ if (TheTimeInfo)
+ TheTimeInfo->print();
+}
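
  A minimal sketch of a driver using the new hook; it assumes reportAndResetTimings()
  is declared in llvm/IR/LegacyPassManager.h and that -time-passes is enabled so the
  TheTimeInfo singleton exists:

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    static void runAndReport(Module &M, legacy::PassManager &PM) {
      PM.run(M);                // run the configured pipeline
      reportAndResetTimings();  // print the timers now, then start fresh
    }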
+
//===----------------------------------------------------------------------===//
// PMStack implementation
//
diff --git a/contrib/llvm/lib/IR/MDBuilder.cpp b/contrib/llvm/lib/IR/MDBuilder.cpp
index f4bfd59..b9c4f48 100644
--- a/contrib/llvm/lib/IR/MDBuilder.cpp
+++ b/contrib/llvm/lib/IR/MDBuilder.cpp
@@ -56,11 +56,16 @@ MDNode *MDBuilder::createUnpredictable() {
return MDNode::get(Context, None);
}
-MDNode *MDBuilder::createFunctionEntryCount(uint64_t Count) {
+MDNode *MDBuilder::createFunctionEntryCount(
+ uint64_t Count, const DenseSet<GlobalValue::GUID> *Imports) {
Type *Int64Ty = Type::getInt64Ty(Context);
- return MDNode::get(Context,
- {createString("function_entry_count"),
- createConstant(ConstantInt::get(Int64Ty, Count))});
+ SmallVector<Metadata *, 8> Ops;
+ Ops.push_back(createString("function_entry_count"));
+ Ops.push_back(createConstant(ConstantInt::get(Int64Ty, Count)));
+ if (Imports)
+ for (auto ID : *Imports)
+ Ops.push_back(createConstant(ConstantInt::get(Int64Ty, ID)));
+ return MDNode::get(Context, Ops);
}
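
  A minimal usage sketch for the extended signature; the GUID set contents are
  assumptions and would normally come from the ThinLTO import summary:

    #include "llvm/ADT/DenseSet.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/MDBuilder.h"
    using namespace llvm;

    static void annotateEntryCount(Function &F, uint64_t Count,
                                   const DenseSet<GlobalValue::GUID> &Imports) {
      MDBuilder MDB(F.getContext());
      // Attach !prof metadata carrying the count plus the imported-function GUIDs.
      F.setMetadata(LLVMContext::MD_prof,
                    MDB.createFunctionEntryCount(Count, &Imports));
    }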
MDNode *MDBuilder::createFunctionSectionPrefix(StringRef Prefix) {
diff --git a/contrib/llvm/lib/IR/Mangler.cpp b/contrib/llvm/lib/IR/Mangler.cpp
index 41e11b3..03723bf 100644
--- a/contrib/llvm/lib/IR/Mangler.cpp
+++ b/contrib/llvm/lib/IR/Mangler.cpp
@@ -13,6 +13,7 @@
#include "llvm/IR/Mangler.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
@@ -172,3 +173,34 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
raw_svector_ostream OS(OutName);
getNameWithPrefix(OS, GV, CannotUsePrivateLabel);
}
+
+void llvm::emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
+ const Triple &TT, Mangler &Mangler) {
+ if (!GV->hasDLLExportStorageClass() || GV->isDeclaration())
+ return;
+
+ if (TT.isKnownWindowsMSVCEnvironment())
+ OS << " /EXPORT:";
+ else
+ OS << " -export:";
+
+ if (TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment()) {
+ std::string Flag;
+ raw_string_ostream FlagOS(Flag);
+ Mangler.getNameWithPrefix(FlagOS, GV, false);
+ FlagOS.flush();
+ if (Flag[0] == GV->getParent()->getDataLayout().getGlobalPrefix())
+ OS << Flag.substr(1);
+ else
+ OS << Flag;
+ } else {
+ Mangler.getNameWithPrefix(OS, GV, false);
+ }
+
+ if (!GV->getValueType()->isFunctionTy()) {
+ if (TT.isKnownWindowsMSVCEnvironment())
+ OS << ",DATA";
+ else
+ OS << ",data";
+ }
+}
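
  A minimal sketch of a caller (not part of the patch) that emits one export
  directive per dllexport definition; Module::global_values() is assumed to be
  available for iterating all global values:

    #include "llvm/ADT/Triple.h"
    #include "llvm/IR/Mangler.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void emitExportDirectives(raw_ostream &OS, const Module &M) {
      Mangler Mang;
      Triple TT(M.getTargetTriple());
      for (const GlobalValue &GV : M.global_values())
        emitLinkerFlagsForGlobalCOFF(OS, &GV, TT, Mang);  // no-op unless dllexport
    }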
diff --git a/contrib/llvm/lib/IR/Metadata.cpp b/contrib/llvm/lib/IR/Metadata.cpp
index 1d19304..ac02ff7 100644
--- a/contrib/llvm/lib/IR/Metadata.cpp
+++ b/contrib/llvm/lib/IR/Metadata.cpp
@@ -11,20 +11,52 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/IR/Metadata.h"
#include "LLVMContextImpl.h"
#include "MetadataImpl.h"
#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
using namespace llvm;
@@ -203,7 +235,7 @@ void ReplaceableMetadataImpl::replaceAllUsesWith(Metadata *MD) {
return;
// Copy out uses since UseMap will get touched below.
- typedef std::pair<void *, std::pair<OwnerTy, uint64_t>> UseTy;
+ using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
std::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
return L.second.second < R.second.second;
@@ -256,7 +288,7 @@ void ReplaceableMetadataImpl::resolveAllUses(bool ResolveUsers) {
}
// Copy out uses since UseMap could get touched below.
- typedef std::pair<void *, std::pair<OwnerTy, uint64_t>> UseTy;
+ using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
std::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
return L.second.second < R.second.second;
@@ -728,8 +760,8 @@ static T *uniquifyImpl(T *N, DenseSet<T *, InfoT> &Store) {
}
template <class NodeTy> struct MDNode::HasCachedHash {
- typedef char Yes[1];
- typedef char No[2];
+ using Yes = char[1];
+ using No = char[2];
template <class U, U Val> struct SFINAE {};
template <class U>
@@ -937,7 +969,7 @@ static void addRange(SmallVectorImpl<ConstantInt *> &EndPoints,
MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
// Given two ranges, we want to compute the union of the ranges. This
- // is slightly complitade by having to combine the intervals and merge
+ // is slightly complicated by having to combine the intervals and merge
// the ones that overlap.
if (!A || !B)
@@ -946,7 +978,7 @@ MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
if (A == B)
return A;
- // First, walk both lists in older of the lower boundary of each interval.
+ // First, walk both lists in order of the lower boundary of each interval.
// At each step, try to merge the new interval to the last one we added.
SmallVector<ConstantInt *, 4> EndPoints;
int AI = 0;
@@ -1027,8 +1059,7 @@ static SmallVector<TrackingMDRef, 4> &getNMDOps(void *Operands) {
}
NamedMDNode::NamedMDNode(const Twine &N)
- : Name(N.str()), Parent(nullptr),
- Operands(new SmallVector<TrackingMDRef, 4>()) {}
+ : Name(N.str()), Operands(new SmallVector<TrackingMDRef, 4>()) {}
NamedMDNode::~NamedMDNode() {
dropAllReferences();
@@ -1308,17 +1339,26 @@ bool Instruction::extractProfTotalWeight(uint64_t &TotalVal) const {
return false;
auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
- if (!ProfDataName || !ProfDataName->getString().equals("branch_weights"))
+ if (!ProfDataName)
return false;
- TotalVal = 0;
- for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
- auto *V = mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i));
- if (!V)
- return false;
- TotalVal += V->getValue().getZExtValue();
+ if (ProfDataName->getString().equals("branch_weights")) {
+ TotalVal = 0;
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
+ auto *V = mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i));
+ if (!V)
+ return false;
+ TotalVal += V->getValue().getZExtValue();
+ }
+ return true;
+ } else if (ProfDataName->getString().equals("VP") &&
+ ProfileData->getNumOperands() > 3) {
+ TotalVal = mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2))
+ ->getValue()
+ .getZExtValue();
+ return true;
}
- return true;
+ return false;
}
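
  A minimal sketch of the behaviour after this change, with the two !prof shapes it
  now accepts written out as comments (operand layout inferred from the code above):

    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void printProfTotal(const Instruction &I) {
      // !{!"branch_weights", i32 20, i32 12}            -> Total = 32  (sum)
      // !{!"VP", i32 0, i64 100, i64 1234, i64 80, ...} -> Total = 100 (operand 2)
      uint64_t Total = 0;
      if (I.extractProfTotalWeight(Total))
        errs() << "total profile weight: " << Total << "\n";
    }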
void Instruction::clearMetadataHashEntries() {
@@ -1432,7 +1472,7 @@ void GlobalObject::copyMetadata(const GlobalObject *Other, unsigned Offset) {
if (E)
OrigElements = E->getElements();
std::vector<uint64_t> Elements(OrigElements.size() + 2);
- Elements[0] = dwarf::DW_OP_plus;
+ Elements[0] = dwarf::DW_OP_plus_uconst;
Elements[1] = Offset;
std::copy(OrigElements.begin(), OrigElements.end(), Elements.begin() + 2);
E = DIExpression::get(getContext(), Elements);
@@ -1446,7 +1486,7 @@ void GlobalObject::addTypeMetadata(unsigned Offset, Metadata *TypeID) {
addMetadata(
LLVMContext::MD_type,
*MDTuple::get(getContext(),
- {llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ {ConstantAsMetadata::get(ConstantInt::get(
Type::getInt64Ty(getContext()), Offset)),
TypeID}));
}
@@ -1459,6 +1499,15 @@ DISubprogram *Function::getSubprogram() const {
return cast_or_null<DISubprogram>(getMetadata(LLVMContext::MD_dbg));
}
+bool Function::isDebugInfoForProfiling() const {
+ if (DISubprogram *SP = getSubprogram()) {
+ if (DICompileUnit *CU = SP->getUnit()) {
+ return CU->getDebugInfoForProfiling();
+ }
+ }
+ return false;
+}
+
void GlobalVariable::addDebugInfo(DIGlobalVariableExpression *GV) {
addMetadata(LLVMContext::MD_dbg, *GV);
}
diff --git a/contrib/llvm/lib/IR/Module.cpp b/contrib/llvm/lib/IR/Module.cpp
index 1911f84..c230a50 100644
--- a/contrib/llvm/lib/IR/Module.cpp
+++ b/contrib/llvm/lib/IR/Module.cpp
@@ -1,4 +1,4 @@
-//===-- Module.cpp - Implement the Module class ---------------------------===//
+//===- Module.cpp - Implement the Module class ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,25 +13,44 @@
#include "llvm/IR/Module.h"
#include "SymbolTableListTraitsImpl.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/GVMaterializer.h"
-#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Type.h"
#include "llvm/IR/TypeFinder.h"
-#include "llvm/Support/Dwarf.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/RandomNumberGenerator.h"
#include <algorithm>
-#include <cstdarg>
-#include <cstdlib>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
using namespace llvm;
@@ -69,7 +88,7 @@ Module::~Module() {
delete static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab);
}
-RandomNumberGenerator *Module::createRNG(const Pass* P) const {
+std::unique_ptr<RandomNumberGenerator> Module::createRNG(const Pass* P) const {
SmallString<32> Salt(P->getPassName());
// This RNG is guaranteed to produce the same random stream only
@@ -84,7 +103,7 @@ RandomNumberGenerator *Module::createRNG(const Pass* P) const {
// store salt metadata from the Module constructor.
Salt += sys::path::filename(getModuleIdentifier());
- return new RandomNumberGenerator(Salt);
+ return std::unique_ptr<RandomNumberGenerator>(new RandomNumberGenerator(Salt));
}
/// getNamedValue - Return the first global value in the module with
@@ -120,9 +139,8 @@ void Module::getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const {
// it. This is nice because it allows most passes to get away with not handling
// the symbol table directly for this common task.
//
-Constant *Module::getOrInsertFunction(StringRef Name,
- FunctionType *Ty,
- AttributeSet AttributeList) {
+Constant *Module::getOrInsertFunction(StringRef Name, FunctionType *Ty,
+ AttributeList AttributeList) {
// See if we have a definition for the specified function already.
GlobalValue *F = getNamedValue(Name);
if (!F) {
@@ -145,49 +163,7 @@ Constant *Module::getOrInsertFunction(StringRef Name,
Constant *Module::getOrInsertFunction(StringRef Name,
FunctionType *Ty) {
- return getOrInsertFunction(Name, Ty, AttributeSet());
-}
-
-// getOrInsertFunction - Look up the specified function in the module symbol
-// table. If it does not exist, add a prototype for the function and return it.
-// This version of the method takes a null terminated list of function
-// arguments, which makes it easier for clients to use.
-//
-Constant *Module::getOrInsertFunction(StringRef Name,
- AttributeSet AttributeList,
- Type *RetTy, ...) {
- va_list Args;
- va_start(Args, RetTy);
-
- // Build the list of argument types...
- std::vector<Type*> ArgTys;
- while (Type *ArgTy = va_arg(Args, Type*))
- ArgTys.push_back(ArgTy);
-
- va_end(Args);
-
- // Build the function type and chain to the other getOrInsertFunction...
- return getOrInsertFunction(Name,
- FunctionType::get(RetTy, ArgTys, false),
- AttributeList);
-}
-
-Constant *Module::getOrInsertFunction(StringRef Name,
- Type *RetTy, ...) {
- va_list Args;
- va_start(Args, RetTy);
-
- // Build the list of argument types...
- std::vector<Type*> ArgTys;
- while (Type *ArgTy = va_arg(Args, Type*))
- ArgTys.push_back(ArgTy);
-
- va_end(Args);
-
- // Build the function type and chain to the other getOrInsertFunction...
- return getOrInsertFunction(Name,
- FunctionType::get(RetTy, ArgTys, false),
- AttributeSet());
+ return getOrInsertFunction(Name, Ty, AttributeList());
}
// getFunction - Look up the specified function in the module symbol table.
@@ -208,7 +184,8 @@ Function *Module::getFunction(StringRef Name) const {
 /// If AllowLocal is set to true, this function will return globals that
 /// have local linkage. By default, these globals are not returned.
///
-GlobalVariable *Module::getGlobalVariable(StringRef Name, bool AllowLocal) {
+GlobalVariable *Module::getGlobalVariable(StringRef Name,
+ bool AllowLocal) const {
if (GlobalVariable *Result =
dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)))
if (AllowLocal || !Result->hasLocalLinkage())
@@ -465,6 +442,14 @@ void Module::dropAllReferences() {
GIF.dropAllReferences();
}
+unsigned Module::getNumberRegisterParameters() const {
+ auto *Val =
+ cast_or_null<ConstantAsMetadata>(getModuleFlag("NumRegisterParameters"));
+ if (!Val)
+ return 0;
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
+
unsigned Module::getDwarfVersion() const {
auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("Dwarf Version"));
if (!Val)
@@ -496,7 +481,7 @@ PICLevel::Level Module::getPICLevel() const {
}
void Module::setPICLevel(PICLevel::Level PL) {
- addModuleFlag(ModFlagBehavior::Error, "PIC Level", PL);
+ addModuleFlag(ModFlagBehavior::Max, "PIC Level", PL);
}
PIELevel::Level Module::getPIELevel() const {
@@ -510,7 +495,7 @@ PIELevel::Level Module::getPIELevel() const {
}
void Module::setPIELevel(PIELevel::Level PL) {
- addModuleFlag(ModFlagBehavior::Error, "PIE Level", PL);
+ addModuleFlag(ModFlagBehavior::Max, "PIE Level", PL);
}
void Module::setProfileSummary(Metadata *M) {
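
With the variadic getOrInsertFunction overloads deleted above, callers are expected to build the FunctionType themselves and go through the ArrayRef-based path. A minimal sketch of that replacement pattern follows; it is not part of the patch, and the function name and signature in it are invented for illustration.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);

  // The removed variadic overload took a null-terminated argument-type list:
  //   M.getOrInsertFunction("use_ptr", Type::getInt32Ty(Ctx),
  //                         Type::getInt8PtrTy(Ctx), nullptr);
  // After this change the FunctionType is built explicitly instead:
  FunctionType *FTy = FunctionType::get(Type::getInt32Ty(Ctx),
                                        {Type::getInt8PtrTy(Ctx)},
                                        /*isVarArg=*/false);
  Constant *Callee = M.getOrInsertFunction("use_ptr", FTy);
  (void)Callee;
  return 0;
}

The remaining AttributeList overload is used the same way when attributes need to be supplied up front.
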
diff --git a/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp b/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp
index 9072f4b..51c4bae 100644
--- a/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp
+++ b/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp
@@ -16,61 +16,13 @@
#include "llvm/ADT/StringMap.h"
using namespace llvm;
-// Create the combined module index/summary from multiple
-// per-module instances.
-void ModuleSummaryIndex::mergeFrom(std::unique_ptr<ModuleSummaryIndex> Other,
- uint64_t NextModuleId) {
- if (Other->modulePaths().empty())
- return;
-
- assert(Other->modulePaths().size() == 1 &&
- "Can only merge from an single-module index at that time");
-
- StringRef OtherModPath = Other->modulePaths().begin()->first();
- StringRef ModPath = addModulePath(OtherModPath, NextModuleId,
- Other->getModuleHash(OtherModPath))
- ->first();
-
- for (auto &OtherGlobalValSummaryLists : *Other) {
- GlobalValue::GUID ValueGUID = OtherGlobalValSummaryLists.first;
- GlobalValueSummaryList &List = OtherGlobalValSummaryLists.second;
-
- // Assert that the value summary list only has one entry, since we shouldn't
- // have duplicate names within a single per-module index.
- assert(List.size() == 1);
- std::unique_ptr<GlobalValueSummary> Summary = std::move(List.front());
-
- // Note the module path string ref was copied above and is still owned by
- // the original per-module index. Reset it to the new module path
- // string reference owned by the combined index.
- Summary->setModulePath(ModPath);
-
- // Add new value summary to existing list. There may be duplicates when
- // combining GlobalValueMap entries, due to COMDAT values. Any local
- // values were given unique global IDs.
- addGlobalValueSummary(ValueGUID, std::move(Summary));
- }
-}
-
-void ModuleSummaryIndex::removeEmptySummaryEntries() {
- for (auto MI = begin(), MIE = end(); MI != MIE;) {
- // Only expect this to be called on a per-module index, which has a single
- // entry per value entry list.
- assert(MI->second.size() == 1);
- if (!MI->second[0])
- MI = GlobalValueMap.erase(MI);
- else
- ++MI;
- }
-}
-
// Collect for the given module the list of function it defines
// (GUID -> Summary).
void ModuleSummaryIndex::collectDefinedFunctionsForModule(
StringRef ModulePath, GVSummaryMapTy &GVSummaryMap) const {
for (auto &GlobalList : *this) {
auto GUID = GlobalList.first;
- for (auto &GlobSummary : GlobalList.second) {
+ for (auto &GlobSummary : GlobalList.second.SummaryList) {
auto *Summary = dyn_cast_or_null<FunctionSummary>(GlobSummary.get());
if (!Summary)
// Ignore global variable, focus on functions
@@ -88,7 +40,7 @@ void ModuleSummaryIndex::collectDefinedGVSummariesPerModule(
StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries) const {
for (auto &GlobalList : *this) {
auto GUID = GlobalList.first;
- for (auto &Summary : GlobalList.second) {
+ for (auto &Summary : GlobalList.second.SummaryList) {
ModuleToDefinedGVSummaries[Summary->modulePath()][GUID] = Summary.get();
}
}
@@ -97,10 +49,23 @@ void ModuleSummaryIndex::collectDefinedGVSummariesPerModule(
GlobalValueSummary *
ModuleSummaryIndex::getGlobalValueSummary(uint64_t ValueGUID,
bool PerModuleIndex) const {
- auto SummaryList = findGlobalValueSummaryList(ValueGUID);
- assert(SummaryList != end() && "GlobalValue not found in index");
- assert((!PerModuleIndex || SummaryList->second.size() == 1) &&
+ auto VI = getValueInfo(ValueGUID);
+ assert(VI && "GlobalValue not found in index");
+ assert((!PerModuleIndex || VI.getSummaryList().size() == 1) &&
"Expected a single entry per global value in per-module index");
- auto &Summary = SummaryList->second[0];
+ auto &Summary = VI.getSummaryList()[0];
return Summary.get();
}
+
+bool ModuleSummaryIndex::isGUIDLive(GlobalValue::GUID GUID) const {
+ auto VI = getValueInfo(GUID);
+ if (!VI)
+ return true;
+ const auto &SummaryList = VI.getSummaryList();
+ if (SummaryList.empty())
+ return true;
+ for (auto &I : SummaryList)
+ if (isGlobalValueLive(I.get()))
+ return true;
+ return false;
+}
diff --git a/contrib/llvm/lib/IR/Operator.cpp b/contrib/llvm/lib/IR/Operator.cpp
index 2fba24d..7d819f3 100644
--- a/contrib/llvm/lib/IR/Operator.cpp
+++ b/contrib/llvm/lib/IR/Operator.cpp
@@ -1,4 +1,18 @@
+//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the non-inline methods for the LLVM Operator classes.
+//
+//===----------------------------------------------------------------------===//
+
#include "llvm/IR/Operator.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
diff --git a/contrib/llvm/lib/IR/OptBisect.cpp b/contrib/llvm/lib/IR/OptBisect.cpp
index e9574ca..f1c7005 100644
--- a/contrib/llvm/lib/IR/OptBisect.cpp
+++ b/contrib/llvm/lib/IR/OptBisect.cpp
@@ -13,11 +13,12 @@
///
//===----------------------------------------------------------------------===//
+#include "llvm/IR/OptBisect.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/RegionInfo.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/OptBisect.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
@@ -39,14 +40,6 @@ static void printPassMessage(const StringRef &Name, int PassNum,
<< "(" << PassNum << ") " << Name << " on " << TargetDesc << "\n";
}
-static void printCaseMessage(int CaseNum, StringRef Msg, bool Running) {
- if (Running)
- errs() << "BISECT: running case (";
- else
- errs() << "BISECT: NOT running case (";
- errs() << CaseNum << "): " << Msg << "\n";
-}
-
static std::string getDescription(const Module &M) {
return "module (" + M.getName().str() + ")";
}
@@ -61,13 +54,20 @@ static std::string getDescription(const BasicBlock &BB) {
}
static std::string getDescription(const Loop &L) {
- // FIXME: I'd like to be able to provide a better description here, but
- // calling L->getHeader() would introduce a new dependency on the
- // LLVMCore library.
+ // FIXME: Move into LoopInfo so we can get a better description
+ // (and avoid a circular dependency between IR and Analysis).
return "loop";
}
+static std::string getDescription(const Region &R) {
+ // FIXME: Move into RegionInfo so we can get a better description
+ // (and avoid a circular dependency between IR and Analysis).
+ return "region";
+}
+
static std::string getDescription(const CallGraphSCC &SCC) {
+ // FIXME: Move into CallGraphSCCPass to avoid circular dependency between
+ // IR and Analysis.
std::string Desc = "SCC (";
bool First = true;
for (CallGraphNode *CGN : SCC) {
@@ -91,6 +91,7 @@ template bool OptBisect::shouldRunPass(const Pass *, const Function &);
template bool OptBisect::shouldRunPass(const Pass *, const BasicBlock &);
template bool OptBisect::shouldRunPass(const Pass *, const Loop &);
template bool OptBisect::shouldRunPass(const Pass *, const CallGraphSCC &);
+template bool OptBisect::shouldRunPass(const Pass *, const Region &);
template <class UnitT>
bool OptBisect::shouldRunPass(const Pass *P, const UnitT &U) {
@@ -108,13 +109,3 @@ bool OptBisect::checkPass(const StringRef PassName,
printPassMessage(PassName, CurBisectNum, TargetDesc, ShouldRun);
return ShouldRun;
}
-
-bool OptBisect::shouldRunCase(const Twine &Msg) {
- if (!BisectEnabled)
- return true;
- int CurFuelNum = ++LastBisectNum;
- bool ShouldRun = (OptBisectLimit == -1 || CurFuelNum <= OptBisectLimit);
- printCaseMessage(CurFuelNum, Msg.str(), ShouldRun);
- return ShouldRun;
-}
-
diff --git a/contrib/llvm/lib/IR/Pass.cpp b/contrib/llvm/lib/IR/Pass.cpp
index a42945e..f1b5f2f 100644
--- a/contrib/llvm/lib/IR/Pass.cpp
+++ b/contrib/llvm/lib/IR/Pass.cpp
@@ -118,10 +118,12 @@ void Pass::print(raw_ostream &O,const Module*) const {
O << "Pass::print not implemented for pass: '" << getPassName() << "'!\n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// dump - call print(cerr);
LLVM_DUMP_METHOD void Pass::dump() const {
print(dbgs(), nullptr);
}
+#endif
//===----------------------------------------------------------------------===//
// ImmutablePass Implementation
diff --git a/contrib/llvm/lib/IR/PassManager.cpp b/contrib/llvm/lib/IR/PassManager.cpp
index 8f68bb1..47fdfed 100644
--- a/contrib/llvm/lib/IR/PassManager.cpp
+++ b/contrib/llvm/lib/IR/PassManager.cpp
@@ -91,4 +91,6 @@ bool FunctionAnalysisManagerModuleProxy::Result::invalidate(
}
}
+AnalysisSetKey CFGAnalyses::SetKey;
+
AnalysisSetKey PreservedAnalyses::AllAnalysesKey;
diff --git a/contrib/llvm/lib/IR/PassRegistry.cpp b/contrib/llvm/lib/IR/PassRegistry.cpp
index 584dee2..c0f6f07 100644
--- a/contrib/llvm/lib/IR/PassRegistry.cpp
+++ b/contrib/llvm/lib/IR/PassRegistry.cpp
@@ -105,8 +105,6 @@ void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
ImplementationInfo->getNormalCtor() &&
"Cannot specify pass as default if it does not have a default ctor");
InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
- InterfaceInfo->setTargetMachineCtor(
- ImplementationInfo->getTargetMachineCtor());
}
}
diff --git a/contrib/llvm/lib/IR/SafepointIRVerifier.cpp b/contrib/llvm/lib/IR/SafepointIRVerifier.cpp
new file mode 100644
index 0000000..8b328c2
--- /dev/null
+++ b/contrib/llvm/lib/IR/SafepointIRVerifier.cpp
@@ -0,0 +1,437 @@
+//===-- SafepointIRVerifier.cpp - Verify gc.statepoint invariants ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run a sanity check on the IR to ensure that Safepoints - if they've been
+// inserted - were inserted correctly. In particular, look for use of
+// non-relocated values after a safepoint. Its primary use is to check the
+// correctness of safepoint insertion immediately after insertion, but it can
+// also be used to verify that later transforms have not found a way to break
+// safepoint semantics.
+//
+// In its current form, this verifier checks a property which is sufficient, but
+// not necessary for correctness. There are some cases where an unrelocated
+// pointer can be used after the safepoint. Consider this example:
+//
+// a = ...
+// b = ...
+// (a',b') = safepoint(a,b)
+// c = cmp eq a b
+// br c, ..., ....
+//
+// Because it is valid to reorder 'c' above the safepoint, this is legal. In
+// practice, this is a somewhat uncommon transform, but CodeGenPrep does create
+// idioms like this. The verifier knows about these cases and avoids reporting
+// false positives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/SafepointIRVerifier.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "safepoint-ir-verifier"
+
+using namespace llvm;
+
+/// This option is used for writing test cases. Instead of crashing the program
+/// when verification fails, report a message to the console (for FileCheck
+/// usage) and continue execution as if nothing happened.
+static cl::opt<bool> PrintOnly("safepoint-ir-verifier-print-only",
+ cl::init(false));
+
+static void Verify(const Function &F, const DominatorTree &DT);
+
+struct SafepointIRVerifier : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ DominatorTree DT;
+ SafepointIRVerifier() : FunctionPass(ID) {
+ initializeSafepointIRVerifierPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ DT.recalculate(F);
+ Verify(F, DT);
+ return false; // no modifications
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ StringRef getPassName() const override { return "safepoint verifier"; }
+};
+
+void llvm::verifySafepointIR(Function &F) {
+ SafepointIRVerifier pass;
+ pass.runOnFunction(F);
+}
+
+char SafepointIRVerifier::ID = 0;
+
+FunctionPass *llvm::createSafepointIRVerifierPass() {
+ return new SafepointIRVerifier();
+}
+
+INITIALIZE_PASS_BEGIN(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, true)
+INITIALIZE_PASS_END(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, true)
+
+static bool isGCPointerType(Type *T) {
+ if (auto *PT = dyn_cast<PointerType>(T))
+ // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
+ // GC managed heap. We know that a pointer into this heap needs to be
+ // updated and that no other pointer does.
+ return (1 == PT->getAddressSpace());
+ return false;
+}
+
+static bool containsGCPtrType(Type *Ty) {
+ if (isGCPointerType(Ty))
+ return true;
+ if (VectorType *VT = dyn_cast<VectorType>(Ty))
+ return isGCPointerType(VT->getScalarType());
+ if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
+ return containsGCPtrType(AT->getElementType());
+ if (StructType *ST = dyn_cast<StructType>(Ty))
+ return std::any_of(ST->subtypes().begin(), ST->subtypes().end(),
+ containsGCPtrType);
+ return false;
+}
+
+// Debugging aid -- prints a [Begin, End) range of values.
+template<typename IteratorTy>
+static void PrintValueSet(raw_ostream &OS, IteratorTy Begin, IteratorTy End) {
+ OS << "[ ";
+ while (Begin != End) {
+ OS << **Begin << " ";
+ ++Begin;
+ }
+ OS << "]";
+}
+
+/// The verifier algorithm is phrased in terms of availability. The set of
+/// values "available" at a given point in the control flow graph is the set of
+/// correctly relocated values at that point, and is a subset of the set of
+/// definitions dominating that point.
+
+/// State we compute and track per basic block.
+struct BasicBlockState {
+ // Set of values available coming in, before the phi nodes
+ DenseSet<const Value *> AvailableIn;
+
+ // Set of values available going out
+ DenseSet<const Value *> AvailableOut;
+
+ // AvailableOut minus AvailableIn.
+ // All elements are Instructions
+ DenseSet<const Value *> Contribution;
+
+ // True if this block contains a safepoint and thus AvailableIn does not
+ // contribute to AvailableOut.
+ bool Cleared = false;
+};
+
+
+/// Gather all the definitions dominating the start of BB into Result. This is
+/// simply the Defs introduced by every dominating basic block and the function
+/// arguments.
+static void GatherDominatingDefs(const BasicBlock *BB,
+ DenseSet<const Value *> &Result,
+ const DominatorTree &DT,
+ DenseMap<const BasicBlock *, BasicBlockState *> &BlockMap) {
+ DomTreeNode *DTN = DT[const_cast<BasicBlock *>(BB)];
+
+ while (DTN->getIDom()) {
+ DTN = DTN->getIDom();
+ const auto &Defs = BlockMap[DTN->getBlock()]->Contribution;
+ Result.insert(Defs.begin(), Defs.end());
+ // If this block is 'Cleared', then nothing LiveIn to this block can be
+ // available after this block completes. Note: This turns out to be
+ // really important for reducing memory consumption of the initial available
+ // sets and thus peak memory usage by this verifier.
+ if (BlockMap[DTN->getBlock()]->Cleared)
+ return;
+ }
+
+ for (const Argument &A : BB->getParent()->args())
+ if (containsGCPtrType(A.getType()))
+ Result.insert(&A);
+}
+
+/// Model the effect of an instruction on the set of available values.
+static void TransferInstruction(const Instruction &I, bool &Cleared,
+ DenseSet<const Value *> &Available) {
+ if (isStatepoint(I)) {
+ Cleared = true;
+ Available.clear();
+ } else if (containsGCPtrType(I.getType()))
+ Available.insert(&I);
+}
+
+/// Compute the AvailableOut set for BB, based on the
+/// BasicBlockState BBS, which is the BasicBlockState for BB. FirstPass is set
+/// when the verifier runs for the first time computing the AvailableOut set
+/// for BB.
+static void TransferBlock(const BasicBlock *BB,
+ BasicBlockState &BBS, bool FirstPass) {
+
+ const DenseSet<const Value *> &AvailableIn = BBS.AvailableIn;
+ DenseSet<const Value *> &AvailableOut = BBS.AvailableOut;
+
+ if (BBS.Cleared) {
+ // AvailableOut does not change no matter how the input changes, just
+ // leave it be. We need to force this calculation the first time so that
+ // we have an AvailableOut at all.
+ if (FirstPass) {
+ AvailableOut = BBS.Contribution;
+ }
+ } else {
+ // Otherwise, we need to reduce the AvailableOut set by things which are no
+ // longer in our AvailableIn
+ DenseSet<const Value *> Temp = BBS.Contribution;
+ set_union(Temp, AvailableIn);
+ AvailableOut = std::move(Temp);
+ }
+
+ DEBUG(dbgs() << "Transferred block " << BB->getName() << " from ";
+ PrintValueSet(dbgs(), AvailableIn.begin(), AvailableIn.end());
+ dbgs() << " to ";
+ PrintValueSet(dbgs(), AvailableOut.begin(), AvailableOut.end());
+ dbgs() << "\n";);
+}
+
+/// A given derived pointer can have multiple base pointers through phi/selects.
+/// This type indicates when the base pointer is exclusively constant
+/// (ExclusivelySomeConstant), and if that constant is proven to be exclusively
+/// null, we record that as ExclusivelyNull. In all other cases, the BaseType is
+/// NonConstant.
+enum BaseType {
+ NonConstant = 1, // Base pointer is not exclusively constant.
+ ExclusivelyNull,
+ ExclusivelySomeConstant // Base pointers for a given derived pointer are from a
+ // set of constants, but they are not exclusively
+ // null.
+};
+
+/// Return the baseType for Val which states whether Val is exclusively
+/// derived from constant/null, or not exclusively derived from constant.
+/// Val is exclusively derived off a constant base when all operands of phi and
+/// selects are derived off a constant base.
+static enum BaseType getBaseType(const Value *Val) {
+
+ SmallVector<const Value *, 32> Worklist;
+ DenseSet<const Value *> Visited;
+ bool isExclusivelyDerivedFromNull = true;
+ Worklist.push_back(Val);
+ // Strip through all the bitcasts and geps to get the base pointer. Also check for
+ // the exclusive value when there can be multiple base pointers (through phis
+ // or selects).
+ while(!Worklist.empty()) {
+ const Value *V = Worklist.pop_back_val();
+ if (!Visited.insert(V).second)
+ continue;
+
+ if (const auto *CI = dyn_cast<CastInst>(V)) {
+ Worklist.push_back(CI->stripPointerCasts());
+ continue;
+ }
+ if (const auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
+ Worklist.push_back(GEP->getPointerOperand());
+ continue;
+ }
+ // Push all the incoming values of phi node into the worklist for
+ // processing.
+ if (const auto *PN = dyn_cast<PHINode>(V)) {
+ for (Value *InV: PN->incoming_values())
+ Worklist.push_back(InV);
+ continue;
+ }
+ if (const auto *SI = dyn_cast<SelectInst>(V)) {
+ // Push in the true and false values
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+ if (isa<Constant>(V)) {
+ // We found at least one base pointer which is non-null, so this derived
+ // pointer is not exclusively derived from null.
+ if (V != Constant::getNullValue(V->getType()))
+ isExclusivelyDerivedFromNull = false;
+ // Continue processing the remaining values to make sure it's exclusively
+ // constant.
+ continue;
+ }
+ // At this point, we know that the base pointer is not exclusively
+ // constant.
+ return BaseType::NonConstant;
+ }
+ // Now, we know that the base pointer is exclusively constant, but we need to
+ // differentiate between exclusive null constant and non-null constant.
+ return isExclusivelyDerivedFromNull ? BaseType::ExclusivelyNull
+ : BaseType::ExclusivelySomeConstant;
+}
+
+static void Verify(const Function &F, const DominatorTree &DT) {
+ SpecificBumpPtrAllocator<BasicBlockState> BSAllocator;
+ DenseMap<const BasicBlock *, BasicBlockState *> BlockMap;
+
+ DEBUG(dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n");
+ if (PrintOnly)
+ dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n";
+
+
+ for (const BasicBlock &BB : F) {
+ BasicBlockState *BBS = new(BSAllocator.Allocate()) BasicBlockState;
+ for (const auto &I : BB)
+ TransferInstruction(I, BBS->Cleared, BBS->Contribution);
+ BlockMap[&BB] = BBS;
+ }
+
+ for (auto &BBI : BlockMap) {
+ GatherDominatingDefs(BBI.first, BBI.second->AvailableIn, DT, BlockMap);
+ TransferBlock(BBI.first, *BBI.second, true);
+ }
+
+ SetVector<const BasicBlock *> Worklist;
+ for (auto &BBI : BlockMap)
+ Worklist.insert(BBI.first);
+
+ // This loop iterates the AvailableIn and AvailableOut sets to a fixed point.
+ // The AvailableIn and AvailableOut sets decrease as we iterate.
+ while (!Worklist.empty()) {
+ const BasicBlock *BB = Worklist.pop_back_val();
+ BasicBlockState *BBS = BlockMap[BB];
+
+ size_t OldInCount = BBS->AvailableIn.size();
+ for (const BasicBlock *PBB : predecessors(BB))
+ set_intersect(BBS->AvailableIn, BlockMap[PBB]->AvailableOut);
+
+ if (OldInCount == BBS->AvailableIn.size())
+ continue;
+
+ assert(OldInCount > BBS->AvailableIn.size() && "invariant!");
+
+ size_t OldOutCount = BBS->AvailableOut.size();
+ TransferBlock(BB, *BBS, false);
+ if (OldOutCount != BBS->AvailableOut.size()) {
+ assert(OldOutCount > BBS->AvailableOut.size() && "invariant!");
+ Worklist.insert(succ_begin(BB), succ_end(BB));
+ }
+ }
+
+ // We now have all the information we need to decide if the use of a heap
+ // reference is legal or not, given our safepoint semantics.
+
+ bool AnyInvalidUses = false;
+
+ auto ReportInvalidUse = [&AnyInvalidUses](const Value &V,
+ const Instruction &I) {
+ errs() << "Illegal use of unrelocated value found!\n";
+ errs() << "Def: " << V << "\n";
+ errs() << "Use: " << I << "\n";
+ if (!PrintOnly)
+ abort();
+ AnyInvalidUses = true;
+ };
+
+ auto isNotExclusivelyConstantDerived = [](const Value *V) {
+ return getBaseType(V) == BaseType::NonConstant;
+ };
+
+ for (const BasicBlock &BB : F) {
+ // We destructively modify AvailableIn as we traverse the block instruction
+ // by instruction.
+ DenseSet<const Value *> &AvailableSet = BlockMap[&BB]->AvailableIn;
+ for (const Instruction &I : BB) {
+ if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ if (containsGCPtrType(PN->getType()))
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ const BasicBlock *InBB = PN->getIncomingBlock(i);
+ const Value *InValue = PN->getIncomingValue(i);
+
+ if (isNotExclusivelyConstantDerived(InValue) &&
+ !BlockMap[InBB]->AvailableOut.count(InValue))
+ ReportInvalidUse(*InValue, *PN);
+ }
+ } else if (isa<CmpInst>(I) &&
+ containsGCPtrType(I.getOperand(0)->getType())) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ enum BaseType baseTyLHS = getBaseType(LHS),
+ baseTyRHS = getBaseType(RHS);
+
+ // Returns true if LHS and RHS are unrelocated pointers and they are
+ // valid unrelocated uses.
+ auto hasValidUnrelocatedUse = [&AvailableSet, baseTyLHS, baseTyRHS, &LHS, &RHS] () {
+ // A cmp instruction has valid unrelocated pointer operands only if
+ // both operands are unrelocated pointers.
+ // In the comparison between two pointers, if one is an unrelocated
+ // use, the other *should be* an unrelocated use, for this
+ // instruction to contain valid unrelocated uses. This unrelocated
+ // use can be a null constant as well, or another unrelocated
+ // pointer.
+ if (AvailableSet.count(LHS) || AvailableSet.count(RHS))
+ return false;
+ // Constant pointers (that are not exclusively null) may have
+ // meaning in different VMs, so we cannot reorder the compare
+ // against constant pointers before the safepoint. In other words,
+ // comparison of an unrelocated use against a non-null constant
+ // may be invalid.
+ if ((baseTyLHS == BaseType::ExclusivelySomeConstant &&
+ baseTyRHS == BaseType::NonConstant) ||
+ (baseTyLHS == BaseType::NonConstant &&
+ baseTyRHS == BaseType::ExclusivelySomeConstant))
+ return false;
+ // All other cases are valid cases enumerated below:
+ // 1. Comparison between an exclusively derived null pointer and a
+ // constant base pointer.
+ // 2. Comparison between an exclusively derived null pointer and a
+ // non-constant unrelocated base pointer.
+ // 3. Comparison between 2 unrelocated pointers.
+ return true;
+ };
+ if (!hasValidUnrelocatedUse()) {
+ // Print out all non-constant derived pointers that are unrelocated
+ // uses, which are invalid.
+ if (baseTyLHS == BaseType::NonConstant && !AvailableSet.count(LHS))
+ ReportInvalidUse(*LHS, I);
+ if (baseTyRHS == BaseType::NonConstant && !AvailableSet.count(RHS))
+ ReportInvalidUse(*RHS, I);
+ }
+ } else {
+ for (const Value *V : I.operands())
+ if (containsGCPtrType(V->getType()) &&
+ isNotExclusivelyConstantDerived(V) && !AvailableSet.count(V))
+ ReportInvalidUse(*V, I);
+ }
+
+ bool Cleared = false;
+ TransferInstruction(I, Cleared, AvailableSet);
+ (void)Cleared;
+ }
+ }
+
+ if (PrintOnly && !AnyInvalidUses) {
+ dbgs() << "No illegal uses found by SafepointIRVerifier in: " << F.getName()
+ << "\n";
+ }
+}
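
A minimal standalone model of the availability fixed point the new verifier computes may help here; it is not code from the patch and uses no LLVM API. AvailableIn of a block is the intersection of its predecessors' AvailableOut, a block containing a safepoint forwards only the values defined after the clear, and the sets only shrink until convergence. Block and value names are invented; the real pass seeds AvailableIn from dominating definitions rather than from a universal set, which should converge to the same sets on a small CFG like this one.

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using ValueSet = std::set<std::string>;

// Hypothetical block model: Contribution holds the GC pointers the block
// defines, Cleared marks a block that contains a safepoint.
struct Block {
  std::vector<std::string> Preds;
  std::vector<std::string> Succs;
  ValueSet Contribution;
  bool Cleared;
};

static ValueSet intersect(const ValueSet &A, const ValueSet &B) {
  ValueSet R;
  for (const auto &V : A)
    if (B.count(V))
      R.insert(V);
  return R;
}

// Out = Contribution if the block clears at a safepoint, else In + Contribution.
static ValueSet transfer(const Block &B, const ValueSet &In) {
  ValueSet Out = B.Contribution;
  if (!B.Cleared)
    Out.insert(In.begin(), In.end());
  return Out;
}

int main() {
  std::map<std::string, Block> CFG;
  CFG["entry"] = {{}, {"left", "right"}, {"a", "b"}, false};
  CFG["left"] = {{"entry"}, {"merge"}, {"c"}, true}; // safepoint: a, b cleared
  CFG["right"] = {{"entry"}, {"merge"}, {"d"}, false};
  CFG["merge"] = {{"left", "right"}, {}, {}, false};

  // Optimistic seed: assume everything is available, then shrink from there.
  ValueSet Universe;
  for (auto &KV : CFG)
    Universe.insert(KV.second.Contribution.begin(),
                    KV.second.Contribution.end());

  std::map<std::string, ValueSet> In, Out;
  for (auto &KV : CFG) {
    In[KV.first] = KV.second.Preds.empty() ? ValueSet() : Universe;
    Out[KV.first] = transfer(KV.second, In[KV.first]);
  }

  std::vector<std::string> Worklist;
  for (auto &KV : CFG)
    Worklist.push_back(KV.first);

  while (!Worklist.empty()) {
    std::string Name = Worklist.back();
    Worklist.pop_back();
    const Block &B = CFG[Name];

    // AvailableIn is the intersection of the predecessors' AvailableOut.
    ValueSet NewIn = B.Preds.empty() ? ValueSet() : Universe;
    for (const auto &P : B.Preds)
      NewIn = intersect(NewIn, Out[P]);
    if (NewIn == In[Name])
      continue;
    In[Name] = NewIn;

    ValueSet NewOut = transfer(B, NewIn);
    if (NewOut != Out[Name]) {
      Out[Name] = NewOut;
      Worklist.insert(Worklist.end(), B.Succs.begin(), B.Succs.end());
    }
  }

  for (auto &KV : CFG) {
    std::cout << KV.first << " AvailableOut:";
    for (const auto &V : Out[KV.first])
      std::cout << ' ' << V;
    std::cout << '\n';
  }
  return 0;
}

Running the sketch leaves the merge block with an empty AvailableOut: after the safepoint on the left path, neither 'a' nor 'b' is commonly available at the join, which is exactly the situation in which the verifier reports a use of an unrelocated value.
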
diff --git a/contrib/llvm/lib/IR/Statepoint.cpp b/contrib/llvm/lib/IR/Statepoint.cpp
index 63be1e7..18efee21 100644
--- a/contrib/llvm/lib/IR/Statepoint.cpp
+++ b/contrib/llvm/lib/IR/Statepoint.cpp
@@ -44,27 +44,40 @@ bool llvm::isGCRelocate(ImmutableCallSite CS) {
return CS.getInstruction() && isa<GCRelocateInst>(CS.getInstruction());
}
+bool llvm::isGCRelocate(const Value *V) {
+ if (auto CS = ImmutableCallSite(V))
+ return isGCRelocate(CS);
+ return false;
+}
+
bool llvm::isGCResult(ImmutableCallSite CS) {
return CS.getInstruction() && isa<GCResultInst>(CS.getInstruction());
}
+bool llvm::isGCResult(const Value *V) {
+ if (auto CS = ImmutableCallSite(V))
+ return isGCResult(CS);
+ return false;
+}
+
bool llvm::isStatepointDirectiveAttr(Attribute Attr) {
return Attr.hasAttribute("statepoint-id") ||
Attr.hasAttribute("statepoint-num-patch-bytes");
}
-StatepointDirectives llvm::parseStatepointDirectivesFromAttrs(AttributeSet AS) {
+StatepointDirectives
+llvm::parseStatepointDirectivesFromAttrs(AttributeList AS) {
StatepointDirectives Result;
Attribute AttrID =
- AS.getAttribute(AttributeSet::FunctionIndex, "statepoint-id");
+ AS.getAttribute(AttributeList::FunctionIndex, "statepoint-id");
uint64_t StatepointID;
if (AttrID.isStringAttribute())
if (!AttrID.getValueAsString().getAsInteger(10, StatepointID))
Result.StatepointID = StatepointID;
uint32_t NumPatchBytes;
- Attribute AttrNumPatchBytes = AS.getAttribute(AttributeSet::FunctionIndex,
+ Attribute AttrNumPatchBytes = AS.getAttribute(AttributeList::FunctionIndex,
"statepoint-num-patch-bytes");
if (AttrNumPatchBytes.isStringAttribute())
if (!AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes))
diff --git a/contrib/llvm/lib/IR/Type.cpp b/contrib/llvm/lib/IR/Type.cpp
index ca86673..20e9c2b 100644
--- a/contrib/llvm/lib/IR/Type.cpp
+++ b/contrib/llvm/lib/IR/Type.cpp
@@ -1,4 +1,4 @@
-//===-- Type.cpp - Implement the Type class -------------------------------===//
+//===- Type.cpp - Implement the Type class --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,10 +13,23 @@
#include "llvm/IR/Type.h"
#include "LLVMContextImpl.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
-#include <algorithm>
-#include <cstdarg>
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <utility>
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -41,12 +54,6 @@ Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) {
}
}
-Type *Type::getScalarType() const {
- if (auto *VTy = dyn_cast<VectorType>(this))
- return VTy->getElementType();
- return const_cast<Type*>(this);
-}
-
bool Type::isIntegerTy(unsigned Bitwidth) const {
return isIntegerTy() && cast<IntegerType>(this)->getBitWidth() == Bitwidth;
}
@@ -226,7 +233,6 @@ PointerType *Type::getInt64PtrTy(LLVMContext &C, unsigned AS) {
return getInt64Ty(C)->getPointerTo(AS);
}
-
//===----------------------------------------------------------------------===//
// IntegerType Implementation
//===----------------------------------------------------------------------===//
@@ -368,7 +374,8 @@ void StructType::setName(StringRef Name) {
if (Name == getName()) return;
StringMap<StructType *> &SymbolTable = getContext().pImpl->NamedStructTypes;
- typedef StringMap<StructType *>::MapEntryTy EntryTy;
+
+ using EntryTy = StringMap<StructType *>::MapEntryTy;
// If this struct already had a name, remove its symbol table entry. Don't
// delete the data yet because it may be part of the new name.
@@ -425,21 +432,6 @@ StructType *StructType::get(LLVMContext &Context, bool isPacked) {
return get(Context, None, isPacked);
}
-StructType *StructType::get(Type *type, ...) {
- assert(type && "Cannot create a struct type with no elements with this");
- LLVMContext &Ctx = type->getContext();
- va_list ap;
- SmallVector<llvm::Type*, 8> StructFields;
- va_start(ap, type);
- while (type) {
- StructFields.push_back(type);
- type = va_arg(ap, llvm::Type*);
- }
- auto *Ret = llvm::StructType::get(Ctx, StructFields);
- va_end(ap);
- return Ret;
-}
-
StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements,
StringRef Name, bool isPacked) {
StructType *ST = create(Context, Name);
@@ -468,21 +460,6 @@ StructType *StructType::create(ArrayRef<Type*> Elements) {
return create(Elements[0]->getContext(), Elements, StringRef());
}
-StructType *StructType::create(StringRef Name, Type *type, ...) {
- assert(type && "Cannot create a struct type with no elements with this");
- LLVMContext &Ctx = type->getContext();
- va_list ap;
- SmallVector<llvm::Type*, 8> StructFields;
- va_start(ap, type);
- while (type) {
- StructFields.push_back(type);
- type = va_arg(ap, llvm::Type*);
- }
- auto *Ret = llvm::StructType::create(Ctx, StructFields, Name);
- va_end(ap);
- return Ret;
-}
-
bool StructType::isSized(SmallPtrSetImpl<Type*> *Visited) const {
if ((getSubclassData() & SCDB_IsSized) != 0)
return true;
@@ -514,19 +491,6 @@ StringRef StructType::getName() const {
return ((StringMapEntry<StructType*> *)SymbolTableEntry)->getKey();
}
-void StructType::setBody(Type *type, ...) {
- assert(type && "Cannot create a struct type with no elements with this");
- va_list ap;
- SmallVector<llvm::Type*, 8> StructFields;
- va_start(ap, type);
- while (type) {
- StructFields.push_back(type);
- type = va_arg(ap, llvm::Type*);
- }
- setBody(StructFields);
- va_end(ap);
-}
-
bool StructType::isValidElementType(Type *ElemTy) {
return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
!ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
@@ -546,7 +510,6 @@ StructType *Module::getTypeByName(StringRef Name) const {
return getContext().pImpl->NamedStructTypes.lookup(Name);
}
-
//===----------------------------------------------------------------------===//
// CompositeType Implementation
//===----------------------------------------------------------------------===//
@@ -575,7 +538,7 @@ bool CompositeType::indexValid(const Value *V) const {
if (auto *STy = dyn_cast<StructType>(this)) {
// Structure indexes require (vectors of) 32-bit integer constants. In the
// vector case all of the indices must be equal.
- if (!V->getType()->getScalarType()->isIntegerTy(32))
+ if (!V->getType()->isIntOrIntVectorTy(32))
return false;
const Constant *C = dyn_cast<Constant>(V);
if (C && V->getType()->isVectorTy())
@@ -595,7 +558,6 @@ bool CompositeType::indexValid(unsigned Idx) const {
return true;
}
-
//===----------------------------------------------------------------------===//
// ArrayType Implementation
//===----------------------------------------------------------------------===//
@@ -667,7 +629,6 @@ PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) {
return Entry;
}
-
PointerType::PointerType(Type *E, unsigned AddrSpace)
: Type(E->getContext(), PointerTyID), PointeeTy(E) {
ContainedTys = &PointeeTy;
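
The null-terminated variadic StructType helpers removed above have ArrayRef-based counterparts that remain in the API. A short sketch of the mapping, with invented type and struct names (not code from the patch):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *I8Ptr = Type::getInt8PtrTy(Ctx);

  // Removed form: StructType::get(I32, I8Ptr, nullptr) -- a null-terminated list.
  StructType *Literal = StructType::get(Ctx, {I32, I8Ptr});

  // Removed forms: StructType::create("pair", I32, I8Ptr, nullptr) and the
  // variadic setBody. Create the named struct opaque, then set its body.
  StructType *Named = StructType::create(Ctx, "pair");
  Named->setBody({I32, I8Ptr});

  (void)Literal;
  (void)Named;
  return 0;
}

The named struct is created opaque first because setBody may only be called on a struct whose body has not been set yet.
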
diff --git a/contrib/llvm/lib/IR/TypeFinder.cpp b/contrib/llvm/lib/IR/TypeFinder.cpp
index dc4c1cf..b39678a 100644
--- a/contrib/llvm/lib/IR/TypeFinder.cpp
+++ b/contrib/llvm/lib/IR/TypeFinder.cpp
@@ -1,4 +1,4 @@
-//===-- TypeFinder.cpp - Implement the TypeFinder class -------------------===//
+//===- TypeFinder.cpp - Implement the TypeFinder class --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,10 +14,19 @@
#include "llvm/IR/TypeFinder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <utility>
+
using namespace llvm;
void TypeFinder::run(const Module &M, bool onlyNamed) {
diff --git a/contrib/llvm/lib/IR/User.cpp b/contrib/llvm/lib/IR/User.cpp
index 497b4aa..d460391 100644
--- a/contrib/llvm/lib/IR/User.cpp
+++ b/contrib/llvm/lib/IR/User.cpp
@@ -19,8 +19,6 @@ class BasicBlock;
// User Class
//===----------------------------------------------------------------------===//
-void User::anchor() {}
-
void User::replaceUsesOfWith(Value *From, Value *To) {
if (From == To) return; // Duh what?
@@ -193,12 +191,4 @@ void User::operator delete(void *Usr) {
}
}
-//===----------------------------------------------------------------------===//
-// Operator Class
-//===----------------------------------------------------------------------===//
-
-Operator::~Operator() {
- llvm_unreachable("should never destroy an Operator");
-}
-
} // End llvm namespace
diff --git a/contrib/llvm/lib/IR/Value.cpp b/contrib/llvm/lib/IR/Value.cpp
index 91a999b..51a7d42 100644
--- a/contrib/llvm/lib/IR/Value.cpp
+++ b/contrib/llvm/lib/IR/Value.cpp
@@ -20,6 +20,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
@@ -59,7 +60,7 @@ Value::Value(Type *ty, unsigned scid)
(SubclassID < ConstantFirstVal || SubclassID > ConstantLastVal))
assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
"Cannot create non-first-class values except for constants!");
- static_assert(sizeof(Value) == 3 * sizeof(void *) + 2 * sizeof(unsigned),
+ static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
"Value too big");
}
@@ -89,6 +90,32 @@ Value::~Value() {
destroyValueName();
}
+void Value::deleteValue() {
+ switch (getValueID()) {
+#define HANDLE_VALUE(Name) \
+ case Value::Name##Val: \
+ delete static_cast<Name *>(this); \
+ break;
+#define HANDLE_MEMORY_VALUE(Name) \
+ case Value::Name##Val: \
+ static_cast<DerivedUser *>(this)->DeleteValue( \
+ static_cast<DerivedUser *>(this)); \
+ break;
+#define HANDLE_INSTRUCTION(Name) /* nothing */
+#include "llvm/IR/Value.def"
+
+#define HANDLE_INST(N, OPC, CLASS) \
+ case Value::InstructionVal + Instruction::OPC: \
+ delete static_cast<CLASS *>(this); \
+ break;
+#define HANDLE_USER_INST(N, OPC, CLASS)
+#include "llvm/IR/Instruction.def"
+
+ default:
+ llvm_unreachable("attempting to delete unknown value kind");
+ }
+}
+
void Value::destroyValueName() {
ValueName *Name = getValueName();
if (Name)
@@ -320,7 +347,7 @@ void Value::takeName(Value *V) {
ST->reinsertValue(this);
}
-void Value::assertModuleIsMaterialized() const {
+void Value::assertModuleIsMaterializedImpl() const {
#ifndef NDEBUG
const GlobalValue *GV = dyn_cast<GlobalValue>(this);
if (!GV)
@@ -432,24 +459,26 @@ namespace {
enum PointerStripKind {
PSK_ZeroIndices,
PSK_ZeroIndicesAndAliases,
+ PSK_ZeroIndicesAndAliasesAndBarriers,
PSK_InBoundsConstantIndices,
PSK_InBounds
};
template <PointerStripKind StripKind>
-static Value *stripPointerCastsAndOffsets(Value *V) {
+static const Value *stripPointerCastsAndOffsets(const Value *V) {
if (!V->getType()->isPointerTy())
return V;
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
- SmallPtrSet<Value *, 4> Visited;
+ SmallPtrSet<const Value *, 4> Visited;
Visited.insert(V);
do {
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
case PSK_ZeroIndicesAndAliases:
+ case PSK_ZeroIndicesAndAliasesAndBarriers:
case PSK_ZeroIndices:
if (!GEP->hasAllZeroIndices())
return V;
@@ -467,17 +496,25 @@ static Value *stripPointerCastsAndOffsets(Value *V) {
} else if (Operator::getOpcode(V) == Instruction::BitCast ||
Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
V = cast<Operator>(V)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
if (StripKind == PSK_ZeroIndices || GA->isInterposable())
return V;
V = GA->getAliasee();
} else {
- if (auto CS = CallSite(V))
- if (Value *RV = CS.getReturnedArgOperand()) {
+ if (auto CS = ImmutableCallSite(V)) {
+ if (const Value *RV = CS.getReturnedArgOperand()) {
V = RV;
continue;
}
-
+ // The result of invariant.group.barrier must alias its argument,
+ // but it can't be marked with the returned attribute, which is why it
+ // needs a special case.
+ if (StripKind == PSK_ZeroIndicesAndAliasesAndBarriers &&
+ CS.getIntrinsicID() == Intrinsic::invariant_group_barrier) {
+ V = CS.getArgOperand(0);
+ continue;
+ }
+ }
return V;
}
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
@@ -487,20 +524,26 @@ static Value *stripPointerCastsAndOffsets(Value *V) {
}
} // end anonymous namespace
-Value *Value::stripPointerCasts() {
+const Value *Value::stripPointerCasts() const {
return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
}
-Value *Value::stripPointerCastsNoFollowAliases() {
+const Value *Value::stripPointerCastsNoFollowAliases() const {
return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
}
-Value *Value::stripInBoundsConstantOffsets() {
+const Value *Value::stripInBoundsConstantOffsets() const {
return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}
-Value *Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
- APInt &Offset) {
+const Value *Value::stripPointerCastsAndBarriers() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndBarriers>(
+ this);
+}
+
+const Value *
+Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
+ APInt &Offset) const {
if (!getType()->isPointerTy())
return this;
@@ -510,11 +553,11 @@ Value *Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
- SmallPtrSet<Value *, 4> Visited;
+ SmallPtrSet<const Value *, 4> Visited;
Visited.insert(this);
- Value *V = this;
+ const Value *V = this;
do {
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (auto *GEP = dyn_cast<GEPOperator>(V)) {
if (!GEP->isInBounds())
return V;
APInt GEPOffset(Offset);
@@ -524,11 +567,11 @@ Value *Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
V = cast<Operator>(V)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
V = GA->getAliasee();
} else {
- if (auto CS = CallSite(V))
- if (Value *RV = CS.getReturnedArgOperand()) {
+ if (auto CS = ImmutableCallSite(V))
+ if (const Value *RV = CS.getReturnedArgOperand()) {
V = RV;
continue;
}
@@ -541,7 +584,7 @@ Value *Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
return V;
}
-Value *Value::stripInBoundsOffsets() {
+const Value *Value::stripInBoundsOffsets() const {
return stripPointerCastsAndOffsets<PSK_InBounds>(this);
}
@@ -562,9 +605,9 @@ unsigned Value::getPointerDereferenceableBytes(const DataLayout &DL,
CanBeNull = true;
}
} else if (auto CS = ImmutableCallSite(this)) {
- DerefBytes = CS.getDereferenceableBytes(0);
+ DerefBytes = CS.getDereferenceableBytes(AttributeList::ReturnIndex);
if (DerefBytes == 0) {
- DerefBytes = CS.getDereferenceableOrNullBytes(0);
+ DerefBytes = CS.getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
CanBeNull = true;
}
} else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
@@ -633,7 +676,7 @@ unsigned Value::getPointerAlignment(const DataLayout &DL) const {
Align = DL.getPrefTypeAlignment(AllocatedType);
}
} else if (auto CS = ImmutableCallSite(this))
- Align = CS.getAttributes().getParamAlignment(AttributeSet::ReturnIndex);
+ Align = CS.getAttributes().getRetAlignment();
else if (const LoadInst *LI = dyn_cast<LoadInst>(this))
if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
@@ -643,9 +686,9 @@ unsigned Value::getPointerAlignment(const DataLayout &DL) const {
return Align;
}
-Value *Value::DoPHITranslation(const BasicBlock *CurBB,
- const BasicBlock *PredBB) {
- PHINode *PN = dyn_cast<PHINode>(this);
+const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
+ const BasicBlock *PredBB) const {
+ auto *PN = dyn_cast<PHINode>(this);
if (PN && PN->getParent() == CurBB)
return PN->getIncomingValueForBlock(PredBB);
return this;
@@ -695,7 +738,7 @@ void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
setPrevPtr(List);
if (Next) {
Next->setPrevPtr(&Next);
- assert(V == Next->V && "Added to wrong list?");
+ assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
}
}
@@ -710,14 +753,14 @@ void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
}
void ValueHandleBase::AddToUseList() {
- assert(V && "Null pointer doesn't have a use list!");
+ assert(getValPtr() && "Null pointer doesn't have a use list!");
- LLVMContextImpl *pImpl = V->getContext().pImpl;
+ LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
- if (V->HasValueHandle) {
+ if (getValPtr()->HasValueHandle) {
// If this value already has a ValueHandle, then it must be in the
// ValueHandles map already.
- ValueHandleBase *&Entry = pImpl->ValueHandles[V];
+ ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
assert(Entry && "Value doesn't have any handles?");
AddToExistingUseList(&Entry);
return;
@@ -731,10 +774,10 @@ void ValueHandleBase::AddToUseList() {
DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();
- ValueHandleBase *&Entry = Handles[V];
+ ValueHandleBase *&Entry = Handles[getValPtr()];
assert(!Entry && "Value really did already have handles?");
AddToExistingUseList(&Entry);
- V->HasValueHandle = true;
+ getValPtr()->HasValueHandle = true;
// If reallocation didn't happen or if this was the first insertion, don't
// walk the table.
@@ -746,14 +789,14 @@ void ValueHandleBase::AddToUseList() {
// Okay, reallocation did happen. Fix the Prev Pointers.
for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
E = Handles.end(); I != E; ++I) {
- assert(I->second && I->first == I->second->V &&
+ assert(I->second && I->first == I->second->getValPtr() &&
"List invariant broken!");
I->second->setPrevPtr(&I->second);
}
}
void ValueHandleBase::RemoveFromUseList() {
- assert(V && V->HasValueHandle &&
+ assert(getValPtr() && getValPtr()->HasValueHandle &&
"Pointer doesn't have a use list!");
// Unlink this from its use list.
@@ -770,11 +813,11 @@ void ValueHandleBase::RemoveFromUseList() {
// If the Next pointer was null, then it is possible that this was the last
// ValueHandle watching VP. If so, delete its entry from the ValueHandles
// map.
- LLVMContextImpl *pImpl = V->getContext().pImpl;
+ LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
- Handles.erase(V);
- V->HasValueHandle = false;
+ Handles.erase(getValPtr());
+ getValPtr()->HasValueHandle = false;
}
}
@@ -804,13 +847,10 @@ void ValueHandleBase::ValueIsDeleted(Value *V) {
switch (Entry->getKind()) {
case Assert:
break;
- case Tracking:
- // Mark that this value has been deleted by setting it to an invalid Value
- // pointer.
- Entry->operator=(DenseMapInfo<Value *>::getTombstoneKey());
- break;
case Weak:
- // Weak just goes to null, which will unlink it from the list.
+ case WeakTracking:
+ // WeakTracking and Weak just go to null, which unlinks them
+ // from the list.
Entry->operator=(nullptr);
break;
case Callback:
@@ -858,16 +898,10 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
switch (Entry->getKind()) {
case Assert:
- // Asserting handle does not follow RAUW implicitly.
- break;
- case Tracking:
- // Tracking goes to new value like a WeakVH. Note that this may make it
- // something incompatible with its templated type. We don't want to have a
- // virtual (or inline) interface to handle this though, so instead we make
- // the TrackingVH accessors guarantee that a client never sees this value.
-
- LLVM_FALLTHROUGH;
case Weak:
+ // Asserting and Weak handles do not follow RAUW implicitly.
+ break;
+ case WeakTracking:
 // WeakTracking goes to the new value, which will unlink it from Old's list.
Entry->operator=(New);
break;
@@ -879,18 +913,17 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
}
#ifndef NDEBUG
- // If any new tracking or weak value handles were added while processing the
+ // If any new weak value handles were added while processing the
// list, then complain about it now.
if (Old->HasValueHandle)
for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
switch (Entry->getKind()) {
- case Tracking:
- case Weak:
+ case WeakTracking:
dbgs() << "After RAUW from " << *Old->getType() << " %"
<< Old->getName() << " to " << *New->getType() << " %"
<< New->getName() << "\n";
- llvm_unreachable("A tracking or weak value handle still pointed to the"
- " old value!\n");
+ llvm_unreachable(
+ "A weak tracking value handle still pointed to the old value!\n");
default:
break;
}
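
To make the Weak versus WeakTracking split concrete, here is a hedged sketch of what a client observes after this change. It assumes the ordinary LLVM C++ API for building a throwaway function; every name in it is invented for illustration.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("vh-demo", Ctx);

  // Builds: %old = add i32 %x, 1 ; %new = add i32 %x, 2 ; ret i32 %old
  auto *FTy =
      FunctionType::get(Type::getInt32Ty(Ctx), {Type::getInt32Ty(Ctx)}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);
  Value *X = &*F->arg_begin();
  Instruction *Old = cast<Instruction>(B.CreateAdd(X, B.getInt32(1), "old"));
  Instruction *New = cast<Instruction>(B.CreateAdd(X, B.getInt32(2), "new"));
  B.CreateRet(Old);

  WeakVH Plain(Old);            // nulls out on deletion, ignores RAUW
  WeakTrackingVH Tracking(Old); // nulls out on deletion, follows RAUW

  Old->replaceAllUsesWith(New);
  // Plain still refers to %old; Tracking now refers to %new.

  Old->eraseFromParent();
  // Plain is now null (its value was deleted); Tracking still refers to %new.
  return 0;
}

Only the WeakTrackingVH follows the replaceAllUsesWith; after the erase the WeakVH is nulled while the WeakTrackingVH keeps pointing at %new, matching the ValueIsDeleted and ValueIsRAUWd cases rewritten above.
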
diff --git a/contrib/llvm/lib/IR/ValueSymbolTable.cpp b/contrib/llvm/lib/IR/ValueSymbolTable.cpp
index 8a6a320..ccdabe0 100644
--- a/contrib/llvm/lib/IR/ValueSymbolTable.cpp
+++ b/contrib/llvm/lib/IR/ValueSymbolTable.cpp
@@ -1,4 +1,4 @@
-//===-- ValueSymbolTable.cpp - Implement the ValueSymbolTable class -------===//
+//===- ValueSymbolTable.cpp - Implement the ValueSymbolTable class --------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,6 +15,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -99,13 +100,15 @@ ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
return makeUniqueName(V, UniqueName);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// dump - print out the symbol table
//
LLVM_DUMP_METHOD void ValueSymbolTable::dump() const {
- //DEBUG(dbgs() << "ValueSymbolTable:\n");
+ //dbgs() << "ValueSymbolTable:\n";
for (const auto &I : *this) {
- //DEBUG(dbgs() << " '" << I->getKeyData() << "' = ");
+ //dbgs() << " '" << I->getKeyData() << "' = ";
I.getValue()->dump();
- //DEBUG(dbgs() << "\n");
+ //dbgs() << "\n";
}
}
+#endif
diff --git a/contrib/llvm/lib/IR/ValueTypes.cpp b/contrib/llvm/lib/IR/ValueTypes.cpp
index 2132e16..cf6ee06 100644
--- a/contrib/llvm/lib/IR/ValueTypes.cpp
+++ b/contrib/llvm/lib/IR/ValueTypes.cpp
@@ -142,6 +142,7 @@ std::string EVT::getEVTString() const {
case MVT::Other: return "ch";
case MVT::Glue: return "glue";
case MVT::x86mmx: return "x86mmx";
+ case MVT::v1i1: return "v1i1";
case MVT::v2i1: return "v2i1";
case MVT::v4i1: return "v4i1";
case MVT::v8i1: return "v8i1";
@@ -220,6 +221,7 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::f128: return Type::getFP128Ty(Context);
case MVT::ppcf128: return Type::getPPC_FP128Ty(Context);
case MVT::x86mmx: return Type::getX86_MMXTy(Context);
+ case MVT::v1i1: return VectorType::get(Type::getInt1Ty(Context), 1);
case MVT::v2i1: return VectorType::get(Type::getInt1Ty(Context), 2);
case MVT::v4i1: return VectorType::get(Type::getInt1Ty(Context), 4);
case MVT::v8i1: return VectorType::get(Type::getInt1Ty(Context), 8);
diff --git a/contrib/llvm/lib/IR/Verifier.cpp b/contrib/llvm/lib/IR/Verifier.cpp
index 5855059..454a56a 100644
--- a/contrib/llvm/lib/IR/Verifier.cpp
+++ b/contrib/llvm/lib/IR/Verifier.cpp
@@ -49,7 +49,6 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/ilist.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
@@ -59,6 +58,8 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
@@ -81,10 +82,10 @@
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
@@ -102,7 +103,6 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -267,6 +267,9 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
/// \brief Keep track of the metadata nodes that have been checked already.
SmallPtrSet<const Metadata *, 32> MDNodes;
+ /// Keep track of which DISubprogram is attached to which function.
+ DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
+
/// Track all DICompileUnits visited.
SmallPtrSet<const Metadata *, 2> CUVisited;
@@ -277,6 +280,9 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
/// already.
bool SawFrameEscape;
+ /// Whether the current function has a DISubprogram attached to it.
+ bool HasDebugInfo = false;
+
/// Stores the count of how many objects were passed to llvm.localescape for a
/// given function and the largest index passed to llvm.localrecover.
DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
@@ -297,6 +303,9 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
// constant expressions, we can arrive at a particular user many times.
SmallPtrSet<const Value *, 32> GlobalValueVisited;
+ // Keeps track of duplicate function argument debug info.
+ SmallVector<const DILocalVariable *, 16> DebugFnArgs;
+
TBAAVerifier TBAAVerifyHelper;
void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
@@ -342,6 +351,7 @@ public:
visit(const_cast<Function &>(F));
verifySiblingFuncletUnwinds();
InstsInThisBlock.clear();
+ DebugFnArgs.clear();
LandingPadResultTy = nullptr;
SawFrameEscape = false;
SiblingFuncletInfo.clear();
@@ -379,7 +389,7 @@ public:
verifyCompileUnits();
verifyDeoptimizeCallingConvs();
-
+ DISubprogramAttachments.clear();
return !Broken;
}
@@ -457,6 +467,7 @@ private:
void visitUserOp1(Instruction &I);
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
+ void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
template <class DbgIntrinsicTy>
void visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII);
void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
@@ -481,12 +492,11 @@ private:
void verifyMustTailCall(CallInst &CI);
bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
unsigned ArgNo, std::string &Suffix);
- bool verifyAttributeCount(AttributeSet Attrs, unsigned Params);
- void verifyAttributeTypes(AttributeSet Attrs, unsigned Idx, bool isFunction,
+ bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
+ void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
const Value *V);
- void verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
- bool isReturnValue, const Value *V);
- void verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
+ void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
+ void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
const Value *V);
void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
@@ -497,6 +507,7 @@ private:
void verifySiblingFuncletUnwinds();
void verifyFragmentExpression(const DbgInfoIntrinsic &I);
+ void verifyFnArgs(const DbgInfoIntrinsic &I);
/// Module-level debug info verification...
void verifyCompileUnits();
@@ -652,7 +663,8 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
visitDIGlobalVariableExpression(*GVE);
else
- AssertDI(false, "!dbg attachment of global variable must be a DIGlobalVariableExpression");
+ AssertDI(false, "!dbg attachment of global variable must be a "
+ "DIGlobalVariableExpression");
}
if (!GV.hasInitializer()) {
@@ -822,28 +834,6 @@ static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
-template <class Ty>
-static bool isValidMetadataArrayImpl(const MDTuple &N, bool AllowNull) {
- for (Metadata *MD : N.operands()) {
- if (MD) {
- if (!isa<Ty>(MD))
- return false;
- } else {
- if (!AllowNull)
- return false;
- }
- }
- return true;
-}
-
-template <class Ty> static bool isValidMetadataArray(const MDTuple &N) {
- return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ false);
-}
-
-template <class Ty> static bool isValidMetadataNullArray(const MDTuple &N) {
- return isValidMetadataArrayImpl<Ty>(N, /* AllowNull */ true);
-}
-
void Verifier::visitDILocation(const DILocation &N) {
AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
"location requires a valid scope", &N, N.getRawScope());
@@ -900,6 +890,13 @@ void Verifier::visitDIDerivedType(const DIDerivedType &N) {
AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
N.getRawBaseType());
+
+ if (N.getDWARFAddressSpace()) {
+ AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
+ N.getTag() == dwarf::DW_TAG_reference_type,
+ "DWARF address space only applies to pointer or reference types",
+ &N);
+ }
}
static bool hasConflictingReferenceFlags(unsigned Flags) {
@@ -1024,6 +1021,8 @@ void Verifier::visitDISubprogram(const DISubprogram &N) {
AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
if (auto *F = N.getRawFile())
AssertDI(isa<DIFile>(F), "invalid file", &N, F);
+ else
+ AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
if (auto *T = N.getRawType())
AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
@@ -1054,6 +1053,14 @@ void Verifier::visitDISubprogram(const DISubprogram &N) {
// Subprogram declarations (part of the type hierarchy).
AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
}
+
+ if (auto *RawThrownTypes = N.getRawThrownTypes()) {
+ auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
+ AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
+ for (Metadata *Op : ThrownTypes->operands())
+ AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
+ Op);
+ }
}
void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
@@ -1199,9 +1206,9 @@ void Verifier::visitComdat(const Comdat &C) {
void Verifier::visitModuleIdents(const Module &M) {
const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
- if (!Idents)
+ if (!Idents)
return;
-
+
// llvm.ident takes a list of metadata entry. Each entry has only one string.
// Scan each llvm.ident entry and make sure that this requirement is met.
for (const MDNode *N : Idents->operands()) {
@@ -1211,7 +1218,7 @@ void Verifier::visitModuleIdents(const Module &M) {
("invalid value for llvm.ident metadata entry operand"
"(the operand should be a string)"),
N->getOperand(0));
- }
+ }
}
void Verifier::visitModuleFlags(const Module &M) {
@@ -1275,6 +1282,13 @@ Verifier::visitModuleFlag(const MDNode *Op,
// These behavior types accept any value.
break;
+ case Module::Max: {
+ Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
+ "invalid value for 'max' module flag (expected constant integer)",
+ Op->getOperand(2));
+ break;
+ }
+
case Module::Require: {
// The value should itself be an MDNode with two operands, a flag ID (an
// MDString), and a value.
@@ -1310,73 +1324,90 @@ Verifier::visitModuleFlag(const MDNode *Op,
Assert(Inserted,
"module flag identifiers must be unique (or of 'require' type)", ID);
}
-}
-void Verifier::verifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
- bool isFunction, const Value *V) {
- unsigned Slot = ~0U;
- for (unsigned I = 0, E = Attrs.getNumSlots(); I != E; ++I)
- if (Attrs.getSlotIndex(I) == Idx) {
- Slot = I;
- break;
- }
+ if (ID->getString() == "wchar_size") {
+ ConstantInt *Value
+ = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
+ Assert(Value, "wchar_size metadata requires constant integer argument");
+ }
+
+ if (ID->getString() == "Linker Options") {
+ // If the llvm.linker.options named metadata exists, we assume that the
+ // bitcode reader has upgraded the module flag. Otherwise the flag might
+ // have been created by a client directly.
+ Assert(M.getNamedMetadata("llvm.linker.options"),
+ "'Linker Options' named metadata no longer supported");
+ }
+}
+
+/// Return true if this attribute kind only applies to functions.
+static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
+ switch (Kind) {
+ case Attribute::NoReturn:
+ case Attribute::NoUnwind:
+ case Attribute::NoInline:
+ case Attribute::AlwaysInline:
+ case Attribute::OptimizeForSize:
+ case Attribute::StackProtect:
+ case Attribute::StackProtectReq:
+ case Attribute::StackProtectStrong:
+ case Attribute::SafeStack:
+ case Attribute::NoRedZone:
+ case Attribute::NoImplicitFloat:
+ case Attribute::Naked:
+ case Attribute::InlineHint:
+ case Attribute::StackAlignment:
+ case Attribute::UWTable:
+ case Attribute::NonLazyBind:
+ case Attribute::ReturnsTwice:
+ case Attribute::SanitizeAddress:
+ case Attribute::SanitizeThread:
+ case Attribute::SanitizeMemory:
+ case Attribute::MinSize:
+ case Attribute::NoDuplicate:
+ case Attribute::Builtin:
+ case Attribute::NoBuiltin:
+ case Attribute::Cold:
+ case Attribute::OptimizeNone:
+ case Attribute::JumpTable:
+ case Attribute::Convergent:
+ case Attribute::ArgMemOnly:
+ case Attribute::NoRecurse:
+ case Attribute::InaccessibleMemOnly:
+ case Attribute::InaccessibleMemOrArgMemOnly:
+ case Attribute::AllocSize:
+ case Attribute::Speculatable:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
- assert(Slot != ~0U && "Attribute set inconsistency!");
+/// Return true if this is a function attribute that can also appear on
+/// arguments.
+static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
+ return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
+ Kind == Attribute::ReadNone;
+}
- for (AttributeSet::iterator I = Attrs.begin(Slot), E = Attrs.end(Slot);
- I != E; ++I) {
- if (I->isStringAttribute())
+void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
+ const Value *V) {
+ for (Attribute A : Attrs) {
+ if (A.isStringAttribute())
continue;
- if (I->getKindAsEnum() == Attribute::NoReturn ||
- I->getKindAsEnum() == Attribute::NoUnwind ||
- I->getKindAsEnum() == Attribute::NoInline ||
- I->getKindAsEnum() == Attribute::AlwaysInline ||
- I->getKindAsEnum() == Attribute::OptimizeForSize ||
- I->getKindAsEnum() == Attribute::StackProtect ||
- I->getKindAsEnum() == Attribute::StackProtectReq ||
- I->getKindAsEnum() == Attribute::StackProtectStrong ||
- I->getKindAsEnum() == Attribute::SafeStack ||
- I->getKindAsEnum() == Attribute::NoRedZone ||
- I->getKindAsEnum() == Attribute::NoImplicitFloat ||
- I->getKindAsEnum() == Attribute::Naked ||
- I->getKindAsEnum() == Attribute::InlineHint ||
- I->getKindAsEnum() == Attribute::StackAlignment ||
- I->getKindAsEnum() == Attribute::UWTable ||
- I->getKindAsEnum() == Attribute::NonLazyBind ||
- I->getKindAsEnum() == Attribute::ReturnsTwice ||
- I->getKindAsEnum() == Attribute::SanitizeAddress ||
- I->getKindAsEnum() == Attribute::SanitizeThread ||
- I->getKindAsEnum() == Attribute::SanitizeMemory ||
- I->getKindAsEnum() == Attribute::MinSize ||
- I->getKindAsEnum() == Attribute::NoDuplicate ||
- I->getKindAsEnum() == Attribute::Builtin ||
- I->getKindAsEnum() == Attribute::NoBuiltin ||
- I->getKindAsEnum() == Attribute::Cold ||
- I->getKindAsEnum() == Attribute::OptimizeNone ||
- I->getKindAsEnum() == Attribute::JumpTable ||
- I->getKindAsEnum() == Attribute::Convergent ||
- I->getKindAsEnum() == Attribute::ArgMemOnly ||
- I->getKindAsEnum() == Attribute::NoRecurse ||
- I->getKindAsEnum() == Attribute::InaccessibleMemOnly ||
- I->getKindAsEnum() == Attribute::InaccessibleMemOrArgMemOnly ||
- I->getKindAsEnum() == Attribute::AllocSize) {
- if (!isFunction) {
- CheckFailed("Attribute '" + I->getAsString() +
- "' only applies to functions!", V);
- return;
- }
- } else if (I->getKindAsEnum() == Attribute::ReadOnly ||
- I->getKindAsEnum() == Attribute::WriteOnly ||
- I->getKindAsEnum() == Attribute::ReadNone) {
- if (Idx == 0) {
- CheckFailed("Attribute '" + I->getAsString() +
- "' does not apply to function returns");
+ if (isFuncOnlyAttr(A.getKindAsEnum())) {
+ if (!IsFunction) {
+ CheckFailed("Attribute '" + A.getAsString() +
+ "' only applies to functions!",
+ V);
return;
}
- } else if (isFunction) {
- CheckFailed("Attribute '" + I->getAsString() +
- "' does not apply to functions!", V);
+ } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
+ CheckFailed("Attribute '" + A.getAsString() +
+ "' does not apply to functions!",
+ V);
return;
}
}
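For reference, the position-based API this change moves the verifier to can be queried directly; a minimal sketch (assuming the LLVM headers of this revision), not part of the patch:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    using namespace llvm;

    // Ask whether argument ArgNo of F carries 'byval'. AttributeList keeps one
    // AttributeSet per position (function, return value, each parameter), so
    // no slot/index bookkeeping is needed any more.
    static bool paramIsByVal(const Function &F, unsigned ArgNo) {
      AttributeList AL = F.getAttributes();
      AttributeSet ParamAttrs = AL.getParamAttributes(ArgNo);
      return ParamAttrs.hasAttribute(Attribute::ByVal);
    }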
@@ -1384,106 +1415,91 @@ void Verifier::verifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
// VerifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type. The value V is printed in error messages.
-void Verifier::verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
- bool isReturnValue, const Value *V) {
- if (!Attrs.hasAttributes(Idx))
+void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
+ const Value *V) {
+ if (!Attrs.hasAttributes())
return;
- verifyAttributeTypes(Attrs, Idx, false, V);
-
- if (isReturnValue)
- Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
- !Attrs.hasAttribute(Idx, Attribute::Nest) &&
- !Attrs.hasAttribute(Idx, Attribute::StructRet) &&
- !Attrs.hasAttribute(Idx, Attribute::NoCapture) &&
- !Attrs.hasAttribute(Idx, Attribute::Returned) &&
- !Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
- !Attrs.hasAttribute(Idx, Attribute::SwiftSelf) &&
- !Attrs.hasAttribute(Idx, Attribute::SwiftError),
- "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
- "'returned', 'swiftself', and 'swifterror' do not apply to return "
- "values!",
- V);
+ verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
// Check for mutually incompatible attributes. Only inreg is compatible with
// sret.
unsigned AttrCount = 0;
- AttrCount += Attrs.hasAttribute(Idx, Attribute::ByVal);
- AttrCount += Attrs.hasAttribute(Idx, Attribute::InAlloca);
- AttrCount += Attrs.hasAttribute(Idx, Attribute::StructRet) ||
- Attrs.hasAttribute(Idx, Attribute::InReg);
- AttrCount += Attrs.hasAttribute(Idx, Attribute::Nest);
+ AttrCount += Attrs.hasAttribute(Attribute::ByVal);
+ AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
+ AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
+ Attrs.hasAttribute(Attribute::InReg);
+ AttrCount += Attrs.hasAttribute(Attribute::Nest);
Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
"and 'sret' are incompatible!",
V);
- Assert(!(Attrs.hasAttribute(Idx, Attribute::InAlloca) &&
- Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
+ Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
+ Attrs.hasAttribute(Attribute::ReadOnly)),
"Attributes "
"'inalloca and readonly' are incompatible!",
V);
- Assert(!(Attrs.hasAttribute(Idx, Attribute::StructRet) &&
- Attrs.hasAttribute(Idx, Attribute::Returned)),
+ Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
+ Attrs.hasAttribute(Attribute::Returned)),
"Attributes "
"'sret and returned' are incompatible!",
V);
- Assert(!(Attrs.hasAttribute(Idx, Attribute::ZExt) &&
- Attrs.hasAttribute(Idx, Attribute::SExt)),
+ Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
+ Attrs.hasAttribute(Attribute::SExt)),
"Attributes "
"'zeroext and signext' are incompatible!",
V);
- Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) &&
- Attrs.hasAttribute(Idx, Attribute::ReadOnly)),
+ Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
+ Attrs.hasAttribute(Attribute::ReadOnly)),
"Attributes "
"'readnone and readonly' are incompatible!",
V);
- Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadNone) &&
- Attrs.hasAttribute(Idx, Attribute::WriteOnly)),
+ Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
+ Attrs.hasAttribute(Attribute::WriteOnly)),
"Attributes "
"'readnone and writeonly' are incompatible!",
V);
- Assert(!(Attrs.hasAttribute(Idx, Attribute::ReadOnly) &&
- Attrs.hasAttribute(Idx, Attribute::WriteOnly)),
+ Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
+ Attrs.hasAttribute(Attribute::WriteOnly)),
"Attributes "
"'readonly and writeonly' are incompatible!",
V);
- Assert(!(Attrs.hasAttribute(Idx, Attribute::NoInline) &&
- Attrs.hasAttribute(Idx, Attribute::AlwaysInline)),
+ Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
+ Attrs.hasAttribute(Attribute::AlwaysInline)),
"Attributes "
"'noinline and alwaysinline' are incompatible!",
V);
- Assert(
- !AttrBuilder(Attrs, Idx).overlaps(AttributeFuncs::typeIncompatible(Ty)),
- "Wrong types for attribute: " +
- AttributeSet::get(Context, Idx, AttributeFuncs::typeIncompatible(Ty))
- .getAsString(Idx),
- V);
+ AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
+ Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
+ "Wrong types for attribute: " +
+ AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
+ V);
if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
SmallPtrSet<Type*, 4> Visited;
if (!PTy->getElementType()->isSized(&Visited)) {
- Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
- !Attrs.hasAttribute(Idx, Attribute::InAlloca),
+ Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
+ !Attrs.hasAttribute(Attribute::InAlloca),
"Attributes 'byval' and 'inalloca' do not support unsized types!",
V);
}
if (!isa<PointerType>(PTy->getElementType()))
- Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError),
+ Assert(!Attrs.hasAttribute(Attribute::SwiftError),
"Attribute 'swifterror' only applies to parameters "
"with pointer to pointer type!",
V);
} else {
- Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal),
+ Assert(!Attrs.hasAttribute(Attribute::ByVal),
"Attribute 'byval' only applies to parameters with pointer type!",
V);
- Assert(!Attrs.hasAttribute(Idx, Attribute::SwiftError),
+ Assert(!Attrs.hasAttribute(Attribute::SwiftError),
"Attribute 'swifterror' only applies to parameters "
"with pointer type!",
V);
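The type/attribute compatibility test used above can be stated in isolation; a minimal sketch under the same assumptions, not part of the patch:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Type.h"

    using namespace llvm;

    // True if none of the attributes in Attrs clash with the value's type,
    // e.g. 'byval' on a non-pointer parameter would make this return false.
    static bool attrsFitType(AttributeSet Attrs, Type *Ty) {
      AttrBuilder Incompatible = AttributeFuncs::typeIncompatible(Ty);
      return !AttrBuilder(Attrs).overlaps(Incompatible);
    }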
@@ -1492,7 +1508,7 @@ void Verifier::verifyParameterAttrs(AttributeSet Attrs, unsigned Idx, Type *Ty,
// Check parameter attributes against a function type.
// The value V is printed in error messages.
-void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
+void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
const Value *V) {
if (Attrs.isEmpty())
return;
@@ -1503,122 +1519,124 @@ void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeSet Attrs,
bool SawSwiftSelf = false;
bool SawSwiftError = false;
- for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
- unsigned Idx = Attrs.getSlotIndex(i);
-
- Type *Ty;
- if (Idx == 0)
- Ty = FT->getReturnType();
- else if (Idx-1 < FT->getNumParams())
- Ty = FT->getParamType(Idx-1);
- else
- break; // VarArgs attributes, verified elsewhere.
+ // Verify return value attributes.
+ AttributeSet RetAttrs = Attrs.getRetAttributes();
+ Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
+ !RetAttrs.hasAttribute(Attribute::Nest) &&
+ !RetAttrs.hasAttribute(Attribute::StructRet) &&
+ !RetAttrs.hasAttribute(Attribute::NoCapture) &&
+ !RetAttrs.hasAttribute(Attribute::Returned) &&
+ !RetAttrs.hasAttribute(Attribute::InAlloca) &&
+ !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
+ !RetAttrs.hasAttribute(Attribute::SwiftError)),
+ "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
+ "'returned', 'swiftself', and 'swifterror' do not apply to return "
+ "values!",
+ V);
+ Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
+ !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
+ !RetAttrs.hasAttribute(Attribute::ReadNone)),
+ "Attribute '" + RetAttrs.getAsString() +
+ "' does not apply to function returns",
+ V);
+ verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
- verifyParameterAttrs(Attrs, Idx, Ty, Idx == 0, V);
+ // Verify parameter attributes.
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ Type *Ty = FT->getParamType(i);
+ AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
- if (Idx == 0)
- continue;
+ verifyParameterAttrs(ArgAttrs, Ty, V);
- if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
+ if (ArgAttrs.hasAttribute(Attribute::Nest)) {
Assert(!SawNest, "More than one parameter has attribute nest!", V);
SawNest = true;
}
- if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
+ if (ArgAttrs.hasAttribute(Attribute::Returned)) {
Assert(!SawReturned, "More than one parameter has attribute returned!",
V);
Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
- "Incompatible "
- "argument and return types for 'returned' attribute",
+ "Incompatible argument and return types for 'returned' attribute",
V);
SawReturned = true;
}
- if (Attrs.hasAttribute(Idx, Attribute::StructRet)) {
+ if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
- Assert(Idx == 1 || Idx == 2,
+ Assert(i == 0 || i == 1,
"Attribute 'sret' is not on first or second parameter!", V);
SawSRet = true;
}
- if (Attrs.hasAttribute(Idx, Attribute::SwiftSelf)) {
+ if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
SawSwiftSelf = true;
}
- if (Attrs.hasAttribute(Idx, Attribute::SwiftError)) {
+ if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
V);
SawSwiftError = true;
}
- if (Attrs.hasAttribute(Idx, Attribute::InAlloca)) {
- Assert(Idx == FT->getNumParams(), "inalloca isn't on the last parameter!",
- V);
+ if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
+ Assert(i == FT->getNumParams() - 1,
+ "inalloca isn't on the last parameter!", V);
}
}
- if (!Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
return;
- verifyAttributeTypes(Attrs, AttributeSet::FunctionIndex, true, V);
+ verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
- Assert(
- !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
- Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly)),
- "Attributes 'readnone and readonly' are incompatible!", V);
+ Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
+ Attrs.hasFnAttribute(Attribute::ReadOnly)),
+ "Attributes 'readnone and readonly' are incompatible!", V);
- Assert(
- !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
- Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::WriteOnly)),
- "Attributes 'readnone and writeonly' are incompatible!", V);
+ Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
+ Attrs.hasFnAttribute(Attribute::WriteOnly)),
+ "Attributes 'readnone and writeonly' are incompatible!", V);
- Assert(
- !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly) &&
- Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::WriteOnly)),
- "Attributes 'readonly and writeonly' are incompatible!", V);
+ Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
+ Attrs.hasFnAttribute(Attribute::WriteOnly)),
+ "Attributes 'readonly and writeonly' are incompatible!", V);
- Assert(
- !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
- Attrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::InaccessibleMemOrArgMemOnly)),
- "Attributes 'readnone and inaccessiblemem_or_argmemonly' are incompatible!", V);
+ Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
+ Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
+ "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
+ "incompatible!",
+ V);
- Assert(
- !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone) &&
- Attrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::InaccessibleMemOnly)),
- "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
+ Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
+ Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
+ "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
- Assert(
- !(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline) &&
- Attrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::AlwaysInline)),
- "Attributes 'noinline and alwaysinline' are incompatible!", V);
-
- if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeNone)) {
- Assert(Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoInline),
+ Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
+ Attrs.hasFnAttribute(Attribute::AlwaysInline)),
+ "Attributes 'noinline and alwaysinline' are incompatible!", V);
+
+ if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
+ Assert(Attrs.hasFnAttribute(Attribute::NoInline),
"Attribute 'optnone' requires 'noinline'!", V);
- Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize),
+ Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
"Attributes 'optsize and optnone' are incompatible!", V);
- Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize),
+ Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
"Attributes 'minsize and optnone' are incompatible!", V);
}
- if (Attrs.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::JumpTable)) {
+ if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
const GlobalValue *GV = cast<GlobalValue>(V);
Assert(GV->hasGlobalUnnamedAddr(),
"Attribute 'jumptable' requires 'unnamed_addr'", V);
}
- if (Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::AllocSize)) {
+ if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
std::pair<unsigned, Optional<unsigned>> Args =
- Attrs.getAllocSizeArgs(AttributeSet::FunctionIndex);
+ Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
if (ParamNo >= FT->getNumParams()) {
@@ -1649,8 +1667,8 @@ void Verifier::verifyFunctionMetadata(
for (const auto &Pair : MDs) {
if (Pair.first == LLVMContext::MD_prof) {
MDNode *MD = Pair.second;
- Assert(MD->getNumOperands() == 2,
- "!prof annotations should have exactly 2 operands", MD);
+ Assert(MD->getNumOperands() >= 2,
+ "!prof annotations should have no less than 2 operands", MD);
// Check first operand.
Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
@@ -1725,18 +1743,10 @@ void Verifier::visitConstantExpr(const ConstantExpr *CE) {
}
}
-bool Verifier::verifyAttributeCount(AttributeSet Attrs, unsigned Params) {
- if (Attrs.getNumSlots() == 0)
- return true;
-
- unsigned LastSlot = Attrs.getNumSlots() - 1;
- unsigned LastIndex = Attrs.getSlotIndex(LastSlot);
- if (LastIndex <= Params
- || (LastIndex == AttributeSet::FunctionIndex
- && (LastSlot == 0 || Attrs.getSlotIndex(LastSlot - 1) <= Params)))
- return true;
-
- return false;
+bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
+ // There shouldn't be more attribute sets than there are parameters plus the
+ // function and return value.
+ return Attrs.getNumAttrSets() <= Params + 2;
}
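A worked example of the counting rule: a two-parameter function may carry at most four attribute sets, one for the function itself, one for the return value, and one per parameter. A minimal standalone sketch of the same check:

    #include "llvm/IR/Attributes.h"

    using namespace llvm;

    static bool attrCountIsSane(AttributeList Attrs, unsigned NumParams) {
      return Attrs.getNumAttrSets() <= NumParams + 2; // fn + ret + params
    }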
/// Verify that statepoint intrinsic is well formed.
@@ -1844,7 +1854,7 @@ void Verifier::verifyStatepoint(ImmutableCallSite CS) {
Assert(ExpectedNumArgs <= (int)CS.arg_size(),
"gc.statepoint too few arguments according to length fields", &CI);
- // Check that the only uses of this gc.statepoint are gc.result or
+ // Check that the only uses of this gc.statepoint are gc.result or
// gc.relocate calls which are tied to this statepoint and thus part
// of the same statepoint sequence
for (const User *U : CI.users()) {
@@ -1963,7 +1973,7 @@ void Verifier::visitFunction(const Function &F) {
Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
"Invalid struct return type!", &F);
- AttributeSet Attrs = F.getAttributes();
+ AttributeList Attrs = F.getAttributes();
Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
"Attribute after last parameter!", &F);
@@ -1974,7 +1984,7 @@ void Verifier::visitFunction(const Function &F) {
// On function declarations/definitions, we do not support the builtin
// attribute. We do not check this in VerifyFunctionAttrs since that is
// checking for Attributes that can/can not ever be on functions.
- Assert(!Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::Builtin),
+ Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
"Attribute 'builtin' can only be applied to a callsite.", &F);
// Check that this function meets the restrictions on this calling convention.
@@ -1984,6 +1994,19 @@ void Verifier::visitFunction(const Function &F) {
default:
case CallingConv::C:
break;
+ case CallingConv::AMDGPU_KERNEL:
+ case CallingConv::SPIR_KERNEL:
+ Assert(F.getReturnType()->isVoidTy(),
+ "Calling convention requires void return type", &F);
+ LLVM_FALLTHROUGH;
+ case CallingConv::AMDGPU_VS:
+ case CallingConv::AMDGPU_HS:
+ case CallingConv::AMDGPU_GS:
+ case CallingConv::AMDGPU_PS:
+ case CallingConv::AMDGPU_CS:
+ Assert(!F.hasStructRetAttr(),
+ "Calling convention does not allow sret", &F);
+ LLVM_FALLTHROUGH;
case CallingConv::Fast:
case CallingConv::Cold:
case CallingConv::Intel_OCL_BI:
@@ -2014,7 +2037,7 @@ void Verifier::visitFunction(const Function &F) {
}
// Check that swifterror argument is only used by loads and stores.
- if (Attrs.hasAttribute(i+1, Attribute::SwiftError)) {
+ if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
verifySwiftErrorValue(&Arg);
}
++i;
@@ -2078,13 +2101,19 @@ void Verifier::visitFunction(const Function &F) {
switch (I.first) {
default:
break;
- case LLVMContext::MD_dbg:
+ case LLVMContext::MD_dbg: {
++NumDebugAttachments;
AssertDI(NumDebugAttachments == 1,
"function must have a single !dbg attachment", &F, I.second);
AssertDI(isa<DISubprogram>(I.second),
"function !dbg attachment must be a subprogram", &F, I.second);
+ auto *SP = cast<DISubprogram>(I.second);
+ const Function *&AttachedTo = DISubprogramAttachments[SP];
+ AssertDI(!AttachedTo || AttachedTo == &F,
+ "DISubprogram attached to more than one function", SP, &F);
+ AttachedTo = &F;
break;
+ }
case LLVMContext::MD_prof:
++NumProfAttachments;
Assert(NumProfAttachments == 1,
@@ -2113,11 +2142,10 @@ void Verifier::visitFunction(const Function &F) {
"Function is marked as dllimport, but not external.", &F);
auto *N = F.getSubprogram();
- if (!N)
+ HasDebugInfo = (N != nullptr);
+ if (!HasDebugInfo)
return;
- visitDISubprogram(*N);
-
// Check that all !dbg attachments lead to back to N (or, at least, another
// subprogram that describes the same function).
//
@@ -2476,15 +2504,13 @@ void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert(SrcTy->getScalarType()->isPointerTy(),
- "PtrToInt source must be pointer", &I);
+ Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
Assert(!DL.isNonIntegralPointerType(PTy),
"ptrtoint not supported for non-integral pointers");
- Assert(DestTy->getScalarType()->isIntegerTy(),
- "PtrToInt result must be integral", &I);
+ Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
&I);
@@ -2503,10 +2529,9 @@ void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert(SrcTy->getScalarType()->isIntegerTy(),
+ Assert(SrcTy->isIntOrIntVectorTy(),
"IntToPtr source must be an integral", &I);
- Assert(DestTy->getScalarType()->isPointerTy(),
- "IntToPtr result must be a pointer", &I);
+ Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
Assert(!DL.isNonIntegralPointerType(PTy),
@@ -2601,11 +2626,20 @@ void Verifier::verifyCallSite(CallSite CS) {
"Call parameter type does not match function signature!",
CS.getArgument(i), FTy->getParamType(i), I);
- AttributeSet Attrs = CS.getAttributes();
+ AttributeList Attrs = CS.getAttributes();
Assert(verifyAttributeCount(Attrs, CS.arg_size()),
"Attribute after last parameter!", I);
+ if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
+ // Don't allow speculatable on call sites, unless the underlying function
+ // declaration is also speculatable.
+ Function *Callee
+ = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+ Assert(Callee && Callee->isSpeculatable(),
+ "speculatable attribute may not apply to call sites", I);
+ }
+
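The rule added here, stated in isolation: a call site may be marked 'speculatable' only when the called value resolves, through pointer casts, to a Function declaration that is itself speculatable. A minimal sketch, not part of the patch:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/CallSite.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    static bool speculatableCallIsValid(CallSite CS) {
      if (!CS.getAttributes().hasAttribute(AttributeList::FunctionIndex,
                                           Attribute::Speculatable))
        return true; // nothing to check
      auto *Callee =
          dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
      return Callee && Callee->isSpeculatable();
    }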
// Verify call attributes.
verifyFunctionAttrs(FTy, Attrs, I);
@@ -2623,7 +2657,7 @@ void Verifier::verifyCallSite(CallSite CS) {
// make sure the underlying alloca/parameter it comes from has a swifterror as
// well.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- if (CS.paramHasAttr(i+1, Attribute::SwiftError)) {
+ if (CS.paramHasAttr(i, Attribute::SwiftError)) {
Value *SwiftErrorArg = CS.getArgument(i);
if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
Assert(AI->isSwiftError(),
@@ -2641,24 +2675,25 @@ void Verifier::verifyCallSite(CallSite CS) {
bool SawNest = false;
bool SawReturned = false;
- for (unsigned Idx = 1; Idx < 1 + FTy->getNumParams(); ++Idx) {
- if (Attrs.hasAttribute(Idx, Attribute::Nest))
+ for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
+ if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
SawNest = true;
- if (Attrs.hasAttribute(Idx, Attribute::Returned))
+ if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
SawReturned = true;
}
// Check attributes on the varargs part.
- for (unsigned Idx = 1 + FTy->getNumParams(); Idx <= CS.arg_size(); ++Idx) {
- Type *Ty = CS.getArgument(Idx-1)->getType();
- verifyParameterAttrs(Attrs, Idx, Ty, false, I);
+ for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
+ Type *Ty = CS.getArgument(Idx)->getType();
+ AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
+ verifyParameterAttrs(ArgAttrs, Ty, I);
- if (Attrs.hasAttribute(Idx, Attribute::Nest)) {
+ if (ArgAttrs.hasAttribute(Attribute::Nest)) {
Assert(!SawNest, "More than one parameter has attribute nest!", I);
SawNest = true;
}
- if (Attrs.hasAttribute(Idx, Attribute::Returned)) {
+ if (ArgAttrs.hasAttribute(Attribute::Returned)) {
Assert(!SawReturned, "More than one parameter has attribute returned!",
I);
Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
@@ -2668,11 +2703,12 @@ void Verifier::verifyCallSite(CallSite CS) {
SawReturned = true;
}
- Assert(!Attrs.hasAttribute(Idx, Attribute::StructRet),
+ Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
"Attribute 'sret' cannot be used for vararg call arguments!", I);
- if (Attrs.hasAttribute(Idx, Attribute::InAlloca))
- Assert(Idx == CS.arg_size(), "inalloca isn't on the last argument!", I);
+ if (ArgAttrs.hasAttribute(Attribute::InAlloca))
+ Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
+ I);
}
}
@@ -2726,9 +2762,9 @@ void Verifier::verifyCallSite(CallSite CS) {
// do so causes assertion failures when the inliner sets up inline scope info.
if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
CS.getCalledFunction()->getSubprogram())
- Assert(I->getDebugLoc(), "inlinable function call in a function with debug "
- "info must have a !dbg location",
- I);
+ AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
+ "debug info must have a !dbg location",
+ I);
visitInstruction(*I);
}
@@ -2745,18 +2781,18 @@ static bool isTypeCongruent(Type *L, Type *R) {
return PL->getAddressSpace() == PR->getAddressSpace();
}
-static AttrBuilder getParameterABIAttributes(int I, AttributeSet Attrs) {
+static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
static const Attribute::AttrKind ABIAttrs[] = {
Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
Attribute::SwiftError};
AttrBuilder Copy;
for (auto AK : ABIAttrs) {
- if (Attrs.hasAttribute(I + 1, AK))
+ if (Attrs.hasParamAttribute(I, AK))
Copy.addAttribute(AK);
}
- if (Attrs.hasAttribute(I + 1, Attribute::Alignment))
- Copy.addAlignmentAttr(Attrs.getParamAlignment(I + 1));
+ if (Attrs.hasParamAttribute(I, Attribute::Alignment))
+ Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
return Copy;
}
@@ -2787,8 +2823,8 @@ void Verifier::verifyMustTailCall(CallInst &CI) {
// - All ABI-impacting function attributes, such as sret, byval, inreg,
// returned, and inalloca, must match.
- AttributeSet CallerAttrs = F->getAttributes();
- AttributeSet CalleeAttrs = CI.getAttributes();
+ AttributeList CallerAttrs = F->getAttributes();
+ AttributeList CalleeAttrs = CI.getAttributes();
for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
@@ -2913,11 +2949,10 @@ void Verifier::visitICmpInst(ICmpInst &IC) {
Assert(Op0Ty == Op1Ty,
"Both operands to ICmp instruction are not of the same type!", &IC);
// Check that the operands are the right type
- Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(),
+ Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
"Invalid operand types for ICmp instruction", &IC);
// Check that the predicate is valid.
- Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
- IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
+ Assert(IC.isIntPredicate(),
"Invalid predicate in ICmp instruction!", &IC);
visitInstruction(IC);
@@ -2933,8 +2968,7 @@ void Verifier::visitFCmpInst(FCmpInst &FC) {
Assert(Op0Ty->isFPOrFPVectorTy(),
"Invalid operand types for FCmp instruction", &FC);
// Check that the predicate is valid.
- Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
- FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
+ Assert(FC.isFPPredicate(),
"Invalid predicate in FCmp instruction!", &FC);
visitInstruction(FC);
@@ -2972,7 +3006,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
- Assert(GEP.getType()->getScalarType()->isPointerTy() &&
+ Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
GEP.getResultElementType() == ElTy,
"GEP is not of right type for indices!", &GEP, ElTy);
@@ -2988,7 +3022,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
unsigned IndexWidth = IndexTy->getVectorNumElements();
Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
}
- Assert(IndexTy->getScalarType()->isIntegerTy(),
+ Assert(IndexTy->isIntOrIntVectorTy(),
"All GEP indices should be of integer type");
}
}
@@ -3074,7 +3108,7 @@ void Verifier::visitLoadInst(LoadInst &LI) {
ElTy, &LI);
checkAtomicMemAccessSize(ElTy, &LI);
} else {
- Assert(LI.getSynchScope() == CrossThread,
+ Assert(LI.getSyncScopeID() == SyncScope::System,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
}
@@ -3103,7 +3137,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
ElTy, &SI);
checkAtomicMemAccessSize(ElTy, &SI);
} else {
- Assert(SI.getSynchScope() == CrossThread,
+ Assert(SI.getSyncScopeID() == SyncScope::System,
"Non-atomic store cannot have SynchronizationScope specified", &SI);
}
visitInstruction(SI);
@@ -3116,7 +3150,7 @@ void Verifier::verifySwiftErrorCallSite(CallSite CS,
for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I, ++Idx) {
if (*I == SwiftErrorVal) {
- Assert(CS.paramHasAttr(Idx+1, Attribute::SwiftError),
+ Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
"swifterror value when used in a callsite should be marked "
"with swifterror attribute",
SwiftErrorVal, CS);
@@ -3148,8 +3182,9 @@ void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
void Verifier::visitAllocaInst(AllocaInst &AI) {
SmallPtrSet<Type*, 4> Visited;
PointerType *PTy = AI.getType();
- Assert(PTy->getAddressSpace() == 0,
- "Allocation instruction pointer not in the generic address space!",
+ // TODO: Relax this restriction?
+ Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
+ "Allocation instruction pointer not in the stack address space!",
&AI);
Assert(AI.getAllocatedType()->isSized(&Visited),
"Cannot allocate unsized type", &AI);
@@ -3901,7 +3936,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
- for (Value *V : CS.args())
+ for (Value *V : CS.args())
if (auto *MD = dyn_cast<MetadataAsValue>(V))
visitMetadataAsValue(*MD, CS.getCaller());
@@ -3929,6 +3964,26 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
"constant int",
CS);
break;
+ case Intrinsic::experimental_constrained_fadd:
+ case Intrinsic::experimental_constrained_fsub:
+ case Intrinsic::experimental_constrained_fmul:
+ case Intrinsic::experimental_constrained_fdiv:
+ case Intrinsic::experimental_constrained_frem:
+ case Intrinsic::experimental_constrained_sqrt:
+ case Intrinsic::experimental_constrained_pow:
+ case Intrinsic::experimental_constrained_powi:
+ case Intrinsic::experimental_constrained_sin:
+ case Intrinsic::experimental_constrained_cos:
+ case Intrinsic::experimental_constrained_exp:
+ case Intrinsic::experimental_constrained_exp2:
+ case Intrinsic::experimental_constrained_log:
+ case Intrinsic::experimental_constrained_log10:
+ case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_rint:
+ case Intrinsic::experimental_constrained_nearbyint:
+ visitConstrainedFPIntrinsic(
+ cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
+ break;
case Intrinsic::dbg_declare: // llvm.dbg.declare
Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
"invalid llvm.dbg.declare intrinsic call 1", CS);
@@ -3952,10 +4007,16 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
CS);
break;
}
- case Intrinsic::memcpy_element_atomic: {
- ConstantInt *ElementSizeCI = dyn_cast<ConstantInt>(CS.getArgOperand(3));
- Assert(ElementSizeCI, "element size of the element-wise atomic memory "
- "intrinsic must be a constant int",
+ case Intrinsic::memcpy_element_unordered_atomic: {
+ const ElementUnorderedAtomicMemCpyInst *MI =
+ cast<ElementUnorderedAtomicMemCpyInst>(CS.getInstruction());
+
+ ConstantInt *ElementSizeCI =
+ dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
+ Assert(ElementSizeCI,
+ "element size of the element-wise unordered atomic memory "
+ "intrinsic must be a constant int",
CS);
const APInt &ElementSizeVal = ElementSizeCI->getValue();
Assert(ElementSizeVal.isPowerOf2(),
@@ -3963,19 +4024,91 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
"must be a power of 2",
CS);
+ if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
+ uint64_t Length = LengthCI->getZExtValue();
+ uint64_t ElementSize = MI->getElementSizeInBytes();
+ Assert((Length % ElementSize) == 0,
+ "constant length must be a multiple of the element size in the "
+ "element-wise atomic memory intrinsic",
+ CS);
+ }
+
auto IsValidAlignment = [&](uint64_t Alignment) {
return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
};
-
- uint64_t DstAlignment = CS.getParamAlignment(1),
- SrcAlignment = CS.getParamAlignment(2);
-
+ uint64_t DstAlignment = CS.getParamAlignment(0),
+ SrcAlignment = CS.getParamAlignment(1);
Assert(IsValidAlignment(DstAlignment),
- "incorrect alignment of the destination argument",
+ "incorrect alignment of the destination argument", CS);
+ Assert(IsValidAlignment(SrcAlignment),
+ "incorrect alignment of the source argument", CS);
+ break;
+ }
+ case Intrinsic::memmove_element_unordered_atomic: {
+ auto *MI = cast<ElementUnorderedAtomicMemMoveInst>(CS.getInstruction());
+
+ ConstantInt *ElementSizeCI =
+ dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
+ Assert(ElementSizeCI,
+ "element size of the element-wise unordered atomic memory "
+ "intrinsic must be a constant int",
+ CS);
+ const APInt &ElementSizeVal = ElementSizeCI->getValue();
+ Assert(ElementSizeVal.isPowerOf2(),
+ "element size of the element-wise atomic memory intrinsic "
+ "must be a power of 2",
CS);
+
+ if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
+ uint64_t Length = LengthCI->getZExtValue();
+ uint64_t ElementSize = MI->getElementSizeInBytes();
+ Assert((Length % ElementSize) == 0,
+ "constant length must be a multiple of the element size in the "
+ "element-wise atomic memory intrinsic",
+ CS);
+ }
+
+ auto IsValidAlignment = [&](uint64_t Alignment) {
+ return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
+ };
+ uint64_t DstAlignment = CS.getParamAlignment(0),
+ SrcAlignment = CS.getParamAlignment(1);
+ Assert(IsValidAlignment(DstAlignment),
+ "incorrect alignment of the destination argument", CS);
Assert(IsValidAlignment(SrcAlignment),
- "incorrect alignment of the source argument",
+ "incorrect alignment of the source argument", CS);
+ break;
+ }
+ case Intrinsic::memset_element_unordered_atomic: {
+ auto *MI = cast<ElementUnorderedAtomicMemSetInst>(CS.getInstruction());
+
+ ConstantInt *ElementSizeCI =
+ dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
+ Assert(ElementSizeCI,
+ "element size of the element-wise unordered atomic memory "
+ "intrinsic must be a constant int",
+ CS);
+ const APInt &ElementSizeVal = ElementSizeCI->getValue();
+ Assert(ElementSizeVal.isPowerOf2(),
+ "element size of the element-wise atomic memory intrinsic "
+ "must be a power of 2",
CS);
+
+ if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
+ uint64_t Length = LengthCI->getZExtValue();
+ uint64_t ElementSize = MI->getElementSizeInBytes();
+ Assert((Length % ElementSize) == 0,
+ "constant length must be a multiple of the element size in the "
+ "element-wise atomic memory intrinsic",
+ CS);
+ }
+
+ auto IsValidAlignment = [&](uint64_t Alignment) {
+ return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
+ };
+ uint64_t DstAlignment = CS.getParamAlignment(0);
+ Assert(IsValidAlignment(DstAlignment),
+ "incorrect alignment of the destination argument", CS);
break;
}
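The three element-wise atomic cases above enforce the same invariants; a minimal sketch that factors them into one helper (illustrative only, the verifier keeps the checks inline):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/MathExtras.h"

    using namespace llvm;

    // ElementSize must be a power of two, a constant Length must be a multiple
    // of it, and every known pointer alignment must be at least ElementSize.
    // Assumes the element size fits in 64 bits.
    static bool elementWiseAtomicArgsOK(const APInt &ElementSize,
                                        uint64_t Length, uint64_t DstAlign,
                                        uint64_t SrcAlign) {
      if (!ElementSize.isPowerOf2())
        return false;
      if (Length % ElementSize.getZExtValue() != 0)
        return false;
      auto AlignOK = [&](uint64_t A) {
        return isPowerOf2_64(A) && ElementSize.ule(A);
      };
      return AlignOK(DstAlign) && AlignOK(SrcAlign);
    }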
case Intrinsic::gcroot:
@@ -4182,7 +4315,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
// relocated pointer. It can be casted to the correct type later if it's
// desired. However, they must have the same address space and 'vectorness'
GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
- Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(),
+ Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
"gc.relocate: relocated value must be a gc pointer", CS);
auto ResultType = CS.getType();
@@ -4205,7 +4338,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
}
case Intrinsic::masked_load: {
Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
-
+
Value *Ptr = CS.getArgOperand(0);
//Value *Alignment = CS.getArgOperand(1);
Value *Mask = CS.getArgOperand(2);
@@ -4215,12 +4348,12 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
// DataTy is the overloaded type
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
- Assert(DataTy == CS.getType(),
+ Assert(DataTy == CS.getType(),
"masked_load: return must match pointer type", CS);
Assert(PassThru->getType() == DataTy,
"masked_load: pass through and data type must match", CS);
Assert(Mask->getType()->getVectorNumElements() ==
- DataTy->getVectorNumElements(),
+ DataTy->getVectorNumElements(),
"masked_load: vector mask must be same length as data", CS);
break;
}
@@ -4234,10 +4367,10 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
// DataTy is the overloaded type
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
- Assert(DataTy == Val->getType(),
+ Assert(DataTy == Val->getType(),
"masked_store: storee must match pointer type", CS);
Assert(Mask->getType()->getVectorNumElements() ==
- DataTy->getVectorNumElements(),
+ DataTy->getVectorNumElements(),
"masked_store: vector mask must be same length as data", CS);
break;
}
@@ -4294,6 +4427,20 @@ static DISubprogram *getSubprogram(Metadata *LocalScope) {
return nullptr;
}
+void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
+ unsigned NumOperands = FPI.getNumArgOperands();
+ Assert(((NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
+ "invalid arguments for constrained FP intrinsic", &FPI);
+ Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)),
+ "invalid exception behavior argument", &FPI);
+ Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)),
+ "invalid rounding mode argument", &FPI);
+ Assert(FPI.getRoundingMode() != ConstrainedFPIntrinsic::rmInvalid,
+ "invalid rounding mode argument", &FPI);
+ Assert(FPI.getExceptionBehavior() != ConstrainedFPIntrinsic::ebInvalid,
+ "invalid exception behavior argument", &FPI);
+}
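The operand layout this check expects: a unary constrained intrinsic such as llvm.experimental.constrained.sqrt carries three operands (the value plus rounding-mode and exception-behavior metadata), while a binary one such as constrained.fadd carries four. A minimal sketch of the shape test alone:

    #include "llvm/IR/IntrinsicInst.h"

    using namespace llvm;

    static bool constrainedFPShapeOK(ConstrainedFPIntrinsic &FPI) {
      unsigned N = FPI.getNumArgOperands();
      return (FPI.isUnaryOp() && N == 3) || (!FPI.isUnaryOp() && N == 4);
    }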
+
template <class DbgIntrinsicTy>
void Verifier::visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII) {
auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
@@ -4330,6 +4477,8 @@ void Verifier::visitDbgIntrinsic(StringRef Kind, DbgIntrinsicTy &DII) {
" variable and !dbg attachment",
&DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
Loc->getScope()->getSubprogram());
+
+ verifyFnArgs(DII);
}
static uint64_t getVariableSize(const DILocalVariable &V) {
@@ -4398,15 +4547,49 @@ void Verifier::verifyFragmentExpression(const DbgInfoIntrinsic &I) {
AssertDI(FragSize != VarSize, "fragment covers entire variable", &I, V, E);
}
+void Verifier::verifyFnArgs(const DbgInfoIntrinsic &I) {
+ // This function does not take the scope of noninlined function arguments into
+ // account. Don't run it if current function is nodebug, because it may
+ // contain inlined debug intrinsics.
+ if (!HasDebugInfo)
+ return;
+
+ DILocalVariable *Var;
+ if (auto *DV = dyn_cast<DbgValueInst>(&I)) {
+ // For performance reasons only check non-inlined ones.
+ if (DV->getDebugLoc()->getInlinedAt())
+ return;
+ Var = DV->getVariable();
+ } else {
+ auto *DD = cast<DbgDeclareInst>(&I);
+ if (DD->getDebugLoc()->getInlinedAt())
+ return;
+ Var = DD->getVariable();
+ }
+ AssertDI(Var, "dbg intrinsic without variable");
+
+ unsigned ArgNo = Var->getArg();
+ if (!ArgNo)
+ return;
+
+ // Verify there are no duplicate function argument debug info entries.
+ // These will cause hard-to-debug assertions in the DWARF backend.
+ if (DebugFnArgs.size() < ArgNo)
+ DebugFnArgs.resize(ArgNo, nullptr);
+
+ auto *Prev = DebugFnArgs[ArgNo - 1];
+ DebugFnArgs[ArgNo - 1] = Var;
+ AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
+ Prev, Var);
+}
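The de-duplication pattern used by verifyFnArgs, in isolation: the argument number stored in a DILocalVariable indexes a growable table, and two distinct variables claiming the same slot is the condition being diagnosed. A minimal sketch, not part of the patch:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/DebugInfoMetadata.h"

    using namespace llvm;

    // Returns false when a different variable already claimed this argument.
    static bool recordFnArg(SmallVectorImpl<const DILocalVariable *> &Seen,
                            const DILocalVariable *Var) {
      unsigned ArgNo = Var->getArg();
      if (!ArgNo)
        return true; // not a function argument
      if (Seen.size() < ArgNo)
        Seen.resize(ArgNo, nullptr);
      const DILocalVariable *&Slot = Seen[ArgNo - 1];
      bool OK = !Slot || Slot == Var;
      Slot = Var;
      return OK;
    }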
+
void Verifier::verifyCompileUnits() {
auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
SmallPtrSet<const Metadata *, 2> Listed;
if (CUs)
Listed.insert(CUs->op_begin(), CUs->op_end());
- AssertDI(
- all_of(CUVisited,
- [&Listed](const Metadata *CU) { return Listed.count(CU); }),
- "All DICompileUnits must be listed in llvm.dbg.cu");
+ for (auto *CU : CUVisited)
+ AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
CUVisited.clear();
}