Diffstat (limited to 'contrib/llvm/lib/CodeGen')
-rw-r--r--  contrib/llvm/lib/CodeGen/Analysis.cpp | 35
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp | 38
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp | 28
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h | 225
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h | 74
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp | 30
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h | 8
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/WinException.cpp | 32
-rw-r--r--  contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp | 3
-rw-r--r--  contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp | 146
-rw-r--r--  contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/GlobalMerge.cpp | 29
-rw-r--r--  contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp | 93
-rw-r--r--  contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/MIRParser/MILexer.cpp | 30
-rw-r--r--  contrib/llvm/lib/CodeGen/MIRParser/MILexer.h | 21
-rw-r--r--  contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp | 269
-rw-r--r--  contrib/llvm/lib/CodeGen/MIRParser/MIParser.h | 16
-rw-r--r--  contrib/llvm/lib/CodeGen/MIRParser/MIRParser.cpp | 164
-rw-r--r--  contrib/llvm/lib/CodeGen/MIRPrinter.cpp | 165
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineDominators.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineFunction.cpp | 40
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp | 3
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp | 48
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp | 47
-rw-r--r--  contrib/llvm/lib/CodeGen/Passes.cpp | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp | 57
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocFast.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp | 42
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterPressure.cpp | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 145
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 36
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 25
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 263
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 30
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 123
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 8
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp | 43
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 37
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 198
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 10
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 193
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 555
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 50
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 178
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp | 45
-rw-r--r--  contrib/llvm/lib/CodeGen/StackMaps.cpp | 165
-rw-r--r--  contrib/llvm/lib/CodeGen/StackProtector.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp | 30
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp | 49
-rw-r--r--  contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp | 8
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegMap.cpp | 73
-rw-r--r--  contrib/llvm/lib/CodeGen/WinEHPrepare.cpp | 75
65 files changed, 2472 insertions(+), 1652 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm/lib/CodeGen/Analysis.cpp
index 3224fac..98d4c8a 100644
--- a/contrib/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm/lib/CodeGen/Analysis.cpp
@@ -81,27 +81,27 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
- SmallVectorImpl<EVT> &ValueVTs,
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+ Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
EI != EE; ++EI)
- ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
+ ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
StartingOffset + SL->getElementOffset(EI - EB));
return;
}
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
+ uint64_t EltSize = DL.getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
- ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
+ ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize);
return;
}
@@ -109,7 +109,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
if (Ty->isVoidTy())
return;
// Base case: we can get an EVT for this LLVM IR type.
- ValueVTs.push_back(TLI.getValueType(Ty));
+ ValueVTs.push_back(TLI.getValueType(DL, Ty));
if (Offsets)
Offsets->push_back(StartingOffset);
}
@@ -233,7 +233,8 @@ static bool isNoopBitcast(Type *T1, Type *T2,
static const Value *getNoopInput(const Value *V,
SmallVectorImpl<unsigned> &ValLoc,
unsigned &DataBits,
- const TargetLoweringBase &TLI) {
+ const TargetLoweringBase &TLI,
+ const DataLayout &DL) {
while (true) {
// Try to look through V1; if V1 is not an instruction, it can't be looked
// through.
@@ -255,16 +256,16 @@ static const Value *getNoopInput(const Value *V,
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
- TLI.getPointerTy().getSizeInBits() ==
- cast<IntegerType>(Op->getType())->getBitWidth())
+ DL.getPointerSizeInBits() ==
+ cast<IntegerType>(Op->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<PtrToIntInst>(I)) {
// Look through ptrtoint.
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
- TLI.getPointerTy().getSizeInBits() ==
- cast<IntegerType>(I->getType())->getBitWidth())
+ DL.getPointerSizeInBits() ==
+ cast<IntegerType>(I->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<TruncInst>(I) &&
TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
@@ -331,14 +332,15 @@ static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
SmallVectorImpl<unsigned> &RetIndices,
SmallVectorImpl<unsigned> &CallIndices,
bool AllowDifferingSizes,
- const TargetLoweringBase &TLI) {
+ const TargetLoweringBase &TLI,
+ const DataLayout &DL) {
// Trace the sub-value needed by the return value as far back up the graph as
// possible, in the hope that it will intersect with the value produced by the
// call. In the simple case with no "returned" attribute, the hope is actually
// that we end up back at the tail call instruction itself.
unsigned BitsRequired = UINT_MAX;
- RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);
+ RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);
// If this slot in the value returned is undef, it doesn't matter what the
// call puts there, it'll be fine.
@@ -350,7 +352,7 @@ static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
// a "returned" attribute, the search will be blocked immediately and the loop
// a Noop.
unsigned BitsProvided = UINT_MAX;
- CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);
+ CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);
// There's no hope if we can't actually trace them to (the same part of!) the
// same value.
@@ -606,7 +608,8 @@ bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
// Finally, we can check whether the value produced by the tail call at this
// index is compatible with the value we return.
if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
- AllowDifferingSizes, TLI))
+ AllowDifferingSizes, TLI,
+ F->getParent()->getDataLayout()))
return false;
CallEmpty = !nextRealType(CallSubTypes, CallPath);
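
Note on the pattern above, which recurs throughout this patch: ComputeValueVTs and the helpers below it no longer reach through TLI.getDataLayout(); callers pass the DataLayout explicitly. A minimal sketch of the resulting call pattern follows — the caller itself is hypothetical, only ComputeValueVTs and its new signature come from this patch:

    // Hypothetical caller (not part of this patch):
    #include "llvm/CodeGen/Analysis.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    static void computeReturnVTs(const TargetLowering &TLI, const Function &F,
                                 SmallVectorImpl<EVT> &ValueVTs) {
      // The DataLayout now comes from the Module rather than from TLI:
      const DataLayout &DL = F.getParent()->getDataLayout();
      ComputeValueVTs(TLI, DL, F.getReturnType(), ValueVTs);
    }
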
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
index 4cb460a..0bad795 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
@@ -69,24 +69,32 @@ void ARMException::beginFunction(const MachineFunction *MF) {
///
void ARMException::endFunction(const MachineFunction *MF) {
ARMTargetStreamer &ATS = getTargetStreamer();
+ const Function *F = MF->getFunction();
+ const Function *Per = nullptr;
+ if (F->hasPersonalityFn())
+ Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ assert(!MMI->getPersonality() || Per == MMI->getPersonality());
+ bool forceEmitPersonality =
+ F->hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
+ F->needsUnwindTableEntry();
+ bool shouldEmitPersonality = forceEmitPersonality ||
+ !MMI->getLandingPads().empty();
if (!Asm->MF->getFunction()->needsUnwindTableEntry() &&
- MMI->getLandingPads().empty())
+ !shouldEmitPersonality)
ATS.emitCantUnwind();
- else {
- if (!MMI->getLandingPads().empty()) {
- // Emit references to personality.
- if (const Function *Personality = MMI->getPersonality()) {
- MCSymbol *PerSym = Asm->getSymbol(Personality);
- Asm->OutStreamer->EmitSymbolAttribute(PerSym, MCSA_Global);
- ATS.emitPersonality(PerSym);
- }
-
- // Emit .handlerdata directive.
- ATS.emitHandlerData();
-
- // Emit actual exception table
- emitExceptionTable();
+ else if (shouldEmitPersonality) {
+ // Emit references to personality.
+ if (Per) {
+ MCSymbol *PerSym = Asm->getSymbol(Per);
+ Asm->OutStreamer->EmitSymbolAttribute(PerSym, MCSA_Global);
+ ATS.emitPersonality(PerSym);
}
+
+ // Emit .handlerdata directive.
+ ATS.emitHandlerData();
+
+ // Emit actual exception table
+ emitExceptionTable();
}
if (Asm->MAI->getExceptionHandlingType() == ExceptionHandling::ARM)
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 8a7e9f9..125047e 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -19,7 +19,6 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/JumpInstrTableInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -820,7 +819,7 @@ void AsmPrinter::EmitFunctionBody() {
emitCFIInstruction(MI);
break;
- case TargetOpcode::FRAME_ALLOC:
+ case TargetOpcode::LOCAL_ESCAPE:
emitFrameAlloc(MI);
break;
@@ -1024,7 +1023,7 @@ bool AsmPrinter::doFinalization(Module &M) {
// Emit visibility info for declarations
for (const Function &F : M) {
- if (!F.isDeclaration())
+ if (!F.isDeclarationForLinker())
continue;
GlobalValue::VisibilityTypes V = F.getVisibility();
if (V == GlobalValue::DefaultVisibility)
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
index 0bc873e..2c212c7 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
@@ -89,6 +89,7 @@ void DwarfCFIException::endModule() {
void DwarfCFIException::beginFunction(const MachineFunction *MF) {
shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false;
+ const Function *F = MF->getFunction();
// If any landing pads survive, we need an EH table.
bool hasLandingPads = !MMI->getLandingPads().empty();
@@ -104,10 +105,24 @@ void DwarfCFIException::beginFunction(const MachineFunction *MF) {
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
unsigned PerEncoding = TLOF.getPersonalityEncoding();
- const Function *Per = MMI->getPersonality();
-
- shouldEmitPersonality = hasLandingPads &&
- PerEncoding != dwarf::DW_EH_PE_omit && Per;
+ const Function *Per = nullptr;
+ if (F->hasPersonalityFn())
+ Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ assert(!MMI->getPersonality() || Per == MMI->getPersonality());
+
+ // Emit a personality function even when there are no landing pads
+ bool forceEmitPersonality =
+ // ...if a personality function is explicitly specified
+ F->hasPersonalityFn() &&
+ // ... and it's not known to be a noop in the absence of invokes
+ !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
+ // ... and we're not explicitly asked not to emit it
+ F->needsUnwindTableEntry();
+
+ shouldEmitPersonality =
+ (forceEmitPersonality ||
+ (hasLandingPads && PerEncoding != dwarf::DW_EH_PE_omit)) &&
+ Per;
unsigned LSDAEncoding = TLOF.getLSDAEncoding();
shouldEmitLSDA = shouldEmitPersonality &&
@@ -123,6 +138,11 @@ void DwarfCFIException::beginFunction(const MachineFunction *MF) {
if (!shouldEmitPersonality)
return;
+ // If we are forced to emit this personality, make sure to record
+ // it because it might not appear in any landingpad
+ if (forceEmitPersonality)
+ MMI->addPersonality(Per);
+
const MCSymbol *Sym =
TLOF.getCFIPersonalitySymbol(Per, *Asm->Mang, Asm->TM, MMI);
Asm->OutStreamer->EmitCFIPersonality(Sym, PerEncoding);
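
Background for the personality handling in this file and the previous one: the personality function now lives on the Function itself (hasPersonalityFn/getPersonalityFn) rather than being recovered solely from MachineModuleInfo. A minimal sketch of how a front end would attach one — the variable names and the choice of __gxx_personality_v0 are illustrative, not from this patch:

    // Hypothetical IR-building snippet; M is a Module, F a Function that
    // may need unwinding, Ctx the LLVMContext.
    Function *GxxPer = cast<Function>(M.getOrInsertFunction(
        "__gxx_personality_v0",
        FunctionType::get(Type::getInt32Ty(Ctx), /*isVarArg=*/true)));
    F->setPersonalityFn(
        ConstantExpr::getBitCast(GxxPer, Type::getInt8PtrTy(Ctx)));
    // The printers above then recover it with:
    //   dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
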
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 1c3e2ae..01f34c6 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -49,7 +49,7 @@ class DwarfUnit;
class MachineModuleInfo;
//===----------------------------------------------------------------------===//
-/// \brief This class is used to record source line correspondence.
+/// This class is used to record source line correspondence.
class SrcLineInfo {
unsigned Line; // Source line number.
unsigned Column; // Source column.
@@ -161,7 +161,7 @@ public:
return dwarf::DW_TAG_variable;
}
- /// \brief Return true if DbgVariable is artificial.
+ /// Return true if DbgVariable is artificial.
bool isArtificial() const {
if (Var->isArtificial())
return true;
@@ -190,149 +190,152 @@ public:
const DIType *getType() const;
private:
- /// resolve - Look in the DwarfDebug map for the MDNode that
+ /// Look in the DwarfDebug map for the MDNode that
/// corresponds to the reference.
template <typename T> T *resolve(TypedDINodeRef<T> Ref) const;
};
-/// \brief Helper used to pair up a symbol and its DWARF compile unit.
+/// Helper used to pair up a symbol and its DWARF compile unit.
struct SymbolCU {
SymbolCU(DwarfCompileUnit *CU, const MCSymbol *Sym) : Sym(Sym), CU(CU) {}
const MCSymbol *Sym;
DwarfCompileUnit *CU;
};
-/// \brief Collects and handles dwarf debug information.
+/// Collects and handles dwarf debug information.
class DwarfDebug : public AsmPrinterHandler {
- // Target of Dwarf emission.
+ /// Target of Dwarf emission.
AsmPrinter *Asm;
- // Collected machine module information.
+ /// Collected machine module information.
MachineModuleInfo *MMI;
- // All DIEValues are allocated through this allocator.
+ /// All DIEValues are allocated through this allocator.
BumpPtrAllocator DIEValueAllocator;
- // Maps MDNode with its corresponding DwarfCompileUnit.
+ /// Maps MDNode with its corresponding DwarfCompileUnit.
MapVector<const MDNode *, DwarfCompileUnit *> CUMap;
- // Maps subprogram MDNode with its corresponding DwarfCompileUnit.
+ /// Maps subprogram MDNode with its corresponding DwarfCompileUnit.
MapVector<const MDNode *, DwarfCompileUnit *> SPMap;
- // Maps a CU DIE with its corresponding DwarfCompileUnit.
+ /// Maps a CU DIE with its corresponding DwarfCompileUnit.
DenseMap<const DIE *, DwarfCompileUnit *> CUDieMap;
- // List of all labels used in aranges generation.
+ /// List of all labels used in aranges generation.
std::vector<SymbolCU> ArangeLabels;
- // Size of each symbol emitted (for those symbols that have a specific size).
+ /// Size of each symbol emitted (for those symbols that have a specific size).
DenseMap<const MCSymbol *, uint64_t> SymSize;
LexicalScopes LScopes;
- // Collection of abstract variables.
+ /// Collection of abstract variables.
DenseMap<const MDNode *, std::unique_ptr<DbgVariable>> AbstractVariables;
SmallVector<std::unique_ptr<DbgVariable>, 64> ConcreteVariables;
- // Collection of DebugLocEntry. Stored in a linked list so that DIELocLists
- // can refer to them in spite of insertions into this list.
+ /// Collection of DebugLocEntry. Stored in a linked list so that DIELocLists
+ /// can refer to them in spite of insertions into this list.
DebugLocStream DebugLocs;
- // This is a collection of subprogram MDNodes that are processed to
- // create DIEs.
+ /// This is a collection of subprogram MDNodes that are processed to
+ /// create DIEs.
SmallPtrSet<const MDNode *, 16> ProcessedSPNodes;
- // Maps instruction with label emitted before instruction.
+ /// Maps instruction with label emitted before instruction.
DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn;
- // Maps instruction with label emitted after instruction.
+ /// Maps instruction with label emitted after instruction.
DenseMap<const MachineInstr *, MCSymbol *> LabelsAfterInsn;
- // History of DBG_VALUE and clobber instructions for each user variable.
- // Variables are listed in order of appearance.
+ /// History of DBG_VALUE and clobber instructions for each user
+ /// variable. Variables are listed in order of appearance.
DbgValueHistoryMap DbgValues;
- // Previous instruction's location information. This is used to determine
- // label location to indicate scope boundries in dwarf debug info.
+ /// Previous instruction's location information. This is used to
+ /// determine label location to indicate scope boundaries in dwarf
+ /// debug info.
DebugLoc PrevInstLoc;
MCSymbol *PrevLabel;
- // This location indicates end of function prologue and beginning of function
- // body.
+ /// This location indicates end of function prologue and beginning of
+ /// function body.
DebugLoc PrologEndLoc;
- // If nonnull, stores the current machine function we're processing.
+ /// If nonnull, stores the current machine function we're processing.
const MachineFunction *CurFn;
- // If nonnull, stores the current machine instruction we're processing.
+ /// If nonnull, stores the current machine instruction we're processing.
const MachineInstr *CurMI;
- // If nonnull, stores the CU in which the previous subprogram was contained.
+ /// If nonnull, stores the CU in which the previous subprogram was contained.
const DwarfCompileUnit *PrevCU;
- // As an optimization, there is no need to emit an entry in the directory
- // table for the same directory as DW_AT_comp_dir.
+ /// As an optimization, there is no need to emit an entry in the directory
+ /// table for the same directory as DW_AT_comp_dir.
StringRef CompilationDir;
- // Holder for the file specific debug information.
+ /// Holder for the file specific debug information.
DwarfFile InfoHolder;
- // Holders for the various debug information flags that we might need to
- // have exposed. See accessor functions below for description.
+ /// Holders for the various debug information flags that we might need to
+ /// have exposed. See accessor functions below for description.
- // Holder for imported entities.
+ /// Holder for imported entities.
typedef SmallVector<std::pair<const MDNode *, const MDNode *>, 32>
ImportedEntityMap;
ImportedEntityMap ScopesWithImportedEntities;
- // Map from MDNodes for user-defined types to the type units that describe
- // them.
+ /// Map from MDNodes for user-defined types to the type units that
+ /// describe them.
DenseMap<const MDNode *, const DwarfTypeUnit *> DwarfTypeUnits;
SmallVector<
std::pair<std::unique_ptr<DwarfTypeUnit>, const DICompositeType *>, 1>
TypeUnitsUnderConstruction;
- // Whether to emit the pubnames/pubtypes sections.
+ /// Whether to emit the pubnames/pubtypes sections.
bool HasDwarfPubSections;
- // Whether or not to use AT_ranges for compilation units.
+ /// Whether or not to use AT_ranges for compilation units.
bool HasCURanges;
- // Whether we emitted a function into a section other than the default
- // text.
+ /// Whether we emitted a function into a section other than the
+ /// default text.
bool UsedNonDefaultText;
- // Whether to use the GNU TLS opcode (instead of the standard opcode).
+ /// Whether to use the GNU TLS opcode (instead of the standard opcode).
bool UseGNUTLSOpcode;
- // Version of dwarf we're emitting.
+ /// Version of dwarf we're emitting.
unsigned DwarfVersion;
- // Maps from a type identifier to the actual MDNode.
+ /// Maps from a type identifier to the actual MDNode.
DITypeIdentifierMap TypeIdentifierMap;
- // DWARF5 Experimental Options
+ /// DWARF5 Experimental Options
+ /// @{
bool HasDwarfAccelTables;
bool HasSplitDwarf;
- // Separated Dwarf Variables
- // In general these will all be for bits that are left in the
- // original object file, rather than things that are meant
- // to be in the .dwo sections.
+ /// Separated Dwarf Variables
+ /// In general these will all be for bits that are left in the
+ /// original object file, rather than things that are meant
+ /// to be in the .dwo sections.
- // Holder for the skeleton information.
+ /// Holder for the skeleton information.
DwarfFile SkeletonHolder;
- /// Store file names for type units under fission in a line table header that
- /// will be emitted into debug_line.dwo.
- // FIXME: replace this with a map from comp_dir to table so that we can emit
- // multiple tables during LTO each of which uses directory 0, referencing the
- // comp_dir of all the type units that use it.
+ /// Store file names for type units under fission in a line table
+ /// header that will be emitted into debug_line.dwo.
+ // FIXME: replace this with a map from comp_dir to table so that we
+ // can emit multiple tables during LTO each of which uses directory
+ // 0, referencing the comp_dir of all the type units that use it.
MCDwarfDwoLineTable SplitTypeUnitFileTable;
-
- // True iff there are multiple CUs in this module.
+ /// @}
+
+ /// True iff there are multiple CUs in this module.
bool SingleCU;
bool IsDarwin;
bool IsPS4;
@@ -354,7 +357,7 @@ class DwarfDebug : public AsmPrinterHandler {
typedef DbgValueHistoryMap::InlinedVariable InlinedVariable;
- /// \brief Find abstract variable associated with Var.
+ /// Find abstract variable associated with Var.
DbgVariable *getExistingAbstractVariable(InlinedVariable IV,
const DILocalVariable *&Cleansed);
DbgVariable *getExistingAbstractVariable(InlinedVariable IV);
@@ -366,56 +369,56 @@ class DwarfDebug : public AsmPrinterHandler {
DbgVariable *createConcreteVariable(LexicalScope &Scope, InlinedVariable IV);
- /// \brief Construct a DIE for this abstract scope.
+ /// Construct a DIE for this abstract scope.
void constructAbstractSubprogramScopeDIE(LexicalScope *Scope);
- /// \brief Compute the size and offset of a DIE given an incoming Offset.
+ /// Compute the size and offset of a DIE given an incoming Offset.
unsigned computeSizeAndOffset(DIE *Die, unsigned Offset);
- /// \brief Compute the size and offset of all the DIEs.
+ /// Compute the size and offset of all the DIEs.
void computeSizeAndOffsets();
- /// \brief Collect info for variables that were optimized out.
+ /// Collect info for variables that were optimized out.
void collectDeadVariables();
void finishVariableDefinitions();
void finishSubprogramDefinitions();
- /// \brief Finish off debug information after all functions have been
+ /// Finish off debug information after all functions have been
/// processed.
void finalizeModuleInfo();
- /// \brief Emit the debug info section.
+ /// Emit the debug info section.
void emitDebugInfo();
- /// \brief Emit the abbreviation section.
+ /// Emit the abbreviation section.
void emitAbbreviations();
- /// \brief Emit a specified accelerator table.
+ /// Emit a specified accelerator table.
void emitAccel(DwarfAccelTable &Accel, MCSection *Section,
StringRef TableName);
- /// \brief Emit visible names into a hashed accelerator table section.
+ /// Emit visible names into a hashed accelerator table section.
void emitAccelNames();
- /// \brief Emit objective C classes and categories into a hashed
+ /// Emit objective C classes and categories into a hashed
/// accelerator table section.
void emitAccelObjC();
- /// \brief Emit namespace dies into a hashed accelerator table.
+ /// Emit namespace dies into a hashed accelerator table.
void emitAccelNamespaces();
- /// \brief Emit type dies into a hashed accelerator table.
+ /// Emit type dies into a hashed accelerator table.
void emitAccelTypes();
- /// \brief Emit visible names into a debug pubnames section.
+ /// Emit visible names into a debug pubnames section.
/// \param GnuStyle determines whether or not we want to emit
/// additional information into the table a la newer gcc for gdb
/// index.
void emitDebugPubNames(bool GnuStyle = false);
- /// \brief Emit visible types into a debug pubtypes section.
+ /// Emit visible types into a debug pubtypes section.
/// \param GnuStyle determines whether or not we want to emit
/// additional information into the table a la newer gcc for gdb
/// index.
@@ -425,91 +428,91 @@ class DwarfDebug : public AsmPrinterHandler {
bool GnuStyle, MCSection *PSec, StringRef Name,
const StringMap<const DIE *> &(DwarfCompileUnit::*Accessor)() const);
- /// \brief Emit visible names into a debug str section.
+ /// Emit visible names into a debug str section.
void emitDebugStr();
- /// \brief Emit visible names into a debug loc section.
+ /// Emit visible names into a debug loc section.
void emitDebugLoc();
- /// \brief Emit visible names into a debug loc dwo section.
+ /// Emit visible names into a debug loc dwo section.
void emitDebugLocDWO();
- /// \brief Emit visible names into a debug aranges section.
+ /// Emit visible names into a debug aranges section.
void emitDebugARanges();
- /// \brief Emit visible names into a debug ranges section.
+ /// Emit visible names into a debug ranges section.
void emitDebugRanges();
- /// \brief Emit inline info using custom format.
+ /// Emit inline info using custom format.
void emitDebugInlineInfo();
/// DWARF 5 Experimental Split Dwarf Emitters
- /// \brief Initialize common features of skeleton units.
+ /// Initialize common features of skeleton units.
void initSkeletonUnit(const DwarfUnit &U, DIE &Die,
std::unique_ptr<DwarfUnit> NewU);
- /// \brief Construct the split debug info compile unit for the debug info
+ /// Construct the split debug info compile unit for the debug info
/// section.
DwarfCompileUnit &constructSkeletonCU(const DwarfCompileUnit &CU);
- /// \brief Construct the split debug info compile unit for the debug info
+ /// Construct the split debug info compile unit for the debug info
/// section.
DwarfTypeUnit &constructSkeletonTU(DwarfTypeUnit &TU);
- /// \brief Emit the debug info dwo section.
+ /// Emit the debug info dwo section.
void emitDebugInfoDWO();
- /// \brief Emit the debug abbrev dwo section.
+ /// Emit the debug abbrev dwo section.
void emitDebugAbbrevDWO();
- /// \brief Emit the debug line dwo section.
+ /// Emit the debug line dwo section.
void emitDebugLineDWO();
- /// \brief Emit the debug str dwo section.
+ /// Emit the debug str dwo section.
void emitDebugStrDWO();
/// Flags to let the linker know we have emitted new style pubnames. Only
/// emit it here if we don't have a skeleton CU for split dwarf.
void addGnuPubAttributes(DwarfUnit &U, DIE &D) const;
- /// \brief Create new DwarfCompileUnit for the given metadata node with tag
+ /// Create new DwarfCompileUnit for the given metadata node with tag
/// DW_TAG_compile_unit.
DwarfCompileUnit &constructDwarfCompileUnit(const DICompileUnit *DIUnit);
- /// \brief Construct imported_module or imported_declaration DIE.
+ /// Construct imported_module or imported_declaration DIE.
void constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU,
const DIImportedEntity *N);
- /// \brief Register a source line with debug info. Returns the unique
+ /// Register a source line with debug info. Returns the unique
/// label that was emitted and which provides correspondence to the
/// source line list.
void recordSourceLine(unsigned Line, unsigned Col, const MDNode *Scope,
unsigned Flags);
- /// \brief Indentify instructions that are marking the beginning of or
+ /// Identify instructions that are marking the beginning of or
/// ending of a scope.
void identifyScopeMarkers();
- /// \brief Populate LexicalScope entries with variables' info.
+ /// Populate LexicalScope entries with variables' info.
void collectVariableInfo(DwarfCompileUnit &TheCU, const DISubprogram *SP,
DenseSet<InlinedVariable> &ProcessedVars);
- /// \brief Build the location list for all DBG_VALUEs in the
+ /// Build the location list for all DBG_VALUEs in the
/// function that describe the same variable.
void buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
const DbgValueHistoryMap::InstrRanges &Ranges);
- /// \brief Collect variable information from the side table maintained
+ /// Collect variable information from the side table maintained
/// by MMI.
void collectVariableInfoFromMMITable(DenseSet<InlinedVariable> &P);
- /// \brief Ensure that a label will be emitted before MI.
+ /// Ensure that a label will be emitted before MI.
void requestLabelBeforeInsn(const MachineInstr *MI) {
LabelsBeforeInsn.insert(std::make_pair(MI, nullptr));
}
- /// \brief Ensure that a label will be emitted after MI.
+ /// Ensure that a label will be emitted after MI.
void requestLabelAfterInsn(const MachineInstr *MI) {
LabelsAfterInsn.insert(std::make_pair(MI, nullptr));
}
@@ -522,50 +525,50 @@ public:
~DwarfDebug() override;
- /// \brief Emit all Dwarf sections that should come prior to the
+ /// Emit all Dwarf sections that should come prior to the
/// content.
void beginModule();
- /// \brief Emit all Dwarf sections that should come after the content.
+ /// Emit all Dwarf sections that should come after the content.
void endModule() override;
- /// \brief Gather pre-function debug information.
+ /// Gather pre-function debug information.
void beginFunction(const MachineFunction *MF) override;
- /// \brief Gather and emit post-function debug information.
+ /// Gather and emit post-function debug information.
void endFunction(const MachineFunction *MF) override;
- /// \brief Process beginning of an instruction.
+ /// Process beginning of an instruction.
void beginInstruction(const MachineInstr *MI) override;
- /// \brief Process end of an instruction.
+ /// Process end of an instruction.
void endInstruction() override;
- /// \brief Add a DIE to the set of types that we're going to pull into
+ /// Add a DIE to the set of types that we're going to pull into
/// type units.
void addDwarfTypeUnitType(DwarfCompileUnit &CU, StringRef Identifier,
DIE &Die, const DICompositeType *CTy);
- /// \brief Add a label so that arange data can be generated for it.
+ /// Add a label so that arange data can be generated for it.
void addArangeLabel(SymbolCU SCU) { ArangeLabels.push_back(SCU); }
- /// \brief For symbols that have a size designated (e.g. common symbols),
+ /// For symbols that have a size designated (e.g. common symbols),
/// this tracks that size.
void setSymbolSize(const MCSymbol *Sym, uint64_t Size) override {
SymSize[Sym] = Size;
}
- /// \brief Returns whether to use DW_OP_GNU_push_tls_address, instead of the
+ /// Returns whether to use DW_OP_GNU_push_tls_address, instead of the
/// standard DW_OP_form_tls_address opcode
bool useGNUTLSOpcode() const { return UseGNUTLSOpcode; }
// Experimental DWARF5 features.
- /// \brief Returns whether or not to emit tables that dwarf consumers can
+ /// Returns whether or not to emit tables that dwarf consumers can
/// use to accelerate lookup.
bool useDwarfAccelTables() const { return HasDwarfAccelTables; }
- /// \brief Returns whether or not to change the current debug info for the
+ /// Returns whether or not to change the current debug info for the
/// split dwarf proposal support.
bool useSplitDwarf() const { return HasSplitDwarf; }
@@ -579,7 +582,7 @@ public:
/// Returns the entries for the .debug_loc section.
const DebugLocStream &getDebugLocs() const { return DebugLocs; }
- /// \brief Emit an entry for the debug loc section. This can be used to
+ /// Emit an entry for the debug loc section. This can be used to
/// handle an entry that's going to be emitted into the debug loc section.
void emitDebugLocEntry(ByteStreamer &Streamer,
const DebugLocStream::Entry &Entry);
@@ -592,7 +595,7 @@ public:
return Ref.resolve(TypeIdentifierMap);
}
- /// \brief Return the TypeIdentifierMap.
+ /// Return the TypeIdentifierMap.
const DITypeIdentifierMap &getTypeIdentifierMap() const {
return TypeIdentifierMap;
}
@@ -627,14 +630,14 @@ public:
less_first()));
}
- /// \brief A helper function to check whether the DIE for a given Scope is
+ /// A helper function to check whether the DIE for a given Scope is
/// going to be null.
bool isLexicalScopeDIENull(LexicalScope *Scope);
- /// \brief Return Label preceding the instruction.
+ /// Return Label preceding the instruction.
MCSymbol *getLabelBeforeInsn(const MachineInstr *MI);
- /// \brief Return Label immediately following the instruction.
+ /// Return Label immediately following the instruction.
MCSymbol *getLabelAfterInsn(const MachineInstr *MI);
// FIXME: Sink these functions down into DwarfFile/Dwarf*Unit.
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
index 4000ae4..44d9d22 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -113,7 +113,7 @@ protected:
DwarfUnit(unsigned UID, dwarf::Tag, const DICompileUnit *CU, AsmPrinter *A,
DwarfDebug *DW, DwarfFile *DWU);
- /// \brief Add a string attribute data and value.
+ /// Add a string attribute data and value.
///
/// This is guaranteed to be in the local string pool instead of indirected.
void addLocalString(DIE &Die, dwarf::Attribute Attribute, StringRef Str);
@@ -142,10 +142,10 @@ public:
unsigned getDebugInfoOffset() const { return DebugInfoOffset; }
void setDebugInfoOffset(unsigned DbgInfoOff) { DebugInfoOffset = DbgInfoOff; }
- /// \brief Return true if this compile unit has something to write out.
+ /// Return true if this compile unit has something to write out.
bool hasContent() const { return UnitDie.hasChildren(); }
- /// \brief Get string containing language specific context for a global name.
+ /// Get string containing language specific context for a global name.
///
/// Walks the metadata parent chain in a language specific manner (using the
/// compile unit language) and returns it as a string. This is done at the
@@ -162,42 +162,42 @@ public:
virtual void addGlobalType(const DIType *Ty, const DIE &Die,
const DIScope *Context) {}
- /// \brief Add a new name to the namespace accelerator table.
+ /// Add a new name to the namespace accelerator table.
void addAccelNamespace(StringRef Name, const DIE &Die);
- /// \brief Returns the DIE map slot for the specified debug variable.
+ /// Returns the DIE map slot for the specified debug variable.
///
/// We delegate the request to DwarfDebug when the MDNode can be part of the
/// type system, since DIEs for the type system can be shared across CUs and
/// the mappings are kept in DwarfDebug.
DIE *getDIE(const DINode *D) const;
- /// \brief Returns a fresh newly allocated DIELoc.
+ /// Returns a fresh newly allocated DIELoc.
DIELoc *getDIELoc() { return new (DIEValueAllocator) DIELoc; }
- /// \brief Insert DIE into the map.
+ /// Insert DIE into the map.
///
/// We delegate the request to DwarfDebug when the MDNode can be part of the
/// type system, since DIEs for the type system can be shared across CUs and
/// the mappings are kept in DwarfDebug.
void insertDIE(const DINode *Desc, DIE *D);
- /// \brief Add a flag that is true to the DIE.
+ /// Add a flag that is true to the DIE.
void addFlag(DIE &Die, dwarf::Attribute Attribute);
- /// \brief Add an unsigned integer attribute data and value.
+ /// Add an unsigned integer attribute data and value.
void addUInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
uint64_t Integer);
void addUInt(DIE &Block, dwarf::Form Form, uint64_t Integer);
- /// \brief Add an signed integer attribute data and value.
+ /// Add an signed integer attribute data and value.
void addSInt(DIE &Die, dwarf::Attribute Attribute, Optional<dwarf::Form> Form,
int64_t Integer);
void addSInt(DIELoc &Die, Optional<dwarf::Form> Form, int64_t Integer);
- /// \brief Add a string attribute data and value.
+ /// Add a string attribute data and value.
///
/// We always emit a reference to the string pool instead of immediate
/// strings so that DIEs have more predictable sizes. In the case of split
@@ -205,38 +205,38 @@ public:
/// into the string table.
void addString(DIE &Die, dwarf::Attribute Attribute, StringRef Str);
- /// \brief Add a Dwarf label attribute data and value.
+ /// Add a Dwarf label attribute data and value.
DIE::value_iterator addLabel(DIE &Die, dwarf::Attribute Attribute,
dwarf::Form Form, const MCSymbol *Label);
void addLabel(DIELoc &Die, dwarf::Form Form, const MCSymbol *Label);
- /// \brief Add an offset into a section attribute data and value.
+ /// Add an offset into a section attribute data and value.
void addSectionOffset(DIE &Die, dwarf::Attribute Attribute, uint64_t Integer);
- /// \brief Add a dwarf op address data and value using the form given and an
+ /// Add a dwarf op address data and value using the form given and an
/// op of either DW_FORM_addr or DW_FORM_GNU_addr_index.
void addOpAddress(DIELoc &Die, const MCSymbol *Label);
- /// \brief Add a label delta attribute data and value.
+ /// Add a label delta attribute data and value.
void addLabelDelta(DIE &Die, dwarf::Attribute Attribute, const MCSymbol *Hi,
const MCSymbol *Lo);
- /// \brief Add a DIE attribute data and value.
+ /// Add a DIE attribute data and value.
void addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIE &Entry);
- /// \brief Add a DIE attribute data and value.
+ /// Add a DIE attribute data and value.
void addDIEEntry(DIE &Die, dwarf::Attribute Attribute, DIEEntry Entry);
void addDIETypeSignature(DIE &Die, const DwarfTypeUnit &Type);
- /// \brief Add block data.
+ /// Add block data.
void addBlock(DIE &Die, dwarf::Attribute Attribute, DIELoc *Block);
- /// \brief Add block data.
+ /// Add block data.
void addBlock(DIE &Die, dwarf::Attribute Attribute, DIEBlock *Block);
- /// \brief Add location information to specified debug information entry.
+ /// Add location information to specified debug information entry.
void addSourceLine(DIE &Die, unsigned Line, StringRef File,
StringRef Directory);
void addSourceLine(DIE &Die, const DILocalVariable *V);
@@ -246,30 +246,30 @@ public:
void addSourceLine(DIE &Die, const DINamespace *NS);
void addSourceLine(DIE &Die, const DIObjCProperty *Ty);
- /// \brief Add constant value entry in variable DIE.
+ /// Add constant value entry in variable DIE.
void addConstantValue(DIE &Die, const MachineOperand &MO, const DIType *Ty);
void addConstantValue(DIE &Die, const ConstantInt *CI, const DIType *Ty);
void addConstantValue(DIE &Die, const APInt &Val, const DIType *Ty);
void addConstantValue(DIE &Die, const APInt &Val, bool Unsigned);
void addConstantValue(DIE &Die, bool Unsigned, uint64_t Val);
- /// \brief Add constant value entry in variable DIE.
+ /// Add constant value entry in variable DIE.
void addConstantFPValue(DIE &Die, const MachineOperand &MO);
void addConstantFPValue(DIE &Die, const ConstantFP *CFP);
- /// \brief Add a linkage name, if it isn't empty.
+ /// Add a linkage name, if it isn't empty.
void addLinkageName(DIE &Die, StringRef LinkageName);
- /// \brief Add template parameters in buffer.
+ /// Add template parameters in buffer.
void addTemplateParams(DIE &Buffer, DINodeArray TParams);
- /// \brief Add register operand.
+ /// Add register operand.
/// \returns false if the register does not exist, e.g., because it was never
/// materialized.
bool addRegisterOpPiece(DIELoc &TheDie, unsigned Reg,
unsigned SizeInBits = 0, unsigned OffsetInBits = 0);
- /// \brief Add register offset.
+ /// Add register offset.
/// \returns false if the register does not exist, e.g., because it was never
/// materialized.
bool addRegisterOffset(DIELoc &TheDie, unsigned Reg, int64_t Offset);
@@ -283,7 +283,7 @@ public:
dwarf::Attribute Attribute,
const MachineLocation &Location);
- /// \brief Add a new type attribute to the specified entity.
+ /// Add a new type attribute to the specified entity.
///
/// This takes an attribute parameter because DW_AT_friend attributes are
/// also type references.
@@ -297,19 +297,19 @@ public:
void applySubprogramAttributes(const DISubprogram *SP, DIE &SPDie,
bool Minimal = false);
- /// \brief Find existing DIE or create new DIE for the given type.
+ /// Find existing DIE or create new DIE for the given type.
DIE *getOrCreateTypeDIE(const MDNode *N);
- /// \brief Get context owner's DIE.
+ /// Get context owner's DIE.
DIE *createTypeDIE(const DICompositeType *Ty);
- /// \brief Get context owner's DIE.
+ /// Get context owner's DIE.
DIE *getOrCreateContextDIE(const DIScope *Context);
- /// \brief Construct DIEs for types that contain vtables.
+ /// Construct DIEs for types that contain vtables.
void constructContainingTypeDIEs();
- /// \brief Construct function argument DIEs.
+ /// Construct function argument DIEs.
void constructSubprogramArguments(DIE &Buffer, DITypeRefArray Args);
/// Create a DIE with the given Tag, add the DIE to its parent, and
@@ -332,14 +332,14 @@ public:
void constructTypeDIE(DIE &Buffer, const DICompositeType *CTy);
protected:
- /// \brief Create new static data member DIE.
+ /// Create new static data member DIE.
DIE *getOrCreateStaticMemberDIE(const DIDerivedType *DT);
/// Look up the source ID with the given directory and source file names. If
/// none currently exists, create a new ID and insert it in the line table.
virtual unsigned getOrCreateSourceID(StringRef File, StringRef Directory) = 0;
- /// \brief Look in the DwarfDebug map for the MDNode that corresponds to the
+ /// Look in the DwarfDebug map for the MDNode that corresponds to the
/// reference.
template <typename T> T *resolve(TypedDINodeRef<T> Ref) const {
return DD->resolve(Ref);
@@ -358,15 +358,15 @@ private:
void constructTemplateValueParameterDIE(DIE &Buffer,
const DITemplateValueParameter *TVP);
- /// \brief Return the default lower bound for an array.
+ /// Return the default lower bound for an array.
///
/// If the DWARF version doesn't handle the language, return -1.
int64_t getDefaultLowerBound() const;
- /// \brief Get an anonymous type for index type.
+ /// Get an anonymous type for index type.
DIE *getIndexTyDie();
- /// \brief Set D as anonymous type for index which can be reused later.
+ /// Set D as anonymous type for index which can be reused later.
void setIndexTyDie(DIE *D) { IndexTyDie = D; }
/// If this is a named finished type then include it in the list of types for
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
index 1be3fd7..49ef8d3 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -309,7 +309,7 @@ computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
// If some instruction between the previous try-range and the end of the
// function may throw, create a call-site entry with no landing pad for the
// region following the try-range.
- if (SawPotentiallyThrowing && !IsSJLJ) {
+ if (SawPotentiallyThrowing && !IsSJLJ && LastLabel != nullptr) {
CallSiteEntry Site = { LastLabel, nullptr, nullptr, 0 };
CallSites.push_back(Site);
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
index 535b1f6..6610ac7 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
@@ -97,7 +97,7 @@ void WinCodeViewLineTables::maybeRecordLocation(DebugLoc DL,
MCSymbol *MCL = Asm->MMI->getContext().createTempSymbol();
Asm->OutStreamer->EmitLabel(MCL);
CurFn->Instrs.push_back(MCL);
- InstrInfo[MCL] = InstrInfoTy(Filename, DL.getLine());
+ InstrInfo[MCL] = InstrInfoTy(Filename, DL.getLine(), DL.getCol());
}
WinCodeViewLineTables::WinCodeViewLineTables(AsmPrinter *AP)
@@ -264,22 +264,38 @@ void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) {
// Identify the function this subsection is for.
Asm->OutStreamer->EmitCOFFSecRel32(Fn);
Asm->OutStreamer->EmitCOFFSectionIndex(Fn);
- // Insert padding after a 16-bit section index.
- Asm->EmitInt16(0);
+ // Insert flags after a 16-bit section index.
+ Asm->EmitInt16(COFF::DEBUG_LINE_TABLES_HAVE_COLUMN_RECORDS);
// Length of the function's code, in bytes.
EmitLabelDiff(*Asm->OutStreamer, Fn, FI.End);
// PC-to-linenumber lookup table:
MCSymbol *FileSegmentEnd = nullptr;
+
+ // The start of the last segment:
+ size_t LastSegmentStart = 0;
+
+ auto FinishPreviousChunk = [&] {
+ if (!FileSegmentEnd)
+ return;
+ for (size_t ColSegI = LastSegmentStart,
+ ColSegEnd = ColSegI + FilenameSegmentLengths[LastSegmentStart];
+ ColSegI != ColSegEnd; ++ColSegI) {
+ unsigned ColumnNumber = InstrInfo[FI.Instrs[ColSegI]].ColumnNumber;
+ Asm->EmitInt16(ColumnNumber); // Start column
+ Asm->EmitInt16(ColumnNumber); // End column
+ }
+ Asm->OutStreamer->EmitLabel(FileSegmentEnd);
+ };
+
for (size_t J = 0, F = FI.Instrs.size(); J != F; ++J) {
MCSymbol *Instr = FI.Instrs[J];
assert(InstrInfo.count(Instr));
if (FilenameSegmentLengths.count(J)) {
// We came to a beginning of a new filename segment.
- if (FileSegmentEnd)
- Asm->OutStreamer->EmitLabel(FileSegmentEnd);
+ FinishPreviousChunk();
StringRef CurFilename = InstrInfo[FI.Instrs[J]].Filename;
assert(FileNameRegistry.Infos.count(CurFilename));
size_t IndexInStringTable =
@@ -300,6 +316,7 @@ void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) {
// records.
FileSegmentEnd = Asm->MMI->getContext().createTempSymbol();
EmitLabelDiff(*Asm->OutStreamer, FileSegmentBegin, FileSegmentEnd);
+ LastSegmentStart = J;
}
// The first PC with the given linenumber and the linenumber itself.
@@ -307,8 +324,7 @@ void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) {
Asm->EmitInt32(InstrInfo[Instr].LineNumber);
}
- if (FileSegmentEnd)
- Asm->OutStreamer->EmitLabel(FileSegmentEnd);
+ FinishPreviousChunk();
Asm->OutStreamer->EmitLabel(LineTableEnd);
}
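
For reference, the column information that FinishPreviousChunk appends above is two 16-bit values per instruction, following the (offset, line) records of each file segment; the flag emitted earlier (DEBUG_LINE_TABLES_HAVE_COLUMN_RECORDS) announces their presence. A sketch of the implied record layout — the struct and field names are illustrative, not a CodeView definition taken from this patch:

    // Illustrative layout only:
    struct ColumnRecord {
      uint16_t StartColumn; // first EmitInt16 in FinishPreviousChunk
      uint16_t EndColumn;   // second EmitInt16; the same value is emitted here
    };
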
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
index a5b399f..43d1a43 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.h
@@ -52,11 +52,13 @@ class LLVM_LIBRARY_VISIBILITY WinCodeViewLineTables : public AsmPrinterHandler {
struct InstrInfoTy {
StringRef Filename;
unsigned LineNumber;
+ unsigned ColumnNumber;
- InstrInfoTy() : LineNumber(0) {}
+ InstrInfoTy() : LineNumber(0), ColumnNumber(0) {}
- InstrInfoTy(StringRef Filename, unsigned LineNumber)
- : Filename(Filename), LineNumber(LineNumber) {}
+ InstrInfoTy(StringRef Filename, unsigned LineNumber, unsigned ColumnNumber)
+ : Filename(Filename), LineNumber(LineNumber),
+ ColumnNumber(ColumnNumber) {}
};
DenseMap<MCSymbol *, InstrInfoTy> InstrInfo;
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/WinException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
index 79830bc..71c7781 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/WinException.cpp
@@ -70,19 +70,27 @@ void WinException::beginFunction(const MachineFunction *MF) {
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
unsigned PerEncoding = TLOF.getPersonalityEncoding();
- const Function *Per = MMI->getPersonality();
+ const Function *Per = nullptr;
+ if (F->hasPersonalityFn())
+ Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
- shouldEmitPersonality = hasLandingPads &&
- PerEncoding != dwarf::DW_EH_PE_omit && Per;
+ bool forceEmitPersonality =
+ F->hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
+ F->needsUnwindTableEntry();
+
+ shouldEmitPersonality = forceEmitPersonality || (hasLandingPads &&
+ PerEncoding != dwarf::DW_EH_PE_omit && Per);
unsigned LSDAEncoding = TLOF.getLSDAEncoding();
shouldEmitLSDA = shouldEmitPersonality &&
LSDAEncoding != dwarf::DW_EH_PE_omit;
- // If we're not using CFI, we don't want the CFI or the personality. Emit the
- // LSDA if this is the parent function.
+ // If we're not using CFI, we don't want the CFI or the personality. If
+ // WinEHPrepare outlined something, we should emit the LSDA.
if (!Asm->MAI->usesWindowsCFI()) {
- shouldEmitLSDA = (hasLandingPads && F == ParentF);
+ bool HasOutlinedChildren =
+ F->hasFnAttribute("wineh-parent") && F == ParentF;
+ shouldEmitLSDA = HasOutlinedChildren;
shouldEmitPersonality = false;
return;
}
@@ -121,7 +129,10 @@ void WinException::endFunction(const MachineFunction *MF) {
if (!shouldEmitPersonality && !shouldEmitMoves && !shouldEmitLSDA)
return;
- EHPersonality Per = MMI->getPersonalityType();
+ const Function *F = MF->getFunction();
+ EHPersonality Per = EHPersonality::Unknown;
+ if (F->hasPersonalityFn())
+ Per = classifyEHPersonality(F->getPersonalityFn());
// Get rid of any dead landing pads if we're not using a Windows EH scheme. In
// Windows EH schemes, the landing pad is not actually reachable. It only
@@ -350,6 +361,7 @@ void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) {
// EHFlags & 1 -> Synchronous exceptions only, no async exceptions.
// EHFlags & 2 -> ???
// EHFlags & 4 -> The function is noexcept(true), unwinding can't continue.
+ OS.EmitValueToAlignment(4);
OS.EmitLabel(FuncInfoXData);
OS.EmitIntValue(0x19930522, 4); // MagicNumber
OS.EmitIntValue(FuncInfo.UnwindMap.size(), 4); // MaxState
@@ -555,7 +567,7 @@ void WinException::emitEHRegistrationOffsetLabel(const WinEHFuncInfo &FuncInfo,
// we've code generated the parent, we can emit the label assignment that
// those helpers use to get the offset of the registration node.
assert(FuncInfo.EHRegNodeEscapeIndex != INT_MAX &&
- "no EH reg node frameescape index");
+ "no EH reg node localescape index");
MCSymbol *ParentFrameOffset =
Asm->OutContext.getOrCreateParentFrameOffsetSymbol(FLinkageName);
MCSymbol *RegistrationOffsetSym = Asm->OutContext.getOrCreateFrameAllocSymbol(
@@ -578,9 +590,11 @@ void WinException::emitExceptHandlerTable(const MachineFunction *MF) {
// Emit the __ehtable label that we use for llvm.x86.seh.lsda.
MCSymbol *LSDALabel = Asm->OutContext.getOrCreateLSDASymbol(FLinkageName);
+ OS.EmitValueToAlignment(4);
OS.EmitLabel(LSDALabel);
- const Function *Per = MMI->getPersonality();
+ const Function *Per =
+ dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
StringRef PerName = Per->getName();
int BaseState = -1;
if (PerName == "_except_handler4") {
diff --git a/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp b/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
index 82f5c48..db00910 100644
--- a/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
@@ -34,4 +34,5 @@ cl::opt<unsigned>
cl::Hidden);
BasicTTIImpl::BasicTTIImpl(const TargetMachine *TM, Function &F)
- : BaseT(TM), ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}
+ : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
diff --git a/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp b/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 70de4e7..6ab6acc 100644
--- a/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -147,10 +147,13 @@ class TypePromotionTransaction;
/// OptSize - True if optimizing for size.
bool OptSize;
+ /// DataLayout for the Function being processed.
+ const DataLayout *DL;
+
public:
static char ID; // Pass identification, replacement for typeid
explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
- : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr) {
+ : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
@@ -203,6 +206,8 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
+ DL = &F.getParent()->getDataLayout();
+
bool EverMadeChange = false;
// Clear per function information.
InsertedInsts.clear();
@@ -753,10 +758,11 @@ static bool SinkCast(CastInst *CI) {
///
/// Return true if any changes are made.
///
-static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
+static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
+ const DataLayout &DL) {
// If this is a noop copy,
- EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(CI->getType());
+ EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(DL, CI->getType());
// This is an fp<->int conversion?
if (SrcVT.isInteger() != DstVT.isInteger())
@@ -921,7 +927,7 @@ static bool isExtractBitsCandidateUse(Instruction *User) {
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
- const TargetLowering &TLI) {
+ const TargetLowering &TLI, const DataLayout &DL) {
BasicBlock *UserBB = User->getParent();
DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
TruncInst *TruncI = dyn_cast<TruncInst>(User);
@@ -947,7 +953,7 @@ SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
// approximation; some nodes' legality is determined by the
// operand or other means. There's no good way to find out though.
if (TLI.isOperationLegalOrCustom(
- ISDOpcode, TLI.getValueType(TruncUser->getType(), true)))
+ ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
continue;
// Don't bother for PHI nodes.
@@ -1005,13 +1011,14 @@ SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
- const TargetLowering &TLI) {
+ const TargetLowering &TLI,
+ const DataLayout &DL) {
BasicBlock *DefBB = ShiftI->getParent();
/// Only insert instructions in each block once.
DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
- bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(ShiftI->getType()));
+ bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
bool MadeChange = false;
for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
@@ -1048,9 +1055,10 @@ static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
if (isa<TruncInst>(User) && shiftIsLegal
// If the type of the truncate is legal, no truncate will be
// introduced in other basic blocks.
- && (!TLI.isTypeLegal(TLI.getValueType(User->getType()))))
+ &&
+ (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
MadeChange =
- SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI);
+ SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
continue;
}
@@ -1307,12 +1315,10 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
return true;
}
- const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
-
// Align the pointer arguments to this call if the target thinks it's a good
// idea
unsigned MinSize, PrefAlign;
- if (TLI && TD && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
+ if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
for (auto &Arg : CI->arg_operands()) {
// We want to align both objects whose address is used directly and
// objects whose address is used in casts and GEPs, though it only makes
@@ -1320,36 +1326,34 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
// if size - offset meets the size threshold.
if (!Arg->getType()->isPointerTy())
continue;
- APInt Offset(TD->getPointerSizeInBits(
- cast<PointerType>(Arg->getType())->getAddressSpace()), 0);
- Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*TD, Offset);
+ APInt Offset(DL->getPointerSizeInBits(
+ cast<PointerType>(Arg->getType())->getAddressSpace()),
+ 0);
+ Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
uint64_t Offset2 = Offset.getLimitedValue();
if ((Offset2 & (PrefAlign-1)) != 0)
continue;
AllocaInst *AI;
- if ((AI = dyn_cast<AllocaInst>(Val)) &&
- AI->getAlignment() < PrefAlign &&
- TD->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
+ if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
+ DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
AI->setAlignment(PrefAlign);
// Global variables can only be aligned if they are defined in this
// object (i.e. they are uniquely initialized in this object), and
// over-aligning global variables that have an explicit section is
// forbidden.
GlobalVariable *GV;
- if ((GV = dyn_cast<GlobalVariable>(Val)) &&
- GV->hasUniqueInitializer() &&
- !GV->hasSection() &&
- GV->getAlignment() < PrefAlign &&
- TD->getTypeAllocSize(
- GV->getType()->getElementType()) >= MinSize + Offset2)
+ if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->hasUniqueInitializer() &&
+ !GV->hasSection() && GV->getAlignment() < PrefAlign &&
+ DL->getTypeAllocSize(GV->getType()->getElementType()) >=
+ MinSize + Offset2)
GV->setAlignment(PrefAlign);
}
// If this is a memcpy (or similar) then we may be able to improve the
// alignment
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
- unsigned Align = getKnownAlignment(MI->getDest(), *TD);
+ unsigned Align = getKnownAlignment(MI->getDest(), *DL);
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
- Align = std::min(Align, getKnownAlignment(MTI->getSource(), *TD));
+ Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
if (Align > MI->getAlignment())
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
}
@@ -2099,6 +2103,7 @@ class AddressingModeMatcher {
SmallVectorImpl<Instruction*> &AddrModeInsts;
const TargetMachine &TM;
const TargetLowering &TLI;
+ const DataLayout &DL;
/// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
/// the memory instruction that we're computing this address for.
@@ -2131,8 +2136,9 @@ class AddressingModeMatcher {
: AddrModeInsts(AMI), TM(TM),
TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
->getTargetLowering()),
- AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
- InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT) {
+ DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
+ MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
+ PromotedInsts(PromotedInsts), TPT(TPT) {
IgnoreProfitability = false;
}
public:
@@ -2199,7 +2205,7 @@ bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
TestAddrMode.ScaledReg = ScaleReg;
// If the new address isn't legal, bail out.
- if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy, AddrSpace))
+ if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
return false;
// It was legal, so commit it.
@@ -2216,7 +2222,7 @@ bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
// If this addressing mode is legal, commit it and remember that we folded
// this instruction.
- if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy, AddrSpace)) {
+ if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
AddrMode = TestAddrMode;
return true;
@@ -2262,7 +2268,8 @@ static bool MightBeFoldableInst(Instruction *I) {
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
-static bool isPromotedInstructionLegal(const TargetLowering &TLI, Value *Val) {
+static bool isPromotedInstructionLegal(const TargetLowering &TLI,
+ const DataLayout &DL, Value *Val) {
Instruction *PromotedInst = dyn_cast<Instruction>(Val);
if (!PromotedInst)
return false;
@@ -2272,7 +2279,7 @@ static bool isPromotedInstructionLegal(const TargetLowering &TLI, Value *Val) {
return true;
// Otherwise, check if the promoted instruction is legal or not.
return TLI.isOperationLegalOrCustom(
- ISDOpcode, TLI.getValueType(PromotedInst->getType()));
+ ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}
/// \brief Helper class to perform type promotion.
@@ -2646,7 +2653,7 @@ bool AddressingModeMatcher::IsPromotionProfitable(
// The promotion is neutral but it may help folding the sign extension in
// loads for instance.
// Check that we did not create an illegal instruction.
- return isPromotedInstructionLegal(TLI, PromotedOperand);
+ return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}
/// MatchOperationAddr - Given an instruction or constant expr, see if we can
@@ -2674,12 +2681,14 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
case Instruction::PtrToInt:
// PtrToInt is always a noop, as we know that the int type is pointer sized.
return MatchAddr(AddrInst->getOperand(0), Depth);
- case Instruction::IntToPtr:
+ case Instruction::IntToPtr: {
+ auto AS = AddrInst->getType()->getPointerAddressSpace();
+ auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
// This inttoptr is a no-op if the integer type is pointer sized.
- if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
- TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace()))
+ if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
return MatchAddr(AddrInst->getOperand(0), Depth);
return false;
+ }
case Instruction::BitCast:
// BitCast is always a noop, and we can handle it as long as it is
// int->int or pointer->pointer (we don't want int<->fp or something).
@@ -2752,16 +2761,15 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
unsigned VariableScale = 0;
int64_t ConstantOffset = 0;
- const DataLayout *TD = TLI.getDataLayout();
gep_type_iterator GTI = gep_type_begin(AddrInst);
for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- const StructLayout *SL = TD->getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx =
cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
ConstantOffset += SL->getElementOffset(Idx);
} else {
- uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
+ uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
ConstantOffset += CI->getSExtValue()*TypeSize;
} else if (TypeSize) { // Scales of zero don't do anything.
@@ -2781,7 +2789,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
if (VariableOperand == -1) {
AddrMode.BaseOffs += ConstantOffset;
if (ConstantOffset == 0 ||
- TLI.isLegalAddressingMode(AddrMode, AccessTy, AddrSpace)) {
+ TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
// Check to see if we can fold the base pointer in too.
if (MatchAddr(AddrInst->getOperand(0), Depth+1))
return true;
@@ -2904,14 +2912,14 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
// Fold in immediates if legal for the target.
AddrMode.BaseOffs += CI->getSExtValue();
- if (TLI.isLegalAddressingMode(AddrMode, AccessTy, AddrSpace))
+ if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.BaseOffs -= CI->getSExtValue();
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
// If this is a global variable, try to fold it into the addressing mode.
if (!AddrMode.BaseGV) {
AddrMode.BaseGV = GV;
- if (TLI.isLegalAddressingMode(AddrMode, AccessTy, AddrSpace))
+ if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.BaseGV = nullptr;
}
@@ -2955,7 +2963,7 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
AddrMode.HasBaseReg = true;
AddrMode.BaseReg = Addr;
// Still check for legality in case the target supports [imm] but not [i+r].
- if (TLI.isLegalAddressingMode(AddrMode, AccessTy, AddrSpace))
+ if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.HasBaseReg = false;
AddrMode.BaseReg = nullptr;
@@ -2965,7 +2973,7 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
if (AddrMode.Scale == 0) {
AddrMode.Scale = 1;
AddrMode.ScaledReg = Addr;
- if (TLI.isLegalAddressingMode(AddrMode, AccessTy, AddrSpace))
+ if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
return true;
AddrMode.Scale = 0;
AddrMode.ScaledReg = nullptr;
@@ -2984,7 +2992,8 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo();
TargetLowering::AsmOperandInfoVector TargetConstraints =
- TLI->ParseConstraints(TRI, ImmutableCallSite(CI));
+ TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI,
+ ImmutableCallSite(CI));
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
@@ -3324,7 +3333,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst << "\n");
- Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
+ Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *ResultPtr = nullptr, *ResultIndex = nullptr;
// First, find the pointer.
@@ -3443,7 +3452,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
} else {
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst << "\n");
- Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
+ Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *Result = nullptr;
// Start with the base register. Do this first so that subsequent address
@@ -3545,8 +3554,8 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
const TargetRegisterInfo *TRI =
TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
- TargetLowering::AsmOperandInfoVector
- TargetConstraints = TLI->ParseConstraints(TRI, CS);
+ TargetLowering::AsmOperandInfoVector TargetConstraints =
+ TLI->ParseConstraints(*DL, TRI, CS);
unsigned ArgNo = 0;
for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
@@ -3680,7 +3689,7 @@ bool CodeGenPrepare::ExtLdPromotion(TypePromotionTransaction &TPT,
TotalCreatedInstsCost -= ExtCost;
if (!StressExtLdPromotion &&
(TotalCreatedInstsCost > 1 ||
- !isPromotedInstructionLegal(*TLI, PromotedVal))) {
+ !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
// The promotion is not profitable, rollback to the previous state.
TPT.rollback(LastKnownGood);
continue;
@@ -3735,8 +3744,8 @@ bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *&I) {
if (!HasPromoted && LI->getParent() == I->getParent())
return false;
- EVT VT = TLI->getValueType(I->getType());
- EVT LoadVT = TLI->getValueType(LI->getType());
+ EVT VT = TLI->getValueType(*DL, I->getType());
+ EVT LoadVT = TLI->getValueType(*DL, LI->getType());
// If the load has other users and the truncate is not free, this probably
// isn't worthwhile.
@@ -4013,6 +4022,9 @@ namespace {
/// Assuming both extractelement and store can be combine, we get rid of the
/// transition.
class VectorPromoteHelper {
+ /// DataLayout associated with the current module.
+ const DataLayout &DL;
+
/// Used to perform some checks on the legality of vector operations.
const TargetLowering &TLI;
@@ -4086,7 +4098,8 @@ class VectorPromoteHelper {
unsigned Align = ST->getAlignment();
// Check if this store is supported.
if (!TLI.allowsMisalignedMemoryAccesses(
- TLI.getValueType(ST->getValueOperand()->getType()), AS, Align)) {
+ TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
+ Align)) {
// If this is not supported, there is no way we can combine
// the extract with the store.
return false;
@@ -4181,9 +4194,10 @@ class VectorPromoteHelper {
}
public:
- VectorPromoteHelper(const TargetLowering &TLI, const TargetTransformInfo &TTI,
- Instruction *Transition, unsigned CombineCost)
- : TLI(TLI), TTI(TTI), Transition(Transition),
+ VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
+ const TargetTransformInfo &TTI, Instruction *Transition,
+ unsigned CombineCost)
+ : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
assert(Transition && "Do not know how to promote null");
}
@@ -4219,7 +4233,7 @@ public:
return false;
return StressStoreExtract ||
TLI.isOperationLegalOrCustom(
- ISDOpcode, TLI.getValueType(getTransitionType(), true));
+ ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
}
/// \brief Check whether or not \p Use can be combined
@@ -4323,7 +4337,7 @@ bool CodeGenPrepare::OptimizeExtractElementInst(Instruction *Inst) {
// we do not do that for now.
BasicBlock *Parent = Inst->getParent();
DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
- VectorPromoteHelper VPH(*TLI, *TTI, Inst, CombineCost);
+ VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
// If the transition has more than one use, assume this is not going to be
// beneficial.
while (Inst->hasOneUse()) {
@@ -4368,8 +4382,7 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
// trivial PHI, go ahead and zap it here.
- const DataLayout &DL = I->getModule()->getDataLayout();
- if (Value *V = SimplifyInstruction(P, DL, TLInfo, nullptr)) {
+ if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
++NumPHIsElim;
@@ -4388,15 +4401,16 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) {
if (isa<Constant>(CI->getOperand(0)))
return false;
- if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
+ if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
return true;
if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
/// Sink a zext or sext into its user blocks if the target type doesn't
/// fit in one register
- if (TLI && TLI->getTypeAction(CI->getContext(),
- TLI->getValueType(CI->getType())) ==
- TargetLowering::TypeExpandInteger) {
+ if (TLI &&
+ TLI->getTypeAction(CI->getContext(),
+ TLI->getValueType(*DL, CI->getType())) ==
+ TargetLowering::TypeExpandInteger) {
return SinkCast(CI);
} else {
bool MadeChange = MoveExtToFormExtLoad(I);
@@ -4433,7 +4447,7 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) {
BinOp->getOpcode() == Instruction::LShr)) {
ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
if (TLI && CI && TLI->hasExtractBitsInsn())
- return OptimizeExtractBits(BinOp, CI, *TLI);
+ return OptimizeExtractBits(BinOp, CI, *TLI, *DL);
return false;
}
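
The CodeGenPrepare hunks above share one theme: the pass caches the module's DataLayout once per function and threads it through every helper, because TargetLowering no longer hands out the layout itself. A minimal sketch of the resulting calling convention, assuming LLVM headers of this vintage; isNoopIntCopy is an illustrative name, not a helper in the pass:

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    static bool isNoopIntCopy(const CastInst *CI, const TargetLowering &TLI,
                              const DataLayout &DL) {
      // Callers must now supply the layout; getValueType(DL, Ty) replaces
      // the old getValueType(Ty) overload throughout this change.
      EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
      EVT DstVT = TLI.getValueType(DL, CI->getType());
      // Simplified from OptimizeNoopCopyExpression: a cast is only a
      // candidate no-op copy when neither side is a floating point type.
      return SrcVT.isInteger() && DstVT.isInteger() && SrcVT == DstVT;
    }
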
diff --git a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index 963d573..941129b 100644
--- a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -60,7 +60,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
return false;
// Don't delete frame allocation labels.
- if (MI->getOpcode() == TargetOpcode::FRAME_ALLOC)
+ if (MI->getOpcode() == TargetOpcode::LOCAL_ESCAPE)
return false;
// Don't delete instructions with side effects.
diff --git a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
index 5b09cf1..201f9c1 100644
--- a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
@@ -733,12 +733,14 @@ bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
// If no relevant registers are used in the function, we can skip it
// completely.
bool anyregs = false;
+ const MachineRegisterInfo &MRI = mf.getRegInfo();
for (TargetRegisterClass::const_iterator I = RC->begin(), E = RC->end();
- I != E; ++I)
- if (MF->getRegInfo().isPhysRegUsed(*I)) {
- anyregs = true;
- break;
- }
+ I != E && !anyregs; ++I)
+ for (MCRegAliasIterator AI(*I, TRI, true); AI.isValid(); ++AI)
+ if (!MRI.reg_nodbg_empty(*AI)) {
+ anyregs = true;
+ break;
+ }
if (!anyregs) return false;
// Initialize the AliasMap on the first use.
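
With MachineRegisterInfo::isPhysRegUsed gone (see the LiveRegMatrix hunk below), the early-exit test walks the use lists of every alias of every register in the class instead. The same test as a standalone sketch; anyRegFromClassUsed is a hypothetical helper, not code from the pass:

    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/MC/MCRegisterInfo.h"
    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    static bool anyRegFromClassUsed(const TargetRegisterClass *RC,
                                    const MachineRegisterInfo &MRI,
                                    const TargetRegisterInfo *TRI) {
      for (unsigned Reg : *RC)
        // A register counts as used if any alias of it (itself included)
        // has a non-debug def or use anywhere in the function.
        for (MCRegAliasIterator AI(Reg, TRI, /*IncludeSelf=*/true);
             AI.isValid(); ++AI)
          if (!MRI.reg_nodbg_empty(*AI))
            return true;
      return false;
    }
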
diff --git a/contrib/llvm/lib/CodeGen/GlobalMerge.cpp b/contrib/llvm/lib/CodeGen/GlobalMerge.cpp
index 37b3bf1..6f9e839 100644
--- a/contrib/llvm/lib/CodeGen/GlobalMerge.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalMerge.cpp
@@ -117,7 +117,6 @@ STATISTIC(NumMerged, "Number of globals merged");
namespace {
class GlobalMerge : public FunctionPass {
const TargetMachine *TM;
- const DataLayout *DL;
// FIXME: Infer the maximum possible offset depending on the actual users
// (these max offsets are different for the users inside Thumb or ARM
// functions), see the code that passes in the offset in the ARM backend
@@ -160,8 +159,8 @@ namespace {
explicit GlobalMerge(const TargetMachine *TM = nullptr,
unsigned MaximalOffset = 0,
bool OnlyOptimizeForSize = false)
- : FunctionPass(ID), TM(TM), DL(TM->getDataLayout()),
- MaxOffset(MaximalOffset), OnlyOptimizeForSize(OnlyOptimizeForSize) {
+ : FunctionPass(ID), TM(TM), MaxOffset(MaximalOffset),
+ OnlyOptimizeForSize(OnlyOptimizeForSize) {
initializeGlobalMergePass(*PassRegistry::getPassRegistry());
}
@@ -188,14 +187,16 @@ INITIALIZE_PASS_END(GlobalMerge, "global-merge", "Merge global variables",
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const {
+ auto &DL = M.getDataLayout();
// FIXME: Find better heuristics
- std::stable_sort(Globals.begin(), Globals.end(),
- [this](const GlobalVariable *GV1, const GlobalVariable *GV2) {
- Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
- Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
+ std::stable_sort(
+ Globals.begin(), Globals.end(),
+ [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
+ Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
+ Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
- return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
- });
+ return (DL.getTypeAllocSize(Ty1) < DL.getTypeAllocSize(Ty2));
+ });
// If we want to just blindly group all globals together, do so.
if (!GlobalMergeGroupByUse) {
@@ -410,6 +411,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
unsigned AddrSpace) const {
Type *Int32Ty = Type::getInt32Ty(M.getContext());
+ auto &DL = M.getDataLayout();
assert(Globals.size() > 1);
@@ -427,7 +429,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
GlobalVariable *TheFirstExternal = 0;
for (j = i; j != -1; j = GlobalSet.find_next(j)) {
Type *Ty = Globals[j]->getType()->getElementType();
- MergedSize += DL->getTypeAllocSize(Ty);
+ MergedSize += DL.getTypeAllocSize(Ty);
if (MergedSize > MaxOffset) {
break;
}
@@ -526,6 +528,7 @@ bool GlobalMerge::doInitialization(Module &M) {
if (!EnableGlobalMerge)
return false;
+ auto &DL = M.getDataLayout();
DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
BSSGlobals;
bool Changed = false;
@@ -548,9 +551,9 @@ bool GlobalMerge::doInitialization(Module &M) {
unsigned AddressSpace = PT->getAddressSpace();
// Ignore fancy-aligned globals for now.
- unsigned Alignment = DL->getPreferredAlignment(I);
+ unsigned Alignment = DL.getPreferredAlignment(I);
Type *Ty = I->getType()->getElementType();
- if (Alignment > DL->getABITypeAlignment(Ty))
+ if (Alignment > DL.getABITypeAlignment(Ty))
continue;
// Ignore all 'special' globals.
@@ -562,7 +565,7 @@ bool GlobalMerge::doInitialization(Module &M) {
if (isMustKeepGlobalVariable(I))
continue;
- if (DL->getTypeAllocSize(Ty) < MaxOffset) {
+ if (DL.getTypeAllocSize(Ty) < MaxOffset) {
if (TargetLoweringObjectFile::getKindForGlobal(I, *TM).isBSSLocal())
BSSGlobals[AddressSpace].push_back(I);
else if (I->isConstant())
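
GlobalMerge now reads the DataLayout off the Module at each entry point instead of caching TM->getDataLayout() at construction time, so a pass object created before any module exists stays valid. The size gate that drives the merge sets, as a hedged sketch (Candidates, DL and MaxOffset stand in for the variables above):

    // Globals are accumulated into one merged global only while the running
    // size fits in MaxOffset, keeping every member addressable from the
    // merged base with a small constant offset.
    uint64_t MergedSize = 0;
    for (const GlobalVariable *GV : Candidates) {
      Type *Ty = GV->getType()->getElementType();
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset)
        break; // the remaining candidates start a new merged global
    }
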
diff --git a/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index a02cd67..93e0487 100644
--- a/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -25,9 +25,12 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -47,6 +50,11 @@ static cl::opt<unsigned> PageSize("imp-null-check-page-size",
"bytes"),
cl::init(4096));
+#define DEBUG_TYPE "implicit-null-checks"
+
+STATISTIC(NumImplicitNullChecks,
+ "Number of explicit null checks made implicit");
+
namespace {
class ImplicitNullChecks : public MachineFunctionPass {
@@ -171,6 +179,9 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
// callq throw_NullPointerException
//
// LblNotNull:
+ // Inst0
+ // Inst1
+ // ...
// Def = Load (%RAX + <offset>)
// ...
//
@@ -181,6 +192,8 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
// jmp LblNotNull ;; explicit or fallthrough
//
// LblNotNull:
+ // Inst0
+ // Inst1
// ...
//
// LblNull:
@@ -188,15 +201,75 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
//
unsigned PointerReg = MBP.LHS.getReg();
- MachineInstr *MemOp = &*NotNullSucc->begin();
- unsigned BaseReg, Offset;
- if (TII->getMemOpBaseRegImmOfs(MemOp, BaseReg, Offset, TRI))
- if (MemOp->mayLoad() && !MemOp->isPredicable() && BaseReg == PointerReg &&
- Offset < PageSize && MemOp->getDesc().getNumDefs() == 1) {
- NullCheckList.emplace_back(MemOp, MBP.ConditionDef, &MBB, NotNullSucc,
- NullSucc);
- return true;
+
+ // As we scan NotNullSucc for a suitable load instruction, we keep track of
+ // the registers defined and used by the instructions we scan past. This bit
+ // of information lets us decide if it is legal to hoist the load instruction
+ // we find (if we do find such an instruction) to before NotNullSucc.
+ DenseSet<unsigned> RegDefs, RegUses;
+
+ // Returns true if it is safe to reorder MI to before NotNullSucc.
+ auto IsSafeToHoist = [&](MachineInstr *MI) {
+ // Right now we don't want to worry about LLVM's memory model. This can be
+ // made more precise later.
+ for (auto *MMO : MI->memoperands())
+ if (!MMO->isUnordered())
+ return false;
+
+ for (auto &MO : MI->operands()) {
+ if (MO.isReg() && MO.getReg()) {
+ for (unsigned Reg : RegDefs)
+ if (TRI->regsOverlap(Reg, MO.getReg()))
+ return false; // We found a write-after-write or read-after-write
+
+ if (MO.isDef())
+ for (unsigned Reg : RegUses)
+ if (TRI->regsOverlap(Reg, MO.getReg()))
+ return false; // We found a write-after-read
+ }
+ }
+
+ return true;
+ };
+
+ for (auto MII = NotNullSucc->begin(), MIE = NotNullSucc->end(); MII != MIE;
+ ++MII) {
+ MachineInstr *MI = &*MII;
+ unsigned BaseReg, Offset;
+ if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
+ if (MI->mayLoad() && !MI->isPredicable() && BaseReg == PointerReg &&
+ Offset < PageSize && MI->getDesc().getNumDefs() == 1 &&
+ IsSafeToHoist(MI)) {
+ NullCheckList.emplace_back(MI, MBP.ConditionDef, &MBB, NotNullSucc,
+ NullSucc);
+ return true;
+ }
+
+ // MI did not match our criteria for conversion to a trapping load. Check
+ // if we can continue looking.
+
+ if (MI->mayStore() || MI->hasUnmodeledSideEffects())
+ return false;
+
+ for (auto *MMO : MI->memoperands())
+ // Right now we don't want to worry about LLVM's memory model.
+ if (!MMO->isUnordered())
+ return false;
+
+  // It _may_ be okay to reorder a later load instruction across MI. Make a
+  // note of its operands so that we can perform the legality check if we
+  // find a suitable load instruction:
+
+ for (auto &MO : MI->operands()) {
+ if (!MO.isReg() || !MO.getReg())
+ continue;
+
+ if (MO.isDef())
+ RegDefs.insert(MO.getReg());
+ else
+ RegUses.insert(MO.getReg());
}
+ }
return false;
}
@@ -247,7 +320,7 @@ void ImplicitNullChecks::rewriteNullChecks(
// touch the successors list for any basic block since we haven't changed
// control flow, we've just made it implicit.
insertFaultingLoad(NC.MemOperation, NC.CheckBlock, HandlerLabel);
- NC.MemOperation->removeFromParent();
+ NC.MemOperation->eraseFromParent();
NC.CheckOperation->eraseFromParent();
// Insert an *unconditional* branch to not-null successor.
@@ -257,6 +330,8 @@ void ImplicitNullChecks::rewriteNullChecks(
// Emit the HandlerLabel as an EH_LABEL.
BuildMI(*NC.NullSucc, NC.NullSucc->begin(), DL,
TII->get(TargetOpcode::EH_LABEL)).addSym(HandlerLabel);
+
+ NumImplicitNullChecks++;
}
}
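
Concretely (x86-flavored and simplified, register names illustrative), the rewrite turns

      testq %rax, %rax
      je    LblNull
    LblNotNull:
      movq  8(%rax), %rsi

into a potentially-faulting load plus an unconditional branch:

    Ltrap:
      movq  8(%rax), %rsi    # faults exactly when %rax is null
      jmp   LblNotNull

with the EH_LABEL emitted at the head of LblNull letting the runtime's page-fault handler resume there. The new RegDefs/RegUses bookkeeping is what allows the candidate load to sit a few instructions deep inside LblNotNull and still be hoisted safely to the check point.
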
diff --git a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index b486bdc..37299eb 100644
--- a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -90,8 +90,8 @@ TargetIRAnalysis LLVMTargetMachine::getTargetIRAnalysis() {
/// addPassesToX helper drives creation and initialization of TargetPassConfig.
static MCContext *
addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM,
- bool DisableVerify, AnalysisID StartAfter,
- AnalysisID StopAfter,
+ bool DisableVerify, AnalysisID StartBefore,
+ AnalysisID StartAfter, AnalysisID StopAfter,
MachineFunctionInitializer *MFInitializer = nullptr) {
// Add internal analysis passes from the target machine.
@@ -100,7 +100,7 @@ addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM,
// Targets may override createPassConfig to provide a target-specific
// subclass.
TargetPassConfig *PassConfig = TM->createPassConfig(PM);
- PassConfig->setStartStopPasses(StartAfter, StopAfter);
+ PassConfig->setStartStopPasses(StartBefore, StartAfter, StopAfter);
// Set PassConfig options provided by TargetMachine.
PassConfig->setDisableVerify(DisableVerify);
@@ -143,11 +143,12 @@ addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM,
bool LLVMTargetMachine::addPassesToEmitFile(
PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
- bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter,
- MachineFunctionInitializer *MFInitializer) {
+ bool DisableVerify, AnalysisID StartBefore, AnalysisID StartAfter,
+ AnalysisID StopAfter, MachineFunctionInitializer *MFInitializer) {
// Add common CodeGen passes.
- MCContext *Context = addPassesToGenerateCode(
- this, PM, DisableVerify, StartAfter, StopAfter, MFInitializer);
+ MCContext *Context =
+ addPassesToGenerateCode(this, PM, DisableVerify, StartBefore, StartAfter,
+ StopAfter, MFInitializer);
if (!Context)
return true;
@@ -231,7 +232,8 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
raw_pwrite_stream &Out,
bool DisableVerify) {
// Add common CodeGen passes.
- Ctx = addPassesToGenerateCode(this, PM, DisableVerify, nullptr, nullptr);
+ Ctx = addPassesToGenerateCode(this, PM, DisableVerify, nullptr, nullptr,
+ nullptr);
if (!Ctx)
return true;
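
A hedged usage sketch of the widened interface; StartBeforeID is an AnalysisID assumed to have been obtained from the pass registry, the way llc resolves its -start-before option:

    TM->addPassesToEmitFile(PM, Out, TargetMachine::CGFT_AssemblyFile,
                            /*DisableVerify=*/true,
                            /*StartBefore=*/StartBeforeID,
                            /*StartAfter=*/nullptr,
                            /*StopAfter=*/nullptr);
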
diff --git a/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
index 154ce6f..000151a 100644
--- a/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRegMatrix.cpp
@@ -15,12 +15,12 @@
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
@@ -49,7 +49,6 @@ void LiveRegMatrix::getAnalysisUsage(AnalysisUsage &AU) const {
bool LiveRegMatrix::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getSubtarget().getRegisterInfo();
- MRI = &MF.getRegInfo();
LIS = &getAnalysis<LiveIntervals>();
VRM = &getAnalysis<VirtRegMap>();
@@ -101,7 +100,6 @@ void LiveRegMatrix::assign(LiveInterval &VirtReg, unsigned PhysReg) {
<< " to " << PrintReg(PhysReg, TRI) << ':');
assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
- MRI->setPhysRegUsed(PhysReg);
foreachUnit(TRI, VirtReg, PhysReg, [&](unsigned Unit,
const LiveRange &Range) {
@@ -131,6 +129,14 @@ void LiveRegMatrix::unassign(LiveInterval &VirtReg) {
DEBUG(dbgs() << '\n');
}
+bool LiveRegMatrix::isPhysRegUsed(unsigned PhysReg) const {
+ for (MCRegUnitIterator Unit(PhysReg, TRI); Unit.isValid(); ++Unit) {
+ if (!Matrix[*Unit].empty())
+ return true;
+ }
+ return false;
+}
+
bool LiveRegMatrix::checkRegMaskInterference(LiveInterval &VirtReg,
unsigned PhysReg) {
// Check if the cached information is valid.
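
Since the MachineRegisterInfo-side tracking is gone (note the removed setPhysRegUsed call in assign above), post-allocation clients now ask the matrix itself: a physical register counts as used exactly when one of its register units carries an assigned live range. A hedged usage sketch inside a pass that requires LiveRegMatrix:

    LiveRegMatrix &Matrix = getAnalysis<LiveRegMatrix>();
    if (Matrix.isPhysRegUsed(PhysReg)) {
      // some virtual register was assigned to a unit of PhysReg
    }
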
diff --git a/contrib/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/contrib/llvm/lib/CodeGen/MIRParser/MILexer.cpp
index e9b3916..482c33a 100644
--- a/contrib/llvm/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/contrib/llvm/lib/CodeGen/MIRParser/MILexer.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "MILexer.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include <cctype>
@@ -64,6 +65,17 @@ static bool isIdentifierChar(char C) {
return isalpha(C) || isdigit(C) || C == '_' || C == '-' || C == '.';
}
+static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
+ return StringSwitch<MIToken::TokenKind>(Identifier)
+ .Case("_", MIToken::underscore)
+ .Case("implicit", MIToken::kw_implicit)
+ .Case("implicit-def", MIToken::kw_implicit_define)
+ .Case("dead", MIToken::kw_dead)
+ .Case("killed", MIToken::kw_killed)
+ .Case("undef", MIToken::kw_undef)
+ .Default(MIToken::Identifier);
+}
+
static Cursor maybeLexIdentifier(Cursor C, MIToken &Token) {
if (!isalpha(C.peek()) && C.peek() != '_')
return None;
@@ -71,8 +83,7 @@ static Cursor maybeLexIdentifier(Cursor C, MIToken &Token) {
while (isIdentifierChar(C.peek()))
C.advance();
auto Identifier = Range.upto(C);
- Token = MIToken(Identifier == "_" ? MIToken::underscore : MIToken::Identifier,
- Identifier);
+ Token = MIToken(getIdentifierKind(Identifier), Identifier);
return C;
}
@@ -104,9 +115,22 @@ static Cursor maybeLexMachineBasicBlock(
return C;
}
+static Cursor lexVirtualRegister(Cursor C, MIToken &Token) {
+ auto Range = C;
+ C.advance(); // Skip '%'
+ auto NumberRange = C;
+ while (isdigit(C.peek()))
+ C.advance();
+ Token = MIToken(MIToken::VirtualRegister, Range.upto(C),
+ APSInt(NumberRange.upto(C)));
+ return C;
+}
+
static Cursor maybeLexRegister(Cursor C, MIToken &Token) {
if (C.peek() != '%')
return None;
+ if (isdigit(C.peek(1)))
+ return lexVirtualRegister(C, Token);
auto Range = C;
C.advance(); // Skip '%'
while (isIdentifierChar(C.peek()))
@@ -155,6 +179,8 @@ static MIToken::TokenKind symbolToken(char C) {
return MIToken::comma;
case '=':
return MIToken::equal;
+ case ':':
+ return MIToken::colon;
default:
return MIToken::Error;
}
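
Together these tokens let the lexer handle register flags, virtual registers, and (via the new colon token) subregister indices. Illustrative operand strings it can now tokenize; register and subregister index names are target-specific:

    %eax = COPY killed %ecx
    CALL64pcrel32 @f, implicit %rsp, implicit-def %rax
    %1 = COPY %0:sub_8bit
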
diff --git a/contrib/llvm/lib/CodeGen/MIRParser/MILexer.h b/contrib/llvm/lib/CodeGen/MIRParser/MILexer.h
index c28935f..55460b5 100644
--- a/contrib/llvm/lib/CodeGen/MIRParser/MILexer.h
+++ b/contrib/llvm/lib/CodeGen/MIRParser/MILexer.h
@@ -35,6 +35,14 @@ struct MIToken {
comma,
equal,
underscore,
+ colon,
+
+ // Keywords
+ kw_implicit,
+ kw_implicit_define,
+ kw_dead,
+ kw_killed,
+ kw_undef,
// Identifier tokens
Identifier,
@@ -44,7 +52,8 @@ struct MIToken {
GlobalValue,
// Other tokens
- IntegerLiteral
+ IntegerLiteral,
+ VirtualRegister
};
private:
@@ -66,7 +75,13 @@ public:
bool isError() const { return Kind == Error; }
bool isRegister() const {
- return Kind == NamedRegister || Kind == underscore;
+ return Kind == NamedRegister || Kind == underscore ||
+ Kind == VirtualRegister;
+ }
+
+ bool isRegisterFlag() const {
+ return Kind == kw_implicit || Kind == kw_implicit_define ||
+ Kind == kw_dead || Kind == kw_killed || Kind == kw_undef;
}
bool is(TokenKind K) const { return Kind == K; }
@@ -81,7 +96,7 @@ public:
bool hasIntegerValue() const {
return Kind == IntegerLiteral || Kind == MachineBasicBlock ||
- Kind == GlobalValue;
+ Kind == GlobalValue || Kind == VirtualRegister;
}
};
diff --git a/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index b618e53..c000112 100644
--- a/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SourceMgr.h"
@@ -28,14 +29,25 @@ using namespace llvm;
namespace {
+/// A wrapper struct around the 'MachineOperand' struct that includes a source
+/// range.
+struct MachineOperandWithLocation {
+ MachineOperand Operand;
+ StringRef::iterator Begin;
+ StringRef::iterator End;
+
+ MachineOperandWithLocation(const MachineOperand &Operand,
+ StringRef::iterator Begin, StringRef::iterator End)
+ : Operand(Operand), Begin(Begin), End(End) {}
+};
+
class MIParser {
SourceMgr &SM;
MachineFunction &MF;
SMDiagnostic &Error;
StringRef Source, CurrentSource;
MIToken Token;
- /// Maps from basic block numbers to MBBs.
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots;
+ const PerFunctionMIParsingState &PFS;
/// Maps from indices to unnamed global values and metadata nodes.
const SlotMapping &IRSlots;
/// Maps from instruction names to op codes.
@@ -44,11 +56,12 @@ class MIParser {
StringMap<unsigned> Names2Regs;
/// Maps from register mask names to register masks.
StringMap<const uint32_t *> Names2RegMasks;
+ /// Maps from subregister names to subregister indices.
+ StringMap<unsigned> Names2SubRegIndices;
public:
MIParser(SourceMgr &SM, MachineFunction &MF, SMDiagnostic &Error,
- StringRef Source,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots,
+ StringRef Source, const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots);
void lex();
@@ -65,8 +78,11 @@ public:
bool parse(MachineInstr *&MI);
bool parseMBB(MachineBasicBlock *&MBB);
+ bool parseNamedRegister(unsigned &Reg);
bool parseRegister(unsigned &Reg);
+ bool parseRegisterFlag(unsigned &Flags);
+ bool parseSubRegisterIndex(unsigned &SubReg);
bool parseRegisterOperand(MachineOperand &Dest, bool IsDef = false);
bool parseImmediateOperand(MachineOperand &Dest);
bool parseMBBReference(MachineBasicBlock *&MBB);
@@ -88,6 +104,9 @@ private:
bool parseInstruction(unsigned &OpCode);
+ bool verifyImplicitOperands(ArrayRef<MachineOperandWithLocation> Operands,
+ const MCInstrDesc &MCID);
+
void initNames2Regs();
/// Try to convert a register name to a register number. Return true if the
@@ -100,17 +119,22 @@ private:
///
/// Return null if the identifier isn't a register mask.
const uint32_t *getRegMask(StringRef Identifier);
+
+ void initNames2SubRegIndices();
+
+ /// Check if the given identifier is a name of a subregister index.
+ ///
+  /// Return 0 if the name isn't a subregister index.
+ unsigned getSubRegIndex(StringRef Name);
};
} // end anonymous namespace
MIParser::MIParser(SourceMgr &SM, MachineFunction &MF, SMDiagnostic &Error,
- StringRef Source,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots,
+ StringRef Source, const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots)
: SM(SM), MF(MF), Error(Error), Source(Source), CurrentSource(Source),
- Token(MIToken::Error, StringRef()), MBBSlots(MBBSlots), IRSlots(IRSlots) {
-}
+ Token(MIToken::Error, StringRef()), PFS(PFS), IRSlots(IRSlots) {}
void MIParser::lex() {
CurrentSource = lexMIToken(
@@ -121,8 +145,6 @@ void MIParser::lex() {
bool MIParser::error(const Twine &Msg) { return error(Token.location(), Msg); }
bool MIParser::error(StringRef::iterator Loc, const Twine &Msg) {
- // TODO: Get the proper location in the MIR file, not just a location inside
- // the string.
assert(Loc >= Source.data() && Loc <= (Source.data() + Source.size()));
Error = SMDiagnostic(
SM, SMLoc(),
@@ -137,11 +159,12 @@ bool MIParser::parse(MachineInstr *&MI) {
// Parse any register operands before '='
// TODO: Allow parsing of multiple operands before '='
MachineOperand MO = MachineOperand::CreateImm(0);
- SmallVector<MachineOperand, 8> Operands;
- if (Token.isRegister()) {
+ SmallVector<MachineOperandWithLocation, 8> Operands;
+ if (Token.isRegister() || Token.isRegisterFlag()) {
+ auto Loc = Token.location();
if (parseRegisterOperand(MO, /*IsDef=*/true))
return true;
- Operands.push_back(MO);
+ Operands.push_back(MachineOperandWithLocation(MO, Loc, Token.location()));
if (Token.isNot(MIToken::equal))
return error("expected '='");
lex();
@@ -155,9 +178,10 @@ bool MIParser::parse(MachineInstr *&MI) {
// Parse the remaining machine operands.
while (Token.isNot(MIToken::Eof)) {
+ auto Loc = Token.location();
if (parseMachineOperand(MO))
return true;
- Operands.push_back(MO);
+ Operands.push_back(MachineOperandWithLocation(MO, Loc, Token.location()));
if (Token.is(MIToken::Eof))
break;
if (Token.isNot(MIToken::comma))
@@ -166,25 +190,16 @@ bool MIParser::parse(MachineInstr *&MI) {
}
const auto &MCID = MF.getSubtarget().getInstrInfo()->get(OpCode);
-
- // Verify machine operands.
if (!MCID.isVariadic()) {
- for (size_t I = 0, E = Operands.size(); I < E; ++I) {
- if (I < MCID.getNumOperands())
- continue;
- // Mark this register as implicit to prevent an assertion when it's added
- // to an instruction. This is a temporary workaround until the implicit
- // register flag can be parsed.
- if (Operands[I].isReg())
- Operands[I].setImplicit();
- }
+ // FIXME: Move the implicit operand verification to the machine verifier.
+ if (verifyImplicitOperands(Operands, MCID))
+ return true;
}
- // TODO: Determine the implicit behaviour when implicit register flags are
- // parsed.
+ // TODO: Check for extraneous machine operands.
MI = MF.CreateMachineInstr(MCID, DebugLoc(), /*NoImplicit=*/true);
for (const auto &Operand : Operands)
- MI->addOperand(MF, Operand);
+ MI->addOperand(MF, Operand.Operand);
return false;
}
@@ -201,6 +216,80 @@ bool MIParser::parseMBB(MachineBasicBlock *&MBB) {
return false;
}
+bool MIParser::parseNamedRegister(unsigned &Reg) {
+ lex();
+ if (Token.isNot(MIToken::NamedRegister))
+ return error("expected a named register");
+ if (parseRegister(Reg))
+    return true;
+ lex();
+ if (Token.isNot(MIToken::Eof))
+ return error("expected end of string after the register reference");
+ return false;
+}
+
+static const char *printImplicitRegisterFlag(const MachineOperand &MO) {
+ assert(MO.isImplicit());
+ return MO.isDef() ? "implicit-def" : "implicit";
+}
+
+static std::string getRegisterName(const TargetRegisterInfo *TRI,
+ unsigned Reg) {
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "expected phys reg");
+ return StringRef(TRI->getName(Reg)).lower();
+}
+
+bool MIParser::verifyImplicitOperands(
+ ArrayRef<MachineOperandWithLocation> Operands, const MCInstrDesc &MCID) {
+ if (MCID.isCall())
+ // We can't verify call instructions as they can contain arbitrary implicit
+ // register and register mask operands.
+ return false;
+
+ // Gather all the expected implicit operands.
+ SmallVector<MachineOperand, 4> ImplicitOperands;
+ if (MCID.ImplicitDefs)
+ for (const uint16_t *ImpDefs = MCID.getImplicitDefs(); *ImpDefs; ++ImpDefs)
+ ImplicitOperands.push_back(
+ MachineOperand::CreateReg(*ImpDefs, true, true));
+ if (MCID.ImplicitUses)
+ for (const uint16_t *ImpUses = MCID.getImplicitUses(); *ImpUses; ++ImpUses)
+ ImplicitOperands.push_back(
+ MachineOperand::CreateReg(*ImpUses, false, true));
+
+ const auto *TRI = MF.getSubtarget().getRegisterInfo();
+ assert(TRI && "Expected target register info");
+ size_t I = ImplicitOperands.size(), J = Operands.size();
+ while (I) {
+ --I;
+ if (J) {
+ --J;
+ const auto &ImplicitOperand = ImplicitOperands[I];
+ const auto &Operand = Operands[J].Operand;
+ if (ImplicitOperand.isIdenticalTo(Operand))
+ continue;
+ if (Operand.isReg() && Operand.isImplicit()) {
+ return error(Operands[J].Begin,
+ Twine("expected an implicit register operand '") +
+ printImplicitRegisterFlag(ImplicitOperand) + " %" +
+ getRegisterName(TRI, ImplicitOperand.getReg()) + "'");
+ }
+ }
+  // TODO: Fix the source location when Operands[J].End is right before '=',
+  // i.e.: instead of reporting an error at this location:
+  //   %eax = MOV32r0
+  //        ^
+  // report the error at the following location:
+  //   %eax = MOV32r0
+  //                ^
+ return error(J < Operands.size() ? Operands[J].End : Token.location(),
+ Twine("missing implicit register operand '") +
+ printImplicitRegisterFlag(ImplicitOperands[I]) + " %" +
+ getRegisterName(TRI, ImplicitOperands[I].getReg()) + "'");
+ }
+ return false;
+}
+
bool MIParser::parseInstruction(unsigned &OpCode) {
if (Token.isNot(MIToken::Identifier))
return error("expected a machine instruction");
@@ -222,6 +311,17 @@ bool MIParser::parseRegister(unsigned &Reg) {
return error(Twine("unknown register name '") + Name + "'");
break;
}
+ case MIToken::VirtualRegister: {
+ unsigned ID;
+ if (getUnsigned(ID))
+ return true;
+ const auto RegInfo = PFS.VirtualRegisterSlots.find(ID);
+ if (RegInfo == PFS.VirtualRegisterSlots.end())
+ return error(Twine("use of undefined virtual register '%") + Twine(ID) +
+ "'");
+ Reg = RegInfo->second;
+ break;
+ }
// TODO: Parse other register kinds.
default:
llvm_unreachable("The current token should be a register");
@@ -229,14 +329,66 @@ bool MIParser::parseRegister(unsigned &Reg) {
return false;
}
+bool MIParser::parseRegisterFlag(unsigned &Flags) {
+ switch (Token.kind()) {
+ case MIToken::kw_implicit:
+ Flags |= RegState::Implicit;
+ break;
+ case MIToken::kw_implicit_define:
+ Flags |= RegState::ImplicitDefine;
+ break;
+ case MIToken::kw_dead:
+ Flags |= RegState::Dead;
+ break;
+ case MIToken::kw_killed:
+ Flags |= RegState::Kill;
+ break;
+ case MIToken::kw_undef:
+ Flags |= RegState::Undef;
+ break;
+ // TODO: report an error when we specify the same flag more than once.
+ // TODO: parse the other register flags.
+ default:
+ llvm_unreachable("The current token should be a register flag");
+ }
+ lex();
+ return false;
+}
+
+bool MIParser::parseSubRegisterIndex(unsigned &SubReg) {
+ assert(Token.is(MIToken::colon));
+ lex();
+ if (Token.isNot(MIToken::Identifier))
+ return error("expected a subregister index after ':'");
+ auto Name = Token.stringValue();
+ SubReg = getSubRegIndex(Name);
+ if (!SubReg)
+ return error(Twine("use of unknown subregister index '") + Name + "'");
+ lex();
+ return false;
+}
+
bool MIParser::parseRegisterOperand(MachineOperand &Dest, bool IsDef) {
unsigned Reg;
- // TODO: Parse register flags.
+ unsigned Flags = IsDef ? RegState::Define : 0;
+ while (Token.isRegisterFlag()) {
+ if (parseRegisterFlag(Flags))
+ return true;
+ }
+ if (!Token.isRegister())
+ return error("expected a register after register flags");
if (parseRegister(Reg))
return true;
lex();
- // TODO: Parse subregister.
- Dest = MachineOperand::CreateReg(Reg, IsDef);
+ unsigned SubReg = 0;
+ if (Token.is(MIToken::colon)) {
+ if (parseSubRegisterIndex(SubReg))
+ return true;
+ }
+ Dest = MachineOperand::CreateReg(
+ Reg, Flags & RegState::Define, Flags & RegState::Implicit,
+ Flags & RegState::Kill, Flags & RegState::Dead, Flags & RegState::Undef,
+ /*isEarlyClobber=*/false, SubReg);
return false;
}
@@ -266,8 +418,8 @@ bool MIParser::parseMBBReference(MachineBasicBlock *&MBB) {
unsigned Number;
if (getUnsigned(Number))
return true;
- auto MBBInfo = MBBSlots.find(Number);
- if (MBBInfo == MBBSlots.end())
+ auto MBBInfo = PFS.MBBSlots.find(Number);
+ if (MBBInfo == PFS.MBBSlots.end())
return error(Twine("use of undefined machine basic block #") +
Twine(Number));
MBB = MBBInfo->second;
@@ -318,8 +470,14 @@ bool MIParser::parseGlobalAddressOperand(MachineOperand &Dest) {
bool MIParser::parseMachineOperand(MachineOperand &Dest) {
switch (Token.kind()) {
+ case MIToken::kw_implicit:
+ case MIToken::kw_implicit_define:
+ case MIToken::kw_dead:
+ case MIToken::kw_killed:
+ case MIToken::kw_undef:
case MIToken::underscore:
case MIToken::NamedRegister:
+ case MIToken::VirtualRegister:
return parseRegisterOperand(Dest);
case MIToken::IntegerLiteral:
return parseImmediateOperand(Dest);
@@ -408,16 +566,41 @@ const uint32_t *MIParser::getRegMask(StringRef Identifier) {
return RegMaskInfo->getValue();
}
-bool llvm::parseMachineInstr(
- MachineInstr *&MI, SourceMgr &SM, MachineFunction &MF, StringRef Src,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots,
- const SlotMapping &IRSlots, SMDiagnostic &Error) {
- return MIParser(SM, MF, Error, Src, MBBSlots, IRSlots).parse(MI);
+void MIParser::initNames2SubRegIndices() {
+ if (!Names2SubRegIndices.empty())
+ return;
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ for (unsigned I = 1, E = TRI->getNumSubRegIndices(); I < E; ++I)
+ Names2SubRegIndices.insert(
+ std::make_pair(StringRef(TRI->getSubRegIndexName(I)).lower(), I));
+}
+
+unsigned MIParser::getSubRegIndex(StringRef Name) {
+ initNames2SubRegIndices();
+ auto SubRegInfo = Names2SubRegIndices.find(Name);
+ if (SubRegInfo == Names2SubRegIndices.end())
+ return 0;
+ return SubRegInfo->getValue();
+}
+
+bool llvm::parseMachineInstr(MachineInstr *&MI, SourceMgr &SM,
+ MachineFunction &MF, StringRef Src,
+ const PerFunctionMIParsingState &PFS,
+ const SlotMapping &IRSlots, SMDiagnostic &Error) {
+ return MIParser(SM, MF, Error, Src, PFS, IRSlots).parse(MI);
+}
+
+bool llvm::parseMBBReference(MachineBasicBlock *&MBB, SourceMgr &SM,
+ MachineFunction &MF, StringRef Src,
+ const PerFunctionMIParsingState &PFS,
+ const SlotMapping &IRSlots, SMDiagnostic &Error) {
+ return MIParser(SM, MF, Error, Src, PFS, IRSlots).parseMBB(MBB);
}
-bool llvm::parseMBBReference(
- MachineBasicBlock *&MBB, SourceMgr &SM, MachineFunction &MF, StringRef Src,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots,
- const SlotMapping &IRSlots, SMDiagnostic &Error) {
- return MIParser(SM, MF, Error, Src, MBBSlots, IRSlots).parseMBB(MBB);
+bool llvm::parseNamedRegisterReference(unsigned &Reg, SourceMgr &SM,
+ MachineFunction &MF, StringRef Src,
+ const PerFunctionMIParsingState &PFS,
+ const SlotMapping &IRSlots,
+ SMDiagnostic &Error) {
+ return MIParser(SM, MF, Error, Src, PFS, IRSlots).parseNamedRegister(Reg);
}
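
The effect of verifyImplicitOperands is easiest to see on a descriptor that carries implicit operands. On x86, XOR32rr implicitly defines EFLAGS, so an input line such as (illustrative)

    %eax = XOR32rr %eax, %ebx

is now rejected with "missing implicit register operand 'implicit-def %eflags'", while

    %eax = XOR32rr %eax, %ebx, implicit-def %eflags

parses; call instructions are exempt because their implicit register and register mask operands vary per call site.
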
diff --git a/contrib/llvm/lib/CodeGen/MIRParser/MIParser.h b/contrib/llvm/lib/CodeGen/MIRParser/MIParser.h
index 4d6d4e7..fca4c4e 100644
--- a/contrib/llvm/lib/CodeGen/MIRParser/MIParser.h
+++ b/contrib/llvm/lib/CodeGen/MIRParser/MIParser.h
@@ -26,16 +26,26 @@ struct SlotMapping;
class SMDiagnostic;
class SourceMgr;
+struct PerFunctionMIParsingState {
+ DenseMap<unsigned, MachineBasicBlock *> MBBSlots;
+ DenseMap<unsigned, unsigned> VirtualRegisterSlots;
+};
+
bool parseMachineInstr(MachineInstr *&MI, SourceMgr &SM, MachineFunction &MF,
- StringRef Src,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots,
+ StringRef Src, const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots, SMDiagnostic &Error);
bool parseMBBReference(MachineBasicBlock *&MBB, SourceMgr &SM,
MachineFunction &MF, StringRef Src,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots,
+ const PerFunctionMIParsingState &PFS,
const SlotMapping &IRSlots, SMDiagnostic &Error);
+bool parseNamedRegisterReference(unsigned &Reg, SourceMgr &SM,
+ MachineFunction &MF, StringRef Src,
+ const PerFunctionMIParsingState &PFS,
+ const SlotMapping &IRSlots,
+ SMDiagnostic &Error);
+
} // end namespace llvm
#endif
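
A hedged usage sketch of the bundled state; MBB, Reg, SM, MF, Source, IRSlots and Error are assumed to be in scope:

    PerFunctionMIParsingState PFS;
    PFS.MBBSlots.insert(std::make_pair(0u, MBB));             // block #0
    PFS.VirtualRegisterSlots.insert(std::make_pair(0u, Reg)); // %0
    MachineInstr *MI = nullptr;
    if (parseMachineInstr(MI, SM, MF, Source, PFS, IRSlots, Error))
      return error(Error, SourceRange); // propagate the diagnostic
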
diff --git a/contrib/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/contrib/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index 3974583..16b0e16 100644
--- a/contrib/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/contrib/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -21,6 +21,7 @@
#include "llvm/AsmParser/Parser.h"
#include "llvm/AsmParser/SlotMapping.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/IR/BasicBlock.h"
@@ -48,6 +49,8 @@ class MIRParserImpl {
LLVMContext &Context;
StringMap<std::unique_ptr<yaml::MachineFunction>> Functions;
SlotMapping IRSlots;
+ /// Maps from register class names to register classes.
+ StringMap<const TargetRegisterClass *> Names2RegClasses;
public:
MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents, StringRef Filename,
@@ -60,6 +63,11 @@ public:
/// Always returns true.
bool error(const Twine &Message);
+ /// Report an error with the given message at the given location.
+ ///
+ /// Always returns true.
+ bool error(SMLoc Loc, const Twine &Message);
+
/// Report a given error with the location translated from the location in an
/// embedded string literal to a location in the MIR file.
///
@@ -90,13 +98,18 @@ public:
  /// Initialize the machine basic block using its YAML representation.
///
/// Return true if an error occurred.
- bool initializeMachineBasicBlock(
- MachineFunction &MF, MachineBasicBlock &MBB,
- const yaml::MachineBasicBlock &YamlMBB,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots);
+ bool initializeMachineBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
+ const yaml::MachineBasicBlock &YamlMBB,
+ const PerFunctionMIParsingState &PFS);
+
+ bool
+ initializeRegisterInfo(const MachineFunction &MF,
+ MachineRegisterInfo &RegInfo,
+ const yaml::MachineFunction &YamlMF,
+ DenseMap<unsigned, unsigned> &VirtualRegisterSlots);
- bool initializeRegisterInfo(MachineRegisterInfo &RegInfo,
- const yaml::MachineFunction &YamlMF);
+ bool initializeFrameInfo(MachineFrameInfo &MFI,
+ const yaml::MachineFunction &YamlMF);
private:
/// Return a MIR diagnostic converted from an MI string diagnostic.
@@ -109,6 +122,14 @@ private:
/// Create an empty function with the given name.
void createDummyFunction(StringRef Name, Module &M);
+
+ void initNames2RegClasses(const MachineFunction &MF);
+
+ /// Check if the given identifier is a name of a register class.
+ ///
+ /// Return null if the name isn't a register class.
+ const TargetRegisterClass *getRegClass(const MachineFunction &MF,
+ StringRef Name);
};
} // end namespace llvm
@@ -125,6 +146,12 @@ bool MIRParserImpl::error(const Twine &Message) {
return true;
}
+bool MIRParserImpl::error(SMLoc Loc, const Twine &Message) {
+ Context.diagnose(DiagnosticInfoMIRParser(
+ DS_Error, SM.GetMessage(Loc, SourceMgr::DK_Error, Message)));
+ return true;
+}
+
bool MIRParserImpl::error(const SMDiagnostic &Error, SMRange SourceRange) {
assert(Error.getKind() == SourceMgr::DK_Error && "Expected an error");
reportDiagnostic(diagFromMIStringDiag(Error, SourceRange));
@@ -233,34 +260,44 @@ bool MIRParserImpl::initializeMachineFunction(MachineFunction &MF) {
MF.setAlignment(YamlMF.Alignment);
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
MF.setHasInlineAsm(YamlMF.HasInlineAsm);
- if (initializeRegisterInfo(MF.getRegInfo(), YamlMF))
+ PerFunctionMIParsingState PFS;
+ if (initializeRegisterInfo(MF, MF.getRegInfo(), YamlMF,
+ PFS.VirtualRegisterSlots))
+ return true;
+ if (initializeFrameInfo(*MF.getFrameInfo(), YamlMF))
return true;
const auto &F = *MF.getFunction();
- DenseMap<unsigned, MachineBasicBlock *> MBBSlots;
for (const auto &YamlMBB : YamlMF.BasicBlocks) {
const BasicBlock *BB = nullptr;
- if (!YamlMBB.Name.empty()) {
+ const yaml::StringValue &Name = YamlMBB.Name;
+ if (!Name.Value.empty()) {
BB = dyn_cast_or_null<BasicBlock>(
- F.getValueSymbolTable().lookup(YamlMBB.Name));
+ F.getValueSymbolTable().lookup(Name.Value));
if (!BB)
- return error(Twine("basic block '") + YamlMBB.Name +
- "' is not defined in the function '" + MF.getName() + "'");
+ return error(Name.SourceRange.Start,
+ Twine("basic block '") + Name.Value +
+ "' is not defined in the function '" + MF.getName() +
+ "'");
}
auto *MBB = MF.CreateMachineBasicBlock(BB);
MF.insert(MF.end(), MBB);
- bool WasInserted = MBBSlots.insert(std::make_pair(YamlMBB.ID, MBB)).second;
+ bool WasInserted =
+ PFS.MBBSlots.insert(std::make_pair(YamlMBB.ID, MBB)).second;
if (!WasInserted)
return error(Twine("redefinition of machine basic block with id #") +
Twine(YamlMBB.ID));
}
+ if (YamlMF.BasicBlocks.empty())
+ return error(Twine("machine function '") + Twine(MF.getName()) +
+ "' requires at least one machine basic block in its body");
// Initialize the machine basic blocks after creating them all so that the
// machine instructions parser can resolve the MBB references.
unsigned I = 0;
for (const auto &YamlMBB : YamlMF.BasicBlocks) {
if (initializeMachineBasicBlock(MF, *MF.getBlockNumbered(I++), YamlMBB,
- MBBSlots))
+ PFS))
return true;
}
return false;
@@ -269,7 +306,7 @@ bool MIRParserImpl::initializeMachineFunction(MachineFunction &MF) {
bool MIRParserImpl::initializeMachineBasicBlock(
MachineFunction &MF, MachineBasicBlock &MBB,
const yaml::MachineBasicBlock &YamlMBB,
- const DenseMap<unsigned, MachineBasicBlock *> &MBBSlots) {
+ const PerFunctionMIParsingState &PFS) {
MBB.setAlignment(YamlMBB.Alignment);
if (YamlMBB.AddressTaken)
MBB.setHasAddressTaken();
@@ -278,16 +315,24 @@ bool MIRParserImpl::initializeMachineBasicBlock(
// Parse the successors.
for (const auto &MBBSource : YamlMBB.Successors) {
MachineBasicBlock *SuccMBB = nullptr;
- if (parseMBBReference(SuccMBB, SM, MF, MBBSource.Value, MBBSlots, IRSlots,
+ if (parseMBBReference(SuccMBB, SM, MF, MBBSource.Value, PFS, IRSlots,
Error))
return error(Error, MBBSource.SourceRange);
// TODO: Report an error when adding the same successor more than once.
MBB.addSuccessor(SuccMBB);
}
+ // Parse the liveins.
+ for (const auto &LiveInSource : YamlMBB.LiveIns) {
+ unsigned Reg = 0;
+ if (parseNamedRegisterReference(Reg, SM, MF, LiveInSource.Value, PFS,
+ IRSlots, Error))
+ return error(Error, LiveInSource.SourceRange);
+ MBB.addLiveIn(Reg);
+ }
// Parse the instructions.
for (const auto &MISource : YamlMBB.Instructions) {
MachineInstr *MI = nullptr;
- if (parseMachineInstr(MI, SM, MF, MISource.Value, MBBSlots, IRSlots, Error))
+ if (parseMachineInstr(MI, SM, MF, MISource.Value, PFS, IRSlots, Error))
return error(Error, MISource.SourceRange);
MBB.insert(MBB.end(), MI);
}
@@ -295,7 +340,9 @@ bool MIRParserImpl::initializeMachineBasicBlock(
}
bool MIRParserImpl::initializeRegisterInfo(
- MachineRegisterInfo &RegInfo, const yaml::MachineFunction &YamlMF) {
+ const MachineFunction &MF, MachineRegisterInfo &RegInfo,
+ const yaml::MachineFunction &YamlMF,
+ DenseMap<unsigned, unsigned> &VirtualRegisterSlots) {
assert(RegInfo.isSSA());
if (!YamlMF.IsSSA)
RegInfo.leaveSSA();
@@ -303,6 +350,67 @@ bool MIRParserImpl::initializeRegisterInfo(
if (!YamlMF.TracksRegLiveness)
RegInfo.invalidateLiveness();
RegInfo.enableSubRegLiveness(YamlMF.TracksSubRegLiveness);
+
+ // Parse the virtual register information.
+ for (const auto &VReg : YamlMF.VirtualRegisters) {
+ const auto *RC = getRegClass(MF, VReg.Class.Value);
+ if (!RC)
+ return error(VReg.Class.SourceRange.Start,
+ Twine("use of undefined register class '") +
+ VReg.Class.Value + "'");
+ unsigned Reg = RegInfo.createVirtualRegister(RC);
+    // TODO: Report an error when a virtual register with the same ID is
+    // redefined.
+ VirtualRegisterSlots.insert(std::make_pair(VReg.ID, Reg));
+ }
+ return false;
+}
+
+bool MIRParserImpl::initializeFrameInfo(MachineFrameInfo &MFI,
+ const yaml::MachineFunction &YamlMF) {
+ const yaml::MachineFrameInfo &YamlMFI = YamlMF.FrameInfo;
+ MFI.setFrameAddressIsTaken(YamlMFI.IsFrameAddressTaken);
+ MFI.setReturnAddressIsTaken(YamlMFI.IsReturnAddressTaken);
+ MFI.setHasStackMap(YamlMFI.HasStackMap);
+ MFI.setHasPatchPoint(YamlMFI.HasPatchPoint);
+ MFI.setStackSize(YamlMFI.StackSize);
+ MFI.setOffsetAdjustment(YamlMFI.OffsetAdjustment);
+ if (YamlMFI.MaxAlignment)
+ MFI.ensureMaxAlignment(YamlMFI.MaxAlignment);
+ MFI.setAdjustsStack(YamlMFI.AdjustsStack);
+ MFI.setHasCalls(YamlMFI.HasCalls);
+ MFI.setMaxCallFrameSize(YamlMFI.MaxCallFrameSize);
+ MFI.setHasOpaqueSPAdjustment(YamlMFI.HasOpaqueSPAdjustment);
+ MFI.setHasVAStart(YamlMFI.HasVAStart);
+ MFI.setHasMustTailInVarArgFunc(YamlMFI.HasMustTailInVarArgFunc);
+
+ // Initialize the fixed frame objects.
+ for (const auto &Object : YamlMF.FixedStackObjects) {
+ int ObjectIdx;
+ if (Object.Type != yaml::FixedMachineStackObject::SpillSlot)
+ ObjectIdx = MFI.CreateFixedObject(Object.Size, Object.Offset,
+ Object.IsImmutable, Object.IsAliased);
+ else
+ ObjectIdx = MFI.CreateFixedSpillStackObject(Object.Size, Object.Offset);
+ MFI.setObjectAlignment(ObjectIdx, Object.Alignment);
+ // TODO: Store the mapping between fixed object IDs and object indices to
+ // parse fixed stack object references correctly.
+ }
+
+ // Initialize the ordinary frame objects.
+ for (const auto &Object : YamlMF.StackObjects) {
+ int ObjectIdx;
+ if (Object.Type == yaml::MachineStackObject::VariableSized)
+ ObjectIdx =
+ MFI.CreateVariableSizedObject(Object.Alignment, /*Alloca=*/nullptr);
+ else
+ ObjectIdx = MFI.CreateStackObject(
+ Object.Size, Object.Alignment,
+ Object.Type == yaml::MachineStackObject::SpillSlot);
+ MFI.setObjectOffset(ObjectIdx, Object.Offset);
+ // TODO: Store the mapping between object IDs and object indices to parse
+ // stack object references correctly.
+ }
return false;
}
@@ -353,6 +461,26 @@ SMDiagnostic MIRParserImpl::diagFromLLVMAssemblyDiag(const SMDiagnostic &Error,
Error.getFixIts());
}
+void MIRParserImpl::initNames2RegClasses(const MachineFunction &MF) {
+ if (!Names2RegClasses.empty())
+ return;
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; ++I) {
+ const auto *RC = TRI->getRegClass(I);
+ Names2RegClasses.insert(
+ std::make_pair(StringRef(TRI->getRegClassName(RC)).lower(), RC));
+ }
+}
+
+const TargetRegisterClass *MIRParserImpl::getRegClass(const MachineFunction &MF,
+ StringRef Name) {
+ initNames2RegClasses(MF);
+ auto RegClassInfo = Names2RegClasses.find(Name);
+ if (RegClassInfo == Names2RegClasses.end())
+ return nullptr;
+ return RegClassInfo->getValue();
+}
+
MIRParser::MIRParser(std::unique_ptr<MIRParserImpl> Impl)
: Impl(std::move(Impl)) {}
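The register-class lookup added above lazily builds a map from lowercased class names to class pointers on first use, and returns null for unknown names so the parser can emit its "undefined register class" error. A minimal standalone sketch of the same lazy, lowercase-keyed lookup, with simplified stand-in types rather than LLVM's TargetRegisterInfo:

#include <algorithm>
#include <cctype>
#include <map>
#include <string>

struct RegClass { const char *Name; };  // stand-in for TargetRegisterClass

class RegClassTable {
  std::map<std::string, const RegClass *> Names2Classes;

public:
  // Populate the map once, keyed by lowercased names, mirroring
  // initNames2RegClasses.
  void init(const RegClass *Classes, unsigned N) {
    if (!Names2Classes.empty())
      return;
    for (unsigned I = 0; I < N; ++I) {
      std::string Key = Classes[I].Name;
      std::transform(Key.begin(), Key.end(), Key.begin(),
                     [](unsigned char C) { return static_cast<char>(std::tolower(C)); });
      Names2Classes.insert(std::make_pair(Key, &Classes[I]));
    }
  }

  // Return nullptr for unknown names, as MIRParserImpl::getRegClass does.
  const RegClass *lookup(const std::string &Name) const {
    auto It = Names2Classes.find(Name);
    return It == Names2Classes.end() ? nullptr : It->second;
  }
};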
diff --git a/contrib/llvm/lib/CodeGen/MIRPrinter.cpp b/contrib/llvm/lib/CodeGen/MIRPrinter.cpp
index 76cbe29..d5cf924 100644
--- a/contrib/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -15,10 +15,12 @@
#include "MIRPrinter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/YAMLTraits.h"
@@ -40,9 +42,13 @@ public:
void print(const MachineFunction &MF);
- void convert(yaml::MachineFunction &MF, const MachineRegisterInfo &RegInfo);
- void convert(const Module &M, yaml::MachineBasicBlock &YamlMBB,
+ void convert(yaml::MachineFunction &MF, const MachineRegisterInfo &RegInfo,
+ const TargetRegisterInfo *TRI);
+ void convert(yaml::MachineFrameInfo &YamlMFI, const MachineFrameInfo &MFI);
+ void convert(ModuleSlotTracker &MST, yaml::MachineBasicBlock &YamlMBB,
const MachineBasicBlock &MBB);
+ void convertStackObjects(yaml::MachineFunction &MF,
+ const MachineFrameInfo &MFI);
private:
void initRegisterMaskIds(const MachineFunction &MF);
@@ -51,14 +57,14 @@ private:
/// This class prints out the machine instructions using the MIR serialization
/// format.
class MIPrinter {
- const Module &M;
raw_ostream &OS;
+ ModuleSlotTracker &MST;
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds;
public:
- MIPrinter(const Module &M, raw_ostream &OS,
+ MIPrinter(raw_ostream &OS, ModuleSlotTracker &MST,
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds)
- : M(M), OS(OS), RegisterMaskIds(RegisterMaskIds) {}
+ : OS(OS), MST(MST), RegisterMaskIds(RegisterMaskIds) {}
void print(const MachineInstr &MI);
void printMBBReference(const MachineBasicBlock &MBB);
@@ -84,6 +90,19 @@ template <> struct BlockScalarTraits<Module> {
} // end namespace yaml
} // end namespace llvm
+static void printReg(unsigned Reg, raw_ostream &OS,
+ const TargetRegisterInfo *TRI) {
+ // TODO: Print Stack Slots.
+ if (!Reg)
+ OS << '_';
+ else if (TargetRegisterInfo::isVirtualRegister(Reg))
+ OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
+ else if (Reg < TRI->getNumRegs())
+ OS << '%' << StringRef(TRI->getName(Reg)).lower();
+ else
+ llvm_unreachable("Can't print this kind of register yet");
+}
+
void MIRPrinter::print(const MachineFunction &MF) {
initRegisterMaskIds(MF);
@@ -92,10 +111,12 @@ void MIRPrinter::print(const MachineFunction &MF) {
YamlMF.Alignment = MF.getAlignment();
YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
YamlMF.HasInlineAsm = MF.hasInlineAsm();
- convert(YamlMF, MF.getRegInfo());
+ convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
+ convert(YamlMF.FrameInfo, *MF.getFrameInfo());
+ convertStackObjects(YamlMF, *MF.getFrameInfo());
int I = 0;
- const auto &M = *MF.getFunction()->getParent();
+ ModuleSlotTracker MST(MF.getFunction()->getParent());
for (const auto &MBB : MF) {
// TODO: Allow printing of non-sequentially numbered MBBs.
// This is currently needed as the basic block references get their index
@@ -105,7 +126,7 @@ void MIRPrinter::print(const MachineFunction &MF) {
"Can't print MBBs that aren't sequentially numbered");
(void)I;
yaml::MachineBasicBlock YamlMBB;
- convert(M, YamlMBB, MBB);
+ convert(MST, YamlMBB, MBB);
YamlMF.BasicBlocks.push_back(YamlMBB);
}
yaml::Output Out(OS);
@@ -113,37 +134,120 @@ void MIRPrinter::print(const MachineFunction &MF) {
}
void MIRPrinter::convert(yaml::MachineFunction &MF,
- const MachineRegisterInfo &RegInfo) {
+ const MachineRegisterInfo &RegInfo,
+ const TargetRegisterInfo *TRI) {
MF.IsSSA = RegInfo.isSSA();
MF.TracksRegLiveness = RegInfo.tracksLiveness();
MF.TracksSubRegLiveness = RegInfo.subRegLivenessEnabled();
+
+ // Print the virtual register definitions.
+ for (unsigned I = 0, E = RegInfo.getNumVirtRegs(); I < E; ++I) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ yaml::VirtualRegisterDefinition VReg;
+ VReg.ID = I;
+ VReg.Class =
+ StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
+ MF.VirtualRegisters.push_back(VReg);
+ }
+}
+
+void MIRPrinter::convert(yaml::MachineFrameInfo &YamlMFI,
+ const MachineFrameInfo &MFI) {
+ YamlMFI.IsFrameAddressTaken = MFI.isFrameAddressTaken();
+ YamlMFI.IsReturnAddressTaken = MFI.isReturnAddressTaken();
+ YamlMFI.HasStackMap = MFI.hasStackMap();
+ YamlMFI.HasPatchPoint = MFI.hasPatchPoint();
+ YamlMFI.StackSize = MFI.getStackSize();
+ YamlMFI.OffsetAdjustment = MFI.getOffsetAdjustment();
+ YamlMFI.MaxAlignment = MFI.getMaxAlignment();
+ YamlMFI.AdjustsStack = MFI.adjustsStack();
+ YamlMFI.HasCalls = MFI.hasCalls();
+ YamlMFI.MaxCallFrameSize = MFI.getMaxCallFrameSize();
+ YamlMFI.HasOpaqueSPAdjustment = MFI.hasOpaqueSPAdjustment();
+ YamlMFI.HasVAStart = MFI.hasVAStart();
+ YamlMFI.HasMustTailInVarArgFunc = MFI.hasMustTailInVarArgFunc();
}
-void MIRPrinter::convert(const Module &M, yaml::MachineBasicBlock &YamlMBB,
+void MIRPrinter::convertStackObjects(yaml::MachineFunction &MF,
+ const MachineFrameInfo &MFI) {
+ // Process fixed stack objects.
+ unsigned ID = 0;
+ for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
+ if (MFI.isDeadObjectIndex(I))
+ continue;
+
+ yaml::FixedMachineStackObject YamlObject;
+ YamlObject.ID = ID++;
+ YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
+ ? yaml::FixedMachineStackObject::SpillSlot
+ : yaml::FixedMachineStackObject::DefaultType;
+ YamlObject.Offset = MFI.getObjectOffset(I);
+ YamlObject.Size = MFI.getObjectSize(I);
+ YamlObject.Alignment = MFI.getObjectAlignment(I);
+ YamlObject.IsImmutable = MFI.isImmutableObjectIndex(I);
+ YamlObject.IsAliased = MFI.isAliasedObjectIndex(I);
+ MF.FixedStackObjects.push_back(YamlObject);
+ // TODO: Store the mapping between fixed object IDs and object indices to
+ // print the fixed stack object references correctly.
+ }
+
+ // Process ordinary stack objects.
+ ID = 0;
+ for (int I = 0, E = MFI.getObjectIndexEnd(); I < E; ++I) {
+ if (MFI.isDeadObjectIndex(I))
+ continue;
+
+ yaml::MachineStackObject YamlObject;
+ YamlObject.ID = ID++;
+ YamlObject.Type = MFI.isSpillSlotObjectIndex(I)
+ ? yaml::MachineStackObject::SpillSlot
+ : MFI.isVariableSizedObjectIndex(I)
+ ? yaml::MachineStackObject::VariableSized
+ : yaml::MachineStackObject::DefaultType;
+ YamlObject.Offset = MFI.getObjectOffset(I);
+ YamlObject.Size = MFI.getObjectSize(I);
+ YamlObject.Alignment = MFI.getObjectAlignment(I);
+
+ MF.StackObjects.push_back(YamlObject);
+ // TODO: Store the mapping between object IDs and object indices to print
+ // the stack object references correctly.
+ }
+}
+
+void MIRPrinter::convert(ModuleSlotTracker &MST,
+ yaml::MachineBasicBlock &YamlMBB,
const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
YamlMBB.ID = (unsigned)MBB.getNumber();
// TODO: Serialize unnamed BB references.
if (const auto *BB = MBB.getBasicBlock())
- YamlMBB.Name = BB->hasName() ? BB->getName() : "<unnamed bb>";
+ YamlMBB.Name.Value = BB->hasName() ? BB->getName() : "<unnamed bb>";
else
- YamlMBB.Name = "";
+ YamlMBB.Name.Value = "";
YamlMBB.Alignment = MBB.getAlignment();
YamlMBB.AddressTaken = MBB.hasAddressTaken();
YamlMBB.IsLandingPad = MBB.isLandingPad();
for (const auto *SuccMBB : MBB.successors()) {
std::string Str;
raw_string_ostream StrOS(Str);
- MIPrinter(M, StrOS, RegisterMaskIds).printMBBReference(*SuccMBB);
+ MIPrinter(StrOS, MST, RegisterMaskIds).printMBBReference(*SuccMBB);
YamlMBB.Successors.push_back(StrOS.str());
}
-
+ // Print the live-in registers.
+ const auto *TRI = MBB.getParent()->getSubtarget().getRegisterInfo();
+ assert(TRI && "Expected target register info");
+ for (auto I = MBB.livein_begin(), E = MBB.livein_end(); I != E; ++I) {
+ std::string Str;
+ raw_string_ostream StrOS(Str);
+ printReg(*I, StrOS, TRI);
+ YamlMBB.LiveIns.push_back(StrOS.str());
+ }
// Print the machine instructions.
YamlMBB.Instructions.reserve(MBB.size());
std::string Str;
for (const auto &MI : MBB) {
raw_string_ostream StrOS(Str);
- MIPrinter(M, StrOS, RegisterMaskIds).print(MI);
+ MIPrinter(StrOS, MST, RegisterMaskIds).print(MI);
YamlMBB.Instructions.push_back(StrOS.str());
Str.clear();
}
@@ -188,18 +292,6 @@ void MIPrinter::print(const MachineInstr &MI) {
}
}
-static void printReg(unsigned Reg, raw_ostream &OS,
- const TargetRegisterInfo *TRI) {
- // TODO: Print Stack Slots.
- // TODO: Print virtual registers.
- if (!Reg)
- OS << '_';
- else if (Reg < TRI->getNumRegs())
- OS << '%' << StringRef(TRI->getName(Reg)).lower();
- else
- llvm_unreachable("Can't print this kind of register yet");
-}
-
void MIPrinter::printMBBReference(const MachineBasicBlock &MBB) {
OS << "%bb." << MBB.getNumber();
if (const auto *BB = MBB.getBasicBlock()) {
@@ -211,9 +303,19 @@ void MIPrinter::printMBBReference(const MachineBasicBlock &MBB) {
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
switch (Op.getType()) {
case MachineOperand::MO_Register:
- // TODO: Print register flags.
+ // TODO: Print the other register flags.
+ if (Op.isImplicit())
+ OS << (Op.isDef() ? "implicit-def " : "implicit ");
+ if (Op.isDead())
+ OS << "dead ";
+ if (Op.isKill())
+ OS << "killed ";
+ if (Op.isUndef())
+ OS << "undef ";
printReg(Op.getReg(), OS, TRI);
- // TODO: Print sub register.
+ // Print the sub register.
+ if (Op.getSubReg() != 0)
+ OS << ':' << TRI->getSubRegIndexName(Op.getSubReg());
break;
case MachineOperand::MO_Immediate:
OS << Op.getImm();
@@ -222,10 +324,7 @@ void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
printMBBReference(*Op.getMBB());
break;
case MachineOperand::MO_GlobalAddress:
- // FIXME: Make this faster - print as operand will create a slot tracker to
- // print unnamed values for the whole module every time it's called, which
- // is inefficient.
- Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, &M);
+ Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
// TODO: Print offset and target flags.
break;
case MachineOperand::MO_RegisterMask: {
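The hoisted printReg above fixes the textual register syntax used throughout MIR: '_' for no register, '%<index>' for virtual registers, and '%name' in lowercase for physical registers. A standalone sketch of that convention; the top-bit virtual-register marker below is an assumption for illustration, not necessarily LLVM's exact encoding:

#include <cstdio>

const unsigned VirtRegFlag = 1u << 31;  // assumed marker bit for vregs

void printRegSketch(unsigned Reg, const char *const *PhysNames,
                    unsigned NumPhysNames) {
  if (!Reg)
    std::printf("_");                         // no register
  else if (Reg & VirtRegFlag)
    std::printf("%%%u", Reg & ~VirtRegFlag);  // virtual: %<index>
  else if (Reg < NumPhysNames)
    std::printf("%%%s", PhysNames[Reg]);      // physical: %name (lowercase)
  else
    std::printf("<unprintable>");
}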
diff --git a/contrib/llvm/lib/CodeGen/MachineDominators.cpp b/contrib/llvm/lib/CodeGen/MachineDominators.cpp
index 467a2e4..3f04bb0 100644
--- a/contrib/llvm/lib/CodeGen/MachineDominators.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineDominators.cpp
@@ -19,8 +19,8 @@
using namespace llvm;
namespace llvm {
-TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>);
-TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>);
+template class DomTreeNodeBase<MachineBasicBlock>;
+template class DominatorTreeBase<MachineBasicBlock>;
}
char MachineDominatorTree::ID = 0;
diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
index 800d1b5..9856e70 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
@@ -29,6 +29,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -74,7 +75,7 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
if (Fn->hasFnAttribute(Attribute::StackAlignment))
FrameInfo->ensureMaxAlignment(Fn->getFnStackAlignment());
- ConstantPool = new (Allocator) MachineConstantPool(TM);
+ ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
@@ -118,6 +119,10 @@ MachineFunction::~MachineFunction() {
}
}
+const DataLayout &MachineFunction::getDataLayout() const {
+ return Fn->getParent()->getDataLayout();
+}
+
/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
@@ -458,12 +463,12 @@ unsigned MachineFunction::addLiveIn(unsigned PReg,
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate) const {
- const DataLayout *DL = getTarget().getDataLayout();
+ const DataLayout &DL = getDataLayout();
assert(JumpTableInfo && "No jump tables");
assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
- const char *Prefix = isLinkerPrivate ? DL->getLinkerPrivateGlobalPrefix() :
- DL->getPrivateGlobalPrefix();
+ const char *Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
+ : DL.getPrivateGlobalPrefix();
SmallString<60> Name;
raw_svector_ostream(Name)
<< Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
@@ -472,9 +477,9 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
- const DataLayout *DL = getTarget().getDataLayout();
- return Ctx.getOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
- Twine(getFunctionNumber())+"$pb");
+ const DataLayout &DL = getDataLayout();
+ return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
+ Twine(getFunctionNumber()) + "$pb");
}
//===----------------------------------------------------------------------===//
@@ -790,10 +795,6 @@ void MachineJumpTableInfo::dump() const { print(dbgs()); }
void MachineConstantPoolValue::anchor() { }
-const DataLayout *MachineConstantPool::getDataLayout() const {
- return TM.getDataLayout();
-}
-
Type *MachineConstantPoolEntry::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
@@ -851,7 +852,7 @@ MachineConstantPool::~MachineConstantPool() {
/// Test whether the given two constants can be allocated the same constant pool
/// entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
- const DataLayout *TD) {
+ const DataLayout &DL) {
// Handle the trivial case quickly.
if (A == B) return true;
@@ -865,8 +866,8 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
return false;
// For now, only support constants with the same size.
- uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
- if (StoreSize != TD->getTypeStoreSize(B->getType()) || StoreSize > 128)
+ uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
+ if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
return false;
Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
@@ -877,16 +878,16 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
// DataLayout.
if (isa<PointerType>(A->getType()))
A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
- const_cast<Constant *>(A), *TD);
+ const_cast<Constant *>(A), DL);
else if (A->getType() != IntTy)
A = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
- const_cast<Constant *>(A), *TD);
+ const_cast<Constant *>(A), DL);
if (isa<PointerType>(B->getType()))
B = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
- const_cast<Constant *>(B), *TD);
+ const_cast<Constant *>(B), DL);
else if (B->getType() != IntTy)
B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
- const_cast<Constant *>(B), *TD);
+ const_cast<Constant *>(B), DL);
return A == B;
}
@@ -903,8 +904,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
// FIXME, this could be made much more efficient for large constant pools.
for (unsigned i = 0, e = Constants.size(); i != e; ++i)
if (!Constants[i].isMachineConstantPoolEntry() &&
- CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C,
- getDataLayout())) {
+ CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
if ((unsigned)Constants[i].getAlignment() < Alignment)
Constants[i].Alignment = Alignment;
return i;
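The MachineFunction change above replaces the pointer-returning TargetMachine accessor with a reference fetched from the owning IR module, so helpers like CanShareConstantPoolEntry take `const DataLayout &` and no longer need null checks. A condensed sketch of the delegation, with simplified stand-ins for the LLVM classes:

struct DataLayout { /* layout queries elided */ };

struct Module {
  DataLayout DL;
  const DataLayout &getDataLayout() const { return DL; }
};

struct Function {
  Module *Parent;
  Module *getParent() const { return Parent; }
};

struct MachineFunctionSketch {
  const Function *Fn;
  // Forward to the IR module: callers get a guaranteed-valid reference
  // instead of a pointer that might be null.
  const DataLayout &getDataLayout() const {
    return Fn->getParent()->getDataLayout();
  }
};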
diff --git a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
index 42d0603..6a20624 100644
--- a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
@@ -320,7 +320,10 @@ void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
const Function *Personality) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.Personality = Personality;
+ addPersonality(Personality);
+}
+void MachineModuleInfo::addPersonality(const Function *Personality) {
for (unsigned i = 0; i < Personalities.size(); ++i)
if (Personalities[i] == Personality)
return;
diff --git a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index 278a8f2..5984af8 100644
--- a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -13,6 +13,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -28,7 +29,6 @@ MachineRegisterInfo::MachineRegisterInfo(const MachineFunction *MF)
TracksSubRegLiveness(false) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
- UsedRegUnits.resize(getTargetRegisterInfo()->getNumRegUnits());
UsedPhysRegMask.resize(getTargetRegisterInfo()->getNumRegs());
// Create the physreg use/def lists.
@@ -441,3 +441,49 @@ void MachineRegisterInfo::markUsesInDebugValueAsUndef(unsigned Reg) const {
UseMI->getOperand(0).setReg(0U);
}
}
+
+static const Function *getCalledFunction(const MachineInstr &MI) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isGlobal())
+ continue;
+ const Function *Func = dyn_cast<Function>(MO.getGlobal());
+ if (Func != nullptr)
+ return Func;
+ }
+ return nullptr;
+}
+
+static bool isNoReturnDef(const MachineOperand &MO) {
+ // Anything that is not a call to a noreturn function is a real def.
+ const MachineInstr &MI = *MO.getParent();
+ if (!MI.isCall())
+ return false;
+ const MachineBasicBlock &MBB = *MI.getParent();
+ if (!MBB.succ_empty())
+ return false;
+ const MachineFunction &MF = *MBB.getParent();
+ // We need to keep correct unwind information even if the function will
+ // not return, since the runtime may need it.
+ if (MF.getFunction()->hasFnAttribute(Attribute::UWTable))
+ return false;
+ const Function *Called = getCalledFunction(MI);
+ if (Called == nullptr || !Called->hasFnAttribute(Attribute::NoReturn)
+ || !Called->hasFnAttribute(Attribute::NoUnwind))
+ return false;
+
+ return true;
+}
+
+bool MachineRegisterInfo::isPhysRegModified(unsigned PhysReg) const {
+ if (UsedPhysRegMask.test(PhysReg))
+ return true;
+ const TargetRegisterInfo *TRI = getTargetRegisterInfo();
+ for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
+ for (const MachineOperand &MO : make_range(def_begin(*AI), def_end())) {
+ if (isNoReturnDef(MO))
+ continue;
+ return true;
+ }
+ }
+ return false;
+}
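isPhysRegModified above ignores defs produced by calls that provably never return or unwind, unless the caller still needs unwind tables. A condensed restatement of the isNoReturnDef filter chain over simplified stand-in types:

struct FuncAttrs { bool UWTable, NoReturn, NoUnwind; };
struct BlockSketch { bool HasSuccessors; };
struct CallSketch {
  bool IsCall;
  BlockSketch Parent;
  FuncAttrs Caller;
  const FuncAttrs *Callee;  // null when no called function is known
};

bool isIgnorableNoReturnDef(const CallSketch &MI) {
  if (!MI.IsCall)
    return false;               // only call defs can be ignored
  if (MI.Parent.HasSuccessors)
    return false;               // the block must not continue anywhere
  if (MI.Caller.UWTable)
    return false;               // unwind info must stay correct
  if (!MI.Callee)
    return false;
  return MI.Callee->NoReturn && MI.Callee->NoUnwind;
}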
diff --git a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
index f9adba0..9404c68 100644
--- a/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -509,18 +509,17 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
<< " height.\n");
// Find any MBB predecessors that have MBB as their preferred successor.
// They are the only ones that need to be invalidated.
- for (MachineBasicBlock::const_pred_iterator
- I = MBB->pred_begin(), E = MBB->pred_end(); I != E; ++I) {
- TraceBlockInfo &TBI = BlockInfo[(*I)->getNumber()];
+ for (const MachineBasicBlock *Pred : MBB->predecessors()) {
+ TraceBlockInfo &TBI = BlockInfo[Pred->getNumber()];
if (!TBI.hasValidHeight())
continue;
if (TBI.Succ == MBB) {
TBI.invalidateHeight();
- WorkList.push_back(*I);
+ WorkList.push_back(Pred);
continue;
}
// Verify that TBI.Succ is actually a successor of this predecessor.
- assert((!TBI.Succ || (*I)->isSuccessor(TBI.Succ)) && "CFG changed");
+ assert((!TBI.Succ || Pred->isSuccessor(TBI.Succ)) && "CFG changed");
}
} while (!WorkList.empty());
}
@@ -535,18 +534,17 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) {
<< " depth.\n");
// Find any MBB successors that have MBB as their preferred predecessor.
// They are the only ones that need to be invalidated.
- for (MachineBasicBlock::const_succ_iterator
- I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) {
- TraceBlockInfo &TBI = BlockInfo[(*I)->getNumber()];
+ for (const MachineBasicBlock *Succ : MBB->successors()) {
+ TraceBlockInfo &TBI = BlockInfo[Succ->getNumber()];
if (!TBI.hasValidDepth())
continue;
if (TBI.Pred == MBB) {
TBI.invalidateDepth();
- WorkList.push_back(*I);
+ WorkList.push_back(Succ);
continue;
}
// Verify that TBI.Pred is actually a predecessor of this successor.
- assert((!TBI.Pred || (*I)->isPredecessor(TBI.Pred)) && "CFG changed");
+ assert((!TBI.Pred || Succ->isPredecessor(TBI.Pred)) && "CFG changed");
}
} while (!WorkList.empty());
}
@@ -998,8 +996,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
// MBB is the highest precomputed block in the trace.
if (MBB) {
TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
- for (unsigned i = 0, e = TBI.LiveIns.size(); i != e; ++i) {
- LiveInReg LI = TBI.LiveIns[i];
+ for (LiveInReg &LI : TBI.LiveIns) {
if (TargetRegisterInfo::isVirtualRegister(LI.Reg)) {
// For virtual registers, the def latency is included.
unsigned &Height = Heights[MTM.MRI->getVRegDef(LI.Reg)];
@@ -1131,11 +1128,16 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
MachineTraceMetrics::Trace
MachineTraceMetrics::Ensemble::getTrace(const MachineBasicBlock *MBB) {
- // FIXME: Check cache tags, recompute as needed.
- computeTrace(MBB);
- computeInstrDepths(MBB);
- computeInstrHeights(MBB);
- return Trace(*this, BlockInfo[MBB->getNumber()]);
+ TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()];
+
+ if (!TBI.hasValidDepth() || !TBI.hasValidHeight())
+ computeTrace(MBB);
+ if (!TBI.HasValidInstrDepths)
+ computeInstrDepths(MBB);
+ if (!TBI.HasValidInstrHeights)
+ computeInstrHeights(MBB);
+
+ return Trace(*this, TBI);
}
unsigned
@@ -1204,8 +1206,7 @@ unsigned MachineTraceMetrics::Trace::getResourceLength(
unsigned ResourceIdx)
->unsigned {
unsigned Cycles = 0;
- for (unsigned I = 0; I != Instrs.size(); ++I) {
- const MCSchedClassDesc *SC = Instrs[I];
+ for (const MCSchedClassDesc *SC : Instrs) {
if (!SC->isValid())
continue;
for (TargetSchedModel::ProcResIter
@@ -1223,8 +1224,8 @@ unsigned MachineTraceMetrics::Trace::getResourceLength(
for (unsigned K = 0; K != PRDepths.size(); ++K) {
unsigned PRCycles = PRDepths[K] + PRHeights[K];
- for (unsigned I = 0; I != Extrablocks.size(); ++I)
- PRCycles += TE.MTM.getProcResourceCycles(Extrablocks[I]->getNumber())[K];
+ for (const MachineBasicBlock *MBB : Extrablocks)
+ PRCycles += TE.MTM.getProcResourceCycles(MBB->getNumber())[K];
PRCycles += extraCycles(ExtraInstrs, K);
PRCycles -= extraCycles(RemoveInstrs, K);
PRMax = std::max(PRMax, PRCycles);
@@ -1235,8 +1236,8 @@ unsigned MachineTraceMetrics::Trace::getResourceLength(
// Instrs: #instructions in current trace outside current block.
unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight;
// Add instruction count from the extra blocks.
- for (unsigned i = 0, e = Extrablocks.size(); i != e; ++i)
- Instrs += TE.MTM.getResources(Extrablocks[i])->InstrCount;
+ for (const MachineBasicBlock *MBB : Extrablocks)
+ Instrs += TE.MTM.getResources(MBB)->InstrCount;
Instrs += ExtraInstrs.size();
Instrs -= RemoveInstrs.size();
if (unsigned IW = TE.MTM.SchedModel.getIssueWidth())
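getTrace above swaps the unconditional recomputation for dirty-flag checks, so a cached trace is reused until invalidate() clears its flags. The shape of that memoization, abstracted from the pass:

struct TraceCacheEntry {
  bool ValidDepth = false, ValidHeight = false;
  bool ValidInstrDepths = false, ValidInstrHeights = false;
};

template <typename F1, typename F2, typename F3>
void refreshTrace(TraceCacheEntry &TBI, F1 computeTrace,
                  F2 computeInstrDepths, F3 computeInstrHeights) {
  if (!TBI.ValidDepth || !TBI.ValidHeight)
    computeTrace();          // rebuilds the block chain, sets both flags
  if (!TBI.ValidInstrDepths)
    computeInstrDepths();
  if (!TBI.ValidInstrHeights)
    computeInstrHeights();
}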
diff --git a/contrib/llvm/lib/CodeGen/Passes.cpp b/contrib/llvm/lib/CodeGen/Passes.cpp
index 210a7a1..024d166 100644
--- a/contrib/llvm/lib/CodeGen/Passes.cpp
+++ b/contrib/llvm/lib/CodeGen/Passes.cpp
@@ -214,10 +214,10 @@ TargetPassConfig::~TargetPassConfig() {
// Out of line constructor provides default values for pass options and
// registers all common codegen passes.
TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
- : ImmutablePass(ID), PM(&pm), StartAfter(nullptr), StopAfter(nullptr),
- Started(true), Stopped(false), AddingMachinePasses(false), TM(tm),
- Impl(nullptr), Initialized(false), DisableVerify(false),
- EnableTailMerge(true), EnableShrinkWrap(false) {
+ : ImmutablePass(ID), PM(&pm), StartBefore(nullptr), StartAfter(nullptr),
+ StopAfter(nullptr), Started(true), Stopped(false),
+ AddingMachinePasses(false), TM(tm), Impl(nullptr), Initialized(false),
+ DisableVerify(false), EnableTailMerge(true), EnableShrinkWrap(false) {
Impl = new PassConfigImpl();
@@ -288,6 +288,8 @@ void TargetPassConfig::addPass(Pass *P, bool verifyAfter, bool printAfter) {
// and shouldn't reference it.
AnalysisID PassID = P->getPassID();
+ if (StartBefore == PassID)
+ Started = true;
if (Started && !Stopped) {
std::string Banner;
// Construct banner message before PM->add() as that may delete the pass.
@@ -422,7 +424,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
// removed from the parent invoke(s). This could happen when a landing
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
- addPass(createSjLjEHPreparePass(TM));
+ addPass(createSjLjEHPreparePass());
// FALLTHROUGH
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
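The new StartBefore field above complements StartAfter/StopAfter: it opens the pipeline *including* the named pass rather than after it. A sketch of how that gating plausibly threads through addPass; the surrounding bookkeeping is simplified and the exact ordering of the StartAfter/StopAfter updates is assumed:

struct PipelineGate {
  const void *StartBefore = nullptr;  // begin with this pass itself
  const void *StartAfter = nullptr;   // begin with the pass after this one
  const void *StopAfter = nullptr;
  bool Started = true, Stopped = false;

  bool shouldAddPass(const void *PassID) {
    if (StartBefore == PassID)
      Started = true;                 // checked before the add decision
    bool Add = Started && !Stopped;
    if (StartAfter == PassID)
      Started = true;
    if (StopAfter == PassID)
      Stopped = true;
    return Add;
  }
};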
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 76583f0..b2fdee6 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -82,7 +82,8 @@ private:
void calculateSets(MachineFunction &Fn);
void calculateCallsInformation(MachineFunction &Fn);
- void calculateCalleeSavedRegisters(MachineFunction &Fn);
+ void assignCalleeSavedSpillSlots(MachineFunction &Fn,
+ const BitVector &SavedRegs);
void insertCSRSpillsAndRestores(MachineFunction &Fn);
void calculateFrameObjectOffsets(MachineFunction &Fn);
void replaceFrameIndices(MachineFunction &Fn);
@@ -92,7 +93,7 @@ private:
void insertPrologEpilogCode(MachineFunction &Fn);
// Convenience for recognizing return blocks.
- bool isReturnBlock(MachineBasicBlock *MBB);
+ bool isReturnBlock(const MachineBasicBlock *MBB) const;
};
} // namespace
@@ -127,7 +128,7 @@ void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool PEI::isReturnBlock(MachineBasicBlock* MBB) {
+bool PEI::isReturnBlock(const MachineBasicBlock* MBB) const {
return (MBB && !MBB->empty() && MBB->back().isReturn());
}
@@ -143,7 +144,12 @@ void PEI::calculateSets(MachineFunction &Fn) {
if (MFI->getSavePoint()) {
SaveBlock = MFI->getSavePoint();
assert(MFI->getRestorePoint() && "Both restore and save must be set");
- RestoreBlocks.push_back(MFI->getRestorePoint());
+ MachineBasicBlock *RestoreBlock = MFI->getRestorePoint();
+ // If RestoreBlock does not have any successor and is not a return block
+ // then the end point is unreachable and we do not need to insert any
+ // epilogue.
+ if (!RestoreBlock->succ_empty() || isReturnBlock(RestoreBlock))
+ RestoreBlocks.push_back(RestoreBlock);
return;
}
@@ -178,13 +184,12 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// instructions.
calculateCallsInformation(Fn);
- // Allow the target machine to make some adjustments to the function
- // e.g. UsedPhysRegs before calculateCalleeSavedRegisters.
- TFI->processFunctionBeforeCalleeSavedScan(Fn, RS);
+ // Determine which of the registers in the callee save list should be saved.
+ BitVector SavedRegs;
+ TFI->determineCalleeSaves(Fn, SavedRegs, RS);
- // Scan the function for modified callee saved registers and insert spill code
- // for any callee saved registers that are modified.
- calculateCalleeSavedRegisters(Fn);
+ // Insert spill code for any callee saved registers that are modified.
+ assignCalleeSavedSpillSlots(Fn, SavedRegs);
// Determine placement of CSR spill/restore code:
// place all spills in the entry block, all restores in return blocks.
@@ -290,39 +295,27 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
}
}
-
-/// calculateCalleeSavedRegisters - Scan the function for modified callee saved
-/// registers.
-void PEI::calculateCalleeSavedRegisters(MachineFunction &F) {
- const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
- const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
- MachineFrameInfo *MFI = F.getFrameInfo();
-
- // Get the callee saved register list...
- const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&F);
-
+void PEI::assignCalleeSavedSpillSlots(MachineFunction &F,
+ const BitVector &SavedRegs) {
// These are used to keep track of the callee-save area. Initialize them.
MinCSFrameIndex = INT_MAX;
MaxCSFrameIndex = 0;
- // Early exit for targets which have no callee saved registers.
- if (!CSRegs || CSRegs[0] == 0)
+ if (SavedRegs.empty())
return;
- // In Naked functions we aren't going to save any registers.
- if (F.getFunction()->hasFnAttribute(Attribute::Naked))
- return;
+ const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&F);
std::vector<CalleeSavedInfo> CSI;
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
- // Functions which call __builtin_unwind_init get all their registers saved.
- if (F.getRegInfo().isPhysRegUsed(Reg) || F.getMMI().callsUnwindInit()) {
- // If the reg is modified, save it!
+ if (SavedRegs.test(Reg))
CSI.push_back(CalleeSavedInfo(Reg));
- }
}
+ const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
+ MachineFrameInfo *MFI = F.getFrameInfo();
if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
// If target doesn't implement this, use generic code.
@@ -1033,12 +1026,8 @@ PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// Replace this reference to the virtual register with the
// scratch register.
assert (ScratchReg && "Missing scratch register!");
- MachineRegisterInfo &MRI = Fn.getRegInfo();
Fn.getRegInfo().replaceRegWith(Reg, ScratchReg);
- // Make sure MRI now accounts this register as used.
- MRI.setPhysRegUsed(ScratchReg);
-
// Because this instruction was processed by the RS before this
// register was allocated, make sure that the RS now records the
// register as being used.
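The PEI rework above splits responsibilities: the target's determineCalleeSaves fills a BitVector of registers that must be preserved, and PEI merely intersects it with the callee-saved list when assigning spill slots. A sketch of that intersection over simplified types; SavedRegs is assumed sized to the total register count:

#include <vector>

std::vector<unsigned> collectCalleeSaves(const unsigned *CSRegs,  // 0-terminated
                                         const std::vector<bool> &SavedRegs) {
  std::vector<unsigned> CSI;
  if (SavedRegs.empty())
    return CSI;                    // target asked for no saves at all
  for (unsigned I = 0; CSRegs[I]; ++I)
    if (SavedRegs[CSRegs[I]])
      CSI.push_back(CSRegs[I]);    // this CSR needs a spill slot
  return CSI;
}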
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index fd3d4d7..660bb4f 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -986,10 +986,6 @@ void RAFast::AllocateBasicBlock() {
}
}
- for (UsedInInstrSet::iterator
- I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I)
- MRI->setRegUnitUsed(*I);
-
// Track registers defined by instruction - early clobbers and tied uses at
// this point.
UsedInInstr.clear();
@@ -1050,10 +1046,6 @@ void RAFast::AllocateBasicBlock() {
killVirtReg(VirtDead[i]);
VirtDead.clear();
- for (UsedInInstrSet::iterator
- I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I)
- MRI->setRegUnitUsed(*I);
-
if (CopyDst && CopyDst == CopySrc && CopyDstSub == CopySrcSub) {
DEBUG(dbgs() << "-- coalescing: " << *MI);
Coalesced.push_back(MI);
@@ -1103,12 +1095,6 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
AllocateBasicBlock();
}
- // Add the clobber lists for all the instructions we skipped earlier.
- for (const MCInstrDesc *Desc : SkippedInstrs)
- if (const uint16_t *Defs = Desc->getImplicitDefs())
- while (*Defs)
- MRI->setPhysRegUsed(*Defs++);
-
// All machine operands and other references to virtual registers have been
// replaced. Remove the virtual registers.
MRI->clearVirtRegs();
diff --git a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 26f42c9..7ebcf7f 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -400,6 +400,8 @@ private:
typedef SmallVector<HintInfo, 4> HintsInfo;
BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
void collectHintInfo(unsigned, HintsInfo &);
+
+ bool isUnusedCalleeSavedReg(unsigned PhysReg) const;
};
} // end anonymous namespace
@@ -816,6 +818,16 @@ void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
}
}
+/// Returns true if the given \p PhysReg is a callee saved register and has not
+/// been used for allocation yet.
+bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
+ unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
+ if (CSR == 0)
+ return false;
+
+ return !Matrix->isPhysRegUsed(PhysReg);
+}
+
/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order Physregs to try.
@@ -861,13 +873,12 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
continue;
// The first use of a callee-saved register in a function has cost 1.
// Don't start using a CSR when the CostPerUseLimit is low.
- if (CostPerUseLimit == 1)
- if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
- if (!MRI->isPhysRegUsed(CSR)) {
- DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
- << PrintReg(CSR, TRI) << '\n');
- continue;
- }
+ if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
+ DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
+ << PrintReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
+ << '\n');
+ continue;
+ }
if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
continue;
@@ -1348,9 +1359,8 @@ unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
unsigned BestCand = NoCand;
Order.rewind();
while (unsigned PhysReg = Order.next()) {
- if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
- if (IgnoreCSR && !MRI->isPhysRegUsed(CSR))
- continue;
+ if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
+ continue;
// Discard bad candidates before we run out of interference cache cursors.
// This will only affect register classes with a lot of registers (>32).
@@ -2134,7 +2144,8 @@ unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
unsigned ItVirtReg = (*It)->reg;
if (VRM->hasPhys(ItVirtReg))
Matrix->unassign(**It);
- Matrix->assign(**It, VirtRegToPhysReg[ItVirtReg]);
+ unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
+ Matrix->assign(**It, ItPhysReg);
}
}
@@ -2441,16 +2452,11 @@ unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
// First try assigning a free register.
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
- // We check other options if we are using a CSR for the first time.
- bool CSRFirstUse = false;
- if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
- if (!MRI->isPhysRegUsed(CSR))
- CSRFirstUse = true;
-
// When NewVRegs is not empty, we may have made decisions such as evicting
// a virtual register, go with the earlier decisions and use the physical
// register.
- if (CSRCost.getFrequency() && CSRFirstUse && NewVRegs.empty()) {
+ if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
+ NewVRegs.empty()) {
unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
CostPerUseLimit, NewVRegs);
if (CSRReg || !NewVRegs.empty())
diff --git a/contrib/llvm/lib/CodeGen/RegisterPressure.cpp b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
index 450a305..c3786e5 100644
--- a/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -77,6 +77,16 @@ void RegPressureTracker::dump() const {
P.dump(TRI);
}
+void PressureDiff::dump(const TargetRegisterInfo &TRI) const {
+ for (const PressureChange &Change : *this) {
+ if (!Change.isValid() || Change.getUnitInc() == 0)
+ continue;
+ dbgs() << " " << TRI.getRegPressureSetName(Change.getPSet())
+ << " " << Change.getUnitInc();
+ }
+ dbgs() << '\n';
+}
+
/// Increase the current pressure as impacted by these registers and bump
/// the high water mark if needed.
void RegPressureTracker::increaseRegPressure(ArrayRef<unsigned> RegUnits) {
@@ -787,6 +797,8 @@ getMaxUpwardPressureDelta(const MachineInstr *MI, PressureDiff *PDiff,
RegPressureDelta Delta2;
getUpwardPressureDelta(MI, *PDiff, Delta2, CriticalPSets, MaxPressureLimit);
if (Delta != Delta2) {
+ dbgs() << "PDiff: ";
+ PDiff->dump(*TRI);
dbgs() << "DELTA: " << *MI;
if (Delta.Excess.isValid())
dbgs() << "Excess1 " << TRI->getRegPressureSetName(Delta.Excess.getPSet())
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6056d93..52d620b 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -443,8 +443,9 @@ namespace {
assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
if (LHSTy.isVector())
return LHSTy;
- return LegalTypes ? TLI.getScalarShiftAmountTy(LHSTy)
- : TLI.getPointerTy();
+ auto &DL = DAG.getDataLayout();
+ return LegalTypes ? TLI.getScalarShiftAmountTy(DL, LHSTy)
+ : TLI.getPointerTy(DL);
}
/// This method returns true if we are running before type legalization or
@@ -456,7 +457,7 @@ namespace {
/// Convenience wrapper around TargetLowering::getSetCCResultType
EVT getSetCCResultType(EVT VT) const {
- return TLI.getSetCCResultType(*DAG.getContext(), VT);
+ return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
};
}
@@ -3111,7 +3112,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// For big endian targets, we need to add an offset to the pointer
// to load the correct bytes. For little endian systems, we merely
// need to read fewer bytes from the same pointer.
- if (TLI.isBigEndian()) {
+ if (DAG.getDataLayout().isBigEndian()) {
unsigned LVTStoreBytes = LoadedVT.getStoreSize();
unsigned EVTStoreBytes = ExtVT.getStoreSize();
unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
@@ -6675,7 +6676,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
- if (TLI.isBigEndian()) {
+ if (DAG.getDataLayout().isBigEndian()) {
unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
@@ -6873,7 +6874,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- bool isLE = TLI.isLittleEndian();
+ bool isLE = DAG.getDataLayout().isLittleEndian();
// noop truncate
if (N0.getValueType() == N->getValueType(0))
@@ -6926,7 +6927,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
- EVT IndexTy = TLI.getVectorIdxTy();
+ EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDValue V = DAG.getNode(ISD::BITCAST, SDLoc(N),
@@ -7093,8 +7094,8 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
!LD2->isVolatile() &&
DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
unsigned Align = LD1->getAlignment();
- unsigned NewAlign = TLI.getDataLayout()->
- getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+ unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
+ VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign <= Align &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
@@ -7150,13 +7151,13 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
// Do not change the width of a volatile load.
!cast<LoadSDNode>(N0)->isVolatile() &&
// Do not remove the cast if the types differ in endian layout.
- TLI.hasBigEndianPartOrdering(N0.getValueType()) ==
- TLI.hasBigEndianPartOrdering(VT) &&
+ TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
+ TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- unsigned Align = TLI.getDataLayout()->
- getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+ unsigned Align = DAG.getDataLayout().getABITypeAlignment(
+ VT.getTypeForEVT(*DAG.getContext()));
unsigned OrigAlign = LN0->getAlignment();
if (Align <= OrigAlign) {
@@ -7368,7 +7369,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e;
i += NumInputsPerOutput) {
- bool isLE = TLI.isLittleEndian();
+ bool isLE = DAG.getDataLayout().isLittleEndian();
APInt NewBits = APInt(DstBitSize, 0);
bool EltIsUndef = true;
for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
@@ -7415,7 +7416,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
}
// For big endian targets, swap the order of the pieces of each element.
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
}
@@ -8373,6 +8374,9 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
if (TLI.combineRepeatedFPDivisors(Users.size())) {
SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
+ // FIXME: This optimization requires some level of fast-math, so the
+ // created reciprocal node should at least have the 'allowReciprocal'
+ // fast-math-flag set.
SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1);
// Dividend / Divisor -> Dividend * Reciprocal
@@ -8381,10 +8385,14 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
if (Dividend != FPOne) {
SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend,
Reciprocal);
- DAG.ReplaceAllUsesWith(U, NewNode.getNode());
+ CombineTo(U, NewNode);
+ } else if (U != Reciprocal.getNode()) {
+ // In the absence of fast-math-flags, this user node is always the
+ // same node as Reciprocal, but with FMF they may be different nodes.
+ CombineTo(U, Reciprocal);
}
}
- return SDValue();
+ return SDValue(N, 0); // N was replaced.
}
}
@@ -8406,30 +8414,29 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
}
SDValue DAGCombiner::visitFSQRT(SDNode *N) {
- if (DAG.getTarget().Options.UnsafeFPMath &&
- !TLI.isFsqrtCheap()) {
- // Compute this as X * (1/sqrt(X)) = X * (X ** -0.5)
- if (SDValue RV = BuildRsqrtEstimate(N->getOperand(0))) {
- EVT VT = RV.getValueType();
- SDLoc DL(N);
- RV = DAG.getNode(ISD::FMUL, DL, VT, N->getOperand(0), RV);
- AddToWorklist(RV.getNode());
+ if (!DAG.getTarget().Options.UnsafeFPMath || TLI.isFsqrtCheap())
+ return SDValue();
- // Unfortunately, RV is now NaN if the input was exactly 0.
- // Select out this case and force the answer to 0.
- SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
- SDValue ZeroCmp =
- DAG.getSetCC(DL, TLI.getSetCCResultType(*DAG.getContext(), VT),
- N->getOperand(0), Zero, ISD::SETEQ);
- AddToWorklist(ZeroCmp.getNode());
- AddToWorklist(RV.getNode());
+ // Compute this as X * (1/sqrt(X)) = X * (X ** -0.5)
+ SDValue RV = BuildRsqrtEstimate(N->getOperand(0));
+ if (!RV)
+ return SDValue();
+
+ EVT VT = RV.getValueType();
+ SDLoc DL(N);
+ RV = DAG.getNode(ISD::FMUL, DL, VT, N->getOperand(0), RV);
+ AddToWorklist(RV.getNode());
- RV = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT,
- DL, VT, ZeroCmp, Zero, RV);
- return RV;
- }
- }
- return SDValue();
+ // Unfortunately, RV is now NaN if the input was exactly 0.
+ // Select out this case and force the answer to 0.
+ SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
+ EVT CCVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+ SDValue ZeroCmp = DAG.getSetCC(DL, CCVT, N->getOperand(0), Zero, ISD::SETEQ);
+ AddToWorklist(ZeroCmp.getNode());
+ AddToWorklist(RV.getNode());
+
+ return DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
+ ZeroCmp, Zero, RV);
}
SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
@@ -9144,7 +9151,8 @@ static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
} else
return false;
- return TLI.isLegalAddressingMode(AM, VT.getTypeForEVT(*DAG.getContext()), AS);
+ return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
+ VT.getTypeForEVT(*DAG.getContext()), AS);
}
/// Try turning a load/store into a pre-indexed load/store when the base
@@ -9869,8 +9877,7 @@ struct LoadedSlice {
/// \pre DAG != nullptr.
uint64_t getOffsetFromBase() const {
assert(DAG && "Missing context.");
- bool IsBigEndian =
- DAG->getTargetLoweringInfo().getDataLayout()->isBigEndian();
+ bool IsBigEndian = DAG->getDataLayout().isBigEndian();
assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
uint64_t Offset = Shift / 8;
unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
@@ -9953,7 +9960,7 @@ struct LoadedSlice {
// Check if it will be merged with the load.
// 1. Check the alignment constraint.
- unsigned RequiredAlignment = TLI.getDataLayout()->getABITypeAlignment(
+ unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
ResVT.getTypeForEVT(*DAG->getContext()));
if (RequiredAlignment > getAlignment())
@@ -10321,7 +10328,7 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
unsigned StOffset;
unsigned NewAlign = St->getAlignment();
- if (DAG.getTargetLoweringInfo().isLittleEndian())
+ if (DAG.getDataLayout().isLittleEndian())
StOffset = ByteShift;
else
StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
@@ -10434,12 +10441,12 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
uint64_t PtrOff = ShAmt / 8;
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
- if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
+ if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
@@ -10503,7 +10510,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
- unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy);
+ unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
@@ -10685,7 +10692,7 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
// Construct a single integer constant which is made of the smaller
// constant inputs.
- bool IsLE = TLI.isLittleEndian();
+ bool IsLE = DAG.getDataLayout().isLittleEndian();
for (unsigned i = 0; i < NumElem ; ++i) {
unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
@@ -10743,7 +10750,7 @@ static bool allowableAlignment(const SelectionDAG &DAG,
return true;
Type *Ty = EVTTy.getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
+ unsigned ABIAlignment = DAG.getDataLayout().getPrefTypeAlignment(Ty);
return (Align >= ABIAlignment);
}
@@ -11205,8 +11212,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment();
EVT SVT = Value.getOperand(0).getValueType();
- unsigned Align = TLI.getDataLayout()->
- getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
+ unsigned Align = DAG.getDataLayout().getABITypeAlignment(
+ SVT.getTypeForEVT(*DAG.getContext()));
if (Align <= OrigAlign &&
((!LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
@@ -11265,7 +11272,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
- if (TLI.isBigEndian()) std::swap(Lo, Hi);
+ if (DAG.getDataLayout().isBigEndian())
+ std::swap(Lo, Hi);
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
@@ -11514,7 +11522,7 @@ SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
EVT ResultVT = EVE->getValueType(0);
EVT VecEltVT = InVecVT.getVectorElementType();
unsigned Align = OriginalLoad->getAlignment();
- unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
+ unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
VecEltVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
@@ -11648,7 +11656,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// scalar_to_vector here as well.
if (!LegalOperations) {
- EVT IndexTy = TLI.getVectorIdxTy();
+ EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT, SVInVec,
DAG.getConstant(OrigElt, SDLoc(SVOp), IndexTy));
}
@@ -11825,7 +11833,7 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
if (!ValidTypes)
return SDValue();
- bool isLE = TLI.isLittleEndian();
+ bool isLE = DAG.getDataLayout().isLittleEndian();
unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
assert(ElemRatio > 1 && "Invalid element size ratio");
SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
@@ -12079,10 +12087,13 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
// Try to replace VecIn1 with two extract_subvectors
// No need to update the masks, they should still be correct.
- VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
- DAG.getConstant(VT.getVectorNumElements(), dl, TLI.getVectorIdxTy()));
- VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ VecIn2 = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
+ DAG.getConstant(VT.getVectorNumElements(), dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
+ VecIn1 = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
} else
return SDValue();
}
@@ -13354,12 +13365,13 @@ SDValue DAGCombiner::SimplifySelectCC(SDLoc DL, SDValue N0, SDValue N1,
const_cast<ConstantFP*>(TV->getConstantFPValue())
};
Type *FPTy = Elts[0]->getType();
- const DataLayout &TD = *TLI.getDataLayout();
+ const DataLayout &TD = DAG.getDataLayout();
// Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
- SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
- TD.getPrefTypeAlignment(FPTy));
+ SDValue CPIdx =
+ DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
+ TD.getPrefTypeAlignment(FPTy));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
// Get the offsets to the 0 and 1 element of the array so that we can
@@ -13832,6 +13844,15 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
// If they are both volatile then they cannot be reordered.
if (Op0->isVolatile() && Op1->isVolatile()) return true;
+ // If one operation reads from invariant memory and the other may store, they
+ // cannot alias. This should really check the equivalent of mayWrite, but it
+ // only matters for memory nodes other than load/store.
+ if (Op0->isInvariant() && Op1->writeMem())
+ return false;
+
+ if (Op1->isInvariant() && Op0->writeMem())
+ return false;
+
// Gather base node and offset information.
SDValue Base1, Base2;
int64_t Offset1, Offset2;
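Several of the DAGCombiner hunks above repeat one pattern: endianness now comes from DAG.getDataLayout(), and on big-endian targets a narrowed access must be re-offset toward the high-order bytes. The arithmetic in isolation, abstracted from ReduceLoadWidth and ReduceLoadOpStoreWidth:

#include <cstdint>

// Byte offset of a narrow slice within a wide in-memory value. On little
// endian the low bytes sit at the original offset; on big endian they sit
// mirrored at the far end of the wide value.
uint64_t narrowedByteOffset(bool IsBigEndian, uint64_t WideBytes,
                            uint64_t NarrowBytes, uint64_t LEOffset) {
  return IsBigEndian ? WideBytes - NarrowBytes - LEOffset : LEOffset;
}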
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 5452b17..2b9ba2c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -166,7 +166,7 @@ bool FastISel::hasTrivialKill(const Value *V) {
}
unsigned FastISel::getRegForValue(const Value *V) {
- EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
+ EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel.
if (!RealVT.isSimple())
return 0;
@@ -228,7 +228,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
if (!Reg) {
// Try to emit the constant by using an integer constant with a cast.
const APFloat &Flt = CF->getValueAPF();
- EVT IntVT = TLI.getPointerTy();
+ EVT IntVT = TLI.getPointerTy(DL);
uint64_t x[2];
uint32_t IntBitWidth = IntVT.getSizeInBits();
@@ -321,7 +321,7 @@ std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
bool IdxNIsKill = hasTrivialKill(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend it.
- MVT PtrVT = TLI.getPointerTy();
+ MVT PtrVT = TLI.getPointerTy(DL);
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT)) {
IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
@@ -493,7 +493,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
// FIXME: What's a good SWAG number for MaxOffs?
uint64_t MaxOffs = 2048;
Type *Ty = I->getOperand(0)->getType();
- MVT VT = TLI.getPointerTy();
+ MVT VT = TLI.getPointerTy(DL);
for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
E = I->op_end();
OI != E; ++OI) {
@@ -908,10 +908,10 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
// Handle the incoming return values from the call.
CLI.clearIns();
SmallVector<EVT, 4> RetTys;
- ComputeValueVTs(TLI, CLI.RetTy, RetTys);
+ ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);
+ GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
bool CanLowerReturn = TLI.CanLowerReturn(
CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
@@ -976,7 +976,7 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
// not there, but there are cases it cannot get right.
unsigned FrameAlign = Arg.Alignment;
if (!FrameAlign)
- FrameAlign = TLI.getByValTypeAlignment(ElementTy);
+ FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
Flags.setByValSize(FrameSize);
Flags.setByValAlign(FrameAlign);
}
@@ -1245,8 +1245,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
}
bool FastISel::selectCast(const User *I, unsigned Opcode) {
- EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(I->getType());
+ EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(DL, I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
!DstVT.isSimple())
@@ -1288,8 +1288,8 @@ bool FastISel::selectBitCast(const User *I) {
}
// Bitcasts of other values become reg-reg copies or BITCAST operators.
- EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
- EVT DstEVT = TLI.getValueType(I->getType());
+ EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT DstEVT = TLI.getValueType(DL, I->getType());
if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
!TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
// Unhandled type. Halt "fast" selection and bail.
@@ -1413,7 +1413,7 @@ bool FastISel::selectFNeg(const User *I) {
bool OpRegIsKill = hasTrivialKill(I);
// If the target has ISD::FNEG, use it.
- EVT VT = TLI.getValueType(I->getType());
+ EVT VT = TLI.getValueType(DL, I->getType());
unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg, OpRegIsKill);
if (ResultReg) {
@@ -1456,7 +1456,7 @@ bool FastISel::selectExtractValue(const User *U) {
// Make sure we only try to handle extracts with a legal result. But also
// allow i1 because it's easy.
- EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
+ EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
if (!RealVT.isSimple())
return false;
MVT VT = RealVT.getSimpleVT();
@@ -1480,7 +1480,7 @@ bool FastISel::selectExtractValue(const User *U) {
unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
SmallVector<EVT, 4> AggValueVTs;
- ComputeValueVTs(TLI, AggTy, AggValueVTs);
+ ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
for (unsigned i = 0; i < VTIndex; i++)
ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
@@ -1582,8 +1582,8 @@ bool FastISel::selectOperator(const User *I, unsigned Opcode) {
case Instruction::IntToPtr: // Deliberate fall-through.
case Instruction::PtrToInt: {
- EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(I->getType());
+ EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(DL, I->getType());
if (DstVT.bitsGT(SrcVT))
return selectCast(I, ISD::ZERO_EXTEND);
if (DstVT.bitsLT(SrcVT))
@@ -1612,7 +1612,7 @@ FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
bool SkipTargetIndependentISel)
: FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
- TM(FuncInfo.MF->getTarget()), DL(*TM.getDataLayout()),
+ TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
TII(*MF->getSubtarget().getInstrInfo()),
TLI(*MF->getSubtarget().getTargetLowering()),
TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
@@ -2037,7 +2037,7 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
// own moves. Second, this check is necessary because FastISel doesn't
// use CreateRegs to create registers, so it always creates
// exactly one register for each non-void instruction.
- EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
+ EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
// Handle integer promotions, though, because they're common and easy.
if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
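Every FastISel hunk above is the same mechanical migration: type queries such as getValueType and getPointerTy no longer read a DataLayout cached inside TargetLowering but take it as an explicit argument, and FastISel now caches MF->getDataLayout() rather than TM.getDataLayout(). A minimal sketch of the shape of the change, using hypothetical stand-in types rather than the real LLVM classes:

#include <iostream>

struct DataLayout {
  unsigned PointerBits; // e.g. 32 or 64, per module
};

struct TargetLowering {
  // After the patch: no hidden DataLayout member; the caller passes one in,
  // so the query is tied to a specific module's layout.
  unsigned getPointerWidth(const DataLayout &DL) const {
    return DL.PointerBits;
  }
};

int main() {
  TargetLowering TLI;
  DataLayout DL{64}; // would come from MF->getDataLayout() in FastISel
  std::cout << "intptr width: " << TLI.getPointerWidth(DL) << "\n";
}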
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index ecaa2c9..cc306cb 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -90,7 +90,8 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// Check whether the function can return without sret-demotion.
SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI);
+ GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
+ mf.getDataLayout());
CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
Fn->isVarArg(), Outs, Fn->getContext());
@@ -106,9 +107,9 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
if (AI->isStaticAlloca()) {
const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
+ uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
unsigned Align =
- std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
+ std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
AI->getAlignment());
TySize *= CUI->getZExtValue(); // Get total allocated size.
@@ -118,10 +119,10 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
} else {
- unsigned Align = std::max(
- (unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
- AI->getAllocatedType()),
- AI->getAlignment());
+ unsigned Align =
+ std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(
+ AI->getAllocatedType()),
+ AI->getAlignment());
unsigned StackAlign =
MF->getSubtarget().getFrameLowering()->getStackAlignment();
if (Align <= StackAlign)
@@ -138,7 +139,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
std::vector<TargetLowering::AsmOperandInfo> Ops =
- TLI->ParseConstraints(TRI, CS);
+ TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
for (size_t I = 0, E = Ops.size(); I != E; ++I) {
TargetLowering::AsmOperandInfo &Op = Ops[I];
if (Op.Type == InlineAsm::isClobber) {
@@ -148,7 +149,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
Op.ConstraintVT);
if (PhysReg.first == SP)
- MF->getFrameInfo()->setHasInlineAsmWithSPAdjust(true);
+ MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
}
}
}
@@ -236,7 +237,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
assert(PHIReg && "PHI node does not have an assigned virtual register!");
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
+ ComputeValueVTs(*TLI, MF->getDataLayout(), PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
@@ -366,7 +367,7 @@ unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(*TLI, Ty, ValueVTs);
+ ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
unsigned FirstReg = 0;
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
@@ -413,7 +414,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
return;
SmallVector<EVT, 1> ValueVTs;
- ComputeValueVTs(*TLI, Ty, ValueVTs);
+ ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
assert(ValueVTs.size() == 1 &&
"PHIs with non-vector integer types should have a single VT.");
EVT IntVT = ValueVTs[0];
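The static-alloca hunk above computes a frame object's size and alignment directly from the machine function's DataLayout. A small illustrative sketch of that arithmetic (plain C++, made-up numbers):

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  uint64_t TypeAllocSize = 12; // e.g. DL.getTypeAllocSize(Ty) for {i32,i32,i32}
  uint64_t ArraySize = 4;      // CUI->getZExtValue()
  unsigned PrefAlign = 8;      // DL.getPrefTypeAlignment(Ty)
  unsigned ExplicitAlign = 16; // AI->getAlignment()

  // Total size is the per-element alloc size times the array count; the
  // alignment is the larger of the preferred and the explicit alignment.
  uint64_t TySize = TypeAllocSize * ArraySize;
  unsigned Align = std::max(PrefAlign, ExplicitAlign);
  std::cout << "stack object: " << TySize << " bytes, align " << Align << "\n";
}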
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 42595cb..5ec1030 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -406,10 +406,10 @@ void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
- Align = MF->getTarget().getDataLayout()->getPrefTypeAlignment(Type);
+ Align = MF->getDataLayout().getPrefTypeAlignment(Type);
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = MF->getTarget().getDataLayout()->getTypeAllocSize(Type);
+ Align = MF->getDataLayout().getTypeAllocSize(Type);
}
}
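The InstrEmitter hunk is a two-level alignment fallback: an explicit alignment wins, otherwise the preferred type alignment, and if that is also zero (the vector FIXME in the code) the type's alloc size is used. A sketch of that decision, with illustrative values:

#include <cstdint>
#include <iostream>

static unsigned pickAlign(unsigned Explicit, unsigned Pref, uint64_t AllocSize) {
  unsigned Align = Explicit;
  if (Align == 0) {
    Align = Pref;
    if (Align == 0)                           // e.g. some vector types
      Align = static_cast<unsigned>(AllocSize);
  }
  return Align;
}

int main() {
  std::cout << pickAlign(0, 0, 16) << "\n"; // vector case: falls back to 16
}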
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index c0d7871..21ab072 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -65,7 +65,7 @@ class SelectionDAGLegalize {
SmallSetVector<SDNode *, 16> *UpdatedNodes;
EVT getSetCCResultType(EVT VT) const {
- return TLI.getSetCCResultType(*DAG.getContext(), VT);
+ return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
// Libcall insertion helpers.
@@ -269,7 +269,8 @@ SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
}
}
- SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
+ SDValue CPIdx =
+ DAG.getConstantPool(LLVMC, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
if (Extend) {
SDValue Result =
@@ -331,7 +332,8 @@ static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
SDValue Store = DAG.getTruncStore(Chain, dl,
Val, StackPtr, MachinePointerInfo(),
StoredVT, false, false, 0);
- SDValue Increment = DAG.getConstant(RegBytes, dl, TLI.getPointerTy(AS));
+ SDValue Increment = DAG.getConstant(
+ RegBytes, dl, TLI.getPointerTy(DAG.getDataLayout(), AS));
SmallVector<SDValue, 8> Stores;
unsigned Offset = 0;
@@ -385,24 +387,27 @@ static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
int IncrementSize = NumBits / 8;
// Divide the stored value in two parts.
- SDValue ShiftAmount = DAG.getConstant(NumBits, dl,
- TLI.getShiftAmountTy(Val.getValueType()));
+ SDValue ShiftAmount =
+ DAG.getConstant(NumBits, dl, TLI.getShiftAmountTy(Val.getValueType(),
+ DAG.getDataLayout()));
SDValue Lo = Val;
SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
// Store the two parts
SDValue Store1, Store2;
- Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
- ST->getPointerInfo(), NewStoredVT,
+ Store1 = DAG.getTruncStore(Chain, dl,
+ DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
+ Ptr, ST->getPointerInfo(), NewStoredVT,
ST->isVolatile(), ST->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
- DAG.getConstant(IncrementSize, dl, TLI.getPointerTy(AS)));
+ DAG.getConstant(IncrementSize, dl,
+ TLI.getPointerTy(DAG.getDataLayout(), AS)));
Alignment = MinAlign(Alignment, IncrementSize);
- Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
- ST->getPointerInfo().getWithOffset(IncrementSize),
- NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
- Alignment, ST->getAAInfo());
+ Store2 = DAG.getTruncStore(
+ Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
+ ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT,
+ ST->isVolatile(), ST->isNonTemporal(), Alignment, ST->getAAInfo());
SDValue Result =
DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
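The two-part store above splits the value into Lo (the original bits) and Hi (the bits shifted down by NumBits), and which half lands at the lower address depends on the target's byte order, hence the switch from TLI.isLittleEndian() to DAG.getDataLayout().isLittleEndian(). A worked model for a 32-bit value stored as two 16-bit halves:

#include <cstdint>
#include <cstdio>
#include <cstring>

static void storeTwoParts(uint8_t *Ptr, uint32_t Val, bool LittleEndian) {
  uint16_t Lo = static_cast<uint16_t>(Val);       // low NumBits of the value
  uint16_t Hi = static_cast<uint16_t>(Val >> 16); // srl(Val, NumBits)
  uint16_t First = LittleEndian ? Lo : Hi;        // stored at offset 0
  uint16_t Second = LittleEndian ? Hi : Lo;       // stored at offset +2
  std::memcpy(Ptr, &First, 2);
  std::memcpy(Ptr + 2, &Second, 2);
}

int main() {
  uint8_t Buf[4];
  storeTwoParts(Buf, 0xAABBCCDD, /*LittleEndian=*/true);
  for (uint8_t B : Buf)
    std::printf("%02X ", B); // low half first on a little-endian layout
  std::printf("\n");
}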
@@ -448,7 +453,8 @@ ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// Make sure the stack slot is also aligned for the register type.
SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
- SDValue Increment = DAG.getConstant(RegBytes, dl, TLI.getPointerTy());
+ SDValue Increment =
+ DAG.getConstant(RegBytes, dl, TLI.getPointerTy(DAG.getDataLayout()));
SmallVector<SDValue, 8> Stores;
SDValue StackPtr = StackBase;
unsigned Offset = 0;
@@ -522,7 +528,7 @@ ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// Load the value in two parts
SDValue Lo, Hi;
- if (TLI.isLittleEndian()) {
+ if (DAG.getDataLayout().isLittleEndian()) {
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(), Alignment,
@@ -549,8 +555,9 @@ ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
}
// aggregate the two parts
- SDValue ShiftAmount = DAG.getConstant(NumBits, dl,
- TLI.getShiftAmountTy(Hi.getValueType()));
+ SDValue ShiftAmount =
+ DAG.getConstant(NumBits, dl, TLI.getShiftAmountTy(Hi.getValueType(),
+ DAG.getDataLayout()));
SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
@@ -581,7 +588,7 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
EVT VT = Tmp1.getValueType();
EVT EltVT = VT.getVectorElementType();
EVT IdxVT = Tmp3.getValueType();
- EVT PtrVT = TLI.getPointerTy();
+ EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
SDValue StackPtr = DAG.CreateStackTemporary(VT);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
@@ -677,7 +684,8 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
SDValue Lo = DAG.getConstant(IntVal.trunc(32), dl, MVT::i32);
SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), dl, MVT::i32);
- if (TLI.isBigEndian()) std::swap(Lo, Hi);
+ if (DAG.getDataLayout().isBigEndian())
+ std::swap(Lo, Hi);
Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile,
isNonTemporal, Alignment, AAInfo);
@@ -724,7 +732,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
unsigned Align = ST->getAlignment();
if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
if (Align < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
@@ -756,6 +764,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
EVT StVT = ST->getMemoryVT();
unsigned StWidth = StVT.getSizeInBits();
+ auto &DL = DAG.getDataLayout();
if (StWidth != StVT.getStoreSizeInBits()) {
// Promote to a byte-sized store with upper bits zero if not
@@ -782,7 +791,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
SDValue Lo, Hi;
unsigned IncrementSize;
- if (TLI.isLittleEndian()) {
+ if (DL.isLittleEndian()) {
// TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
// Store the bottom RoundWidth bits.
Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(),
@@ -795,9 +804,10 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, dl,
Ptr.getValueType()));
- Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
- DAG.getConstant(RoundWidth, dl,
- TLI.getShiftAmountTy(Value.getValueType())));
+ Hi = DAG.getNode(
+ ISD::SRL, dl, Value.getValueType(), Value,
+ DAG.getConstant(RoundWidth, dl,
+ TLI.getShiftAmountTy(Value.getValueType(), DL)));
Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal,
@@ -806,9 +816,10 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// Big endian - avoid unaligned stores.
// TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
// Store the top RoundWidth bits.
- Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value,
- DAG.getConstant(ExtraWidth, dl,
- TLI.getShiftAmountTy(Value.getValueType())));
+ Hi = DAG.getNode(
+ ISD::SRL, dl, Value.getValueType(), Value,
+ DAG.getConstant(ExtraWidth, dl,
+ TLI.getShiftAmountTy(Value.getValueType(), DL)));
Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(),
RoundVT, isVolatile, isNonTemporal, Alignment,
AAInfo);
@@ -838,7 +849,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// expand it.
if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment = DL.getABITypeAlignment(Ty);
if (Align < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
@@ -890,8 +901,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
// expand it.
if (!TLI.allowsMisalignedMemoryAccesses(LD->getMemoryVT(), AS, Align)) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment =
- TLI.getDataLayout()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
if (Align < ABIAlignment){
ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
}
@@ -995,8 +1005,9 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
SDValue Lo, Hi, Ch;
unsigned IncrementSize;
+ auto &DL = DAG.getDataLayout();
- if (TLI.isLittleEndian()) {
+ if (DL.isLittleEndian()) {
// EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
// Load the bottom RoundWidth bits.
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
@@ -1020,9 +1031,10 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
Hi.getValue(1));
// Move the top bits to the right place.
- Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
- DAG.getConstant(RoundWidth, dl,
- TLI.getShiftAmountTy(Hi.getValueType())));
+ Hi = DAG.getNode(
+ ISD::SHL, dl, Hi.getValueType(), Hi,
+ DAG.getConstant(RoundWidth, dl,
+ TLI.getShiftAmountTy(Hi.getValueType(), DL)));
// Join the hi and lo parts.
Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
@@ -1051,9 +1063,10 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
Hi.getValue(1));
// Move the top bits to the right place.
- Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
- DAG.getConstant(ExtraWidth, dl,
- TLI.getShiftAmountTy(Hi.getValueType())));
+ Hi = DAG.getNode(
+ ISD::SHL, dl, Hi.getValueType(), Hi,
+ DAG.getConstant(ExtraWidth, dl,
+ TLI.getShiftAmountTy(Hi.getValueType(), DL)));
// Join the hi and lo parts.
Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
@@ -1086,7 +1099,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
unsigned Align = LD->getAlignment();
if (!TLI.allowsMisalignedMemoryAccesses(MemVT, AS, Align)) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getDataLayout()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
if (Align < ABIAlignment){
ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, Value, Chain);
}
@@ -1439,7 +1452,7 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
- Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy());
+ Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy(DAG.getDataLayout()));
StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);
SDValue NewLoad;
@@ -1491,7 +1504,7 @@ SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
- Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy());
+ Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy(DAG.getDataLayout()));
SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
StackPtr);
@@ -1569,15 +1582,16 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
// Convert to an integer with the same sign bit.
SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
} else {
+ auto &DL = DAG.getDataLayout();
// Store the float to memory, then load the sign part out as an integer.
- MVT LoadTy = TLI.getPointerTy();
+ MVT LoadTy = TLI.getPointerTy(DL);
// First create a temporary that is aligned for both the load and store.
SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
// Then store the float to it.
SDValue Ch =
DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(),
false, false, 0);
- if (TLI.isBigEndian()) {
+ if (DL.isBigEndian()) {
assert(FloatVT.isByteSized() && "Unsupported floating point type!");
// Load out a legal integer with the same sign bit as the float.
SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),
@@ -1599,9 +1613,10 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
(FloatVT.getSizeInBits() - 8 * ByteOffset);
assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?");
if (BitShift)
- SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit,
- DAG.getConstant(BitShift, dl,
- TLI.getShiftAmountTy(SignBit.getValueType())));
+ SignBit = DAG.getNode(
+ ISD::SHL, dl, LoadTy, SignBit,
+ DAG.getConstant(BitShift, dl,
+ TLI.getShiftAmountTy(SignBit.getValueType(), DL)));
}
}
// Now get the sign bit proper, by seeing whether the value is negative.
@@ -1777,9 +1792,8 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
EVT DestVT,
SDLoc dl) {
// Create the stack frame object.
- unsigned SrcAlign =
- TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType().
- getTypeForEVT(*DAG.getContext()));
+ unsigned SrcAlign = DAG.getDataLayout().getPrefTypeAlignment(
+ SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
@@ -1790,7 +1804,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
- unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
+ unsigned DestAlign = DAG.getDataLayout().getPrefTypeAlignment(DestType);
// Emit a store to the stack slot. Use a truncstore if the input value is
// larger than DestVT.
@@ -1994,7 +2008,8 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
}
}
Constant *CP = ConstantVector::get(CV);
- SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy());
+ SDValue CPIdx =
+ DAG.getConstantPool(CP, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
@@ -2058,7 +2073,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy());
+ TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
@@ -2106,7 +2121,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy());
+ TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
@@ -2140,7 +2155,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy());
+ TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
@@ -2277,7 +2292,7 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
Args.push_back(Entry);
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy());
+ TLI.getPointerTy(DAG.getDataLayout()));
SDLoc dl(Node);
TargetLowering::CallLoweringInfo CLI(DAG);
@@ -2389,7 +2404,7 @@ SelectionDAGLegalize::ExpandSinCosLibCall(SDNode *Node,
Args.push_back(Entry);
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy());
+ TLI.getPointerTy(DAG.getDataLayout()));
SDLoc dl(Node);
TargetLowering::CallLoweringInfo CLI(DAG);
@@ -2426,7 +2441,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Hi = StackSlot;
SDValue Lo = DAG.getNode(ISD::ADD, dl, StackSlot.getValueType(),
StackSlot, WordOff);
- if (TLI.isLittleEndian())
+ if (DAG.getDataLayout().isLittleEndian())
std::swap(Hi, Lo);
// if signed map to unsigned space
@@ -2509,8 +2524,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
if (!isSigned) {
SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
- SDValue ShiftConst =
- DAG.getConstant(1, dl, TLI.getShiftAmountTy(Op0.getValueType()));
+ SDValue ShiftConst = DAG.getConstant(
+ 1, dl, TLI.getShiftAmountTy(Op0.getValueType(), DAG.getDataLayout()));
SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
SDValue AndConst = DAG.getConstant(1, dl, MVT::i64);
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
@@ -2545,7 +2560,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
MVT::i64),
ISD::SETUGE);
SDValue Sel2 = DAG.getSelect(dl, MVT::i64, Ge, Sel, Op0);
- EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType());
+ EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType(), DAG.getDataLayout());
SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
DAG.getConstant(32, dl, SHVT));
@@ -2584,11 +2599,13 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
}
- if (TLI.isLittleEndian()) FF <<= 32;
+ if (DAG.getDataLayout().isLittleEndian())
+ FF <<= 32;
Constant *FudgeFactor = ConstantInt::get(
Type::getInt64Ty(*DAG.getContext()), FF);
- SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
+ SDValue CPIdx =
+ DAG.getConstantPool(FudgeFactor, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
CPIdx = DAG.getNode(ISD::ADD, dl, CPIdx.getValueType(), CPIdx, CstOffset);
Alignment = std::min(Alignment, 4u);
@@ -2699,7 +2716,7 @@ SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
/// Open code the operations for BSWAP of the specified operation.
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, SDLoc dl) {
EVT VT = Op.getValueType();
- EVT SHVT = TLI.getShiftAmountTy(VT);
+ EVT SHVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled Expand type in BSWAP!");
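ExpandBSWAP open-codes the byte swap with shifts and masks whose amounts are constants of the shift-amount type, which is why the query now needs the DataLayout. Written out for the i32 case, the emitted pattern is the standard one:

#include <cassert>
#include <cstdint>

static uint32_t bswap32(uint32_t X) {
  uint32_t T1 = X << 24;                // byte 0 -> byte 3
  uint32_t T2 = (X & 0x0000FF00u) << 8; // byte 1 -> byte 2
  uint32_t T3 = (X >> 8) & 0x0000FF00u; // byte 2 -> byte 1
  uint32_t T4 = X >> 24;                // byte 3 -> byte 0
  return T1 | T2 | T3 | T4;
}

int main() { assert(bswap32(0x11223344u) == 0x44332211u); }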
@@ -2756,7 +2773,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
default: llvm_unreachable("Cannot expand this yet!");
case ISD::CTPOP: {
EVT VT = Op.getValueType();
- EVT ShVT = TLI.getShiftAmountTy(VT);
+ EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
unsigned Len = VT.getSizeInBits();
assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 &&
@@ -2814,7 +2831,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
//
// Ref: "Hacker's Delight" by Henry Warren
EVT VT = Op.getValueType();
- EVT ShVT = TLI.getShiftAmountTy(VT);
+ EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
unsigned len = VT.getSizeInBits();
for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
SDValue Tmp3 = DAG.getConstant(1ULL << i, dl, ShVT);
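The loop above is the "Hacker's Delight" CTLZ expansion: OR-ing the value with itself shifted right by 1, 2, 4, ... smears the leading one bit into every lower position, after which the count of leading zeros equals the population count of the complement. In plain C++ for 32 bits:

#include <cassert>
#include <cstdint>

static unsigned popcount32(uint32_t X) {
  unsigned N = 0;
  for (; X; X &= X - 1) // clear the lowest set bit each iteration
    ++N;
  return N;
}

static unsigned ctlz32(uint32_t X) {
  for (unsigned Shift = 1; Shift <= 16; Shift <<= 1) // 1, 2, 4, 8, 16
    X |= X >> Shift; // smear the top set bit across all lower positions
  return popcount32(~X);
}

int main() {
  assert(ctlz32(1u) == 31 && ctlz32(0x80000000u) == 0 && ctlz32(0) == 32);
}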
@@ -2903,10 +2920,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
TargetLowering::ArgListTy Args;
TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(dl).setChain(Node->getOperand(0))
- .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
- DAG.getExternalSymbol("__sync_synchronize",
- TLI.getPointerTy()), std::move(Args), 0);
+ CLI.setDebugLoc(dl)
+ .setChain(Node->getOperand(0))
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol("__sync_synchronize",
+ TLI.getPointerTy(DAG.getDataLayout())),
+ std::move(Args), 0);
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
@@ -3002,10 +3021,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// If this operation is not supported, lower it to 'abort()' call
TargetLowering::ArgListTy Args;
TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(dl).setChain(Node->getOperand(0))
- .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
- DAG.getExternalSymbol("abort", TLI.getPointerTy()),
- std::move(Args), 0);
+ CLI.setDebugLoc(dl)
+ .setChain(Node->getOperand(0))
+ .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
+ DAG.getExternalSymbol("abort",
+ TLI.getPointerTy(DAG.getDataLayout())),
+ std::move(Args), 0);
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
Results.push_back(CallResult.second);
@@ -3028,7 +3049,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// SAR. However, it is doubtful that any exist.
EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
EVT VT = Node->getValueType(0);
- EVT ShiftAmountTy = TLI.getShiftAmountTy(VT);
+ EVT ShiftAmountTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
if (VT.isVector())
ShiftAmountTy = VT;
unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
@@ -3092,9 +3113,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Tmp2 = Node->getOperand(1);
unsigned Align = Node->getConstantOperandVal(3);
- SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
- MachinePointerInfo(V),
- false, false, false, 0);
+ SDValue VAListLoad =
+ DAG.getLoad(TLI.getPointerTy(DAG.getDataLayout()), dl, Tmp1, Tmp2,
+ MachinePointerInfo(V), false, false, false, 0);
SDValue VAList = VAListLoad;
if (Align > TLI.getMinStackArgumentAlignment()) {
@@ -3111,10 +3132,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
- DAG.getConstant(TLI.getDataLayout()->
- getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
- dl,
- VAList.getValueType()));
+ DAG.getConstant(DAG.getDataLayout().getTypeAllocSize(
+ VT.getTypeForEVT(*DAG.getContext())),
+ dl, VAList.getValueType()));
// Store the incremented VAList to the legalized pointer
Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
MachinePointerInfo(V), false, false, 0);
@@ -3129,9 +3149,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// output, returning the chain.
const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
- Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
- Node->getOperand(2), MachinePointerInfo(VS),
- false, false, false, 0);
+ Tmp1 = DAG.getLoad(TLI.getPointerTy(DAG.getDataLayout()), dl,
+ Node->getOperand(0), Node->getOperand(2),
+ MachinePointerInfo(VS), false, false, false, 0);
Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
MachinePointerInfo(VD), false, false, 0);
Results.push_back(Tmp1);
@@ -3226,14 +3246,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
}
unsigned Idx = Mask[i];
if (Idx < NumElems)
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- Op0,
- DAG.getConstant(Idx, dl, TLI.getVectorIdxTy())));
+ Ops.push_back(DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
else
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- Op1,
- DAG.getConstant(Idx - NumElems, dl,
- TLI.getVectorIdxTy())));
+ Ops.push_back(DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op1,
+ DAG.getConstant(Idx - NumElems, dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout()))));
}
Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
@@ -3247,8 +3267,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
// 1 -> Hi
Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
- DAG.getConstant(OpTy.getSizeInBits()/2, dl,
- TLI.getShiftAmountTy(Node->getOperand(0).getValueType())));
+ DAG.getConstant(OpTy.getSizeInBits() / 2, dl,
+ TLI.getShiftAmountTy(
+ Node->getOperand(0).getValueType(),
+ DAG.getDataLayout())));
Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
} else {
// 0 -> Lo
@@ -3646,8 +3668,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
TLI.expandMUL(Node, Lo, Hi, HalfType, DAG)) {
Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
Hi = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Hi);
- SDValue Shift = DAG.getConstant(HalfType.getSizeInBits(), dl,
- TLI.getShiftAmountTy(HalfType));
+ SDValue Shift =
+ DAG.getConstant(HalfType.getSizeInBits(), dl,
+ TLI.getShiftAmountTy(HalfType, DAG.getDataLayout()));
Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
Results.push_back(DAG.getNode(ISD::OR, dl, VT, Lo, Hi));
break;
@@ -3759,12 +3782,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// The high part is obtained by SRA'ing all but one of the bits of low
// part.
unsigned LoSize = VT.getSizeInBits();
- SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
- DAG.getConstant(LoSize - 1, dl,
- TLI.getPointerTy()));
- SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
- DAG.getConstant(LoSize - 1, dl,
- TLI.getPointerTy()));
+ SDValue HiLHS =
+ DAG.getNode(ISD::SRA, dl, VT, RHS,
+ DAG.getConstant(LoSize - 1, dl,
+ TLI.getPointerTy(DAG.getDataLayout())));
+ SDValue HiRHS =
+ DAG.getNode(ISD::SRA, dl, VT, LHS,
+ DAG.getConstant(LoSize - 1, dl,
+ TLI.getPointerTy(DAG.getDataLayout())));
// Here we're passing the 2 arguments explicitly as 4 arguments that are
// pre-lowered to the correct types. This all depends upon WideVT not
@@ -3785,8 +3810,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
}
if (isSigned) {
- Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, dl,
- TLI.getShiftAmountTy(BottomHalf.getValueType()));
+ Tmp1 = DAG.getConstant(
+ VT.getSizeInBits() - 1, dl,
+ TLI.getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
TopHalf = DAG.getSetCC(dl, getSetCCResultType(VT), TopHalf, Tmp1,
ISD::SETNE);
@@ -3802,9 +3828,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
EVT PairTy = Node->getValueType(0);
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
- Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
- DAG.getConstant(PairTy.getSizeInBits()/2, dl,
- TLI.getShiftAmountTy(PairTy)));
+ Tmp2 = DAG.getNode(
+ ISD::SHL, dl, PairTy, Tmp2,
+ DAG.getConstant(PairTy.getSizeInBits() / 2, dl,
+ TLI.getShiftAmountTy(PairTy, DAG.getDataLayout())));
Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
break;
}
@@ -3828,9 +3855,9 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
SDValue Table = Node->getOperand(1);
SDValue Index = Node->getOperand(2);
- EVT PTy = TLI.getPointerTy();
+ EVT PTy = TLI.getPointerTy(DAG.getDataLayout());
- const DataLayout &TD = *TLI.getDataLayout();
+ const DataLayout &TD = DAG.getDataLayout();
unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
@@ -3936,7 +3963,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
assert(!TLI.isOperationExpand(ISD::SELECT, VT) &&
"Cannot expand ISD::SELECT_CC when ISD::SELECT also needs to be "
"expanded.");
- EVT CCVT = TLI.getSetCCResultType(*DAG.getContext(), CmpVT);
+ EVT CCVT =
+ TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
SDValue Cond = DAG.getNode(ISD::SETCC, dl, CCVT, Tmp1, Tmp2, CC);
Results.push_back(DAG.getSelect(dl, VT, Cond, Tmp3, Tmp4));
break;
@@ -4036,14 +4064,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
SmallVector<SDValue, 8> Scalars;
for (unsigned Idx = 0; Idx < NumElem; Idx++) {
- SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- VT.getScalarType(),
- Node->getOperand(0),
- DAG.getConstant(Idx, dl, TLI.getVectorIdxTy()));
- SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- VT.getScalarType(),
- Node->getOperand(1),
- DAG.getConstant(Idx, dl, TLI.getVectorIdxTy()));
+ SDValue Ex = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(), Node->getOperand(0),
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ SDValue Sh = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, VT.getScalarType(), Node->getOperand(1),
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
VT.getScalarType(), Ex, Sh));
}
@@ -4114,9 +4140,10 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
- Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
- DAG.getConstant(DiffBits, dl,
- TLI.getShiftAmountTy(NVT)));
+ Tmp1 = DAG.getNode(
+ ISD::SRL, dl, NVT, Tmp1,
+ DAG.getConstant(DiffBits, dl,
+ TLI.getShiftAmountTy(NVT, DAG.getDataLayout())));
Results.push_back(Tmp1);
break;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 37fdf44..3c50a41 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -218,29 +218,35 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) {
unsigned RSize = RVT.getSizeInBits();
// First get the sign bit of second operand.
- SDValue SignBit = DAG.getNode(ISD::SHL, dl, RVT, DAG.getConstant(1, dl, RVT),
- DAG.getConstant(RSize - 1, dl,
- TLI.getShiftAmountTy(RVT)));
+ SDValue SignBit = DAG.getNode(
+ ISD::SHL, dl, RVT, DAG.getConstant(1, dl, RVT),
+ DAG.getConstant(RSize - 1, dl,
+ TLI.getShiftAmountTy(RVT, DAG.getDataLayout())));
SignBit = DAG.getNode(ISD::AND, dl, RVT, RHS, SignBit);
// Shift right or sign-extend it if the two operands have different types.
int SizeDiff = RVT.getSizeInBits() - LVT.getSizeInBits();
if (SizeDiff > 0) {
- SignBit = DAG.getNode(ISD::SRL, dl, RVT, SignBit,
- DAG.getConstant(SizeDiff, dl,
- TLI.getShiftAmountTy(SignBit.getValueType())));
+ SignBit =
+ DAG.getNode(ISD::SRL, dl, RVT, SignBit,
+ DAG.getConstant(SizeDiff, dl,
+ TLI.getShiftAmountTy(SignBit.getValueType(),
+ DAG.getDataLayout())));
SignBit = DAG.getNode(ISD::TRUNCATE, dl, LVT, SignBit);
} else if (SizeDiff < 0) {
SignBit = DAG.getNode(ISD::ANY_EXTEND, dl, LVT, SignBit);
- SignBit = DAG.getNode(ISD::SHL, dl, LVT, SignBit,
- DAG.getConstant(-SizeDiff, dl,
- TLI.getShiftAmountTy(SignBit.getValueType())));
+ SignBit =
+ DAG.getNode(ISD::SHL, dl, LVT, SignBit,
+ DAG.getConstant(-SizeDiff, dl,
+ TLI.getShiftAmountTy(SignBit.getValueType(),
+ DAG.getDataLayout())));
}
// Clear the sign bit of the first operand.
- SDValue Mask = DAG.getNode(ISD::SHL, dl, LVT, DAG.getConstant(1, dl, LVT),
- DAG.getConstant(LSize - 1, dl,
- TLI.getShiftAmountTy(LVT)));
+ SDValue Mask = DAG.getNode(
+ ISD::SHL, dl, LVT, DAG.getConstant(1, dl, LVT),
+ DAG.getConstant(LSize - 1, dl,
+ TLI.getShiftAmountTy(LVT, DAG.getDataLayout())));
Mask = DAG.getNode(ISD::SUB, dl, LVT, Mask, DAG.getConstant(1, dl, LVT));
LHS = DAG.getNode(ISD::AND, dl, LVT, LHS, Mask);
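Once floats are softened to integers, FCOPYSIGN is pure bit manipulation: isolate the sign bit of the second operand, clear the sign bit of the first, and OR. For two f32-as-i32 operands (same width, so the SizeDiff shifts in the code drop out) the computation reduces to:

#include <cassert>
#include <cstdint>

static uint32_t copysign32(uint32_t LHS, uint32_t RHS) {
  uint32_t SignBitMask = 1u << 31;      // shl(1, RSize - 1)
  uint32_t SignBit = RHS & SignBitMask; // sign of the second operand
  uint32_t Mask = SignBitMask - 1;      // shl(1, LSize - 1) - 1
  return (LHS & Mask) | SignBit;        // magnitude of LHS, sign of RHS
}

int main() {
  // Bit patterns: 1.0f is 0x3F800000, -1.0f is 0xBF800000.
  assert(copysign32(0x3F800000u, 0x80000000u) == 0xBF800000u);
}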
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index f41202c..9f060a09 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -282,7 +282,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
Lo = BitConvertToInteger(Lo);
Hi = BitConvertToInteger(Hi);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
InOp = DAG.getNode(ISD::ANY_EXTEND, dl,
@@ -310,8 +310,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) {
SDLoc dl(N);
unsigned DiffBits = NVT.getScalarSizeInBits() - OVT.getScalarSizeInBits();
- return DAG.getNode(ISD::SRL, dl, NVT, DAG.getNode(ISD::BSWAP, dl, NVT, Op),
- DAG.getConstant(DiffBits, dl, TLI.getShiftAmountTy(NVT)));
+ return DAG.getNode(
+ ISD::SRL, dl, NVT, DAG.getNode(ISD::BSWAP, dl, NVT, Op),
+ DAG.getConstant(DiffBits, dl,
+ TLI.getShiftAmountTy(NVT, DAG.getDataLayout())));
}
SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) {
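The SRL by DiffBits in PromoteIntRes_BSWAP exists because byte-swapping a narrow value inside a wider promoted register leaves the interesting bytes at the top. A worked i16-in-i32 example:

#include <cassert>
#include <cstdint>

static uint32_t bswap32(uint32_t X) {
  return (X << 24) | ((X & 0xFF00u) << 8) | ((X >> 8) & 0xFF00u) | (X >> 24);
}

static uint16_t bswap16_promoted(uint16_t V) {
  uint32_t Wide = V;           // value zero-extended into the promoted type
  unsigned DiffBits = 32 - 16; // NVT bits - OVT bits
  // bswap32(0x00001234) == 0x34120000: shift right to recover 0x3412.
  return static_cast<uint16_t>(bswap32(Wide) >> DiffBits);
}

int main() { assert(bswap16_promoted(0x1234) == 0x3412); }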
@@ -799,7 +801,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
}
// Handle endianness of the load.
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::reverse(Parts.begin(), Parts.end());
// Assemble the parts in the promoted type.
@@ -809,8 +811,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
SDValue Part = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Parts[i]);
// Shift it to the right position and "or" it in.
Part = DAG.getNode(ISD::SHL, dl, NVT, Part,
- DAG.getConstant(i*RegVT.getSizeInBits(), dl,
- TLI.getPointerTy()));
+ DAG.getConstant(i * RegVT.getSizeInBits(), dl,
+ TLI.getPointerTy(DAG.getDataLayout())));
Res = DAG.getNode(ISD::OR, dl, NVT, Res, Part);
}
@@ -1004,7 +1006,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) {
Hi = DAG.getNode(ISD::SHL, dl, N->getValueType(0), Hi,
DAG.getConstant(OVT.getSizeInBits(), dl,
- TLI.getPointerTy()));
+ TLI.getPointerTy(DAG.getDataLayout())));
return DAG.getNode(ISD::OR, dl, N->getValueType(0), Lo, Hi);
}
@@ -1063,7 +1065,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
// Promote the index.
SDValue Idx = DAG.getZExtOrTrunc(N->getOperand(2), SDLoc(N),
- TLI.getVectorIdxTy());
+ TLI.getVectorIdxTy(DAG.getDataLayout()));
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
N->getOperand(1), Idx), 0);
}
@@ -1356,9 +1358,9 @@ std::pair <SDValue, SDValue> DAGTypeLegalizer::ExpandAtomic(SDNode *Node) {
return ExpandChainLibCall(LC, Node, false);
}
-/// ExpandShiftByConstant - N is a shift by a value that needs to be expanded,
+/// N is a shift by a value that needs to be expanded,
/// and the shift amount is a constant 'Amt'. Expand the operation.
-void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
+void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, const APInt &Amt,
SDValue &Lo, SDValue &Hi) {
SDLoc DL(N);
// Expand the incoming operand to be shifted, so that we have its parts
@@ -1379,9 +1381,9 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
EVT ShTy = N->getOperand(1).getValueType();
if (N->getOpcode() == ISD::SHL) {
- if (Amt > VTBits) {
+ if (Amt.ugt(VTBits)) {
Lo = Hi = DAG.getConstant(0, DL, NVT);
- } else if (Amt > NVTBits) {
+ } else if (Amt.ugt(NVTBits)) {
Lo = DAG.getConstant(0, DL, NVT);
Hi = DAG.getNode(ISD::SHL, DL,
NVT, InL, DAG.getConstant(Amt - NVTBits, DL, ShTy));
@@ -1403,16 +1405,15 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
DAG.getNode(ISD::SHL, DL, NVT, InH,
DAG.getConstant(Amt, DL, ShTy)),
DAG.getNode(ISD::SRL, DL, NVT, InL,
- DAG.getConstant(NVTBits - Amt, DL, ShTy)));
+ DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
}
return;
}
if (N->getOpcode() == ISD::SRL) {
- if (Amt > VTBits) {
- Lo = DAG.getConstant(0, DL, NVT);
- Hi = DAG.getConstant(0, DL, NVT);
- } else if (Amt > NVTBits) {
+ if (Amt.ugt(VTBits)) {
+ Lo = Hi = DAG.getConstant(0, DL, NVT);
+ } else if (Amt.ugt(NVTBits)) {
Lo = DAG.getNode(ISD::SRL, DL,
NVT, InH, DAG.getConstant(Amt - NVTBits, DL, ShTy));
Hi = DAG.getConstant(0, DL, NVT);
@@ -1424,19 +1425,19 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(Amt, DL, ShTy)),
DAG.getNode(ISD::SHL, DL, NVT, InH,
- DAG.getConstant(NVTBits - Amt, DL, ShTy)));
+ DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
Hi = DAG.getNode(ISD::SRL, DL, NVT, InH, DAG.getConstant(Amt, DL, ShTy));
}
return;
}
assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
- if (Amt > VTBits) {
+ if (Amt.ugt(VTBits)) {
Hi = Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits - 1, DL, ShTy));
- } else if (Amt > NVTBits) {
+ } else if (Amt.ugt(NVTBits)) {
Lo = DAG.getNode(ISD::SRA, DL, NVT, InH,
- DAG.getConstant(Amt-NVTBits, DL, ShTy));
+ DAG.getConstant(Amt - NVTBits, DL, ShTy));
Hi = DAG.getNode(ISD::SRA, DL, NVT, InH,
DAG.getConstant(NVTBits - 1, DL, ShTy));
} else if (Amt == NVTBits) {
@@ -1448,7 +1449,7 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
DAG.getNode(ISD::SRL, DL, NVT, InL,
DAG.getConstant(Amt, DL, ShTy)),
DAG.getNode(ISD::SHL, DL, NVT, InH,
- DAG.getConstant(NVTBits - Amt, DL, ShTy)));
+ DAG.getConstant(-Amt + NVTBits, DL, ShTy)));
Hi = DAG.getNode(ISD::SRA, DL, NVT, InH, DAG.getConstant(Amt, DL, ShTy));
}
}
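Besides switching Amt to APInt (with ugt comparisons, since the amount may exceed the bit width), the hunks above rewrite NVTBits - Amt as -Amt + NVTBits so the APInt operator overloads line up. The general SHL case the code implements, modelled on a 64-bit value held as two 32-bit halves:

#include <cassert>
#include <cstdint>

static uint64_t shlExpanded(uint32_t InL, uint32_t InH, unsigned Amt) {
  assert(Amt > 0 && Amt < 32 && "general case: 0 < Amt < NVTBits");
  uint32_t Lo = InL << Amt;
  // The high half takes the bits shifted out of the low half:
  // srl(InL, -Amt + NVTBits) in the patch's spelling.
  uint32_t Hi = (InH << Amt) | (InL >> (32 - Amt));
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}

int main() {
  uint64_t V = 0x0123456789ABCDEFull;
  assert(shlExpanded(static_cast<uint32_t>(V),
                     static_cast<uint32_t>(V >> 32), 8) == V << 8);
}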
@@ -1808,7 +1809,8 @@ void DAGTypeLegalizer::ExpandIntRes_AssertSext(SDNode *N,
Lo = DAG.getNode(ISD::AssertSext, dl, NVT, Lo, DAG.getValueType(EVT));
// The high part replicates the sign bit of Lo, make it explicit.
Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,
- DAG.getConstant(NVTBits - 1, dl, TLI.getPointerTy()));
+ DAG.getConstant(NVTBits - 1, dl,
+ TLI.getPointerTy(DAG.getDataLayout())));
}
}
@@ -1975,7 +1977,8 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
// lo part.
unsigned LoSize = Lo.getValueType().getSizeInBits();
Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,
- DAG.getConstant(LoSize - 1, dl, TLI.getPointerTy()));
+ DAG.getConstant(LoSize - 1, dl,
+ TLI.getPointerTy(DAG.getDataLayout())));
} else if (ExtType == ISD::ZEXTLOAD) {
// The high part is just a zero.
Hi = DAG.getConstant(0, dl, NVT);
@@ -1984,7 +1987,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
// The high part is undefined.
Hi = DAG.getUNDEF(NVT);
}
- } else if (TLI.isLittleEndian()) {
+ } else if (DAG.getDataLayout().isLittleEndian()) {
// Little-endian - low bits are at low addresses.
Lo = DAG.getLoad(NVT, dl, Ch, Ptr, N->getPointerInfo(),
isVolatile, isNonTemporal, isInvariant, Alignment,
@@ -2039,15 +2042,16 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
if (ExcessBits < NVT.getSizeInBits()) {
// Transfer low bits from the bottom of Hi to the top of Lo.
- Lo = DAG.getNode(ISD::OR, dl, NVT, Lo,
- DAG.getNode(ISD::SHL, dl, NVT, Hi,
- DAG.getConstant(ExcessBits, dl,
- TLI.getPointerTy())));
+ Lo = DAG.getNode(
+ ISD::OR, dl, NVT, Lo,
+ DAG.getNode(ISD::SHL, dl, NVT, Hi,
+ DAG.getConstant(ExcessBits, dl,
+ TLI.getPointerTy(DAG.getDataLayout()))));
// Move high bits to the right position in Hi.
- Hi = DAG.getNode(ExtType == ISD::SEXTLOAD ? ISD::SRA : ISD::SRL, dl,
- NVT, Hi,
+ Hi = DAG.getNode(ExtType == ISD::SEXTLOAD ? ISD::SRA : ISD::SRL, dl, NVT,
+ Hi,
DAG.getConstant(NVT.getSizeInBits() - ExcessBits, dl,
- TLI.getPointerTy()));
+ TLI.getPointerTy(DAG.getDataLayout())));
}
}
@@ -2173,7 +2177,7 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
// If we can emit an efficient shift operation, do so now. Check to see if
// the RHS is a constant.
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
- return ExpandShiftByConstant(N, CN->getZExtValue(), Lo, Hi);
+ return ExpandShiftByConstant(N, CN->getAPIntValue(), Lo, Hi);
// If we can determine that the high bit of the shift is zero or one, even if
// the low bits are variable, emit this shift in an optimized form.
@@ -2206,7 +2210,7 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
// have an illegal type. Fix that first by casting the operand, otherwise
// the new SHL_PARTS operation would need further legalization.
SDValue ShiftOp = N->getOperand(1);
- EVT ShiftTy = TLI.getShiftAmountTy(VT);
+ EVT ShiftTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
assert(ShiftTy.getScalarType().getSizeInBits() >=
Log2_32_Ceil(VT.getScalarType().getSizeInBits()) &&
"ShiftAmountTy is too small to cover the range of this type!");
@@ -2276,8 +2280,9 @@ void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,
Lo = DAG.getNode(ISD::SIGN_EXTEND, dl, NVT, N->getOperand(0));
// The high part is obtained by SRA'ing all but one of the bits of low part.
unsigned LoSize = NVT.getSizeInBits();
- Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,
- DAG.getConstant(LoSize - 1, dl, TLI.getPointerTy()));
+ Hi = DAG.getNode(
+ ISD::SRA, dl, NVT, Lo,
+ DAG.getConstant(LoSize - 1, dl, TLI.getPointerTy(DAG.getDataLayout())));
} else {
// For example, extension of an i48 to an i64. The operand type necessarily
// promotes to the result type, so will end up being expanded too.
@@ -2312,7 +2317,7 @@ ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) {
// things like sextinreg V:i64 from i8.
Hi = DAG.getNode(ISD::SRA, dl, Hi.getValueType(), Lo,
DAG.getConstant(Hi.getValueType().getSizeInBits() - 1, dl,
- TLI.getPointerTy()));
+ TLI.getPointerTy(DAG.getDataLayout())));
} else {
// For example, extension of an i48 to an i64. Leave the low part alone,
// sext_inreg the high part.
@@ -2355,10 +2360,10 @@ void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N,
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
Lo = DAG.getNode(ISD::TRUNCATE, dl, NVT, N->getOperand(0));
- Hi = DAG.getNode(ISD::SRL, dl,
- N->getOperand(0).getValueType(), N->getOperand(0),
+ Hi = DAG.getNode(ISD::SRL, dl, N->getOperand(0).getValueType(),
+ N->getOperand(0),
DAG.getConstant(NVT.getSizeInBits(), dl,
- TLI.getPointerTy()));
+ TLI.getPointerTy(DAG.getDataLayout())));
Hi = DAG.getNode(ISD::TRUNCATE, dl, NVT, Hi);
}
@@ -2414,7 +2419,7 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
}
Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
- EVT PtrVT = TLI.getPointerTy();
+ EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
// Replace this with a libcall that will check overflow.
@@ -2845,7 +2850,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
Alignment, AAInfo);
}
- if (TLI.isLittleEndian()) {
+ if (DAG.getDataLayout().isLittleEndian()) {
// Little-endian - low bits are at low addresses.
GetExpandedInteger(N->getValue(), Lo, Hi);
@@ -2882,11 +2887,12 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
// Transfer high bits from the top of Lo to the bottom of Hi.
Hi = DAG.getNode(ISD::SHL, dl, NVT, Hi,
DAG.getConstant(NVT.getSizeInBits() - ExcessBits, dl,
- TLI.getPointerTy()));
- Hi = DAG.getNode(ISD::OR, dl, NVT, Hi,
- DAG.getNode(ISD::SRL, dl, NVT, Lo,
- DAG.getConstant(ExcessBits, dl,
- TLI.getPointerTy())));
+ TLI.getPointerTy(DAG.getDataLayout())));
+ Hi = DAG.getNode(
+ ISD::OR, dl, NVT, Hi,
+ DAG.getNode(ISD::SRL, dl, NVT, Lo,
+ DAG.getConstant(ExcessBits, dl,
+ TLI.getPointerTy(DAG.getDataLayout()))));
}
// Store both the high bits and maybe some of the low bits.
@@ -2956,14 +2962,15 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
ISD::SETLT);
// Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
- SDValue FudgePtr = DAG.getConstantPool(
- ConstantInt::get(*DAG.getContext(), FF.zext(64)),
- TLI.getPointerTy());
+ SDValue FudgePtr =
+ DAG.getConstantPool(ConstantInt::get(*DAG.getContext(), FF.zext(64)),
+ TLI.getPointerTy(DAG.getDataLayout()));
// Get a pointer to FF if the sign bit was set, or to 0 otherwise.
SDValue Zero = DAG.getIntPtrConstant(0, dl);
SDValue Four = DAG.getIntPtrConstant(4, dl);
- if (TLI.isBigEndian()) std::swap(Zero, Four);
+ if (DAG.getDataLayout().isBigEndian())
+ std::swap(Zero, Four);
SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet,
Zero, Four);
unsigned Alignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlignment();
@@ -3113,9 +3120,9 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CONCAT_VECTORS(SDNode *N) {
for (unsigned i = 0; i < NumOperands; ++i) {
SDValue Op = N->getOperand(i);
for (unsigned j = 0; j < NumElem; ++j) {
- SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- InElemTy, Op, DAG.getConstant(j, dl,
- TLI.getVectorIdxTy()));
+ SDValue Ext = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, InElemTy, Op,
+ DAG.getConstant(j, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Ops[i * NumElem + j] = DAG.getNode(ISD::ANY_EXTEND, dl, OutElemTy, Ext);
}
}
@@ -3142,7 +3149,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_INSERT_VECTOR_ELT(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDLoc dl(N);
SDValue V0 = GetPromotedInteger(N->getOperand(0));
- SDValue V1 = DAG.getZExtOrTrunc(N->getOperand(1), dl, TLI.getVectorIdxTy());
+ SDValue V1 = DAG.getZExtOrTrunc(N->getOperand(1), dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout()));
SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
V0->getValueType(0).getScalarType(), V0, V1);
@@ -3179,8 +3187,9 @@ SDValue DAGTypeLegalizer::PromoteIntOp_CONCAT_VECTORS(SDNode *N) {
for (unsigned i=0; i<NumElem; ++i) {
// Extract element from incoming vector
- SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SclrTy,
- Incoming, DAG.getConstant(i, dl, TLI.getVectorIdxTy()));
+ SDValue Ex = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, SclrTy, Incoming,
+ DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue Tr = DAG.getNode(ISD::TRUNCATE, dl, RetSclrTy, Ex);
NewOps.push_back(Tr);
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 9c29769..a7392fa 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -1006,7 +1006,7 @@ SDValue DAGTypeLegalizer::GetVectorElementPointer(SDValue VecPtr, EVT EltVT,
SDValue Index) {
SDLoc dl(Index);
// Make sure the index type is big enough to compute in.
- Index = DAG.getZExtOrTrunc(Index, dl, TLI.getPointerTy());
+ Index = DAG.getZExtOrTrunc(Index, dl, TLI.getPointerTy(DAG.getDataLayout()));
// Calculate the element offset and add it to the pointer.
unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
@@ -1030,7 +1030,7 @@ SDValue DAGTypeLegalizer::JoinIntegers(SDValue Lo, SDValue Hi) {
Hi = DAG.getNode(ISD::ANY_EXTEND, dlHi, NVT, Hi);
Hi = DAG.getNode(ISD::SHL, dlHi, NVT, Hi,
DAG.getConstant(LVT.getSizeInBits(), dlHi,
- TLI.getPointerTy()));
+ TLI.getPointerTy(DAG.getDataLayout())));
return DAG.getNode(ISD::OR, dlHi, NVT, Lo, Hi);
}
@@ -1079,7 +1079,7 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy());
+ TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
@@ -1117,7 +1117,7 @@ void DAGTypeLegalizer::SplitInteger(SDValue Op,
Lo = DAG.getNode(ISD::TRUNCATE, dl, LoVT, Op);
Hi = DAG.getNode(ISD::SRL, dl, Op.getValueType(), Op,
DAG.getConstant(LoVT.getSizeInBits(), dl,
- TLI.getPointerTy()));
+ TLI.getPointerTy(DAG.getDataLayout())));
Hi = DAG.getNode(ISD::TRUNCATE, dl, HiVT, Hi);
}
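SplitInteger is the inverse of JoinIntegers just above: the low part is a truncate, the high part a truncate of the value shifted right by the low type's width. In plain C++ for a 64-to-2x32 split:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Op = 0xDEADBEEFCAFEF00Dull;
  uint32_t Lo = static_cast<uint32_t>(Op);       // TRUNCATE
  uint32_t Hi = static_cast<uint32_t>(Op >> 32); // TRUNCATE(SRL(Op, LoBits))
  // JoinIntegers recombines as (Hi << LoBits) | Lo; the pair round-trips.
  assert(((static_cast<uint64_t>(Hi) << 32) | Lo) == Op);
}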
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 2f27789..d1131a7 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -73,7 +73,7 @@ private:
}
EVT getSetCCResultType(EVT VT) const {
- return TLI.getSetCCResultType(*DAG.getContext(), VT);
+ return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
/// IgnoreNodeResults - Pretend all of this node's results are legal.
@@ -167,7 +167,7 @@ private:
SDValue GetVectorElementPointer(SDValue VecPtr, EVT EltVT, SDValue Index);
SDValue JoinIntegers(SDValue Lo, SDValue Hi);
SDValue LibCallify(RTLIB::Libcall LC, SDNode *N, bool isSigned);
-
+
std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node, bool isSigned);
std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
@@ -347,7 +347,7 @@ private:
void ExpandIntRes_ATOMIC_LOAD (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandShiftByConstant(SDNode *N, unsigned Amt,
+ void ExpandShiftByConstant(SDNode *N, const APInt &Amt,
SDValue &Lo, SDValue &Hi);
bool ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
bool ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 330c31c..14d8f77 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -60,18 +60,20 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
case TargetLowering::TypeExpandInteger:
- case TargetLowering::TypeExpandFloat:
+ case TargetLowering::TypeExpandFloat: {
+ auto &DL = DAG.getDataLayout();
// Convert the expanded pieces of the input.
GetExpandedOp(InOp, Lo, Hi);
- if (TLI.hasBigEndianPartOrdering(InVT) !=
- TLI.hasBigEndianPartOrdering(OutVT))
+ if (TLI.hasBigEndianPartOrdering(InVT, DL) !=
+ TLI.hasBigEndianPartOrdering(OutVT, DL))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return;
+ }
case TargetLowering::TypeSplitVector:
GetSplitVector(InOp, Lo, Hi);
- if (TLI.hasBigEndianPartOrdering(OutVT))
+ if (TLI.hasBigEndianPartOrdering(OutVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
@@ -88,7 +90,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(InVT);
std::tie(Lo, Hi) = DAG.SplitVector(InOp, dl, LoVT, HiVT);
- if (TLI.hasBigEndianPartOrdering(OutVT))
+ if (TLI.hasBigEndianPartOrdering(OutVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
@@ -119,9 +121,9 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
SmallVector<SDValue, 8> Vals;
for (unsigned i = 0; i < NumElems; ++i)
- Vals.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ElemVT,
- CastInOp, DAG.getConstant(i, dl,
- TLI.getVectorIdxTy())));
+ Vals.push_back(DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, ElemVT, CastInOp,
+ DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
// Build Lo, Hi pair by pairing extracted elements if needed.
unsigned Slot = 0;
@@ -131,7 +133,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue LHS = Vals[Slot];
SDValue RHS = Vals[Slot + 1];
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(LHS, RHS);
Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl,
@@ -143,7 +145,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
Lo = Vals[Slot++];
Hi = Vals[Slot++];
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
return;
@@ -155,9 +157,8 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Create the stack frame object. Make sure it is aligned for both
// the source and expanded destination types.
- unsigned Alignment =
- TLI.getDataLayout()->getPrefTypeAlignment(NOutVT.
- getTypeForEVT(*DAG.getContext()));
+ unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(
+ NOutVT.getTypeForEVT(*DAG.getContext()));
SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
@@ -182,7 +183,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
false, false, MinAlign(Alignment, IncrementSize));
// Handle endianness of the load.
- if (TLI.hasBigEndianPartOrdering(OutVT))
+ if (TLI.hasBigEndianPartOrdering(OutVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
}
@@ -241,7 +242,7 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
DAG.getConstant(1, dl, Idx.getValueType()));
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, Idx);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
}
@@ -282,7 +283,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
Hi.getValue(1));
// Handle endianness of the load.
- if (TLI.hasBigEndianPartOrdering(ValueVT))
+ if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
// Modified the chain - switch anything that used the old chain to use
@@ -302,7 +303,7 @@ void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2), 0);
// Handle endianness of the load.
- if (TLI.hasBigEndianPartOrdering(OVT))
+ if (TLI.hasBigEndianPartOrdering(OVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
// Modified the chain - switch anything that used the old chain to use
@@ -325,7 +326,7 @@ void DAGTypeLegalizer::IntegerToVector(SDValue Op, unsigned NumElements,
if (NumElements > 1) {
NumElements >>= 1;
SplitInteger(Op, Parts[0], Parts[1]);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Parts[0], Parts[1]);
IntegerToVector(Parts[0], NumElements, Ops, EltVT);
IntegerToVector(Parts[1], NumElements, Ops, EltVT);
@@ -389,7 +390,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
for (unsigned i = 0; i < NumElts; ++i) {
SDValue Lo, Hi;
GetExpandedOp(N->getOperand(i), Lo, Hi);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
NewElts.push_back(Lo);
NewElts.push_back(Hi);
@@ -431,7 +432,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
SDValue Lo, Hi;
GetExpandedOp(Val, Lo, Hi);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
SDValue Idx = N->getOperand(2);
@@ -481,7 +482,7 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
SDValue Lo, Hi;
GetExpandedOp(St->getValue(), Lo, Hi);
- if (TLI.hasBigEndianPartOrdering(ValueVT))
+ if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Lo = DAG.getStore(Chain, dl, Lo, Ptr, St->getPointerInfo(),
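
The endianness queries in LegalizeTypesGeneric follow the same migration: TLI.isBigEndian() becomes DAG.getDataLayout().isBigEndian(), and hasBigEndianPartOrdering gains a DataLayout parameter. The behavior is unchanged; a sketch of the recurring idiom, assuming Lo/Hi already hold the two expanded halves:

    // On big-endian targets the part order of an expanded value is reversed.
    if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
      std::swap(Lo, Hi);
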
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index ee844a8..83d4ad5 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -503,7 +503,7 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
// Instead, we load all significant words, mask bits off, and concatenate
// them to form each element. Finally, they are extended to destination
// scalar type to build the destination vector.
- EVT WideVT = TLI.getPointerTy();
+ EVT WideVT = TLI.getPointerTy(DAG.getDataLayout());
assert(WideVT.isRound() &&
"Could not handle the sophisticated case when the widest integer is"
@@ -563,7 +563,8 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
SDValue Lo, Hi, ShAmt;
if (BitOffset < WideBits) {
- ShAmt = DAG.getConstant(BitOffset, dl, TLI.getShiftAmountTy(WideVT));
+ ShAmt = DAG.getConstant(
+ BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt);
Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask);
}
@@ -573,8 +574,9 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
WideIdx++;
BitOffset -= WideBits;
if (BitOffset > 0) {
- ShAmt = DAG.getConstant(SrcEltBits - BitOffset, dl,
- TLI.getShiftAmountTy(WideVT));
+ ShAmt = DAG.getConstant(
+ SrcEltBits - BitOffset, dl,
+ TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
Hi = DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt);
Hi = DAG.getNode(ISD::AND, dl, WideVT, Hi, SrcEltBitMask);
}
@@ -592,8 +594,9 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
Lo = DAG.getZExtOrTrunc(Lo, dl, DstEltVT);
break;
case ISD::SEXTLOAD:
- ShAmt = DAG.getConstant(WideBits - SrcEltBits, dl,
- TLI.getShiftAmountTy(WideVT));
+ ShAmt =
+ DAG.getConstant(WideBits - SrcEltBits, dl,
+ TLI.getShiftAmountTy(WideVT, DAG.getDataLayout()));
Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt);
Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt);
Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT);
@@ -663,8 +666,9 @@ SDValue VectorLegalizer::ExpandStore(SDValue Op) {
// and save them into memory individually.
SmallVector<SDValue, 8> Stores;
for (unsigned Idx = 0; Idx < NumElem; Idx++) {
- SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- RegSclVT, Value, DAG.getConstant(Idx, dl, TLI.getVectorIdxTy()));
+ SDValue Ex = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, RegSclVT, Value,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
// This scalar TruncStore may be illegal, but we legalize it later.
SDValue Store = DAG.getTruncStore(Chain, dl, Ex, BasePTR,
@@ -803,7 +807,7 @@ SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
// Place the extended lanes into the correct locations.
int ExtLaneScale = NumSrcElements / NumElements;
- int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0;
+ int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
for (int i = 0; i < NumElements; ++i)
ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
@@ -858,7 +862,7 @@ SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
ShuffleMask.push_back(i);
int ExtLaneScale = NumSrcElements / NumElements;
- int EndianOffset = TLI.isBigEndian() ? ExtLaneScale - 1 : 0;
+ int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
for (int i = 0; i < NumElements; ++i)
ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
@@ -995,12 +999,15 @@ SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
SDLoc dl(Op);
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
- SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
- DAG.getConstant(i, dl, TLI.getVectorIdxTy()));
- SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
- DAG.getConstant(i, dl, TLI.getVectorIdxTy()));
+ SDValue LHSElem = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
+ DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ SDValue RHSElem = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
+ DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Ops[i] = DAG.getNode(ISD::SETCC, dl,
- TLI.getSetCCResultType(*DAG.getContext(), TmpEltVT),
+ TLI.getSetCCResultType(DAG.getDataLayout(),
+ *DAG.getContext(), TmpEltVT),
LHSElem, RHSElem, CC);
Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
DAG.getConstant(APInt::getAllOnesValue
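
LegalizeVectorOps picks up the DataLayout-parameterized forms of three more TargetLowering queries: getShiftAmountTy, getVectorIdxTy, and getSetCCResultType. The per-element extract used throughout UnrollVSETCC now reads, in isolation (sketch, with i, dl, TmpEltVT, and LHS in scope as above):

    EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
    SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
                                  DAG.getConstant(i, dl, IdxVT));
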
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 905492c..4348ab7 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -251,8 +251,9 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
Op = GetScalarizedVector(Op);
} else {
EVT VT = OpVT.getVectorElementType();
- Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
+ Op = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, VT, Op,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
return DAG.getNode(N->getOpcode(), SDLoc(N), DestVT, Op);
}
@@ -384,10 +385,12 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
RHS = GetScalarizedVector(RHS);
} else {
EVT VT = OpVT.getVectorElementType();
- LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, LHS,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
- RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, RHS,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
+ LHS = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, VT, LHS,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ RHS = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, VT, RHS,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
// Turn it into a scalar SETCC.
@@ -742,7 +745,7 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
// expanded pieces.
if (LoVT == HiVT) {
GetExpandedOp(InOp, Lo, Hi);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
@@ -761,12 +764,12 @@ void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
// In the general case, convert the input to an integer and split it by hand.
EVT LoIntVT = EVT::getIntegerVT(*DAG.getContext(), LoVT.getSizeInBits());
EVT HiIntVT = EVT::getIntegerVT(*DAG.getContext(), HiVT.getSizeInBits());
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(LoIntVT, HiIntVT);
SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
@@ -819,7 +822,7 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
DAG.getConstant(IdxVal + LoVT.getVectorNumElements(), dl,
- TLI.getVectorIdxTy()));
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
}
void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
@@ -840,7 +843,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
// Store the new subvector into the specified index.
SDValue SubVecPtr = GetVectorElementPointer(StackPtr, SubVecVT, Idx);
Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
- unsigned Alignment = TLI.getDataLayout()->getPrefTypeAlignment(VecType);
+ unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType);
Store = DAG.getStore(Store, dl, SubVec, SubVecPtr, MachinePointerInfo(),
false, false, 0);
@@ -898,9 +901,10 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
Lo.getValueType(), Lo, Elt, Idx);
else
- Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Hi.getValueType(), Hi, Elt,
- DAG.getConstant(IdxVal - LoNumElts, dl,
- TLI.getVectorIdxTy()));
+ Hi =
+ DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, Hi.getValueType(), Hi, Elt,
+ DAG.getConstant(IdxVal - LoNumElts, dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
return;
}
@@ -919,8 +923,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// so use a truncating store.
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
- unsigned Alignment =
- TLI.getDataLayout()->getPrefTypeAlignment(VecType);
+ unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
false, false, 0);
@@ -1292,10 +1295,9 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
Idx -= Input * NewElts;
// Extract the vector element by hand.
- SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- Inputs[Input],
- DAG.getConstant(Idx, dl,
- TLI.getVectorIdxTy())));
+ SVOps.push_back(DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Inputs[Input],
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
}
// Construct the Lo/Hi output using a BUILD_VECTOR.
@@ -1472,7 +1474,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
Lo = BitConvertToInteger(Lo);
Hi = BitConvertToInteger(Hi);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0),
@@ -1763,9 +1765,9 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
for (const SDValue &Op : N->op_values()) {
for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
i != e; ++i) {
- Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
- Op, DAG.getConstant(i, DL, TLI.getVectorIdxTy())));
-
+ Elts.push_back(DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Op,
+ DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
}
}
@@ -1829,10 +1831,11 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
// type. This should normally be something that ends up being legal directly,
// but in theory if a target has very wide vectors and an annoyingly
// restricted set of legal types, this split can chain to build things up.
- return IsFloat ?
- DAG.getNode(ISD::FP_ROUND, DL, OutVT, InterVec,
- DAG.getTargetConstant(0, DL, TLI.getPointerTy())) :
- DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec);
+ return IsFloat
+ ? DAG.getNode(ISD::FP_ROUND, DL, OutVT, InterVec,
+ DAG.getTargetConstant(
+ 0, DL, TLI.getPointerTy(DAG.getDataLayout())))
+ : DAG.getNode(ISD::TRUNCATE, DL, OutVT, InterVec);
}
SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
@@ -2062,12 +2065,12 @@ SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
// }
while (CurNumElts != 0) {
while (CurNumElts >= NumElts) {
- SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
- DAG.getConstant(Idx, dl,
- TLI.getVectorIdxTy()));
- SDValue EOp2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp2,
- DAG.getConstant(Idx, dl,
- TLI.getVectorIdxTy()));
+ SDValue EOp1 = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ SDValue EOp2 = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, VT, InOp2,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2);
Idx += NumElts;
CurNumElts -= NumElts;
@@ -2079,14 +2082,12 @@ SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
if (NumElts == 1) {
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
- SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
- InOp1,
- DAG.getConstant(Idx, dl,
- TLI.getVectorIdxTy()));
- SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
- InOp2,
- DAG.getConstant(Idx, dl,
- TLI.getVectorIdxTy()));
+ SDValue EOp1 = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, InOp1,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ SDValue EOp2 = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, InOp2,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
EOp1, EOp2);
}
@@ -2123,9 +2124,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_BinaryCanTrap(SDNode *N) {
SDValue VecOp = DAG.getUNDEF(NextVT);
unsigned NumToInsert = ConcatEnd - Idx - 1;
for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
- VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp,
- ConcatOps[OpIdx],
- DAG.getConstant(i, dl, TLI.getVectorIdxTy()));
+ VecOp = DAG.getNode(
+ ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp, ConcatOps[OpIdx],
+ DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
ConcatOps[Idx+1] = VecOp;
ConcatEnd = Idx + 2;
@@ -2211,8 +2212,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
}
if (InVTNumElts % WidenNumElts == 0) {
- SDValue InVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InWidenVT, InOp,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
+ SDValue InVal = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, DL, InWidenVT, InOp,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
// Extract the input and convert the shorten input vector.
if (N->getNumOperands() == 1)
return DAG.getNode(Opcode, DL, WidenVT, InVal);
@@ -2226,8 +2228,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
unsigned i;
for (i=0; i < MinElts; ++i) {
- SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, InEltVT, InOp,
- DAG.getConstant(i, DL, TLI.getVectorIdxTy()));
+ SDValue Val = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, InEltVT, InOp,
+ DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
if (N->getNumOperands() == 1)
Ops[i] = DAG.getNode(Opcode, DL, EltVT, Val);
else
@@ -2453,8 +2456,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
if (InputWidened)
InOp = GetWidenedVector(InOp);
for (unsigned j=0; j < NumInElts; ++j)
- Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getConstant(j, dl, TLI.getVectorIdxTy()));
+ Ops[Idx++] = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
+ DAG.getConstant(j, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; Idx < WidenNumElts; ++Idx)
@@ -2511,8 +2515,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
if (InVTNumElts % WidenNumElts == 0) {
// Extract the input and convert the shorten input vector.
- InOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InWidenVT, InOp,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ InOp = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, InWidenVT, InOp,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
@@ -2527,8 +2532,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
unsigned i;
for (i=0; i < MinElts; ++i) {
- SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
- DAG.getConstant(i, dl, TLI.getVectorIdxTy()));
+ SDValue ExtVal = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
+ DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
Ops[i] = DAG.getConvertRndSat(WidenVT, dl, ExtVal, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
}
@@ -2570,8 +2576,10 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
unsigned NumElts = VT.getVectorNumElements();
unsigned i;
for (i=0; i < NumElts; ++i)
- Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getConstant(IdxVal + i, dl, TLI.getVectorIdxTy()));
+ Ops[i] =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
+ DAG.getConstant(IdxVal + i, dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue UndefVal = DAG.getUNDEF(EltVT);
for (; i < WidenNumElts; ++i)
@@ -2872,12 +2880,13 @@ SDValue DAGTypeLegalizer::WidenVecOp_EXTEND(SDNode *N) {
assert(FixedVT.getVectorNumElements() != InVT.getVectorNumElements() &&
"We can't have the same type as we started with!");
if (FixedVT.getVectorNumElements() > InVT.getVectorNumElements())
- InOp = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, FixedVT,
- DAG.getUNDEF(FixedVT), InOp,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
+ InOp = DAG.getNode(
+ ISD::INSERT_SUBVECTOR, DL, FixedVT, DAG.getUNDEF(FixedVT), InOp,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
else
- InOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, FixedVT, InOp,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
+ InOp = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, DL, FixedVT, InOp,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
break;
}
}
@@ -2920,10 +2929,11 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
unsigned Opcode = N->getOpcode();
SmallVector<SDValue, 16> Ops(NumElts);
for (unsigned i=0; i < NumElts; ++i)
- Ops[i] = DAG.getNode(Opcode, dl, EltVT,
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
- DAG.getConstant(i, dl,
- TLI.getVectorIdxTy())));
+ Ops[i] = DAG.getNode(
+ Opcode, dl, EltVT,
+ DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, InOp,
+ DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout()))));
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
@@ -2943,8 +2953,9 @@ SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
if (TLI.isTypeLegal(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ return DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
}
@@ -2971,8 +2982,9 @@ SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
if (getTypeAction(InOp.getValueType()) == TargetLowering::TypeWidenVector)
InOp = GetWidenedVector(InOp);
for (unsigned j=0; j < NumInElts; ++j)
- Ops[Idx++] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getConstant(j, dl, TLI.getVectorIdxTy()));
+ Ops[Idx++] = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
+ DAG.getConstant(j, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
@@ -3053,7 +3065,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
// Get a new SETCC node to compare the newly widened operands.
// Only some of the compared elements are legal.
- EVT SVT = TLI.getSetCCResultType(*DAG.getContext(), InOp0.getValueType());
+ EVT SVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+ InOp0.getValueType());
SDValue WideSETCC = DAG.getNode(ISD::SETCC, SDLoc(N),
SVT, InOp0, InOp1, N->getOperand(2));
@@ -3061,9 +3074,9 @@ SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
EVT ResVT = EVT::getVectorVT(*DAG.getContext(),
SVT.getVectorElementType(),
N->getValueType(0).getVectorNumElements());
- SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
- ResVT, WideSETCC,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ SDValue CC = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, ResVT, WideSETCC,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
return PromoteTargetBoolean(CC, N->getValueType(0));
}
@@ -3159,8 +3172,9 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits();
LdTy = NewLdTy;
}
- VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i],
- DAG.getConstant(Idx++, dl, TLI.getVectorIdxTy()));
+ VecOp = DAG.getNode(
+ ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i],
+ DAG.getConstant(Idx++, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
}
@@ -3407,9 +3421,9 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
if (NewVT.isVector()) {
unsigned NumVTElts = NewVT.getVectorNumElements();
do {
- SDValue EOp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NewVT, ValOp,
- DAG.getConstant(Idx, dl,
- TLI.getVectorIdxTy()));
+ SDValue EOp = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, NewVT, ValOp,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo().getWithOffset(Offset),
isVolatile, isNonTemporal,
@@ -3429,8 +3443,10 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
// Readjust index position based on new vector type
Idx = Idx * ValEltWidth / NewVTWidth;
do {
- SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
- DAG.getConstant(Idx++, dl, TLI.getVectorIdxTy()));
+ SDValue EOp = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
+ DAG.getConstant(Idx++, dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo().getWithOffset(Offset),
isVolatile, isNonTemporal,
@@ -3476,8 +3492,9 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
EVT ValEltVT = ValVT.getVectorElementType();
unsigned Increment = ValEltVT.getSizeInBits() / 8;
unsigned NumElts = StVT.getVectorNumElements();
- SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ SDValue EOp = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, BasePtr,
ST->getPointerInfo(), StEltVT,
isVolatile, isNonTemporal, Align,
@@ -3488,8 +3505,9 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
BasePtr,
DAG.getConstant(Offset, dl,
BasePtr.getValueType()));
- SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ SDValue EOp = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
StChain.push_back(DAG.getTruncStore(Chain, dl, EOp, NewBasePtr,
ST->getPointerInfo().getWithOffset(Offset),
StEltVT, isVolatile, isNonTemporal,
@@ -3525,8 +3543,9 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
}
if (WidenNumElts < InNumElts && InNumElts % WidenNumElts)
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT, InOp,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ return DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, NVT, InOp,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
// Fall back to extract and build.
SmallVector<SDValue, 16> Ops(WidenNumElts);
@@ -3534,8 +3553,9 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
unsigned Idx;
for (Idx = 0; Idx < MinNumElts; ++Idx)
- Ops[Idx] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
- DAG.getConstant(Idx, dl, TLI.getVectorIdxTy()));
+ Ops[Idx] = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InOp,
+ DAG.getConstant(Idx, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDValue UndefVal = DAG.getUNDEF(EltVT);
for ( ; Idx < WidenNumElts; ++Idx)
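
Alignment queries in LegalizeVectorTypes likewise drop the pointer-returning TLI.getDataLayout() in favor of the reference from DAG.getDataLayout(). A sketch of the stack-temporary idiom used by SplitVecRes_INSERT_VECTOR_ELT above:

    Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
    unsigned Alignment = DAG.getDataLayout().getPrefTypeAlignment(VecType);
    SDValue StackPtr = DAG.CreateStackTemporary(VecVT, Alignment);
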
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index 00cbae3..34e1a70 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -725,9 +725,8 @@ void ScheduleDAGLinearize::Schedule() {
SmallVector<SDNode*, 8> Glues;
unsigned DAGSize = 0;
- for (SelectionDAG::allnodes_iterator I = DAG->allnodes_begin(),
- E = DAG->allnodes_end(); I != E; ++I) {
- SDNode *N = I;
+ for (SDNode &Node : DAG->allnodes()) {
+ SDNode *N = &Node;
// Use node id to record degree.
unsigned Degree = N->use_size();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index b22d6ed..2a6c853 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -289,9 +289,8 @@ void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
- for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
- E = DAG->allnodes_end(); NI != E; ++NI) {
- SDNode *Node = &*NI;
+ for (SDNode &NI : DAG->allnodes()) {
+ SDNode *Node = &NI;
if (!Node || !Node->isMachineOpcode())
continue;
@@ -308,9 +307,8 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// to their associated SUnits by holding SUnits table indices. A value
// of -1 means the SDNode does not yet have an associated SUnit.
unsigned NumNodes = 0;
- for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
- E = DAG->allnodes_end(); NI != E; ++NI) {
- NI->setNodeId(-1);
+ for (SDNode &NI : DAG->allnodes()) {
+ NI.setNodeId(-1);
++NumNodes;
}
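
Both schedulers swap manual allnodes_iterator loops for range-based iteration; SelectionDAG::allnodes() yields SDNode& directly, so the boilerplate collapses to (sketch):

    for (SDNode &N : DAG->allnodes())
      N.setNodeId(-1);
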
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index be54782..14f44cc 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -151,8 +151,8 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) {
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
bool IsAllUndef = true;
- for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) {
- if (N->getOperand(i).getOpcode() == ISD::UNDEF)
+ for (const SDValue &Op : N->op_values()) {
+ if (Op.getOpcode() == ISD::UNDEF)
continue;
IsAllUndef = false;
// Do not accept build_vectors that aren't all constants or which have non-0
@@ -163,12 +163,11 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) {
// We only want to check enough bits to cover the vector elements, because
// we care if the resultant vector is all zeros, not whether the individual
// constants are.
- SDValue Zero = N->getOperand(i);
unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
if (CN->getAPIntValue().countTrailingZeros() < EltSize)
return false;
- } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
+ } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
return false;
} else
@@ -921,7 +920,7 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());
- return TLI->getDataLayout()->getABITypeAlignment(Ty);
+ return getDataLayout().getABITypeAlignment(Ty);
}
// EntryNode could meaningfully have debug info if we can find it...
@@ -1184,7 +1183,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
// EltParts is currently in little endian order. If we actually want
// big-endian order then reverse it now.
- if (TLI->isBigEndian())
+ if (getDataLayout().isBigEndian())
std::reverse(EltParts.begin(), EltParts.end());
// The elements must be reversed when the element order is different
@@ -1234,7 +1233,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
}
SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget) {
- return getConstant(Val, DL, TLI->getPointerTy(), isTarget);
+ return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
}
SDValue SelectionDAG::getConstantFP(const APFloat& V, SDLoc DL, EVT VT,
@@ -1303,7 +1302,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
"Cannot set target flags on target-independent globals");
// Truncate (with sign-extension) the offset value to the pointer size.
- unsigned BitWidth = TLI->getPointerTypeSizeInBits(GV->getType());
+ unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
if (BitWidth < 64)
Offset = SignExtend64(Offset, BitWidth);
@@ -1373,7 +1372,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
+ Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
@@ -1400,7 +1399,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = TLI->getDataLayout()->getPrefTypeAlignment(C->getType());
+ Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), None);
@@ -1850,7 +1849,7 @@ SDValue SelectionDAG::getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
EVT OpTy = Op.getValueType();
- EVT ShTy = TLI->getShiftAmountTy(LHSTy);
+ EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
if (OpTy == ShTy || OpTy.isVector()) return Op;
ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
@@ -1864,10 +1863,10 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
unsigned ByteSize = VT.getStoreSize();
Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign =
- std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
+ std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
- return getFrameIndex(FrameIdx, TLI->getPointerTy());
+ return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
}
/// CreateStackTemporary - Create a stack temporary suitable for holding
@@ -1877,13 +1876,13 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
VT2.getStoreSizeInBits())/8;
Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext());
- const DataLayout *TD = TLI->getDataLayout();
- unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
- TD->getPrefTypeAlignment(Ty2));
+ const DataLayout &DL = getDataLayout();
+ unsigned Align =
+ std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
- return getFrameIndex(FrameIdx, TLI->getPointerTy());
+ return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
}
SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
@@ -1916,9 +1915,9 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
break;
}
- if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
+ if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
const APInt &C2 = N2C->getAPIntValue();
- if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
+ if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
const APInt &C1 = N1C->getAPIntValue();
switch (Cond) {
@@ -1936,8 +1935,8 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
}
}
}
- if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
- if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
+ if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
+ if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
switch (Cond) {
default: break;
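
FoldSetCC also benefits from cast<>/dyn_cast<> accepting SDValue directly (via simplify_type), so the explicit .getNode() hop disappears throughout this file. Sketch, mirroring the lines above:

    // was: dyn_cast<ConstantSDNode>(N2.getNode())
    if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
      const APInt &C2 = N2C->getAPIntValue();
      // ... fold against C2 as in the switch above ...
    }
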
@@ -2356,15 +2355,24 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
// Output known-0 bits are known if clear or set in both the low clear bits
// common to both LHS & RHS. For example, 8+(X<<3) is known to have the
// low 3 bits clear.
+ // Output known-0 bits are also known if the top bits of each input are
+ // known to be clear. For example, if one input has the top 10 bits clear
+ // and the other has the top 8 bits clear, we know the top 7 bits of the
+ // output must be clear.
computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
- unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
+ unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
+ unsigned KnownZeroLow = KnownZero2.countTrailingOnes();
computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
- KnownZeroOut = std::min(KnownZeroOut,
+ KnownZeroHigh = std::min(KnownZeroHigh,
+ KnownZero2.countLeadingOnes());
+ KnownZeroLow = std::min(KnownZeroLow,
KnownZero2.countTrailingOnes());
if (Op.getOpcode() == ISD::ADD) {
- KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
+ KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
+ if (KnownZeroHigh > 1)
+ KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
break;
}
@@ -2372,8 +2380,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
// information if we know (at least) that the low two bits are clear. We
// then return to the caller that the low bit is unknown but that other bits
// are known zero.
- if (KnownZeroOut >= 2) // ADDE
- KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
+ if (KnownZeroLow >= 2) // ADDE
+ KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
break;
}
case ISD::SREM:
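
The ISD::ADD change to computeKnownBits is the one real analysis improvement in this file: leading known-zero bits now survive addition, not just trailing ones. A worked instance of the rule quoted in the new comment, for a 64-bit add:

    // Addend A has its top 10 bits known zero  =>  A < 2^54.
    // Addend B has its top  8 bits known zero  =>  B < 2^56.
    // A + B < 2^54 + 2^56 < 2^57, so the top 64 - 57 = 7 bits of the
    // sum are zero: min(10, 8) - 1 leading zeros survive the carry.
    unsigned KnownZeroHigh = std::min(10u, 8u); // = 8
    unsigned LeadingZeroes = KnownZeroHigh - 1; // = 7
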
@@ -2814,7 +2822,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
// doesn't create new constants with different values. Nevertheless, the
// opaque flag is preserved during folding to prevent future folding with
// other constants.
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
const APInt &Val = C->getAPIntValue();
switch (Opcode) {
default: break;
@@ -2861,7 +2869,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
}
// Constant fold unary operations with a floating point constant operand.
- if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
+ if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
APFloat V = C->getValueAPF(); // make copy
switch (Opcode) {
case ISD::FNEG:
@@ -2922,7 +2930,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
}
// Constant fold unary operations with a vector integer or float operand.
- if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand.getNode())) {
+ if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
if (BV->isConstant()) {
switch (Opcode) {
default:
@@ -3278,8 +3286,8 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
SDValue N2, const SDNodeFlags *Flags) {
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
- ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
+ ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
switch (Opcode) {
default: break;
case ISD::TokenFactor:
@@ -3499,7 +3507,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
Ops.push_back(Op);
continue;
}
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getNode())) {
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
APInt Val = C->getAPIntValue();
Ops.push_back(SignExtendInReg(Val));
continue;
@@ -3554,7 +3562,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
// if the indices are known different, extract the element from
// the original vector.
SDValue N1Op2 = N1.getOperand(2);
- ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
+ ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
if (N1Op2C && N2C) {
if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
@@ -3600,9 +3608,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
"Extract subvector must be from larger vector to smaller vector!");
- if (isa<ConstantSDNode>(Index.getNode())) {
+ if (isa<ConstantSDNode>(Index)) {
assert((VT.getVectorNumElements() +
- cast<ConstantSDNode>(Index.getNode())->getZExtValue()
+ cast<ConstantSDNode>(Index)->getZExtValue()
<= N1.getValueType().getVectorNumElements())
&& "Extract subvector overflow!");
}
@@ -3628,8 +3636,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
// Constant fold FP operations.
bool HasFPExceptions = TLI->hasFloatingPointExceptions();
- ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
- ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
+ ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
+ ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
if (N1CFP) {
if (!N2CFP && isCommutativeBinOp(Opcode)) {
// Canonicalize constant to RHS if commutative.
@@ -3787,7 +3795,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3) {
// Perform various simplifications.
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
+ ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
switch (Opcode) {
case ISD::FMA: {
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
@@ -3845,9 +3853,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
"Dest and insert subvector source types must match!");
assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
"Insert subvector must be from smaller vector to larger vector!");
- if (isa<ConstantSDNode>(Index.getNode())) {
+ if (isa<ConstantSDNode>(Index)) {
assert((N2.getValueType().getVectorNumElements() +
- cast<ConstantSDNode>(Index.getNode())->getZExtValue()
+ cast<ConstantSDNode>(Index)->getZExtValue()
<= VT.getVectorNumElements())
&& "Insert subvector overflow!");
}
@@ -3994,7 +4002,7 @@ static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG,
unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
APInt Val(NumVTBits, 0);
- if (TLI.isLittleEndian()) {
+ if (DAG.getDataLayout().isLittleEndian()) {
for (unsigned i = 0; i != NumBytes; ++i)
Val |= (uint64_t)(unsigned char)Str[i] << i*8;
} else {
@@ -4066,9 +4074,9 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
if (VT == MVT::Other) {
unsigned AS = 0;
- if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
+ if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) ||
TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
- VT = TLI.getPointerTy();
+ VT = TLI.getPointerTy(DAG.getDataLayout());
} else {
switch (DstAlign & 7) {
case 0: VT = MVT::i64; break;
@@ -4185,14 +4193,14 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
- unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
+ unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
// Don't promote to an alignment that would require dynamic stack
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Align &&
- TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
+ while (NewAlign > Align &&
+ DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign))
NewAlign /= 2;
if (NewAlign > Align) {
@@ -4294,7 +4302,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
- unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
+ unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -4385,7 +4393,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
- unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
+ unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -4488,19 +4496,21 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
+ Entry.Ty = getDataLayout().getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in SDLoc
TargetLowering::CallLoweringInfo CLI(*this);
- CLI.setDebugLoc(dl).setChain(Chain)
- .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
- Type::getVoidTy(*getContext()),
- getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
- TLI->getPointerTy()), std::move(Args), 0)
- .setDiscardResult()
- .setTailCall(isTailCall);
+ CLI.setDebugLoc(dl)
+ .setChain(Chain)
+ .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
+ Type::getVoidTy(*getContext()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
+ TLI->getPointerTy(getDataLayout())),
+ std::move(Args), 0)
+ .setDiscardResult()
+ .setTailCall(isTailCall);
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
@@ -4544,19 +4554,21 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
+ Entry.Ty = getDataLayout().getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in SDLoc
TargetLowering::CallLoweringInfo CLI(*this);
- CLI.setDebugLoc(dl).setChain(Chain)
- .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
- Type::getVoidTy(*getContext()),
- getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
- TLI->getPointerTy()), std::move(Args), 0)
- .setDiscardResult()
- .setTailCall(isTailCall);
+ CLI.setDebugLoc(dl)
+ .setChain(Chain)
+ .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
+ Type::getVoidTy(*getContext()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
+ TLI->getPointerTy(getDataLayout())),
+ std::move(Args), 0)
+ .setDiscardResult()
+ .setTailCall(isTailCall);
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
@@ -4594,7 +4606,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
}
// Emit a library call.
- Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
+ Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
@@ -4608,13 +4620,15 @@ SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst,
// FIXME: pass in SDLoc
TargetLowering::CallLoweringInfo CLI(*this);
- CLI.setDebugLoc(dl).setChain(Chain)
- .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
- Type::getVoidTy(*getContext()),
- getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
- TLI->getPointerTy()), std::move(Args), 0)
- .setDiscardResult()
- .setTailCall(isTailCall);
+ CLI.setDebugLoc(dl)
+ .setChain(Chain)
+ .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
+ Type::getVoidTy(*getContext()),
+ getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
+ TLI->getPointerTy(getDataLayout())),
+ std::move(Args), 0)
+ .setDiscardResult()
+ .setTailCall(isTailCall);
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
@@ -6656,7 +6670,7 @@ bool SDNode::hasAnyUseOfValue(unsigned Value) const {
/// isOnlyUserOf - Return true if this node is the only use of N.
///
-bool SDNode::isOnlyUserOf(SDNode *N) const {
+bool SDNode::isOnlyUserOf(const SDNode *N) const {
bool Seen = false;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
SDNode *User = *I;
@@ -6671,16 +6685,16 @@ bool SDNode::isOnlyUserOf(SDNode *N) const {
/// isOperand - Return true if this node is an operand of N.
///
-bool SDValue::isOperandOf(SDNode *N) const {
+bool SDValue::isOperandOf(const SDNode *N) const {
for (const SDValue &Op : N->op_values())
if (*this == Op)
return true;
return false;
}
-bool SDNode::isOperandOf(SDNode *N) const {
- for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
- if (this == N->OperandList[i].getNode())
+bool SDNode::isOperandOf(const SDNode *N) const {
+ for (const SDValue &Op : N->op_values())
+ if (this == Op.getNode())
return true;
return false;
}
@@ -6784,10 +6798,9 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
if (OperandVT.isVector()) {
// A vector operand; extract a single element.
EVT OperandEltVT = OperandVT.getVectorElementType();
- Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- OperandEltVT,
- Operand,
- getConstant(i, dl, TLI->getVectorIdxTy()));
+ Operands[j] =
+ getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
+ getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
} else {
// A scalar operand; just use it as is.
Operands[j] = Operand;
@@ -6891,10 +6904,10 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
const GlobalValue *GV;
int64_t GVOffset = 0;
if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
- unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
+ unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
- *TLI->getDataLayout());
+ getDataLayout());
unsigned AlignBits = KnownZero.countTrailingOnes();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
if (Align)
@@ -6950,10 +6963,10 @@ SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
"More vector elements requested than available!");
SDValue Lo, Hi;
Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
- getConstant(0, DL, TLI->getVectorIdxTy()));
+ getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
getConstant(LoVT.getVectorNumElements(), DL,
- TLI->getVectorIdxTy()));
+ TLI->getVectorIdxTy(getDataLayout())));
return std::make_pair(Lo, Hi);
}
@@ -6965,7 +6978,7 @@ void SelectionDAG::ExtractVectorElements(SDValue Op,
Count = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
- EVT IdxTy = TLI->getVectorIdxTy();
+ EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
SDLoc SL(Op);
for (unsigned i = Start, e = Start + Count; i != e; ++i) {
Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
@@ -7080,14 +7093,12 @@ SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
- return dyn_cast_or_null<ConstantSDNode>(
- getSplatValue(UndefElements).getNode());
+ return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}
ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
- return dyn_cast_or_null<ConstantFPSDNode>(
- getSplatValue(UndefElements).getNode());
+ return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}
bool BuildVectorSDNode::isConstant() const {
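
Across SelectionDAG.cpp the common thread is the SelectionDAG::getDataLayout() accessor, which returns a reference and replaces every TLI->getDataLayout() pointer chase, including in the memcpy/memmove/memset libcall lowering. Sketch from inside a SelectionDAG member, so getDataLayout() and getContext() are unqualified:

    const DataLayout &DL = getDataLayout();
    Type *IntPtrTy = DL.getIntPtrType(*getContext()); // libcall size argument type
    SDValue FI = getFrameIndex(FrameIdx, TLI->getPointerTy(DL));
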
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 4897082..2c3c0eb1 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -146,7 +146,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
}
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
@@ -160,13 +160,14 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
// Combine the round and odd parts.
Lo = Val;
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
- Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
- DAG.getConstant(Lo.getValueType().getSizeInBits(), DL,
- TLI.getPointerTy()));
+ Hi =
+ DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
+ DAG.getConstant(Lo.getValueType().getSizeInBits(), DL,
+ TLI.getPointerTy(DAG.getDataLayout())));
Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
}
@@ -177,7 +178,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
- if (TLI.hasBigEndianPartOrdering(ValueVT))
+ if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
} else {
@@ -211,8 +212,9 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
// FP_ROUND's are always exact here.
if (ValueVT.bitsLT(Val.getValueType()))
- return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
- DAG.getTargetConstant(1, DL, TLI.getPointerTy()));
+ return DAG.getNode(
+ ISD::FP_ROUND, DL, ValueVT, Val,
+ DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
}
@@ -305,8 +307,9 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
"Cannot narrow, it would be a lossy transformation");
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
+ return DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
// Vector/Vector bitcast.
@@ -362,10 +365,10 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
if (ValueVT.isVector())
return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
- assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
+ assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
+ "Copying to an illegal type!");
if (NumParts == 0)
return;
@@ -433,7 +436,7 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
DAG.getIntPtrConstant(RoundBits, DL));
getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
// The odd parts were reversed by getCopyToParts - unreverse them.
std::reverse(Parts + RoundParts, Parts + NumParts);
@@ -468,7 +471,7 @@ static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
}
}
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::reverse(Parts, Parts + OrigNumParts);
}
@@ -497,9 +500,9 @@ static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
// undef elements.
SmallVector<SDValue, 16> Ops;
for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- ElementVT, Val, DAG.getConstant(i, DL,
- TLI.getVectorIdxTy())));
+ Ops.push_back(DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
+ DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
for (unsigned i = ValueVT.getVectorNumElements(),
e = PartVT.getVectorNumElements(); i != e; ++i)
@@ -524,9 +527,9 @@ static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
// Vector -> scalar conversion.
assert(ValueVT.getVectorNumElements() == 1 &&
"Only trivial vector-to-scalar conversions should get here!");
- Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- PartVT, Val,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
+ Val = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
bool Smaller = ValueVT.bitsLE(PartVT);
Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
@@ -554,14 +557,14 @@ static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
if (IntermediateVT.isVector())
- Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
- IntermediateVT, Val,
- DAG.getConstant(i * (NumElements / NumIntermediates), DL,
- TLI.getVectorIdxTy()));
+ Ops[i] =
+ DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
+ DAG.getConstant(i * (NumElements / NumIntermediates), DL,
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
else
- Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- IntermediateVT, Val,
- DAG.getConstant(i, DL, TLI.getVectorIdxTy()));
+ Ops[i] = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
+ DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
}
// Split the intermediate operands into legal parts.
@@ -588,14 +591,14 @@ RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
EVT valuevt)
: ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
-RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &tli,
- unsigned Reg, Type *Ty) {
- ComputeValueVTs(tli, Ty, ValueVTs);
+RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
+ const DataLayout &DL, unsigned Reg, Type *Ty) {
+ ComputeValueVTs(TLI, DL, Ty, ValueVTs);
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
EVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
- MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
+ unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
+ MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
RegVTs.push_back(RegisterVT);
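
The constructor now takes the DataLayout explicitly rather than fetching it through the TargetLowering. The loop itself just assigns a consecutive register range per value type; a toy model of that numbering (hypothetical helper, assuming the type is simply split into register-sized pieces):

    #include <vector>

    // Expand a value of TypeBits bits into ceil(TypeBits / RegBits) registers
    // numbered consecutively from BaseReg, as the loop above does per ValueVT.
    std::vector<unsigned> expandToRegs(unsigned BaseReg, unsigned TypeBits,
                                       unsigned RegBits) {
      unsigned NumRegs = (TypeBits + RegBits - 1) / RegBits;
      std::vector<unsigned> Regs;
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(BaseReg + i);
      return Regs;
    }
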
@@ -796,7 +799,7 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
// If we clobbered the stack pointer, MFI should know about it.
assert(DAG.getMachineFunction().getFrameInfo()->
- hasInlineAsmWithSPAdjust());
+ hasOpaqueSPAdjustment());
}
}
}
@@ -807,7 +810,7 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
AA = &aa;
GFI = gfi;
LibInfo = li;
- DL = DAG.getTarget().getDataLayout();
+ DL = &DAG.getDataLayout();
Context = DAG.getContext();
LPadToCallSiteMap.clear();
}
@@ -964,8 +967,8 @@ SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
if (It != FuncInfo.ValueMap.end()) {
unsigned InReg = It->second;
- RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(), InReg,
- Ty);
+ RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
+ DAG.getDataLayout(), InReg, Ty);
SDValue Chain = DAG.getEntryNode();
Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
resolveDanglingDebugInfo(V, Result);
@@ -1031,7 +1034,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (const Constant *C = dyn_cast<Constant>(V)) {
- EVT VT = TLI.getValueType(V->getType(), true);
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
return DAG.getConstant(*CI, getCurSDLoc(), VT);
@@ -1041,7 +1044,8 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (isa<ConstantPointerNull>(C)) {
unsigned AS = V->getType()->getPointerAddressSpace();
- return DAG.getConstant(0, getCurSDLoc(), TLI.getPointerTy(AS));
+ return DAG.getConstant(0, getCurSDLoc(),
+ TLI.getPointerTy(DAG.getDataLayout(), AS));
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
@@ -1095,7 +1099,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
"Unknown struct or array constant!");
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, C->getType(), ValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
unsigned NumElts = ValueVTs.size();
if (NumElts == 0)
return SDValue(); // empty struct
@@ -1127,7 +1131,8 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
Ops.push_back(getValue(CV->getOperand(i)));
} else {
assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
- EVT EltVT = TLI.getValueType(VecTy->getElementType());
+ EVT EltVT =
+ TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
SDValue Op;
if (EltVT.isFloatingPoint())
@@ -1147,13 +1152,15 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
- return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
+ return DAG.getFrameIndex(SI->second,
+ TLI.getPointerTy(DAG.getDataLayout()));
}
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
- RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
+ RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
+ Inst->getType());
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
@@ -1163,6 +1170,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ auto &DL = DAG.getDataLayout();
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
SmallVector<SDValue, 8> OutVals;
@@ -1175,7 +1183,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
// Leave Outs empty so that LowerReturn won't try to load return
// registers the usual way.
SmallVector<EVT, 1> PtrValueVTs;
- ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
+ ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
PtrValueVTs);
SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
@@ -1183,7 +1191,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
+ ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
SmallVector<SDValue, 4> Chains(NumValues);
@@ -1203,7 +1211,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
MVT::Other, Chains);
} else if (I.getNumOperands() != 0) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs);
+ ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues) {
SDValue RetOp = getValue(I.getOperand(0));
@@ -1692,7 +1700,7 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
// Emit the code for the jump table
assert(JT.Reg != -1U && "Should lower JT Header first!");
- EVT PTy = DAG.getTargetLoweringInfo().getPointerTy();
+ EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
JT.Reg, PTy);
SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
@@ -1723,9 +1731,10 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// This value may be smaller or larger than the target's pointer type, and
// therefore require extension or truncation.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy());
+ SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
- unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
+ unsigned JumpTableReg =
+ FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
JumpTableReg, SwitchOp);
JT.Reg = JumpTableReg;
@@ -1733,11 +1742,10 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// Emit the range check for the jump table, and branch to the default block
// for the switch statement if the value being switched on exceeds the largest
// case in the switch.
- SDValue CMP =
- DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(),
- Sub.getValueType()),
- Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT),
- ISD::SETUGT);
+ SDValue CMP = DAG.getSetCC(
+ dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+ Sub.getValueType()),
+ Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
MVT::Other, CopyTo, CMP,
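
The reformatted SETUGT compare implements the usual rebased range check. In plain integer terms (a sketch relying on unsigned wraparound):

    #include <cstdint>

    // After rebasing the switch value to the first case, one unsigned compare
    // covers both below-First (wraps to a huge value) and above-Last.
    bool takesDefaultEdge(uint64_t Val, uint64_t First, uint64_t Last) {
      uint64_t Sub = Val - First;   // intentional modular arithmetic
      return Sub > (Last - First);  // the SETUGT in the hunk above
    }
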
@@ -1762,7 +1770,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
// First create the loads to the guard/stack slot for the comparison.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT PtrTy = TLI.getPointerTy();
+ EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
int FI = MFI->getStackProtectorIndex();
@@ -1771,8 +1779,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
SDValue GuardPtr = getValue(IRGuard);
SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
- unsigned Align =
- TLI.getDataLayout()->getPrefTypeAlignment(IRGuard->getType());
+ unsigned Align = DL->getPrefTypeAlignment(IRGuard->getType());
SDValue Guard;
SDLoc dl = getCurSDLoc();
@@ -1799,10 +1806,10 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
EVT VT = Guard.getValueType();
SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
- SDValue Cmp =
- DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(),
- Sub.getValueType()),
- Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
+ SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
+ *DAG.getContext(),
+ Sub.getValueType()),
+ Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
// If the sub is not 0, then we know the guard and stack slot are not equal,
// so branch to the failure MBB.
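
The guard comparison itself is unchanged by the hunk; only getSetCCResultType gained a DataLayout argument. What the SUB + SETNE pair computes, as a one-line model:

    #include <cstdint>

    // True when the on-stack copy no longer matches the canonical guard value.
    bool stackGuardMismatch(uint64_t Guard, uint64_t StackSlot) {
      return (Guard - StackSlot) != 0;  // SUB, then SETNE against zero
    }
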
@@ -1848,10 +1855,10 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
// Check range
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- SDValue RangeCmp =
- DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(),
- Sub.getValueType()),
- Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
+ SDValue RangeCmp = DAG.getSetCC(
+ dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
+ Sub.getValueType()),
+ Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
// Determine the type of the test operands.
bool UsePtrType = false;
@@ -1867,7 +1874,7 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
}
}
if (UsePtrType) {
- VT = TLI.getPointerTy();
+ VT = TLI.getPointerTy(DAG.getDataLayout());
Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
}
@@ -1909,13 +1916,15 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
// Testing for a single bit; just compare the shift count with what it
// would need to be to shift a 1 bit in that position.
Cmp = DAG.getSetCC(
- dl, TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
- DAG.getConstant(countTrailingZeros(B.Mask), dl, VT), ISD::SETEQ);
+ dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
+ ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
+ ISD::SETEQ);
} else if (PopCount == BB.Range) {
// There is only one zero bit in the range, test for it directly.
Cmp = DAG.getSetCC(
- dl, TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
- DAG.getConstant(countTrailingOnes(B.Mask), dl, VT), ISD::SETNE);
+ dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
+ ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
+ ISD::SETNE);
} else {
// Make desired shift
SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
@@ -1924,8 +1933,9 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
// Emit bit tests and jumps
SDValue AndOp = DAG.getNode(ISD::AND, dl,
VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
- Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(), VT), AndOp,
- DAG.getConstant(0, dl, VT), ISD::SETNE);
+ Cmp = DAG.getSetCC(
+ dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
+ AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
}
// The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
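
All three compares in visitBitTestCase keep their logic; only the setcc type query changes. A standalone model of the three strategies (C++20 <bit>, assuming ShiftAmt < 64; hypothetical helper, not LLVM code):

    #include <bit>
    #include <cstdint>

    // Mask has RangeBits meaningful low bits; ShiftAmt is the rebased value.
    bool bitTestHits(uint64_t Mask, unsigned ShiftAmt, unsigned RangeBits) {
      unsigned PopCount = std::popcount(Mask);
      if (PopCount == 1)                         // one set bit: compare positions
        return ShiftAmt == unsigned(std::countr_zero(Mask));
      if (PopCount == RangeBits)                 // one zero bit: test it directly
        return ShiftAmt != unsigned(std::countr_one(Mask));
      return ((uint64_t{1} << ShiftAmt) & Mask) != 0; // general SHL/AND/SETNE
    }
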
@@ -2013,7 +2023,7 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
SmallVector<EVT, 2> ValueVTs;
SDLoc dl = getCurSDLoc();
- ComputeValueVTs(TLI, LP.getType(), ValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
// Get the two live-in registers as SDValues. The physregs have already been
@@ -2022,14 +2032,16 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
if (FuncInfo.ExceptionPointerVirtReg) {
Ops[0] = DAG.getZExtOrTrunc(
DAG.getCopyFromReg(DAG.getEntryNode(), dl,
- FuncInfo.ExceptionPointerVirtReg, TLI.getPointerTy()),
+ FuncInfo.ExceptionPointerVirtReg,
+ TLI.getPointerTy(DAG.getDataLayout())),
dl, ValueVTs[0]);
} else {
- Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy());
+ Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
}
Ops[1] = DAG.getZExtOrTrunc(
DAG.getCopyFromReg(DAG.getEntryNode(), dl,
- FuncInfo.ExceptionSelectorVirtReg, TLI.getPointerTy()),
+ FuncInfo.ExceptionSelectorVirtReg,
+ TLI.getPointerTy(DAG.getDataLayout())),
dl, ValueVTs[1]);
// Merge into one.
@@ -2038,28 +2050,6 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
setValue(&LP, Res);
}
-unsigned
-SelectionDAGBuilder::visitLandingPadClauseBB(GlobalValue *ClauseGV,
- MachineBasicBlock *LPadBB) {
- SDValue Chain = getControlRoot();
- SDLoc dl = getCurSDLoc();
-
- // Get the typeid that we will dispatch on later.
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy());
- unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
- unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(ClauseGV);
- SDValue Sel = DAG.getConstant(TypeID, dl, TLI.getPointerTy());
- Chain = DAG.getCopyToReg(Chain, dl, VReg, Sel);
-
- // Branch to the main landing pad block.
- MachineBasicBlock *ClauseMBB = FuncInfo.MBB;
- ClauseMBB->addSuccessor(LPadBB);
- DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, Chain,
- DAG.getBasicBlock(LPadBB)));
- return VReg;
-}
-
void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
#ifndef NDEBUG
for (const CaseCluster &CC : Clusters)
@@ -2186,8 +2176,8 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
- EVT ShiftTy =
- DAG.getTargetLoweringInfo().getShiftAmountTy(Op2.getValueType());
+ EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
+ Op2.getValueType(), DAG.getDataLayout());
// Coerce the shift amount to the right type if we can.
if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
@@ -2256,7 +2246,8 @@ void SelectionDAGBuilder::visitICmp(const User &I) {
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode = getICmpCondCode(predicate);
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
}
@@ -2271,13 +2262,15 @@ void SelectionDAGBuilder::visitFCmp(const User &I) {
ISD::CondCode Condition = getFCmpCondCode(predicate);
if (TM.Options.NoNaNsFPMath)
Condition = getFCmpCodeWithoutNaN(Condition);
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
}
void SelectionDAGBuilder::visitSelect(const User &I) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(DAG.getTargetLoweringInfo(), I.getType(), ValueVTs);
+ ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
+ ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) return;
@@ -2336,7 +2329,8 @@ void SelectionDAGBuilder::visitSelect(const User &I) {
void SelectionDAGBuilder::visitTrunc(const User &I) {
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
}
@@ -2344,7 +2338,8 @@ void SelectionDAGBuilder::visitZExt(const User &I) {
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// ZExt also can't be a cast to bool for the same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
}
@@ -2352,7 +2347,8 @@ void SelectionDAGBuilder::visitSExt(const User &I) {
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// SExt also can't be a cast to bool for the same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
}
@@ -2361,43 +2357,49 @@ void SelectionDAGBuilder::visitFPTrunc(const User &I) {
SDValue N = getValue(I.getOperand(0));
SDLoc dl = getCurSDLoc();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
- DAG.getTargetConstant(0, dl, TLI.getPointerTy())));
+ DAG.getTargetConstant(
+ 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
}
void SelectionDAGBuilder::visitFPExt(const User &I) {
// FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToUI(const User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToSI(const User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitUIToFP(const User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitSIToFP(const User &I) {
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
}
@@ -2405,7 +2407,8 @@ void SelectionDAGBuilder::visitPtrToInt(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
@@ -2413,14 +2416,16 @@ void SelectionDAGBuilder::visitIntToPtr(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}
void SelectionDAGBuilder::visitBitCast(const User &I) {
SDValue N = getValue(I.getOperand(0));
SDLoc dl = getCurSDLoc();
- EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
+ EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType());
// BitCast assures us that source and destination are the same size so this is
// either a BITCAST or a no-op.
@@ -2442,7 +2447,7 @@ void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const Value *SV = I.getOperand(0);
SDValue N = getValue(SV);
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned SrcAS = SV->getType()->getPointerAddressSpace();
unsigned DestAS = I.getType()->getPointerAddressSpace();
@@ -2457,19 +2462,21 @@ void SelectionDAGBuilder::visitInsertElement(const User &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue InVec = getValue(I.getOperand(0));
SDValue InVal = getValue(I.getOperand(1));
- SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
- getCurSDLoc(), TLI.getVectorIdxTy());
+ SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
+ TLI.getVectorIdxTy(DAG.getDataLayout()));
setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
- TLI.getValueType(I.getType()), InVec, InVal, InIdx));
+ TLI.getValueType(DAG.getDataLayout(), I.getType()),
+ InVec, InVal, InIdx));
}
void SelectionDAGBuilder::visitExtractElement(const User &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue InVec = getValue(I.getOperand(0));
- SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
- getCurSDLoc(), TLI.getVectorIdxTy());
+ SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
+ TLI.getVectorIdxTy(DAG.getDataLayout()));
setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
- TLI.getValueType(I.getType()), InVec, InIdx));
+ TLI.getValueType(DAG.getDataLayout(), I.getType()),
+ InVec, InIdx));
}
// Utility for visitShuffleVector - Return true if every element in Mask,
@@ -2492,7 +2499,7 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
unsigned MaskNumElts = Mask.size();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT VT = TLI.getValueType(I.getType());
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
EVT SrcVT = Src1.getValueType();
unsigned SrcNumElts = SrcVT.getVectorNumElements();
@@ -2614,7 +2621,8 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
SDLoc dl = getCurSDLoc();
Src = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, VT, Src,
- DAG.getConstant(StartIdx[Input], dl, TLI.getVectorIdxTy()));
+ DAG.getConstant(StartIdx[Input], dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
}
}
@@ -2641,7 +2649,7 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
// replacing the shuffle with extract and build vector.
EVT EltVT = VT.getVectorElementType();
- EVT IdxVT = TLI.getVectorIdxTy();
+ EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
SDLoc dl = getCurSDLoc();
SmallVector<SDValue,8> Ops;
for (unsigned i = 0; i != MaskNumElts; ++i) {
@@ -2676,9 +2684,9 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> AggValueVTs;
- ComputeValueVTs(TLI, AggTy, AggValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
SmallVector<EVT, 4> ValValueVTs;
- ComputeValueVTs(TLI, ValTy, ValValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
unsigned NumAggValues = AggValueVTs.size();
unsigned NumValValues = ValValueVTs.size();
@@ -2722,7 +2730,7 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> ValValueVTs;
- ComputeValueVTs(TLI, ValTy, ValValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
unsigned NumValValues = ValValueVTs.size();
@@ -2755,6 +2763,16 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(Op0);
SDLoc dl = getCurSDLoc();
+ // Normalize Vector GEP - all scalar operands should be converted into
+ // splat vectors.
+ unsigned VectorWidth = I.getType()->isVectorTy() ?
+ cast<VectorType>(I.getType())->getVectorNumElements() : 0;
+
+ if (VectorWidth && !N.getValueType().isVector()) {
+ MVT VT = MVT::getVectorVT(N.getValueType().getSimpleVT(), VectorWidth);
+ SmallVector<SDValue, 16> Ops(VectorWidth, N);
+ N = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
+ }
for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
const Value *Idx = *OI;
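
The new block is the interesting part of this hunk: when the GEP produces a vector of pointers but the base is scalar, the base is broadcast to every lane first so the per-step arithmetic stays uniformly vectorial. A plain-C++ model of that normalization and of the N = N + Idx * ElementSize step (illustrative types, not SDNodes):

    #include <cstdint>
    #include <vector>

    using VecPtr = std::vector<uint64_t>;

    // BUILD_VECTOR of identical lanes: broadcast a scalar base or index.
    VecPtr splat(uint64_t Scalar, unsigned VectorWidth) {
      return VecPtr(VectorWidth, Scalar);
    }

    // One GEP step per lane; assumes Idx.size() == Base.size().
    VecPtr gepStep(VecPtr Base, const VecPtr &Idx, uint64_t ElementSize) {
      for (size_t i = 0; i != Base.size(); ++i)
        Base[i] += Idx[i] * ElementSize;
      return Base;
    }
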
@@ -2770,16 +2788,25 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
- MVT PtrTy = DAG.getTargetLoweringInfo().getPointerTy(AS);
+ MVT PtrTy =
+ DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS);
unsigned PtrSize = PtrTy.getSizeInBits();
APInt ElementSize(PtrSize, DL->getTypeAllocSize(Ty));
- // If this is a constant subscript, handle it quickly.
- if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
+ // If this is a scalar constant or a splat vector of constants,
+ // handle it quickly.
+ const auto *CI = dyn_cast<ConstantInt>(Idx);
+ if (!CI && isa<ConstantDataVector>(Idx) &&
+ cast<ConstantDataVector>(Idx)->getSplatValue())
+ CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
+
+ if (CI) {
if (CI->isZero())
continue;
APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
- SDValue OffsVal = DAG.getConstant(Offs, dl, PtrTy);
+ SDValue OffsVal = VectorWidth ?
+ DAG.getConstant(Offs, dl, MVT::getVectorVT(PtrTy, VectorWidth)) :
+ DAG.getConstant(Offs, dl, PtrTy);
N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal);
continue;
}
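
The widened fast path treats a splat vector of constant indices like a single scalar constant, folding the whole step into one added offset. A sketch of the splat detection it relies on (modeling ConstantDataVector::getSplatValue over plain integers):

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Return the common lane value if every lane holds the same constant.
    std::optional<int64_t> getSplatValue(const std::vector<int64_t> &Lanes) {
      if (Lanes.empty())
        return std::nullopt;
      for (int64_t L : Lanes)
        if (L != Lanes.front())
          return std::nullopt;
      return Lanes.front();
    }
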
@@ -2787,6 +2814,11 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
// N = N + Idx * ElementSize;
SDValue IdxN = getValue(Idx);
+ if (!IdxN.getValueType().isVector() && VectorWidth) {
+ MVT VT = MVT::getVectorVT(IdxN.getValueType().getSimpleVT(), VectorWidth);
+ SmallVector<SDValue, 16> Ops(VectorWidth, IdxN);
+ IdxN = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
+ }
// If the index is smaller or larger than intptr_t, truncate or extend
// it.
IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
@@ -2823,14 +2855,14 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
SDLoc dl = getCurSDLoc();
Type *Ty = I.getAllocatedType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
+ auto &DL = DAG.getDataLayout();
+ uint64_t TySize = DL.getTypeAllocSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
- I.getAlignment());
+ std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
SDValue AllocSize = getValue(I.getArraySize());
- EVT IntPtr = TLI.getPointerTy();
+ EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
if (AllocSize.getValueType() != IntPtr)
AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
@@ -2898,7 +2930,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
@@ -2975,8 +3007,8 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(DAG.getTargetLoweringInfo(), SrcV->getType(),
- ValueVTs, &Offsets);
+ ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
+ SrcV->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
return;
@@ -3077,9 +3109,10 @@ static bool getUniformBase(Value *& Ptr, SDValue& Base, SDValue& Index,
else if (SDB->findValue(ShuffleInst)) {
SDValue ShuffleNode = SDB->getValue(ShuffleInst);
SDLoc sdl = ShuffleNode;
- Base = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl,
- ShuffleNode.getValueType().getScalarType(), ShuffleNode,
- DAG.getConstant(0, sdl, TLI.getVectorIdxTy()));
+ Base = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, sdl,
+ ShuffleNode.getValueType().getScalarType(), ShuffleNode,
+ DAG.getConstant(0, sdl, TLI.getVectorIdxTy(DAG.getDataLayout())));
SDB->setValue(Ptr, Base);
}
else
@@ -3126,7 +3159,7 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
MachineMemOperand::MOStore, VT.getStoreSize(),
Alignment, AAInfo);
if (!UniformBase) {
- Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy());
+ Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
}
SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index };
@@ -3146,7 +3179,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
SDValue Mask = getValue(I.getArgOperand(2));
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT VT = TLI.getValueType(I.getType());
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
@@ -3184,7 +3217,7 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
SDValue Mask = getValue(I.getArgOperand(2));
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT VT = TLI.getValueType(I.getType());
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
@@ -3214,7 +3247,7 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
Alignment, AAInfo, Ranges);
if (!UniformBase) {
- Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy());
+ Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
}
SDValue Ops[] = { Root, Src0, Mask, Base, Index };
@@ -3291,8 +3324,10 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Ops[3];
Ops[0] = getRoot();
- Ops[1] = DAG.getConstant(I.getOrdering(), dl, TLI.getPointerTy());
- Ops[2] = DAG.getConstant(I.getSynchScope(), dl, TLI.getPointerTy());
+ Ops[1] = DAG.getConstant(I.getOrdering(), dl,
+ TLI.getPointerTy(DAG.getDataLayout()));
+ Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
+ TLI.getPointerTy(DAG.getDataLayout()));
DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}
@@ -3304,7 +3339,7 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
SDValue InChain = getRoot();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT VT = TLI.getValueType(I.getType());
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic load");
@@ -3339,7 +3374,8 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
SDValue InChain = getRoot();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT VT = TLI.getValueType(I.getValueOperand()->getType());
+ EVT VT =
+ TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
if (I.getAlignment() < VT.getSizeInBits() / 8)
report_fatal_error("Cannot generate unaligned atomic store");
@@ -3382,7 +3418,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
Info.opc == ISD::INTRINSIC_W_CHAIN)
Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
- TLI.getPointerTy()));
+ TLI.getPointerTy(DAG.getDataLayout())));
// Add all operands of the call to the operand list.
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
@@ -3391,7 +3427,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, I.getType(), ValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
if (HasChain)
ValueVTs.push_back(MVT::Other);
@@ -3425,7 +3461,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
if (!I.getType()->isVoidTy()) {
if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
- EVT VT = TLI.getValueType(PTy);
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
}
@@ -3458,8 +3494,9 @@ GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
SDLoc dl) {
SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
DAG.getConstant(0x7f800000, dl, MVT::i32));
- SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
- DAG.getConstant(23, dl, TLI.getPointerTy()));
+ SDValue t1 = DAG.getNode(
+ ISD::SRL, dl, MVT::i32, t0,
+ DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
DAG.getConstant(127, dl, MVT::i32));
return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
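
GetExponent's AND/SRL/SUB sequence is the standard IEEE-754 exponent extraction; the hunk only changes where the shift-amount type comes from. The same computation in scalar C++ (C++20 std::bit_cast; valid for normal, nonzero floats):

    #include <bit>
    #include <cstdint>

    float exponentOf(float X) {
      uint32_t Bits = std::bit_cast<uint32_t>(X);
      uint32_t T0 = Bits & 0x7f800000u;  // AND: isolate the 8 exponent bits
      uint32_t T1 = T0 >> 23;            // SRL: shift them to the bottom
      int32_t  T2 = int32_t(T1) - 127;   // SUB: remove the exponent bias
      return float(T2);                  // SINT_TO_FP
    }
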
@@ -3484,7 +3521,8 @@ static SDValue getLimitedPrecisionExp2(SDValue t0, SDLoc dl,
// IntegerPartOfX <<= 23;
IntegerPartOfX = DAG.getNode(
ISD::SHL, dl, MVT::i32, IntegerPartOfX,
- DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy()));
+ DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
+ DAG.getDataLayout())));
SDValue TwoToFractionalPartOfX;
if (LimitFloatPrecision <= 6) {
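
The IntegerPartOfX <<= 23 shift places the integer part of x straight into the float's exponent field, so adding it to the bits of 2^frac(x) multiplies by 2^int(x). Why shifting into bit 23 works, as a tiny sketch (assumes -126 <= N <= 127, i.e. a normal result):

    #include <bit>
    #include <cstdint>

    // Materialize 2^N by writing the biased exponent and a zero mantissa.
    float twoToThe(int N) {
      uint32_t Bits = uint32_t(N + 127) << 23;
      return std::bit_cast<float>(Bits);
    }
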
@@ -4071,11 +4109,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::vaend: visitVAEnd(I); return nullptr;
case Intrinsic::vacopy: visitVACopy(I); return nullptr;
case Intrinsic::returnaddress:
- setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI.getPointerTy(),
+ setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
+ TLI.getPointerTy(DAG.getDataLayout()),
getValue(I.getArgOperand(0))));
return nullptr;
case Intrinsic::frameaddress:
- setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI.getPointerTy(),
+ setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
+ TLI.getPointerTy(DAG.getDataLayout()),
getValue(I.getArgOperand(0))));
return nullptr;
case Intrinsic::read_register: {
@@ -4083,7 +4123,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue Chain = getRoot();
SDValue RegName =
DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
- EVT VT = TLI.getValueType(I.getType());
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
Res = DAG.getNode(ISD::READ_REGISTER, sdl,
DAG.getVTList(VT, MVT::Other), Chain, RegName);
setValue(&I, Res);
@@ -4335,14 +4375,15 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return nullptr;
case Intrinsic::eh_dwarf_cfa: {
SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
- TLI.getPointerTy());
+ TLI.getPointerTy(DAG.getDataLayout()));
SDValue Offset = DAG.getNode(ISD::ADD, sdl,
CfaArg.getValueType(),
DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl,
CfaArg.getValueType()),
CfaArg);
- SDValue FA = DAG.getNode(ISD::FRAMEADDR, sdl, TLI.getPointerTy(),
- DAG.getConstant(0, sdl, TLI.getPointerTy()));
+ SDValue FA = DAG.getNode(
+ ISD::FRAMEADDR, sdl, TLI.getPointerTy(DAG.getDataLayout()),
+ DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(),
FA, Offset));
return nullptr;
@@ -4444,7 +4485,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps);
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
@@ -4474,7 +4515,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::convertus: Code = ISD::CVT_US; break;
case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
}
- EVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
const Value *Op1 = I.getArgOperand(0);
Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1),
DAG.getValueType(DestVT),
@@ -4564,7 +4605,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(2))));
return nullptr;
case Intrinsic::fmuladd: {
- EVT VT = TLI.getValueType(I.getType());
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
TLI.isFMAFasterThanFMulAndFAdd(VT)) {
setValue(&I, DAG.getNode(ISD::FMA, sdl,
@@ -4593,10 +4634,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
MVT::i32))));
return nullptr;
case Intrinsic::convert_from_fp16:
- setValue(&I,
- DAG.getNode(ISD::FP_EXTEND, sdl, TLI.getValueType(I.getType()),
- DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
- getValue(I.getArgOperand(0)))));
+ setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
+ TLI.getValueType(DAG.getDataLayout(), I.getType()),
+ DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
+ getValue(I.getArgOperand(0)))));
return nullptr;
case Intrinsic::pcmarker: {
SDValue Tmp = getValue(I.getArgOperand(0));
@@ -4640,8 +4681,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::stacksave: {
SDValue Op = getRoot();
- Res = DAG.getNode(ISD::STACKSAVE, sdl,
- DAG.getVTList(TLI.getPointerTy(), MVT::Other), Op);
+ Res = DAG.getNode(
+ ISD::STACKSAVE, sdl,
+ DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return nullptr;
@@ -4655,7 +4697,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Emit code into the DAG to store the stack guard onto the stack.
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- EVT PtrTy = TLI.getPointerTy();
+ EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
SDValue Src, Chain = getRoot();
const Value *Ptr = cast<LoadInst>(I.getArgOperand(0))->getPointerOperand();
const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr);
@@ -4753,7 +4795,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::adjust_trampoline: {
setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
- TLI.getPointerTy(),
+ TLI.getPointerTy(DAG.getDataLayout()),
getValue(I.getArgOperand(0))));
return nullptr;
}
@@ -4794,10 +4836,11 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
TargetLowering::ArgListTy Args;
TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(sdl).setChain(getRoot())
- .setCallee(CallingConv::C, I.getType(),
- DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy()),
- std::move(Args), 0);
+ CLI.setDebugLoc(sdl).setChain(getRoot()).setCallee(
+ CallingConv::C, I.getType(),
+ DAG.getExternalSymbol(TrapFuncName.data(),
+ TLI.getPointerTy(DAG.getDataLayout())),
+ std::move(Args), 0);
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
DAG.setRoot(Result.second);
@@ -4873,7 +4916,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue Ops[2];
Ops[0] = getRoot();
- Ops[1] = DAG.getFrameIndex(FI, TLI.getPointerTy(), true);
+ Ops[1] =
+ DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()), true);
unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
@@ -4883,7 +4927,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::invariant_start:
// Discard region information.
- setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
+ setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
return nullptr;
case Intrinsic::invariant_end:
// Discard region information.
@@ -4903,7 +4947,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::clear_cache:
return TLI.getClearCacheBuiltinName();
case Intrinsic::eh_actions:
- setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
+ setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
return nullptr;
case Intrinsic::donothing:
// ignore
@@ -4935,11 +4979,11 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::instrprof_increment:
llvm_unreachable("instrprof failed to lower an increment");
- case Intrinsic::frameescape: {
+ case Intrinsic::localescape: {
MachineFunction &MF = DAG.getMachineFunction();
const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
- // Directly emit some FRAME_ALLOC machine instrs. Label assignment emission
+ // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
// is the same on all targets.
for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
@@ -4953,7 +4997,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
GlobalValue::getRealLinkageName(MF.getName()), Idx);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
- TII->get(TargetOpcode::FRAME_ALLOC))
+ TII->get(TargetOpcode::LOCAL_ESCAPE))
.addSym(FrameAllocSym)
.addFrameIndex(FI);
}
@@ -4961,10 +5005,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return nullptr;
}
- case Intrinsic::framerecover: {
- // i8* @llvm.framerecover(i8* %fn, i8* %fp, i32 %idx)
+ case Intrinsic::localrecover: {
+ // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
MachineFunction &MF = DAG.getMachineFunction();
- MVT PtrVT = TLI.getPointerTy(0);
+ MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
// Get the symbol that defines the frame offset.
auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
@@ -4978,7 +5022,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// that would make this PC relative.
SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
SDValue OffsetVal =
- DAG.getNode(ISD::FRAME_ALLOC_RECOVER, sdl, PtrVT, OffsetSym);
+ DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
// Add the offset to the FP.
Value *FP = I.getArgOperand(1);
@@ -4994,7 +5038,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::eh_exceptioncode: {
unsigned Reg = TLI.getExceptionPointerRegister();
assert(Reg && "cannot get exception code on this platform");
- MVT PtrVT = TLI.getPointerTy();
+ MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
assert(FuncInfo.MBB->isLandingPad() && "eh.exceptioncode in non-lpad");
unsigned VReg = FuncInfo.MBB->addLiveIn(Reg, PtrRC);
@@ -5178,7 +5222,8 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
SDValue Value,
bool IsSigned) {
- EVT VT = DAG.getTargetLoweringInfo().getValueType(I.getType(), true);
+ EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType(), true);
if (IsSigned)
Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
else
@@ -5203,7 +5248,8 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
const Value *Size = I.getArgOperand(2);
const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
if (CSize && CSize->getZExtValue() == 0) {
- EVT CallVT = DAG.getTargetLoweringInfo().getValueType(I.getType(), true);
+ EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+ I.getType(), true);
setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
return true;
}
@@ -5640,8 +5686,9 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
if (!RenameFn)
Callee = getValue(I.getCalledValue());
else
- Callee = DAG.getExternalSymbol(RenameFn,
- DAG.getTargetLoweringInfo().getPointerTy());
+ Callee = DAG.getExternalSymbol(
+ RenameFn,
+ DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
// Check if we can potentially perform a tail call. More detailed checking will
// be done within LowerCallTo, after more information about the call is known.
@@ -5670,13 +5717,12 @@ public:
/// getCallOperandValEVT - Return the EVT of the Value* that this operand
/// corresponds to. If there is no Value* for this operand, it returns
/// MVT::Other.
- EVT getCallOperandValEVT(LLVMContext &Context,
- const TargetLowering &TLI,
- const DataLayout *DL) const {
+ EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
+ const DataLayout &DL) const {
if (!CallOperandVal) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal))
- return TLI.getPointerTy();
+ return TLI.getPointerTy(DL);
llvm::Type *OpTy = CallOperandVal->getType();
@@ -5698,7 +5744,7 @@ public:
// If OpTy is not a single value, it may be a struct/union that we
// can tile with integers.
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
- unsigned BitSize = DL->getTypeSizeInBits(OpTy);
+ unsigned BitSize = DL.getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
@@ -5712,7 +5758,7 @@ public:
}
}
- return TLI.getValueType(OpTy, true);
+ return TLI.getValueType(DL, OpTy, true);
}
};
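
getCallOperandValEVT now takes the DataLayout by reference rather than by pointer. Its struct/union fallback maps an aggregate of a known bit size onto the integer type of the same width; a model of that switch (hypothetical helper):

    #include <optional>

    // Return the iN width to tile an aggregate operand with, if any.
    std::optional<unsigned> tileIntegerBits(unsigned BitSize) {
      switch (BitSize) {
      case 1: case 8: case 16: case 32: case 64: case 128:
        return BitSize;      // representable as a single integer VT
      default:
        return std::nullopt; // no single integer fits; report MVT::Other
      }
    }
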
@@ -5838,8 +5884,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
SDISelAsmOperandInfoVector ConstraintOperands;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- TargetLowering::AsmOperandInfoVector TargetConstraints =
- TLI.ParseConstraints(DAG.getSubtarget().getRegisterInfo(), CS);
+ TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
+ DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
bool hasMemory = false;
@@ -5864,10 +5910,11 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// corresponding argument.
assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
- OpVT = TLI.getSimpleValueType(STy->getElementType(ResNo));
+ OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
+ STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
- OpVT = TLI.getSimpleValueType(CS.getType());
+ OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
}
++ResNo;
break;
@@ -5888,8 +5935,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
- OpVT =
- OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, DL).getSimpleVT();
+ OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI,
+ DAG.getDataLayout()).getSimpleVT();
}
OpInfo.ConstraintVT = OpVT;
@@ -5977,17 +6024,19 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
const Value *OpVal = OpInfo.CallOperandVal;
if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
- OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
- TLI.getPointerTy());
+ OpInfo.CallOperand = DAG.getConstantPool(
+ cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
} else {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
Type *Ty = OpVal->getType();
- uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
- unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
+ auto &DL = DAG.getDataLayout();
+ uint64_t TySize = DL.getTypeAllocSize(Ty);
+ unsigned Align = DL.getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
- SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
+ SDValue StackSlot =
+ DAG.getFrameIndex(SSFI, TLI.getPointerTy(DAG.getDataLayout()));
Chain = DAG.getStore(Chain, getCurSDLoc(),
OpInfo.CallOperand, StackSlot,
MachinePointerInfo::getFixedStack(SSFI),
@@ -6022,9 +6071,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// AsmNodeOperands - The operands for the ISD::INLINEASM node.
std::vector<SDValue> AsmNodeOperands;
AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
- AsmNodeOperands.push_back(
- DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
- TLI.getPointerTy()));
+ AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
+ IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
// If we have a !srcloc metadata node associated with it, we want to attach
// this to the ultimately generated inline asm machineinstr. To do this, we
@@ -6064,8 +6112,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
}
- AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo, getCurSDLoc(),
- TLI.getPointerTy()));
+ AsmNodeOperands.push_back(DAG.getTargetConstant(
+ ExtraInfo, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
// Loop over all of the inputs, copying the operand values into the
// appropriate registers and processing the output regs.
@@ -6201,8 +6249,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
OpInfo.getMatchedOperand());
- AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag, getCurSDLoc(),
- TLI.getPointerTy()));
+ AsmNodeOperands.push_back(DAG.getTargetConstant(
+ OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
break;
}
@@ -6227,16 +6275,16 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Add information to the INLINEASM node to know about this input.
unsigned ResOpType =
InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
- AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
- getCurSDLoc(),
- TLI.getPointerTy()));
+ AsmNodeOperands.push_back(DAG.getTargetConstant(
+ ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
break;
}
if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
- assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
+ assert(InOperandVal.getValueType() ==
+ TLI.getPointerTy(DAG.getDataLayout()) &&
"Memory operands expect pointer values");
unsigned ConstraintID =
@@ -6314,7 +6362,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// FIXME: Why don't we do this for inline asms with MRVs?
if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
- EVT ResultType = TLI.getValueType(CS.getType());
+ EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType());
// If any of the results of the inline asm is a vector, it may have the
// wrong width/num elts. This can happen for register classes that can
@@ -6380,9 +6428,9 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- const DataLayout &DL = *TLI.getDataLayout();
- SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurSDLoc(),
- getRoot(), getValue(I.getOperand(0)),
+ const DataLayout &DL = DAG.getDataLayout();
+ SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
+ getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)),
DL.getABITypeAlignment(I.getType()));
setValue(&I, V);
@@ -6473,8 +6521,8 @@ static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
} else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
- Ops.push_back(
- Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
+ Ops.push_back(Builder.DAG.getTargetFrameIndex(
+ FI->getIndex(), TLI.getPointerTy(Builder.DAG.getDataLayout())));
} else
Ops.push_back(OpVal);
}
@@ -6654,7 +6702,7 @@ void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
// Create the return types based on the intrinsic definition
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SmallVector<EVT, 3> ValueVTs;
- ComputeValueVTs(TLI, CS->getType(), ValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
assert(ValueVTs.size() == 1 && "Expected only one return value type.");
// There is always a chain and a glue type at the end
@@ -6718,10 +6766,11 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Type *OrigRetTy = CLI.RetTy;
SmallVector<EVT, 4> RetTys;
SmallVector<uint64_t, 4> Offsets;
- ComputeValueVTs(*this, CLI.RetTy, RetTys, &Offsets);
+ auto &DL = CLI.DAG.getDataLayout();
+ ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this);
+ GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
bool CanLowerReturn =
this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
@@ -6733,13 +6782,13 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// FIXME: equivalent assert?
// assert(!CS.hasInAllocaArgument() &&
// "sret demotion is incompatible with inalloca");
- uint64_t TySize = getDataLayout()->getTypeAllocSize(CLI.RetTy);
- unsigned Align = getDataLayout()->getPrefTypeAlignment(CLI.RetTy);
+ uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
+ unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
MachineFunction &MF = CLI.DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
- DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy());
+ DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy(DL));
ArgListEntry Entry;
Entry.Node = DemoteStackSlot;
Entry.Ty = StackSlotPtrType;
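
When CanLowerReturn fails, the return value is demoted to sret: the callee writes through a hidden pointer into a caller-side stack slot sized and aligned for the return type (both now queried from the DataLayout directly). A standalone model of the slot creation (hypothetical FrameInfo, byte-addressed stack, Align > 0):

    #include <cstdint>

    struct FrameInfo {
      uint64_t NextOffset = 0;
      // Round up to Align, reserve Size bytes, and return the slot's offset
      // (standing in for a frame index).
      uint64_t createStackObject(uint64_t Size, uint64_t Align) {
        NextOffset = (NextOffset + Align - 1) / Align * Align;
        uint64_t Slot = NextOffset;
        NextOffset += Size;
        return Slot;
      }
    };
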
@@ -6784,7 +6833,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
ArgListTy &Args = CLI.getArgs();
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
+ ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
Type *FinalType = Args[i].Ty;
if (Args[i].isByVal)
FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
@@ -6797,7 +6846,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
if (Args[i].isZExt)
Flags.setZExt();
@@ -6821,14 +6870,14 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
if (Args[i].isByVal || Args[i].isInAlloca) {
PointerType *Ty = cast<PointerType>(Args[i].Ty);
Type *ElementTy = Ty->getElementType();
- Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
+ Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
// For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right.
unsigned FrameAlign;
if (Args[i].Alignment)
FrameAlign = Args[i].Alignment;
else
- FrameAlign = getByValTypeAlignment(ElementTy);
+ FrameAlign = getByValTypeAlignment(ElementTy, DL);
Flags.setByValAlign(FrameAlign);
}
if (Args[i].isNest)
@@ -6923,7 +6972,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
SmallVector<EVT, 1> PVTs;
Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
- ComputeValueVTs(*this, PtrRetTy, PVTs);
+ ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
EVT PtrVT = PVTs[0];
@@ -6997,7 +7046,8 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
+ RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
+ V->getType());
SDValue Chain = DAG.getEntryNode();
ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
@@ -7030,13 +7080,14 @@ static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
void SelectionDAGISel::LowerArguments(const Function &F) {
SelectionDAG &DAG = SDB->DAG;
SDLoc dl = SDB->getCurSDLoc();
- const DataLayout *DL = TLI->getDataLayout();
+ const DataLayout &DL = DAG.getDataLayout();
SmallVector<ISD::InputArg, 16> Ins;
if (!FuncInfo->CanLowerReturn) {
// Put in an sret pointer parameter before all the other parameters.
SmallVector<EVT, 1> ValueVTs;
- ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
+ ComputeValueVTs(*TLI, DAG.getDataLayout(),
+ PointerType::getUnqual(F.getReturnType()), ValueVTs);
// NOTE: Assuming that a pointer will never break down to more than one VT
// or one register.
@@ -7053,7 +7104,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; ++I, ++Idx) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(*TLI, I->getType(), ValueVTs);
+ ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs);
bool isArgValueUsed = !I->use_empty();
unsigned PartBase = 0;
Type *FinalType = I->getType();
@@ -7066,7 +7117,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
Flags.setZExt();
@@ -7090,14 +7141,14 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
if (Flags.isByVal() || Flags.isInAlloca()) {
PointerType *Ty = cast<PointerType>(I->getType());
Type *ElementTy = Ty->getElementType();
- Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
+ Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
unsigned FrameAlign;
if (F.getParamAlignment(Idx))
FrameAlign = F.getParamAlignment(Idx);
else
- FrameAlign = TLI->getByValTypeAlignment(ElementTy);
+ FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
Flags.setByValAlign(FrameAlign);
}
if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
@@ -7153,7 +7204,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
// Create a virtual register for the sret pointer, and put in a copy
// from the sret argument into it.
SmallVector<EVT, 1> ValueVTs;
- ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
+ ComputeValueVTs(*TLI, DAG.getDataLayout(),
+ PointerType::getUnqual(F.getReturnType()), ValueVTs);
MVT VT = ValueVTs[0].getSimpleVT();
MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
ISD::NodeType AssertOp = ISD::DELETED_NODE;
@@ -7177,7 +7229,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
++I, ++Idx) {
SmallVector<SDValue, 4> ArgValues;
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(*TLI, I->getType(), ValueVTs);
+ ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
// If this argument is unused then remember its value. It is used to generate
@@ -7324,7 +7376,7 @@ SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
// the input for this MBB.
SmallVector<EVT, 4> ValueVTs;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- ComputeValueVTs(TLI, PN->getType(), ValueVTs);
+ ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
@@ -7595,7 +7647,7 @@ void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
bool SelectionDAGBuilder::rangeFitsInWord(const APInt &Low, const APInt &High) {
// FIXME: Using the pointer type doesn't seem ideal.
- uint64_t BW = DAG.getTargetLoweringInfo().getPointerTy().getSizeInBits();
+ uint64_t BW = DAG.getDataLayout().getPointerSizeInBits();
uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
return Range <= BW;
}
@@ -7650,8 +7702,9 @@ bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
APInt LowBound;
APInt CmpRange;
- const int BitWidth =
- DAG.getTargetLoweringInfo().getPointerTy().getSizeInBits();
+ const int BitWidth = DAG.getTargetLoweringInfo()
+ .getPointerTy(DAG.getDataLayout())
+ .getSizeInBits();
assert(rangeFitsInWord(Low, High) && "Case range must fit in bit mask!");
if (Low.isNonNegative() && High.slt(BitWidth)) {
@@ -7731,7 +7784,7 @@ void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
// If target does not have legal shift left, do not emit bit tests at all.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT PTy = TLI.getPointerTy();
+ EVT PTy = TLI.getPointerTy(DAG.getDataLayout());
if (!TLI.isOperationLegal(ISD::SHL, PTy))
return;
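
The SelectionDAGBuilder.cpp hunks above are all one mechanical pattern: the DataLayout is obtained from the SelectionDAG (or passed in as a parameter) rather than fetched through TargetLowering. A minimal sketch of the before/after shape, using only signatures that appear in these hunks; Ty, DAG and TLI stand for whatever the surrounding builder code has in scope:

    // Before: TargetLowering carried an implicit DataLayout.
    //   EVT PtrVT = TLI.getPointerTy();
    //   ComputeValueVTs(TLI, Ty, ValueVTs);

    // After: the DataLayout travels as an explicit argument.
    const DataLayout &DL = DAG.getDataLayout();
    EVT PtrVT = TLI.getPointerTy(DL);
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, Ty, ValueVTs);
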
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index f225d54..7006754 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -755,8 +755,6 @@ public:
void visitJumpTable(JumpTable &JT);
void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
MachineBasicBlock *SwitchBB);
- unsigned visitLandingPadClauseBB(GlobalValue *ClauseGV,
- MachineBasicBlock *LPadMBB);
private:
// These all get lowered before this pass.
@@ -915,8 +913,8 @@ struct RegsForValue {
RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt);
- RegsForValue(LLVMContext &Context, const TargetLowering &tli, unsigned Reg,
- Type *Ty);
+ RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
+ const DataLayout &DL, unsigned Reg, Type *Ty);
/// append - Add the specified values to this one.
void append(const RegsForValue &RHS) {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index ef468a2..5b9b182 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -95,7 +95,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
case ISD::RETURNADDR: return "RETURNADDR";
case ISD::FRAMEADDR: return "FRAMEADDR";
- case ISD::FRAME_ALLOC_RECOVER: return "FRAME_ALLOC_RECOVER";
+ case ISD::LOCAL_RECOVER: return "LOCAL_RECOVER";
case ISD::READ_REGISTER: return "READ_REGISTER";
case ISD::WRITE_REGISTER: return "WRITE_REGISTER";
case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 31f8210..97ece8b 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -921,7 +921,8 @@ void SelectionDAGISel::DoInstructionSelection() {
bool SelectionDAGISel::PrepareEHLandingPad() {
MachineBasicBlock *MBB = FuncInfo->MBB;
- const TargetRegisterClass *PtrRC = TLI->getRegClassFor(TLI->getPointerTy());
+ const TargetRegisterClass *PtrRC =
+ TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));
// Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo.
@@ -1931,7 +1932,8 @@ SDNode
MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
unsigned Reg =
- TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0));
+ TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
+ *CurDAG);
SDValue New = CurDAG->getCopyFromReg(
Op->getOperand(0), dl, Reg, Op->getValueType(0));
New->setNodeId(-1);
@@ -1944,7 +1946,8 @@ SDNode
MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
- Op->getOperand(2).getValueType());
+ Op->getOperand(2).getValueType(),
+ *CurDAG);
SDValue New = CurDAG->getCopyToReg(
Op->getOperand(0), dl, Reg, Op->getOperand(2));
New->setNodeId(-1);
@@ -2329,21 +2332,23 @@ CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
-CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SDValue N, const TargetLowering *TLI) {
+CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
+ const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (N.getValueType() == VT) return true;
// Handle the case when VT is iPTR.
- return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy();
+ return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SDValue N, const TargetLowering *TLI, unsigned ChildNo) {
+ SDValue N, const TargetLowering *TLI, const DataLayout &DL,
+ unsigned ChildNo) {
if (ChildNo >= N.getNumOperands())
return false; // Match fails if out of range child #.
- return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI);
+ return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI,
+ DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
@@ -2355,13 +2360,13 @@ CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
- SDValue N, const TargetLowering *TLI) {
+ SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (cast<VTSDNode>(N)->getVT() == VT)
return true;
// Handle the case when VT is iPTR.
- return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy();
+ return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
@@ -2444,7 +2449,8 @@ static unsigned IsPredicateKnownToFail(const unsigned char *Table,
Result = !::CheckOpcode(Table, Index, N.getNode());
return Index;
case SelectionDAGISel::OPC_CheckType:
- Result = !::CheckType(Table, Index, N, SDISel.TLI);
+ Result = !::CheckType(Table, Index, N, SDISel.TLI,
+ SDISel.CurDAG->getDataLayout());
return Index;
case SelectionDAGISel::OPC_CheckChild0Type:
case SelectionDAGISel::OPC_CheckChild1Type:
@@ -2454,15 +2460,16 @@ static unsigned IsPredicateKnownToFail(const unsigned char *Table,
case SelectionDAGISel::OPC_CheckChild5Type:
case SelectionDAGISel::OPC_CheckChild6Type:
case SelectionDAGISel::OPC_CheckChild7Type:
- Result = !::CheckChildType(Table, Index, N, SDISel.TLI,
- Table[Index - 1] -
- SelectionDAGISel::OPC_CheckChild0Type);
+ Result = !::CheckChildType(
+ Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
+ Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
return Index;
case SelectionDAGISel::OPC_CheckCondCode:
Result = !::CheckCondCode(Table, Index, N);
return Index;
case SelectionDAGISel::OPC_CheckValueType:
- Result = !::CheckValueType(Table, Index, N, SDISel.TLI);
+ Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
+ SDISel.CurDAG->getDataLayout());
return Index;
case SelectionDAGISel::OPC_CheckInteger:
Result = !::CheckInteger(Table, Index, N);
@@ -2816,7 +2823,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
continue;
case OPC_CheckType:
- if (!::CheckType(MatcherTable, MatcherIndex, N, TLI))
+ if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
+ CurDAG->getDataLayout()))
break;
continue;
@@ -2864,7 +2872,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (CaseVT == MVT::iPTR)
- CaseVT = TLI->getPointerTy();
+ CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
// If the VT matches, then we will execute this case.
if (CurNodeVT == CaseVT)
@@ -2887,14 +2895,16 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_CheckChild4Type: case OPC_CheckChild5Type:
case OPC_CheckChild6Type: case OPC_CheckChild7Type:
if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
- Opcode-OPC_CheckChild0Type))
+ CurDAG->getDataLayout(),
+ Opcode - OPC_CheckChild0Type))
break;
continue;
case OPC_CheckCondCode:
if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
continue;
case OPC_CheckValueType:
- if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI))
+ if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
+ CurDAG->getDataLayout()))
break;
continue;
case OPC_CheckInteger:
@@ -3097,7 +3107,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
MVT::SimpleValueType VT =
(MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (VT == MVT::iPTR)
- VT = TLI->getPointerTy().SimpleTy;
+ VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
VTs.push_back(VT);
}
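
The matcher-table predicates gain a DataLayout parameter for the same reason: iPTR entries in the generated tables are placeholders for "target pointer type", which is now a per-module property. A paraphrase of the check (the helper name matchesVT is illustrative; the real code is CheckType above):

    static bool matchesVT(SDValue N, MVT::SimpleValueType VT,
                          const TargetLowering *TLI, const DataLayout &DL) {
      if (N.getValueType() == VT)
        return true;
      // iPTR is resolved against the module's pointer width.
      return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
    }
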
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index bd40cac..34688df 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -337,9 +337,9 @@ lowerCallFromStatepoint(ImmutableStatepoint ISP, MachineBasicBlock *LandingPad,
// TODO: To eliminate this problem we can remove gc.result intrinsics
// completely and make the statepoint call return a tuple.
unsigned Reg = Builder.FuncInfo.CreateRegs(ISP.getActualReturnType());
- RegsForValue RFV(*Builder.DAG.getContext(),
- Builder.DAG.getTargetLoweringInfo(), Reg,
- ISP.getActualReturnType());
+ RegsForValue RFV(
+ *Builder.DAG.getContext(), Builder.DAG.getTargetLoweringInfo(),
+ Builder.DAG.getDataLayout(), Reg, ISP.getActualReturnType());
SDValue Chain = Builder.DAG.getEntryNode();
RFV.getCopyToRegs(ReturnValue, Builder.DAG, Builder.getCurSDLoc(), Chain,
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index e7722b3..fbf6512 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -102,7 +102,8 @@ TargetLowering::makeLibCall(SelectionDAG &DAG,
}
if (LC == RTLIB::UNKNOWN_LIBCALL)
report_fatal_error("Unsupported library call operation!");
- SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), getPointerTy());
+ SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
+ getPointerTy(DAG.getDataLayout()));
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
TargetLowering::CallLoweringInfo CLI(DAG);
@@ -206,14 +207,16 @@ void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
NewRHS = DAG.getConstant(0, dl, RetVT);
CCCode = getCmpLibcallCC(LC1);
if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
- SDValue Tmp = DAG.getNode(ISD::SETCC, dl,
- getSetCCResultType(*DAG.getContext(), RetVT),
- NewLHS, NewRHS, DAG.getCondCode(CCCode));
+ SDValue Tmp = DAG.getNode(
+ ISD::SETCC, dl,
+ getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
+ NewLHS, NewRHS, DAG.getCondCode(CCCode));
NewLHS = makeLibCall(DAG, LC2, RetVT, Ops, 2, false/*sign irrelevant*/,
dl).first;
- NewLHS = DAG.getNode(ISD::SETCC, dl,
- getSetCCResultType(*DAG.getContext(), RetVT), NewLHS,
- NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
+ NewLHS = DAG.getNode(
+ ISD::SETCC, dl,
+ getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT),
+ NewLHS, NewRHS, DAG.getCondCode(getCmpLibcallCC(LC2)));
NewLHS = DAG.getNode(ISD::OR, dl, Tmp.getValueType(), Tmp, NewLHS);
NewRHS = SDValue();
}
@@ -242,7 +245,7 @@ SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
(JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
- return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(0));
+ return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));
return Table;
}
@@ -265,9 +268,7 @@ TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// In dynamic-no-pic mode, assume that known defined values are safe.
if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
- GA &&
- !GA->getGlobal()->isDeclaration() &&
- !GA->getGlobal()->isWeakForLinker())
+ GA && GA->getGlobal()->isStrongDefinitionForLinker())
return true;
// Otherwise assume nothing is safe.
@@ -383,6 +384,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
"Mask size mismatches value type size!");
APInt NewMask = DemandedMask;
SDLoc dl(Op);
+ auto &DL = TLO.DAG.getDataLayout();
// Don't know anything.
KnownZero = KnownOne = APInt(BitWidth, 0);
@@ -645,7 +647,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
unsigned InnerBits = InnerVT.getSizeInBits();
if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
isTypeDesirableForOp(ISD::SHL, InnerVT)) {
- EVT ShTy = getShiftAmountTy(InnerVT);
+ EVT ShTy = getShiftAmountTy(InnerVT, DL);
if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
ShTy = InnerVT;
SDValue NarrowShl =
@@ -824,7 +826,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// for scalar types after legalization.
EVT ShiftAmtTy = Op.getValueType();
if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
- ShiftAmtTy = getShiftAmountTy(ShiftAmtTy);
+ ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL);
SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
ShiftAmtTy);
@@ -1009,8 +1011,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
SDValue Shift = In.getOperand(1);
if (TLO.LegalTypes()) {
uint64_t ShVal = ShAmt->getZExtValue();
- Shift =
- TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(Op.getValueType()));
+ Shift = TLO.DAG.getConstant(ShVal, dl,
+ getShiftAmountTy(Op.getValueType(), DL));
}
APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
@@ -1400,7 +1402,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
APInt newMask = APInt::getLowBitsSet(maskWidth, width);
for (unsigned offset=0; offset<origWidth/width; offset++) {
if ((newMask & Mask) == Mask) {
- if (!getDataLayout()->isLittleEndian())
+ if (!DAG.getDataLayout().isLittleEndian())
bestOffset = (origWidth/width - offset - 1) * (width/8);
else
bestOffset = (uint64_t)offset * (width/8);
@@ -1473,7 +1475,8 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (DCI.isBeforeLegalizeOps() ||
(isOperationLegal(ISD::SETCC, newVT) &&
getCondCodeAction(Cond, newVT.getSimpleVT()) == Legal)) {
- EVT NewSetCCVT = getSetCCResultType(*DAG.getContext(), newVT);
+ EVT NewSetCCVT =
+ getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), newVT);
SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
@@ -1692,11 +1695,13 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
(VT == N0.getValueType() ||
(isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) &&
- N0.getOpcode() == ISD::AND)
+ N0.getOpcode() == ISD::AND) {
+ auto &DL = DAG.getDataLayout();
if (ConstantSDNode *AndRHS =
dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
- EVT ShiftTy = DCI.isBeforeLegalize() ?
- getPointerTy() : getShiftAmountTy(N0.getValueType());
+ EVT ShiftTy = DCI.isBeforeLegalize()
+ ? getPointerTy(DL)
+ : getShiftAmountTy(N0.getValueType(), DL);
if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
// Perform the xform if the AND RHS is a single bit.
if (AndRHS->getAPIntValue().isPowerOf2()) {
@@ -1716,6 +1721,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
}
}
+ }
if (C1.getMinSignedBits() <= 64 &&
!isLegalICmpImmediate(C1.getSExtValue())) {
@@ -1727,8 +1733,10 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
const APInt &AndRHSC = AndRHS->getAPIntValue();
if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) {
unsigned ShiftBits = AndRHSC.countTrailingZeros();
- EVT ShiftTy = DCI.isBeforeLegalize() ?
- getPointerTy() : getShiftAmountTy(N0.getValueType());
+ auto &DL = DAG.getDataLayout();
+ EVT ShiftTy = DCI.isBeforeLegalize()
+ ? getPointerTy(DL)
+ : getShiftAmountTy(N0.getValueType(), DL);
EVT CmpTy = N0.getValueType();
SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0.getOperand(0),
DAG.getConstant(ShiftBits, dl,
@@ -1757,8 +1765,10 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
NewC = NewC.lshr(ShiftBits);
if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
isLegalICmpImmediate(NewC.getSExtValue())) {
- EVT ShiftTy = DCI.isBeforeLegalize() ?
- getPointerTy() : getShiftAmountTy(N0.getValueType());
+ auto &DL = DAG.getDataLayout();
+ EVT ShiftTy = DCI.isBeforeLegalize()
+ ? getPointerTy(DL)
+ : getShiftAmountTy(N0.getValueType(), DL);
EVT CmpTy = N0.getValueType();
SDValue Shift = DAG.getNode(ISD::SRL, dl, CmpTy, N0,
DAG.getConstant(ShiftBits, dl, ShiftTy));
@@ -1945,10 +1955,12 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
Cond);
if (N0.getNode()->hasOneUse()) {
assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
+ auto &DL = DAG.getDataLayout();
// (Z-X) == X --> Z == X<<1
- SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N1,
- DAG.getConstant(1, dl,
- getShiftAmountTy(N1.getValueType())));
+ SDValue SH = DAG.getNode(
+ ISD::SHL, dl, N1.getValueType(), N1,
+ DAG.getConstant(1, dl,
+ getShiftAmountTy(N1.getValueType(), DL)));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
@@ -1969,10 +1981,11 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
DAG.getConstant(0, dl, N1.getValueType()), Cond);
if (N1.getNode()->hasOneUse()) {
assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
+ auto &DL = DAG.getDataLayout();
// X == (Z-X) --> X<<1 == Z
- SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0,
- DAG.getConstant(1, dl,
- getShiftAmountTy(N0.getValueType())));
+ SDValue SH = DAG.getNode(
+ ISD::SHL, dl, N1.getValueType(), N0,
+ DAG.getConstant(1, dl, getShiftAmountTy(N0.getValueType(), DL)));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
@@ -2105,9 +2118,8 @@ PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
// Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//
-
TargetLowering::ConstraintType
-TargetLowering::getConstraintType(const std::string &Constraint) const {
+TargetLowering::getConstraintType(StringRef Constraint) const {
unsigned S = Constraint.size();
if (S == 1) {
@@ -2140,7 +2152,7 @@ TargetLowering::getConstraintType(const std::string &Constraint) const {
}
if (S > 1 && Constraint[0] == '{' && Constraint[S-1] == '}') {
- if (S == 8 && !Constraint.compare(1, 6, "memory", 6)) // "{memory}"
+ if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}"
return C_Memory;
return C_Register;
}
@@ -2206,8 +2218,8 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
C ? SDLoc(C) : SDLoc(),
Op.getValueType(), Offs));
- return;
}
+ return;
}
if (C) { // just C, no GV.
// Simple constants are not allowed for 's'.
@@ -2217,8 +2229,8 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// ScheduleDAGSDNodes::EmitNode, which is very generic.
Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
SDLoc(C), MVT::i64));
- return;
}
+ return;
}
break;
}
@@ -2227,7 +2239,7 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::pair<unsigned, const TargetRegisterClass *>
TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
- const std::string &Constraint,
+ StringRef Constraint,
MVT VT) const {
if (Constraint.empty() || Constraint[0] != '{')
return std::make_pair(0u, static_cast<TargetRegisterClass*>(nullptr));
@@ -2293,7 +2305,8 @@ unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
/// If this returns an empty vector, and if the constraint string itself
/// isn't empty, there was an error parsing.
TargetLowering::AsmOperandInfoVector
-TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI,
+TargetLowering::ParseConstraints(const DataLayout &DL,
+ const TargetRegisterInfo *TRI,
ImmutableCallSite CS) const {
/// ConstraintOperands - Information about all of the constraints.
AsmOperandInfoVector ConstraintOperands;
@@ -2329,10 +2342,11 @@ TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI,
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
- OpInfo.ConstraintVT = getSimpleValueType(STy->getElementType(ResNo));
+ OpInfo.ConstraintVT =
+ getSimpleValueType(DL, STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
- OpInfo.ConstraintVT = getSimpleValueType(CS.getType());
+ OpInfo.ConstraintVT = getSimpleValueType(DL, CS.getType());
}
++ResNo;
break;
@@ -2361,7 +2375,7 @@ TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI,
// If OpTy is not a single value, it may be a struct/union that we
// can tile with integers.
if (!OpTy->isSingleValueType() && OpTy->isSized()) {
- unsigned BitSize = getDataLayout()->getTypeSizeInBits(OpTy);
+ unsigned BitSize = DL.getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
@@ -2375,8 +2389,7 @@ TargetLowering::ParseConstraints(const TargetRegisterInfo *TRI,
break;
}
} else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
- unsigned PtrSize
- = getDataLayout()->getPointerSizeInBits(PT->getAddressSpace());
+ unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
} else {
OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
@@ -2684,7 +2697,8 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
if (ShAmt) {
// TODO: For UDIV use SRL instead of SRA.
SDValue Amt =
- DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType()));
+ DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
+ DAG.getDataLayout()));
SDNodeFlags Flags;
Flags.setExact(true);
Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
@@ -2750,17 +2764,19 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, const APInt &Divisor,
Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
Created->push_back(Q.getNode());
}
+ auto &DL = DAG.getDataLayout();
// Shift right algebraic if shift value is nonzero
if (magics.s > 0) {
- Q = DAG.getNode(ISD::SRA, dl, VT, Q,
- DAG.getConstant(magics.s, dl,
- getShiftAmountTy(Q.getValueType())));
+ Q = DAG.getNode(
+ ISD::SRA, dl, VT, Q,
+ DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
Created->push_back(Q.getNode());
}
// Extract the sign bit and add it to the quotient
- SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q,
- DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
- getShiftAmountTy(Q.getValueType())));
+ SDValue T =
+ DAG.getNode(ISD::SRL, dl, VT, Q,
+ DAG.getConstant(VT.getScalarSizeInBits() - 1, dl,
+ getShiftAmountTy(Q.getValueType(), DL)));
Created->push_back(T.getNode());
return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}
@@ -2776,6 +2792,7 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
EVT VT = N->getValueType(0);
SDLoc dl(N);
+ auto &DL = DAG.getDataLayout();
// Check to see if we can do this.
// FIXME: We should be more aggressive here.
@@ -2792,9 +2809,9 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
// the divided value upfront.
if (magics.a != 0 && !Divisor[0]) {
unsigned Shift = Divisor.countTrailingZeros();
- Q = DAG.getNode(ISD::SRL, dl, VT, Q,
- DAG.getConstant(Shift, dl,
- getShiftAmountTy(Q.getValueType())));
+ Q = DAG.getNode(
+ ISD::SRL, dl, VT, Q,
+ DAG.getConstant(Shift, dl, getShiftAmountTy(Q.getValueType(), DL)));
Created->push_back(Q.getNode());
// Get magic number for the shifted divisor.
@@ -2819,21 +2836,22 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, const APInt &Divisor,
if (magics.a == 0) {
assert(magics.s < Divisor.getBitWidth() &&
"We shouldn't generate an undefined shift!");
- return DAG.getNode(ISD::SRL, dl, VT, Q,
- DAG.getConstant(magics.s, dl,
- getShiftAmountTy(Q.getValueType())));
+ return DAG.getNode(
+ ISD::SRL, dl, VT, Q,
+ DAG.getConstant(magics.s, dl, getShiftAmountTy(Q.getValueType(), DL)));
} else {
SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
Created->push_back(NPQ.getNode());
- NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
- DAG.getConstant(1, dl,
- getShiftAmountTy(NPQ.getValueType())));
+ NPQ = DAG.getNode(
+ ISD::SRL, dl, VT, NPQ,
+ DAG.getConstant(1, dl, getShiftAmountTy(NPQ.getValueType(), DL)));
Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
Created->push_back(NPQ.getNode());
- return DAG.getNode(ISD::SRL, dl, VT, NPQ,
- DAG.getConstant(magics.s - 1, dl,
- getShiftAmountTy(NPQ.getValueType())));
+ return DAG.getNode(
+ ISD::SRL, dl, VT, NPQ,
+ DAG.getConstant(magics.s - 1, dl,
+ getShiftAmountTy(NPQ.getValueType(), DL)));
}
}
@@ -2919,8 +2937,9 @@ bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
if (!LH.getNode() && !RH.getNode() &&
isOperationLegalOrCustom(ISD::SRL, VT) &&
isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
+ auto &DL = DAG.getDataLayout();
unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
- SDValue Shift = DAG.getConstant(ShiftAmt, dl, getShiftAmountTy(VT));
+ SDValue Shift = DAG.getConstant(ShiftAmt, dl, getShiftAmountTy(VT, DL));
LH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(0), Shift);
LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
RH = DAG.getNode(ISD::SRL, dl, VT, N->getOperand(1), Shift);
@@ -2980,14 +2999,15 @@ bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Node->getOperand(0));
- SDValue ExponentBits = DAG.getNode(ISD::SRL, dl, IntVT,
- DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
- DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT)));
+ auto &DL = DAG.getDataLayout();
+ SDValue ExponentBits = DAG.getNode(
+ ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
+ DAG.getZExtOrTrunc(ExponentLoBit, dl, getShiftAmountTy(IntVT, DL)));
SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
- SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
- DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
- DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT)));
+ SDValue Sign = DAG.getNode(
+ ISD::SRA, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
+ DAG.getZExtOrTrunc(SignLowBit, dl, getShiftAmountTy(IntVT, DL)));
Sign = DAG.getSExtOrTrunc(Sign, dl, NVT);
SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
@@ -2996,17 +3016,17 @@ bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
R = DAG.getZExtOrTrunc(R, dl, NVT);
-
- R = DAG.getSelectCC(dl, Exponent, ExponentLoBit,
- DAG.getNode(ISD::SHL, dl, NVT, R,
- DAG.getZExtOrTrunc(
- DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
- dl, getShiftAmountTy(IntVT))),
- DAG.getNode(ISD::SRL, dl, NVT, R,
- DAG.getZExtOrTrunc(
- DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
- dl, getShiftAmountTy(IntVT))),
- ISD::SETGT);
+ R = DAG.getSelectCC(
+ dl, Exponent, ExponentLoBit,
+ DAG.getNode(ISD::SHL, dl, NVT, R,
+ DAG.getZExtOrTrunc(
+ DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
+ dl, getShiftAmountTy(IntVT, DL))),
+ DAG.getNode(ISD::SRL, dl, NVT, R,
+ DAG.getZExtOrTrunc(
+ DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
+ dl, getShiftAmountTy(IntVT, DL))),
+ ISD::SETGT);
SDValue Ret = DAG.getNode(ISD::SUB, dl, NVT,
DAG.getNode(ISD::XOR, dl, NVT, R, Sign),
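
One transform touched above, (X & 8) != 0 --> (X & 8) >> 3, changes only where its ShiftTy comes from (getShiftAmountTy now takes the DataLayout). The arithmetic it relies on can be checked in plain C++, independent of any LLVM machinery:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X < 64; ++X) {
        bool Cmp = (X & 8) != 0;         // original setcc form
        uint32_t Shifted = (X & 8) >> 3; // rewritten form; 3 == log2(8)
        assert(Cmp == (Shifted != 0) && Shifted <= 1);
      }
      return 0;
    }

The rewrite is only performed when the AND mask is a single bit (the isPowerOf2 guard above), which is what guarantees the shifted result is 0 or 1.
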
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
index 0e89bad..00db942 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
@@ -15,9 +15,5 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
-TargetSelectionDAGInfo::TargetSelectionDAGInfo(const DataLayout *DL)
- : DL(DL) {
-}
-
TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
}
diff --git a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 116fd5b..d236e1f 100644
--- a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -45,7 +45,6 @@ STATISTIC(NumSpilled, "Number of registers live across unwind edges");
namespace {
class SjLjEHPrepare : public FunctionPass {
- const TargetMachine *TM;
Type *doubleUnderDataTy;
Type *doubleUnderJBufTy;
Type *FunctionContextTy;
@@ -63,7 +62,7 @@ class SjLjEHPrepare : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- explicit SjLjEHPrepare(const TargetMachine *TM) : FunctionPass(ID), TM(TM) {}
+ explicit SjLjEHPrepare() : FunctionPass(ID) {}
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
@@ -83,11 +82,11 @@ private:
} // end anonymous namespace
char SjLjEHPrepare::ID = 0;
+INITIALIZE_PASS(SjLjEHPrepare, "sjljehprepare", "Prepare SjLj exceptions",
+ false, false)
// Public Interface To the SjLjEHPrepare pass.
-FunctionPass *llvm::createSjLjEHPreparePass(const TargetMachine *TM) {
- return new SjLjEHPrepare(TM);
-}
+FunctionPass *llvm::createSjLjEHPreparePass() { return new SjLjEHPrepare(); }
// doInitialization - Set up declarations and types needed to process
// exceptions.
bool SjLjEHPrepare::doInitialization(Module &M) {
@@ -196,9 +195,8 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
// that needs to be restored on all exits from the function. This is an alloca
// because the value needs to be added to the global context list.
- const TargetLowering *TLI = TM->getSubtargetImpl(F)->getTargetLowering();
- unsigned Align =
- TLI->getDataLayout()->getPrefTypeAlignment(FunctionContextTy);
+ auto &DL = F.getParent()->getDataLayout();
+ unsigned Align = DL.getPrefTypeAlignment(FunctionContextTy);
FuncCtx = new AllocaInst(FunctionContextTy, nullptr, Align, "fn_context",
EntryBB->begin());
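
With the TargetMachine member gone, SjLjEHPrepare reaches the DataLayout through the IR itself, since the Module now owns it. The lookup path in isolation (the helper name is illustrative, not from this tree):

    static unsigned contextAlignment(Function &F, Type *FunctionContextTy) {
      const DataLayout &DL = F.getParent()->getDataLayout();
      return DL.getPrefTypeAlignment(FunctionContextTy);
    }
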
diff --git a/contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp b/contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
index d88be57..8550583 100644
--- a/contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp
@@ -49,7 +49,6 @@ namespace {
/// information provided by this pass is optional and not required by the
/// aforementioned intrinsic to function.
class StackMapLiveness : public MachineFunctionPass {
- MachineFunction *MF;
const TargetRegisterInfo *TRI;
LivePhysRegs LiveRegs;
@@ -68,14 +67,14 @@ public:
private:
/// \brief Performs the actual liveness calculation for the function.
- bool calculateLiveness();
+ bool calculateLiveness(MachineFunction &MF);
/// \brief Add the current register live set to the instruction.
- void addLiveOutSetToMI(MachineInstr &MI);
+ void addLiveOutSetToMI(MachineFunction &MF, MachineInstr &MI);
/// \brief Create a register mask and initialize it with the registers from
/// the register live set.
- uint32_t *createRegisterMask() const;
+ uint32_t *createRegisterMask(MachineFunction &MF) const;
};
} // namespace
@@ -95,8 +94,7 @@ void StackMapLiveness::getAnalysisUsage(AnalysisUsage &AU) const {
// We preserve all information.
AU.setPreservesAll();
AU.setPreservesCFG();
- // Default dependencie for all MachineFunction passes.
- AU.addRequired<MachineFunctionAnalysis>();
+ MachineFunctionPass::getAnalysisUsage(AU);
}
/// Calculate the liveness information for the given machine function.
@@ -106,7 +104,6 @@ bool StackMapLiveness::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** COMPUTING STACKMAP LIVENESS: " << MF.getName()
<< " **********\n");
- this->MF = &MF;
TRI = MF.getSubtarget().getRegisterInfo();
++NumStackMapFuncVisited;
@@ -115,25 +112,23 @@ bool StackMapLiveness::runOnMachineFunction(MachineFunction &MF) {
++NumStackMapFuncSkipped;
return false;
}
- return calculateLiveness();
+ return calculateLiveness(MF);
}
/// Performs the actual liveness calculation for the function.
-bool StackMapLiveness::calculateLiveness() {
+bool StackMapLiveness::calculateLiveness(MachineFunction &MF) {
bool HasChanged = false;
// For all basic blocks in the function.
- for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
- MBBI != MBBE; ++MBBI) {
- DEBUG(dbgs() << "****** BB " << MBBI->getName() << " ******\n");
+ for (auto &MBB : MF) {
+ DEBUG(dbgs() << "****** BB " << MBB.getName() << " ******\n");
LiveRegs.init(TRI);
- LiveRegs.addLiveOuts(MBBI);
+ LiveRegs.addLiveOuts(&MBB);
bool HasStackMap = false;
// Reverse iterate over all instructions and add the current live register
// set to an instruction if we encounter a patchpoint instruction.
- for (MachineBasicBlock::reverse_iterator I = MBBI->rbegin(),
- E = MBBI->rend(); I != E; ++I) {
+ for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I) {
if (I->getOpcode() == TargetOpcode::PATCHPOINT) {
- addLiveOutSetToMI(*I);
+ addLiveOutSetToMI(MF, *I);
HasChanged = true;
HasStackMap = true;
++NumStackMaps;
@@ -149,21 +144,23 @@ bool StackMapLiveness::calculateLiveness() {
}
/// Add the current register live set to the instruction.
-void StackMapLiveness::addLiveOutSetToMI(MachineInstr &MI) {
- uint32_t *Mask = createRegisterMask();
+void StackMapLiveness::addLiveOutSetToMI(MachineFunction &MF,
+ MachineInstr &MI) {
+ uint32_t *Mask = createRegisterMask(MF);
MachineOperand MO = MachineOperand::CreateRegLiveOut(Mask);
- MI.addOperand(*MF, MO);
+ MI.addOperand(MF, MO);
}
/// Create a register mask and initialize it with the registers from the
/// register live set.
-uint32_t *StackMapLiveness::createRegisterMask() const {
+uint32_t *StackMapLiveness::createRegisterMask(MachineFunction &MF) const {
// The mask is owned and cleaned up by the Machine Function.
- uint32_t *Mask = MF->allocateRegisterMask(TRI->getNumRegs());
- for (LivePhysRegs::const_iterator RI = LiveRegs.begin(), RE = LiveRegs.end();
- RI != RE; ++RI)
- Mask[*RI / 32] |= 1U << (*RI % 32);
+ uint32_t *Mask = MF.allocateRegisterMask(TRI->getNumRegs());
+ for (auto Reg : LiveRegs)
+ Mask[Reg / 32] |= 1U << (Reg % 32);
+ // Give the target a chance to adjust the mask.
TRI->adjustStackMapLiveOutMask(Mask);
+
return Mask;
}
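
The register mask built by createRegisterMask packs one bit per physical register, 32 registers per 32-bit word. The packing arithmetic, exercised standalone:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      const unsigned NumRegs = 100;
      std::vector<uint32_t> Mask((NumRegs + 31) / 32, 0);
      const unsigned Regs[] = {3, 32, 95};
      for (unsigned Reg : Regs)
        Mask[Reg / 32] |= 1U << (Reg % 32); // word index, then bit index
      assert(Mask[0] == (1U << 3));
      assert(Mask[1] == 1U);         // reg 32 lands in word 1, bit 0
      assert(Mask[2] == (1U << 31)); // reg 95 lands in word 2, bit 31
      return 0;
    }
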
diff --git a/contrib/llvm/lib/CodeGen/StackMaps.cpp b/contrib/llvm/lib/CodeGen/StackMaps.cpp
index 1e8e03f..116eef6 100644
--- a/contrib/llvm/lib/CodeGen/StackMaps.cpp
+++ b/contrib/llvm/lib/CodeGen/StackMaps.cpp
@@ -29,17 +29,17 @@ using namespace llvm;
#define DEBUG_TYPE "stackmaps"
-static cl::opt<int> StackMapVersion("stackmap-version", cl::init(1),
- cl::desc("Specify the stackmap encoding version (default = 1)"));
+static cl::opt<int> StackMapVersion(
+ "stackmap-version", cl::init(1),
+ cl::desc("Specify the stackmap encoding version (default = 1)"));
const char *StackMaps::WSMP = "Stack Maps: ";
PatchPointOpers::PatchPointOpers(const MachineInstr *MI)
- : MI(MI),
- HasDef(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
- !MI->getOperand(0).isImplicit()),
- IsAnyReg(MI->getOperand(getMetaIdx(CCPos)).getImm() == CallingConv::AnyReg)
-{
+ : MI(MI), HasDef(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
+ !MI->getOperand(0).isImplicit()),
+ IsAnyReg(MI->getOperand(getMetaIdx(CCPos)).getImm() ==
+ CallingConv::AnyReg) {
#ifndef NDEBUG
unsigned CheckStartIdx = 0, e = MI->getNumOperands();
while (CheckStartIdx < e && MI->getOperand(CheckStartIdx).isReg() &&
@@ -76,30 +76,31 @@ StackMaps::StackMaps(AsmPrinter &AP) : AP(AP) {
/// Go up the super-register chain until we hit a valid dwarf register number.
static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI) {
- int RegNo = TRI->getDwarfRegNum(Reg, false);
- for (MCSuperRegIterator SR(Reg, TRI); SR.isValid() && RegNo < 0; ++SR)
- RegNo = TRI->getDwarfRegNum(*SR, false);
+ int RegNum = TRI->getDwarfRegNum(Reg, false);
+ for (MCSuperRegIterator SR(Reg, TRI); SR.isValid() && RegNum < 0; ++SR)
+ RegNum = TRI->getDwarfRegNum(*SR, false);
- assert(RegNo >= 0 && "Invalid Dwarf register number.");
- return (unsigned) RegNo;
+ assert(RegNum >= 0 && "Invalid Dwarf register number.");
+ return (unsigned)RegNum;
}
MachineInstr::const_mop_iterator
StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- LocationVec &Locs, LiveOutVec &LiveOuts) const {
+ MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
+ LiveOutVec &LiveOuts) const {
const TargetRegisterInfo *TRI = AP.MF->getSubtarget().getRegisterInfo();
if (MOI->isImm()) {
switch (MOI->getImm()) {
- default: llvm_unreachable("Unrecognized operand type.");
+ default:
+ llvm_unreachable("Unrecognized operand type.");
case StackMaps::DirectMemRefOp: {
unsigned Size = AP.TM.getDataLayout()->getPointerSizeInBits();
assert((Size % 8) == 0 && "Need pointer size in bytes.");
Size /= 8;
unsigned Reg = (++MOI)->getReg();
int64_t Imm = (++MOI)->getImm();
- Locs.push_back(Location(StackMaps::Location::Direct, Size,
- getDwarfRegNum(Reg, TRI), Imm));
+ Locs.emplace_back(StackMaps::Location::Direct, Size,
+ getDwarfRegNum(Reg, TRI), Imm);
break;
}
case StackMaps::IndirectMemRefOp: {
@@ -107,15 +108,15 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
assert(Size > 0 && "Need a valid size for indirect memory locations.");
unsigned Reg = (++MOI)->getReg();
int64_t Imm = (++MOI)->getImm();
- Locs.push_back(Location(StackMaps::Location::Indirect, Size,
- getDwarfRegNum(Reg, TRI), Imm));
+ Locs.emplace_back(StackMaps::Location::Indirect, Size,
+ getDwarfRegNum(Reg, TRI), Imm);
break;
}
case StackMaps::ConstantOp: {
++MOI;
assert(MOI->isImm() && "Expected constant operand.");
int64_t Imm = MOI->getImm();
- Locs.push_back(Location(Location::Constant, sizeof(int64_t), 0, Imm));
+ Locs.emplace_back(Location::Constant, sizeof(int64_t), 0, Imm);
break;
}
}
@@ -137,14 +138,13 @@ StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
assert(!MOI->getSubReg() && "Physical subreg still around.");
unsigned Offset = 0;
- unsigned RegNo = getDwarfRegNum(MOI->getReg(), TRI);
- unsigned LLVMRegNo = TRI->getLLVMRegNum(RegNo, false);
- unsigned SubRegIdx = TRI->getSubRegIndex(LLVMRegNo, MOI->getReg());
+ unsigned DwarfRegNum = getDwarfRegNum(MOI->getReg(), TRI);
+ unsigned LLVMRegNum = TRI->getLLVMRegNum(DwarfRegNum, false);
+ unsigned SubRegIdx = TRI->getSubRegIndex(LLVMRegNum, MOI->getReg());
if (SubRegIdx)
Offset = TRI->getSubRegIdxOffset(SubRegIdx);
- Locs.push_back(
- Location(Location::Register, RC->getSize(), RegNo, Offset));
+ Locs.emplace_back(Location::Register, RC->getSize(), DwarfRegNum, Offset);
return ++MOI;
}
@@ -165,19 +165,19 @@ void StackMaps::print(raw_ostream &OS) {
OS << WSMP << "callsite " << CSI.ID << "\n";
OS << WSMP << " has " << CSLocs.size() << " locations\n";
- unsigned OperIdx = 0;
+ unsigned Idx = 0;
for (const auto &Loc : CSLocs) {
- OS << WSMP << " Loc " << OperIdx << ": ";
- switch (Loc.LocType) {
+ OS << WSMP << "\t\tLoc " << Idx << ": ";
+ switch (Loc.Type) {
case Location::Unprocessed:
OS << "<Unprocessed operand>";
break;
case Location::Register:
OS << "Register ";
- if (TRI)
- OS << TRI->getName(Loc.Reg);
- else
- OS << Loc.Reg;
+ if (TRI)
+ OS << TRI->getName(Loc.Reg);
+ else
+ OS << Loc.Reg;
break;
case Location::Direct:
OS << "Direct ";
@@ -203,23 +203,23 @@ void StackMaps::print(raw_ostream &OS) {
OS << "Constant Index " << Loc.Offset;
break;
}
- OS << " [encoding: .byte " << Loc.LocType << ", .byte " << Loc.Size
+ OS << "\t[encoding: .byte " << Loc.Type << ", .byte " << Loc.Size
<< ", .short " << Loc.Reg << ", .int " << Loc.Offset << "]\n";
- OperIdx++;
+ Idx++;
}
- OS << WSMP << " has " << LiveOuts.size() << " live-out registers\n";
+ OS << WSMP << "\thas " << LiveOuts.size() << " live-out registers\n";
- OperIdx = 0;
+ Idx = 0;
for (const auto &LO : LiveOuts) {
- OS << WSMP << " LO " << OperIdx << ": ";
+ OS << WSMP << "\t\tLO " << Idx << ": ";
if (TRI)
OS << TRI->getName(LO.Reg);
else
OS << LO.Reg;
- OS << " [encoding: .short " << LO.RegNo << ", .byte 0, .byte "
+ OS << "\t[encoding: .short " << LO.DwarfRegNum << ", .byte 0, .byte "
<< LO.Size << "]\n";
- OperIdx++;
+ Idx++;
}
}
}
@@ -227,9 +227,9 @@ void StackMaps::print(raw_ostream &OS) {
/// Create a live-out register record for the given register Reg.
StackMaps::LiveOutReg
StackMaps::createLiveOutReg(unsigned Reg, const TargetRegisterInfo *TRI) const {
- unsigned RegNo = getDwarfRegNum(Reg, TRI);
+ unsigned DwarfRegNum = getDwarfRegNum(Reg, TRI);
unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
- return LiveOutReg(Reg, RegNo, Size);
+ return LiveOutReg(Reg, DwarfRegNum, Size);
}
/// Parse the register live-out mask and return a vector of live-out registers
@@ -248,11 +248,16 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
// We don't need to keep track of a register if its super-register is already
// in the list. Merge entries that refer to the same dwarf register and use
// the maximum size that needs to be spilled.
- std::sort(LiveOuts.begin(), LiveOuts.end());
- for (LiveOutVec::iterator I = LiveOuts.begin(), E = LiveOuts.end();
- I != E; ++I) {
- for (LiveOutVec::iterator II = std::next(I); II != E; ++II) {
- if (I->RegNo != II->RegNo) {
+
+ std::sort(LiveOuts.begin(), LiveOuts.end(),
+ [](const LiveOutReg &LHS, const LiveOutReg &RHS) {
+ // Only sort by the dwarf register number.
+ return LHS.DwarfRegNum < RHS.DwarfRegNum;
+ });
+
+ for (auto I = LiveOuts.begin(), E = LiveOuts.end(); I != E; ++I) {
+ for (auto II = std::next(I); II != E; ++II) {
+ if (I->DwarfRegNum != II->DwarfRegNum) {
// Skip all the now invalid entries.
I = --II;
break;
@@ -260,11 +265,15 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
I->Size = std::max(I->Size, II->Size);
if (TRI->isSuperRegister(I->Reg, II->Reg))
I->Reg = II->Reg;
- II->MarkInvalid();
+ II->Reg = 0; // mark for deletion.
}
}
- LiveOuts.erase(std::remove_if(LiveOuts.begin(), LiveOuts.end(),
- LiveOutReg::IsInvalid), LiveOuts.end());
+
+ LiveOuts.erase(
+ std::remove_if(LiveOuts.begin(), LiveOuts.end(),
+ [](const LiveOutReg &LO) { return LO.Reg == 0; }),
+ LiveOuts.end());
+
return LiveOuts;
}
@@ -282,8 +291,8 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
if (recordResult) {
assert(PatchPointOpers(&MI).hasDef() && "Stackmap has no return value.");
- parseOperand(MI.operands_begin(), std::next(MI.operands_begin()),
- Locations, LiveOuts);
+ parseOperand(MI.operands_begin(), std::next(MI.operands_begin()), Locations,
+ LiveOuts);
}
// Parse operands.
@@ -292,33 +301,31 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
}
// Move large constants into the constant pool.
- for (LocationVec::iterator I = Locations.begin(), E = Locations.end();
- I != E; ++I) {
+ for (auto &Loc : Locations) {
// Constants are encoded as sign-extended integers.
// -1 is directly encoded as .long 0xFFFFFFFF with no constant pool.
- if (I->LocType == Location::Constant && !isInt<32>(I->Offset)) {
- I->LocType = Location::ConstantIndex;
+ if (Loc.Type == Location::Constant && !isInt<32>(Loc.Offset)) {
+ Loc.Type = Location::ConstantIndex;
// ConstPool is intentionally a MapVector of 'uint64_t's (as
// opposed to 'int64_t's). We should never be in a situation
// where we have to insert either the tombstone or the empty
// keys into a map, and for a DenseMap<uint64_t, T> these are
// (uint64_t)0 and (uint64_t)-1. They can be and are
// represented using 32 bit integers.
-
- assert((uint64_t)I->Offset != DenseMapInfo<uint64_t>::getEmptyKey() &&
- (uint64_t)I->Offset != DenseMapInfo<uint64_t>::getTombstoneKey() &&
+ assert((uint64_t)Loc.Offset != DenseMapInfo<uint64_t>::getEmptyKey() &&
+ (uint64_t)Loc.Offset !=
+ DenseMapInfo<uint64_t>::getTombstoneKey() &&
"empty and tombstone keys should fit in 32 bits!");
- auto Result = ConstPool.insert(std::make_pair(I->Offset, I->Offset));
- I->Offset = Result.first - ConstPool.begin();
+ auto Result = ConstPool.insert(std::make_pair(Loc.Offset, Loc.Offset));
+ Loc.Offset = Result.first - ConstPool.begin();
}
}
// Create an expression to calculate the offset of the callsite from function
// entry.
const MCExpr *CSOffsetExpr = MCBinaryExpr::createSub(
- MCSymbolRefExpr::create(MILabel, OutContext),
- MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext),
- OutContext);
+ MCSymbolRefExpr::create(MILabel, OutContext),
+ MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
CSInfos.emplace_back(CSOffsetExpr, ID, std::move(Locations),
std::move(LiveOuts));
@@ -326,10 +333,10 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
// Record the stack size of the current function.
const MachineFrameInfo *MFI = AP.MF->getFrameInfo();
const TargetRegisterInfo *RegInfo = AP.MF->getSubtarget().getRegisterInfo();
- const bool DynamicFrameSize = MFI->hasVarSizedObjects() ||
- RegInfo->needsStackRealignment(*(AP.MF));
+ bool HasDynamicFrameSize =
+ MFI->hasVarSizedObjects() || RegInfo->needsStackRealignment(*(AP.MF));
FnStackSize[AP.CurrentFnSym] =
- DynamicFrameSize ? UINT64_MAX : MFI->getStackSize();
+ HasDynamicFrameSize ? UINT64_MAX : MFI->getStackSize();
}
void StackMaps::recordStackMap(const MachineInstr &MI) {
@@ -346,25 +353,23 @@ void StackMaps::recordPatchPoint(const MachineInstr &MI) {
PatchPointOpers opers(&MI);
int64_t ID = opers.getMetaOper(PatchPointOpers::IDPos).getImm();
- MachineInstr::const_mop_iterator MOI =
- std::next(MI.operands_begin(), opers.getStackMapStartIdx());
+ auto MOI = std::next(MI.operands_begin(), opers.getStackMapStartIdx());
recordStackMapOpers(MI, ID, MOI, MI.operands_end(),
opers.isAnyReg() && opers.hasDef());
#ifndef NDEBUG
// verify anyregcc
- LocationVec &Locations = CSInfos.back().Locations;
+ auto &Locations = CSInfos.back().Locations;
if (opers.isAnyReg()) {
unsigned NArgs = opers.getMetaOper(PatchPointOpers::NArgPos).getImm();
- for (unsigned i = 0, e = (opers.hasDef() ? NArgs+1 : NArgs); i != e; ++i)
- assert(Locations[i].LocType == Location::Register &&
+ for (unsigned i = 0, e = (opers.hasDef() ? NArgs + 1 : NArgs); i != e; ++i)
+ assert(Locations[i].Type == Location::Register &&
"anyreg arg must be in reg.");
}
#endif
}
void StackMaps::recordStatepoint(const MachineInstr &MI) {
- assert(MI.getOpcode() == TargetOpcode::STATEPOINT &&
- "expected statepoint");
+ assert(MI.getOpcode() == TargetOpcode::STATEPOINT && "expected statepoint");
StatepointOpers opers(&MI);
// Record all the deopt and gc operands (they're contiguous and run from the
@@ -387,8 +392,8 @@ void StackMaps::recordStatepoint(const MachineInstr &MI) {
void StackMaps::emitStackmapHeader(MCStreamer &OS) {
// Header.
OS.EmitIntValue(StackMapVersion, 1); // Version.
- OS.EmitIntValue(0, 1); // Reserved.
- OS.EmitIntValue(0, 2); // Reserved.
+ OS.EmitIntValue(0, 1); // Reserved.
+ OS.EmitIntValue(0, 2); // Reserved.
// Num functions.
DEBUG(dbgs() << WSMP << "#functions = " << FnStackSize.size() << '\n');
@@ -412,7 +417,7 @@ void StackMaps::emitFunctionFrameRecords(MCStreamer &OS) {
DEBUG(dbgs() << WSMP << "functions:\n");
for (auto const &FR : FnStackSize) {
DEBUG(dbgs() << WSMP << "function addr: " << FR.first
- << " frame size: " << FR.second);
+ << " frame size: " << FR.second);
OS.EmitSymbolValue(FR.first, 8);
OS.EmitIntValue(FR.second, 8);
}
@@ -424,7 +429,7 @@ void StackMaps::emitFunctionFrameRecords(MCStreamer &OS) {
void StackMaps::emitConstantPoolEntries(MCStreamer &OS) {
// Constant pool entries.
DEBUG(dbgs() << WSMP << "constants:\n");
- for (auto ConstEntry : ConstPool) {
+ for (const auto &ConstEntry : ConstPool) {
DEBUG(dbgs() << WSMP << ConstEntry.second << '\n');
OS.EmitIntValue(ConstEntry.second, 8);
}
@@ -489,7 +494,7 @@ void StackMaps::emitCallsiteEntries(MCStreamer &OS) {
OS.EmitIntValue(CSLocs.size(), 2);
for (const auto &Loc : CSLocs) {
- OS.EmitIntValue(Loc.LocType, 1);
+ OS.EmitIntValue(Loc.Type, 1);
OS.EmitIntValue(Loc.Size, 1);
OS.EmitIntValue(Loc.Reg, 2);
OS.EmitIntValue(Loc.Offset, 4);
@@ -500,7 +505,7 @@ void StackMaps::emitCallsiteEntries(MCStreamer &OS) {
OS.EmitIntValue(LiveOuts.size(), 2);
for (const auto &LO : LiveOuts) {
- OS.EmitIntValue(LO.RegNo, 2);
+ OS.EmitIntValue(LO.DwarfRegNum, 2);
OS.EmitIntValue(0, 1);
OS.EmitIntValue(LO.Size, 1);
}
@@ -511,7 +516,7 @@ void StackMaps::emitCallsiteEntries(MCStreamer &OS) {
/// Serialize the stackmap data.
void StackMaps::serializeToStackMapSection() {
- (void) WSMP;
+ (void)WSMP;
// Bail out if there's no stack map data.
assert((!CSInfos.empty() || (CSInfos.empty() && ConstPool.empty())) &&
"Expected empty constant pool too!");
diff --git a/contrib/llvm/lib/CodeGen/StackProtector.cpp b/contrib/llvm/lib/CodeGen/StackProtector.cpp
index 0824d6f..bcea37a 100644
--- a/contrib/llvm/lib/CodeGen/StackProtector.cpp
+++ b/contrib/llvm/lib/CodeGen/StackProtector.cpp
@@ -122,7 +122,7 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
// If an array has more than SSPBufferSize bytes of allocated space, then we
// emit stack protectors.
- if (SSPBufferSize <= TLI->getDataLayout()->getTypeAllocSize(AT)) {
+ if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
IsLarge = true;
return true;
}
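
The StackProtector predicate itself is unchanged; only the source of the DataLayout moved. Concretely, with the stock ssp-buffer-size of 8 the size test behaves as below (plain C++ stand-in for DL.getTypeAllocSize on byte arrays; note strong stack protection can still protect smaller arrays through other paths):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t SSPBufferSize = 8; // llvm's default ssp-buffer-size
      assert(SSPBufferSize <= sizeof(char[8]));    // char[8]: large, protected
      assert(!(SSPBufferSize <= sizeof(char[4]))); // char[4]: below threshold
      return 0;
    }
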
diff --git a/contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp b/contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
index 5638324..f3cccd8 100644
--- a/contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
@@ -11,9 +11,12 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/BitVector.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -54,3 +57,30 @@ bool TargetFrameLowering::needsFrameIndexResolution(
const MachineFunction &MF) const {
return MF.getFrameInfo()->hasStackObjects();
}
+
+void TargetFrameLowering::determineCalleeSaves(MachineFunction &MF,
+ BitVector &SavedRegs,
+ RegScavenger *RS) const {
+ // Get the callee saved register list...
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
+
+ // Early exit if there are no callee saved registers.
+ if (!CSRegs || CSRegs[0] == 0)
+ return;
+
+ SavedRegs.resize(TRI.getNumRegs());
+
+ // In Naked functions we aren't going to save any registers.
+ if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ return;
+
+ // Functions which call __builtin_unwind_init get all their registers saved.
+ bool CallsUnwindInit = MF.getMMI().callsUnwindInit();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ for (unsigned i = 0; CSRegs[i]; ++i) {
+ unsigned Reg = CSRegs[i];
+ if (CallsUnwindInit || MRI.isPhysRegModified(Reg))
+ SavedRegs.set(Reg);
+ }
+}
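
The new determineCalleeSaves hook gives targets a shared default: save every callee-saved register that was actually modified. A hypothetical target override might extend it as sketched below (MyTargetFrameLowering and the MyTarget::FP enum are placeholders, not names from this patch):

    void MyTargetFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                     BitVector &SavedRegs,
                                                     RegScavenger *RS) const {
      // Let the default implementation mark modified callee-saved regs.
      TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

      // Then pin down anything this target must always spill, e.g. the
      // frame pointer whenever a frame is required (register is invented).
      if (hasFP(MF))
        SavedRegs.set(MyTarget::FP);
    }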
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 78492a6..ecfd659 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -750,7 +750,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
initActions();
// Perform these initializations only once.
- IsLittleEndian = getDataLayout()->isLittleEndian();
MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
= MaxStoresPerMemmoveOptSize = 4;
@@ -879,28 +878,17 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}
-MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
- return MVT::getIntegerVT(getPointerSizeInBits(AS));
+MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
+ EVT) const {
+ return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}
-unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
- return getDataLayout()->getPointerSizeInBits(AS);
-}
-
-unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
- assert(Ty->isPointerTy());
- return getPointerSizeInBits(Ty->getPointerAddressSpace());
-}
-
-MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
- return MVT::getIntegerVT(8 * getDataLayout()->getPointerSize(0));
-}
-
-EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
+EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
+ const DataLayout &DL) const {
assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
if (LHSTy.isVector())
return LHSTy;
- return getScalarShiftAmountTy(LHSTy);
+ return getScalarShiftAmountTy(DL, LHSTy);
}
/// canOpTrap - Returns true if the operation can trap for the value type.
@@ -1398,9 +1386,10 @@ void TargetLoweringBase::computeRegisterProperties(
}
}
-EVT TargetLoweringBase::getSetCCResultType(LLVMContext &, EVT VT) const {
+EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
+ EVT VT) const {
assert(!VT.isVector() && "No default SetCC type for vectors!");
- return getPointerTy(0).SimpleTy;
+ return getPointerTy(DL).SimpleTy;
}
MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
@@ -1485,11 +1474,11 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
+void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
- const TargetLowering &TLI) {
+ const TargetLowering &TLI, const DataLayout &DL) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, ReturnType, ValueVTs);
+ ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) return;
@@ -1534,8 +1523,9 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
-unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
- return getDataLayout()->getABITypeAlignment(Ty);
+unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
+ const DataLayout &DL) const {
+ return DL.getABITypeAlignment(Ty);
}
//===----------------------------------------------------------------------===//
@@ -1614,9 +1604,10 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
}
std::pair<unsigned, MVT>
-TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
+TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
+ Type *Ty) const {
LLVMContext &C = Ty->getContext();
- EVT MTy = getValueType(Ty);
+ EVT MTy = getValueType(DL, Ty);
unsigned Cost = 1;
// We keep legalizing the type until we find a legal kind. We assume that
@@ -1642,8 +1633,8 @@ TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
-bool TargetLoweringBase::isLegalAddressingMode(const AddrMode &AM,
- Type *Ty,
+bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM, Type *Ty,
unsigned AS) const {
// The default implementation of this implements a conservative RISCy, r+r and
// r+i addr mode.
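
Taken together, these TargetLoweringBase hunks thread a DataLayout parameter through every type query rather than consulting a copy cached at construction time. A sketch of the resulting caller-side pattern, with F, Ty, and TLI assumed to be in scope:

    // DataLayout now flows in from the IR, typically via the Module.
    const llvm::DataLayout &DL = F.getParent()->getDataLayout();

    llvm::EVT VT = TLI.getValueType(DL, Ty);            // was getValueType(Ty)
    llvm::EVT ShVT = TLI.getShiftAmountTy(VT, DL);      // was getShiftAmountTy(VT)
    unsigned Align = TLI.getByValTypeAlignment(Ty, DL); // was getByValTypeAlignment(Ty)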
diff --git a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index e84bea6..1e30821 100644
--- a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1215,11 +1215,11 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
// addl %esi, %edi
// movl %edi, %eax
// ret
- bool commuted = false;
+ bool Commuted = false;
// If it's profitable to commute, try to do so.
if (TryCommute && commuteInstruction(mi, regB, regC, Dist)) {
- commuted = true;
+ Commuted = true;
++NumCommuted;
if (AggressiveCommute)
++NumAggrCommuted;
@@ -1232,7 +1232,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
// If there is one more use of regB later in the same MBB, consider
// re-scheduling this MI below it.
- if (!commuted && EnableRescheduling && rescheduleMIBelowKill(mi, nmi, regB)) {
+ if (!Commuted && EnableRescheduling && rescheduleMIBelowKill(mi, nmi, regB)) {
++NumReSchedDowns;
return true;
}
@@ -1250,7 +1250,7 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
}
// Return if it was commuted but the 3-addr conversion failed.
- if (commuted)
+ if (Commuted)
return false;
// If there is one more use of regB later in the same MBB, consider
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
index 2912bdd..02341b4 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -163,7 +163,6 @@ class VirtRegRewriter : public MachineFunctionPass {
SlotIndexes *Indexes;
LiveIntervals *LIS;
VirtRegMap *VRM;
- SparseSet<unsigned> PhysRegs;
void rewrite();
void addMBBLiveIns();
@@ -319,54 +318,15 @@ void VirtRegRewriter::rewrite() {
SmallVector<unsigned, 8> SuperDeads;
SmallVector<unsigned, 8> SuperDefs;
SmallVector<unsigned, 8> SuperKills;
- SmallPtrSet<const MachineInstr *, 4> NoReturnInsts;
-
- // Here we have a SparseSet to hold which PhysRegs are actually encountered
- // in the MF we are about to iterate over so that later when we call
- // setPhysRegUsed, we are only doing it for physRegs that were actually found
- // in the program and not for all of the possible physRegs for the given
- // target architecture. If the target has a lot of physRegs, then for a small
- // program there will be a significant compile time reduction here.
- PhysRegs.clear();
- PhysRegs.setUniverse(TRI->getNumRegs());
-
- // The function with uwtable should guarantee that the stack unwinder
- // can unwind the stack to the previous frame. Thus, we can't apply the
- // noreturn optimization if the caller function has uwtable attribute.
- bool HasUWTable = MF->getFunction()->hasFnAttribute(Attribute::UWTable);
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
DEBUG(MBBI->print(dbgs(), Indexes));
- bool IsExitBB = MBBI->succ_empty();
for (MachineBasicBlock::instr_iterator
MII = MBBI->instr_begin(), MIE = MBBI->instr_end(); MII != MIE;) {
MachineInstr *MI = MII;
++MII;
- // Check if this instruction is a call to a noreturn function. If this
- // is a call to noreturn function and we don't need the stack unwinding
- // functionality (i.e. this function does not have uwtable attribute and
- // the callee function has the nounwind attribute), then we can ignore
- // the definitions set by this instruction.
- if (!HasUWTable && IsExitBB && MI->isCall()) {
- for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end(); MOI != MOE; ++MOI) {
- MachineOperand &MO = *MOI;
- if (!MO.isGlobal())
- continue;
- const Function *Func = dyn_cast<Function>(MO.getGlobal());
- if (!Func || !Func->hasFnAttribute(Attribute::NoReturn) ||
- // We need to keep correct unwind information
- // even if the function will not return, since the
- // runtime may need it.
- !Func->hasFnAttribute(Attribute::NoUnwind))
- continue;
- NoReturnInsts.insert(MI);
- break;
- }
- }
-
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
@@ -375,15 +335,6 @@ void VirtRegRewriter::rewrite() {
if (MO.isRegMask())
MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
- // If we encounter a VirtReg or PhysReg then get at the PhysReg and add
- // it to the physreg bitset. Later we use only the PhysRegs that were
- // actually encountered in the MF to populate the MRI's used physregs.
- if (MO.isReg() && MO.getReg())
- PhysRegs.insert(
- TargetRegisterInfo::isVirtualRegister(MO.getReg()) ?
- VRM->getPhys(MO.getReg()) :
- MO.getReg());
-
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
unsigned VirtReg = MO.getReg();
@@ -470,29 +421,5 @@ void VirtRegRewriter::rewrite() {
}
}
}
-
- // Tell MRI about physical registers in use.
- if (NoReturnInsts.empty()) {
- for (SparseSet<unsigned>::iterator
- RegI = PhysRegs.begin(), E = PhysRegs.end(); RegI != E; ++RegI)
- if (!MRI->reg_nodbg_empty(*RegI))
- MRI->setPhysRegUsed(*RegI);
- } else {
- for (SparseSet<unsigned>::iterator
- I = PhysRegs.begin(), E = PhysRegs.end(); I != E; ++I) {
- unsigned Reg = *I;
- if (MRI->reg_nodbg_empty(Reg))
- continue;
- // Check if this register has a use that will impact the rest of the
- // code. Uses in debug and noreturn instructions do not impact the
- // generated code.
- for (MachineInstr &It : MRI->reg_nodbg_instructions(Reg)) {
- if (!NoReturnInsts.count(&It)) {
- MRI->setPhysRegUsed(Reg);
- break;
- }
- }
- }
- }
}
diff --git a/contrib/llvm/lib/CodeGen/WinEHPrepare.cpp b/contrib/llvm/lib/CodeGen/WinEHPrepare.cpp
index dbc0d91..0d26ed3 100644
--- a/contrib/llvm/lib/CodeGen/WinEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/WinEHPrepare.cpp
@@ -155,7 +155,7 @@ private:
// outlined but before the outlined code is pruned from the parent function.
DenseMap<const BasicBlock *, BasicBlock *> LPadTargetBlocks;
- // Map from outlined handler to call to llvm.frameaddress(1). Only used for
+ // Map from outlined handler to call to parent local address. Only used for
// 32-bit EH.
DenseMap<Function *, Value *> HandlerToParentFP;
@@ -533,9 +533,9 @@ void WinEHPrepare::findSEHEHReturnPoints(
BasicBlock *NextBB;
Constant *Selector;
if (isSelectorDispatch(BB, CatchHandler, Selector, NextBB)) {
- // Split the edge if there is a phi node. Returning from EH to a phi node
- // is just as impossible as having a phi after an indirectbr.
- if (isa<PHINode>(CatchHandler->begin())) {
+ // Split the edge if there are multiple predecessors. This creates a place
+ // where we can insert EH recovery code.
+ if (!CatchHandler->getSinglePredecessor()) {
DEBUG(dbgs() << "splitting EH return edge from " << BB->getName()
<< " to " << CatchHandler->getName() << '\n');
BBI = CatchHandler = SplitCriticalEdge(
@@ -616,6 +616,26 @@ void WinEHPrepare::demoteValuesLiveAcrossHandlers(
// identifyEHBlocks() should have been called before this function.
assert(!NormalBlocks.empty());
+ // Try to avoid demoting EH pointer and selector values. They get in the way
+ // of our pattern matching.
+ SmallPtrSet<Instruction *, 10> EHVals;
+ for (BasicBlock &BB : F) {
+ LandingPadInst *LP = BB.getLandingPadInst();
+ if (!LP)
+ continue;
+ EHVals.insert(LP);
+ for (User *U : LP->users()) {
+ auto *EI = dyn_cast<ExtractValueInst>(U);
+ if (!EI)
+ continue;
+ EHVals.insert(EI);
+ for (User *U2 : EI->users()) {
+ if (auto *PN = dyn_cast<PHINode>(U2))
+ EHVals.insert(PN);
+ }
+ }
+ }
+
SetVector<Argument *> ArgsToDemote;
SetVector<Instruction *> InstrsToDemote;
for (BasicBlock &BB : F) {
@@ -641,7 +661,11 @@ void WinEHPrepare::demoteValuesLiveAcrossHandlers(
continue;
}
+ // Don't demote EH values.
auto *OpI = cast<Instruction>(Op);
+ if (EHVals.count(OpI))
+ continue;
+
BasicBlock *OpBB = OpI->getParent();
// If a value is produced and consumed in the same BB, we don't need to
// demote it.
@@ -822,7 +846,8 @@ bool WinEHPrepare::prepareExceptionHandlers(
LPad->replaceAllUsesWith(UndefValue::get(LPad->getType()));
// Rewrite uses of the exception pointer to loads of an alloca.
- for (Instruction *E : SEHCodeUses) {
+ while (!SEHCodeUses.empty()) {
+ Instruction *E = SEHCodeUses.pop_back_val();
SmallVector<Use *, 4> Uses;
for (Use &U : E->uses())
Uses.push_back(&U);
@@ -830,13 +855,10 @@ bool WinEHPrepare::prepareExceptionHandlers(
auto *I = cast<Instruction>(U->getUser());
if (isa<ResumeInst>(I))
continue;
- LoadInst *LI;
if (auto *Phi = dyn_cast<PHINode>(I))
- LI = new LoadInst(SEHExceptionCodeSlot, "sehcode", false,
- Phi->getIncomingBlock(*U));
+ SEHCodeUses.push_back(Phi);
else
- LI = new LoadInst(SEHExceptionCodeSlot, "sehcode", false, I);
- U->set(LI);
+ U->set(new LoadInst(SEHExceptionCodeSlot, "sehcode", false, I));
}
E->replaceAllUsesWith(UndefValue::get(E->getType()));
E->eraseFromParent();
@@ -953,16 +975,16 @@ bool WinEHPrepare::prepareExceptionHandlers(
Builder.SetInsertPoint(Entry->getFirstInsertionPt());
Function *FrameEscapeFn =
- Intrinsic::getDeclaration(M, Intrinsic::frameescape);
+ Intrinsic::getDeclaration(M, Intrinsic::localescape);
Function *RecoverFrameFn =
- Intrinsic::getDeclaration(M, Intrinsic::framerecover);
+ Intrinsic::getDeclaration(M, Intrinsic::localrecover);
SmallVector<Value *, 8> AllocasToEscape;
- // Scan the entry block for an existing call to llvm.frameescape. We need to
+ // Scan the entry block for an existing call to llvm.localescape. We need to
// keep escaping those objects.
for (Instruction &I : F.front()) {
auto *II = dyn_cast<IntrinsicInst>(&I);
- if (II && II->getIntrinsicID() == Intrinsic::frameescape) {
+ if (II && II->getIntrinsicID() == Intrinsic::localescape) {
auto Args = II->arg_operands();
AllocasToEscape.append(Args.begin(), Args.end());
II->eraseFromParent();
@@ -971,7 +993,7 @@ bool WinEHPrepare::prepareExceptionHandlers(
}
// Finally, replace all of the temporary allocas for frame variables used in
- // the outlined handlers with calls to llvm.framerecover.
+ // the outlined handlers with calls to llvm.localrecover.
for (auto &VarInfoEntry : FrameVarInfo) {
Value *ParentVal = VarInfoEntry.first;
TinyPtrVector<AllocaInst *> &Allocas = VarInfoEntry.second;
@@ -992,7 +1014,7 @@ bool WinEHPrepare::prepareExceptionHandlers(
llvm::Value *FP = HandlerToParentFP[HandlerFn];
assert(FP);
- // FIXME: Sink this framerecover into the blocks where it is used.
+ // FIXME: Sink this localrecover into the blocks where it is used.
Builder.SetInsertPoint(TempAlloca);
Builder.SetCurrentDebugLocation(TempAlloca->getDebugLoc());
Value *RecoverArgs[] = {
@@ -1014,7 +1036,7 @@ bool WinEHPrepare::prepareExceptionHandlers(
}
} // End for each FrameVarInfo entry.
- // Insert 'call void (...)* @llvm.frameescape(...)' at the end of the entry
+ // Insert 'call void (...)* @llvm.localescape(...)' at the end of the entry
// block.
Builder.SetInsertPoint(&F.getEntryBlock().back());
Builder.CreateCall(FrameEscapeFn, AllocasToEscape);
@@ -1595,9 +1617,8 @@ void LandingPadMap::remapEHValues(ValueToValueMapTy &VMap, Value *EHPtrValue,
VMap[Extract] = SelectorValue;
}
-static bool isFrameAddressCall(const Value *V) {
- return match(const_cast<Value *>(V),
- m_Intrinsic<Intrinsic::frameaddress>(m_SpecificInt(0)));
+static bool isLocalAddressCall(const Value *V) {
+ return match(const_cast<Value *>(V), m_Intrinsic<Intrinsic::localaddress>());
}
CloningDirector::CloningAction WinEHCloningDirectorBase::handleInstruction(
@@ -1639,9 +1660,9 @@ CloningDirector::CloningAction WinEHCloningDirectorBase::handleInstruction(
if (match(Inst, m_Intrinsic<Intrinsic::eh_typeid_for>()))
return handleTypeIdFor(VMap, Inst, NewBB);
- // When outlining llvm.frameaddress(i32 0), remap that to the second argument,
+ // When outlining llvm.localaddress(), remap that to the second argument,
// which is the FP of the parent.
- if (isFrameAddressCall(Inst)) {
+ if (isLocalAddressCall(Inst)) {
VMap[Inst] = ParentFP;
return CloningDirector::SkipInstruction;
}
@@ -1961,7 +1982,7 @@ Value *WinEHFrameVariableMaterializer::materializeValueFor(Value *V) {
// If we're asked to materialize a static alloca, we temporarily create an
// alloca in the outlined function and add this to the FrameVarInfo map. When
// all the outlining is complete, we'll replace these temporary allocas with
- // calls to llvm.framerecover.
+ // calls to llvm.localrecover.
if (auto *AV = dyn_cast<AllocaInst>(V)) {
assert(AV->isStaticAlloca() &&
"cannot materialize un-demoted dynamic alloca");
@@ -1991,7 +2012,7 @@ void WinEHFrameVariableMaterializer::escapeCatchObject(Value *V) {
// of a catch parameter, add a sentinel to the multimap to indicate that it's
// used from another handler. This will prevent us from trying to sink the
// alloca into the handler and ensure that the catch parameter is present in
- // the call to llvm.frameescape.
+ // the call to llvm.localescape.
FrameVarInfo[V].push_back(getCatchObjectSentinel());
}
@@ -2233,16 +2254,16 @@ static void createCleanupHandler(LandingPadActions &Actions,
static CallSite matchOutlinedFinallyCall(BasicBlock *BB,
Instruction *MaybeCall) {
// Look for finally blocks that Clang has already outlined for us.
- // %fp = call i8* @llvm.frameaddress(i32 0)
+ // %fp = call i8* @llvm.localaddress()
// call void @"fin$parent"(iN 1, i8* %fp)
- if (isFrameAddressCall(MaybeCall) && MaybeCall != BB->getTerminator())
+ if (isLocalAddressCall(MaybeCall) && MaybeCall != BB->getTerminator())
MaybeCall = MaybeCall->getNextNode();
CallSite FinallyCall(MaybeCall);
if (!FinallyCall || FinallyCall.arg_size() != 2)
return CallSite();
if (!match(FinallyCall.getArgument(0), m_SpecificInt(1)))
return CallSite();
- if (!isFrameAddressCall(FinallyCall.getArgument(1)))
+ if (!isLocalAddressCall(FinallyCall.getArgument(1)))
return CallSite();
return FinallyCall;
}
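
The hunks above complete a rename of the frameescape/framerecover/frameaddress(0) trio to localescape/localrecover/localaddress. As a hedged sketch of how the pair cooperates (Parent, Handler, Slot0, and ParentFP are placeholders, not names from this patch):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    llvm::Module *M = Parent->getParent();

    // In the parent function: publish frame allocas by index.
    llvm::IRBuilder<> PB(Parent->getEntryBlock().getTerminator());
    llvm::Function *EscapeFn =
        llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::localescape);
    PB.CreateCall(EscapeFn, {Slot0});  // Slot0 becomes escaped index 0

    // In the outlined handler: rebuild a pointer to that alloca from the
    // parent's frame pointer, itself obtained via llvm.localaddress().
    llvm::IRBuilder<> HB(&Handler->getEntryBlock().front());
    llvm::Function *RecoverFn =
        llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::localrecover);
    llvm::Value *Recovered = HB.CreateCall(
        RecoverFn, {HB.CreateBitCast(Parent, HB.getInt8PtrTy()), ParentFP,
                    HB.getInt32(0)});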