path: root/include/llvm/Target
author     dim <dim@FreeBSD.org>  2010-09-17 15:48:55 +0000
committer  dim <dim@FreeBSD.org>  2010-09-17 15:48:55 +0000
commit     5d5cc59cc77afe655b3707cb0e69e0827b444cad (patch)
tree       36453626c792cccd91f783a38a169d610a6b9db9 /include/llvm/Target
parent     786a18553586229ad99ecb5ecde8a9d914c45e27 (diff)
Vendor import of llvm r114020 (from the release_28 branch):
http://llvm.org/svn/llvm-project/llvm/branches/release_28@114020

Approved by: rpaulo (mentor)
Diffstat (limited to 'include/llvm/Target')
-rw-r--r--  include/llvm/Target/Target.td             |   7
-rw-r--r--  include/llvm/Target/TargetAsmParser.h     |  14
-rw-r--r--  include/llvm/Target/TargetCallingConv.td  |   9
-rw-r--r--  include/llvm/Target/TargetData.h          |  42
-rw-r--r--  include/llvm/Target/TargetInstrDesc.h     |  14
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h     |  53
-rw-r--r--  include/llvm/Target/TargetLowering.h      | 140
-rw-r--r--  include/llvm/Target/TargetMachine.h       |  38
-rw-r--r--  include/llvm/Target/TargetOpcodes.h       |   2
-rw-r--r--  include/llvm/Target/TargetOptions.h       |  23
-rw-r--r--  include/llvm/Target/TargetRegisterInfo.h  |  67
-rw-r--r--  include/llvm/Target/TargetRegistry.h      |  17
-rw-r--r--  include/llvm/Target/TargetSelect.h        |  25
13 files changed, 313 insertions, 138 deletions
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index 9a89dc9..b141a77 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -198,6 +198,7 @@ class Instruction {
bit isReturn = 0; // Is this instruction a return instruction?
bit isBranch = 0; // Is this instruction a branch instruction?
bit isIndirectBranch = 0; // Is this instruction an indirect branch?
+ bit isCompare = 0; // Is this instruction a comparison instruction?
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
@@ -409,7 +410,7 @@ def INLINEASM : Instruction {
let InOperandList = (ins variable_ops);
let AsmString = "";
}
-def DBG_LABEL : Instruction {
+def PROLOG_LABEL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins i32imm:$id);
let AsmString = "";
@@ -510,10 +511,6 @@ class AsmParser {
// perform target specific instruction post-processing.
string AsmParserInstCleanup = "";
- // MatchInstructionName - The name of the instruction matching function to
- // generate.
- string MatchInstructionName = "MatchInstruction";
-
// Variant - AsmParsers can be of multiple different variants. Variants are
// used to support targets that need to parse multiple formats for the
// assembly language.
diff --git a/include/llvm/Target/TargetAsmParser.h b/include/llvm/Target/TargetAsmParser.h
index f431c38..5830d1f 100644
--- a/include/llvm/Target/TargetAsmParser.h
+++ b/include/llvm/Target/TargetAsmParser.h
@@ -28,14 +28,20 @@ class TargetAsmParser : public MCAsmParserExtension {
protected: // Can only create subclasses.
TargetAsmParser(const Target &);
- /// TheTarget - The Target that this machine was created for.
+ /// The Target that this machine was created for.
const Target &TheTarget;
+ /// The current set of available features.
+ unsigned AvailableFeatures;
+
public:
virtual ~TargetAsmParser();
const Target &getTarget() const { return TheTarget; }
+ unsigned getAvailableFeatures() const { return AvailableFeatures; }
+ void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+
/// ParseInstruction - Parse one assembly instruction.
///
/// The parser is positioned following the instruction name. The target
@@ -67,8 +73,12 @@ public:
/// MatchInstruction - Recognize a series of operands of a parsed instruction
/// as an actual MCInst. This returns false and fills in Inst on success and
/// returns true on failure to match.
+ ///
+ /// On failure, the target parser is responsible for emitting a diagnostic
+ /// explaining the match failure.
virtual bool
- MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MatchInstruction(SMLoc IDLoc,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCInst &Inst) = 0;
};
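
The hunk above changes MatchInstruction() to take the instruction's SMLoc, makes the target parser responsible for emitting match-failure diagnostics, and adds an AvailableFeatures word for subtarget-feature checks. A minimal sketch of a target parser written against the new interface; FooAsmParser, MatchFooInstruction and the diagnostic text are invented for illustration:

    class FooAsmParser : public TargetAsmParser {
      MCAsmParser &Parser;                    // used for diagnostics
    public:
      FooAsmParser(const Target &T, MCAsmParser &P)
        : TargetAsmParser(T), Parser(P) {}

      virtual bool MatchInstruction(SMLoc IDLoc,
                                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                    MCInst &Inst) {
        // A tablegen'd matcher can consult getAvailableFeatures() here to
        // reject instructions the current subtarget does not provide.
        if (MatchFooInstruction(Operands, Inst, getAvailableFeatures()))
          return false;                       // success: Inst was filled in
        // On failure the target parser now emits the diagnostic itself.
        return Parser.Error(IDLoc, "unrecognized Foo instruction");
      }
      // ParseInstruction()/ParseDirective() overrides omitted for brevity.
    };
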
diff --git a/include/llvm/Target/TargetCallingConv.td b/include/llvm/Target/TargetCallingConv.td
index ceaeb0b..6da3ba1 100644
--- a/include/llvm/Target/TargetCallingConv.td
+++ b/include/llvm/Target/TargetCallingConv.td
@@ -42,7 +42,7 @@ class CCIf<string predicate, CCAction A> : CCPredicateAction<A> {
class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
}
-/// CCIfCC - Match of the current calling convention is 'CC'.
+/// CCIfCC - Match if the current calling convention is 'CC'.
class CCIfCC<string CC, CCAction A>
: CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}
@@ -89,6 +89,13 @@ class CCAssignToStack<int size, int align> : CCAction {
int Align = align;
}
+/// CCAssignToStackWithShadow - Same as CCAssignToStack, but with a register
+/// to be shadowed.
+class CCAssignToStackWithShadow<int size, int align, Register reg> :
+ CCAssignToStack<size, align> {
+ Register ShadowReg = reg;
+}
+
/// CCPassByVal - This action always matches: it assigns the value to a stack
/// slot to implement ByVal aggregate parameter passing. Size and alignment
/// specify the minimum size and alignment for the stack slot.
diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h
index cc88dae..b89cbe0 100644
--- a/include/llvm/Target/TargetData.h
+++ b/include/llvm/Target/TargetData.h
@@ -50,13 +50,13 @@ enum AlignTypeEnum {
/// padding and make the structure slightly more cache friendly.
struct TargetAlignElem {
AlignTypeEnum AlignType : 8; //< Alignment type (AlignTypeEnum)
- unsigned char ABIAlign; //< ABI alignment for this type/bitw
- unsigned char PrefAlign; //< Pref. alignment for this type/bitw
+ unsigned ABIAlign; //< ABI alignment for this type/bitw
+ unsigned PrefAlign; //< Pref. alignment for this type/bitw
uint32_t TypeBitWidth; //< Type bit width
/// Initializer
- static TargetAlignElem get(AlignTypeEnum align_type, unsigned char abi_align,
- unsigned char pref_align, uint32_t bit_width);
+ static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
/// Equality predicate
bool operator==(const TargetAlignElem &rhs) const;
};
@@ -64,9 +64,9 @@ struct TargetAlignElem {
class TargetData : public ImmutablePass {
private:
bool LittleEndian; ///< Defaults to false
- unsigned char PointerMemSize; ///< Pointer size in bytes
- unsigned char PointerABIAlign; ///< Pointer ABI alignment
- unsigned char PointerPrefAlign; ///< Pointer preferred alignment
+ unsigned PointerMemSize; ///< Pointer size in bytes
+ unsigned PointerABIAlign; ///< Pointer ABI alignment
+ unsigned PointerPrefAlign; ///< Pointer preferred alignment
SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
@@ -86,12 +86,12 @@ private:
mutable void *LayoutMap;
//! Set/initialize target alignments
- void setAlignment(AlignTypeEnum align_type, unsigned char abi_align,
- unsigned char pref_align, uint32_t bit_width);
+ void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
bool ABIAlign, const Type *Ty) const;
//! Internal helper method that returns requested alignment for type.
- unsigned char getAlignment(const Type *Ty, bool abi_or_pref) const;
+ unsigned getAlignment(const Type *Ty, bool abi_or_pref) const;
/// Valid alignment predicate.
///
@@ -110,7 +110,7 @@ public:
/// Constructs a TargetData from a specification string. See init().
explicit TargetData(StringRef TargetDescription)
- : ImmutablePass(&ID) {
+ : ImmutablePass(ID) {
init(TargetDescription);
}
@@ -118,7 +118,7 @@ public:
explicit TargetData(const Module *M);
TargetData(const TargetData &TD) :
- ImmutablePass(&ID),
+ ImmutablePass(ID),
LittleEndian(TD.isLittleEndian()),
PointerMemSize(TD.PointerMemSize),
PointerABIAlign(TD.PointerABIAlign),
@@ -161,13 +161,13 @@ public:
}
/// Target pointer alignment
- unsigned char getPointerABIAlignment() const { return PointerABIAlign; }
+ unsigned getPointerABIAlignment() const { return PointerABIAlign; }
/// Return target's alignment for stack-based pointers
- unsigned char getPointerPrefAlignment() const { return PointerPrefAlign; }
+ unsigned getPointerPrefAlignment() const { return PointerPrefAlign; }
/// Target pointer size
- unsigned char getPointerSize() const { return PointerMemSize; }
+ unsigned getPointerSize() const { return PointerMemSize; }
/// Target pointer size, in bits
- unsigned char getPointerSizeInBits() const { return 8*PointerMemSize; }
+ unsigned getPointerSizeInBits() const { return 8*PointerMemSize; }
/// Size examples:
///
@@ -223,26 +223,26 @@ public:
/// getABITypeAlignment - Return the minimum ABI-required alignment for the
/// specified type.
- unsigned char getABITypeAlignment(const Type *Ty) const;
+ unsigned getABITypeAlignment(const Type *Ty) const;
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
- unsigned char getABIIntegerTypeAlignment(unsigned BitWidth) const;
+ unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
/// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
/// for the specified type when it is part of a call frame.
- unsigned char getCallFrameTypeAlignment(const Type *Ty) const;
+ unsigned getCallFrameTypeAlignment(const Type *Ty) const;
/// getPrefTypeAlignment - Return the preferred stack/global alignment for
/// the specified type. This is always at least as good as the ABI alignment.
- unsigned char getPrefTypeAlignment(const Type *Ty) const;
+ unsigned getPrefTypeAlignment(const Type *Ty) const;
/// getPreferredTypeAlignmentShift - Return the preferred alignment for the
/// specified type, returned as log2 of the value (a shift amount).
///
- unsigned char getPreferredTypeAlignmentShift(const Type *Ty) const;
+ unsigned getPreferredTypeAlignmentShift(const Type *Ty) const;
/// getIntPtrType - Return an unsigned integer type that is the same size or
/// greater to the host pointer size.
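
The TargetData changes above are mostly a type widening: alignment and pointer-size queries that used to return unsigned char now return unsigned, and the constructors use the newer ImmutablePass(ID) registration style. Existing callers keep working; a small illustrative query (not taken from the diff):

    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    static void describeLayout(const TargetData &TD, const Type *Ty) {
      unsigned ABIAlign  = TD.getABITypeAlignment(Ty);    // was unsigned char
      unsigned PrefAlign = TD.getPrefTypeAlignment(Ty);   // was unsigned char
      unsigned PtrBits   = TD.getPointerSizeInBits();     // was unsigned char
      (void)ABIAlign; (void)PrefAlign; (void)PtrBits;     // silence -Wunused
    }
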
diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h
index 8f0a6cb..a127aed 100644
--- a/include/llvm/Target/TargetInstrDesc.h
+++ b/include/llvm/Target/TargetInstrDesc.h
@@ -105,6 +105,7 @@ namespace TID {
IndirectBranch,
Predicable,
NotDuplicable,
+ Compare,
DelaySlot,
FoldableAsLoad,
MayLoad,
@@ -151,6 +152,12 @@ public:
return -1;
}
+ /// getRegClass - Returns the register class constraint for OpNum, or NULL.
+ const TargetRegisterClass *getRegClass(unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ return OpNum < NumOperands ? OpInfo[OpNum].getRegClass(TRI) : 0;
+ }
+
/// getOpcode - Return the opcode number for this descriptor.
unsigned getOpcode() const {
return Opcode;
@@ -315,7 +322,7 @@ public:
bool isIndirectBranch() const {
return Flags & (1 << TID::IndirectBranch);
}
-
+
/// isConditionalBranch - Return true if this is a branch which may fall
/// through to the next instruction or may transfer control flow to some other
/// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
@@ -340,6 +347,11 @@ public:
return Flags & (1 << TID::Predicable);
}
+ /// isCompare - Return true if this instruction is a comparison.
+ bool isCompare() const {
+ return Flags & (1 << TID::Compare);
+ }
+
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has unique labels attached
/// to it, duplicating it would cause multiple definition errors.
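
Two additions to TargetInstrDesc surface here: a per-operand getRegClass() helper and the new TID::Compare flag queried through isCompare(). An illustrative use from a machine-level pass (the predicate itself is invented):

    static bool isCompareOnClass(const MachineInstr *MI,
                                 const TargetRegisterInfo *TRI,
                                 const TargetRegisterClass *RC) {
      const TargetInstrDesc &TID = MI->getDesc();
      if (!TID.isCompare())                    // new TID::Compare flag
        return false;
      // New helper: register-class constraint of operand 0, or NULL if none.
      return TID.getRegClass(0, TRI) == RC;
    }
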
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index e42be26..520c41b 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -92,15 +92,6 @@ private:
AliasAnalysis *AA) const;
public:
- /// isMoveInstr - Return true if the instruction is a register to register
- /// move and return the source and dest operands and their sub-register
- /// indices by reference.
- virtual bool isMoveInstr(const MachineInstr& MI,
- unsigned& SrcReg, unsigned& DstReg,
- unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
- return false;
- }
-
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
/// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
@@ -113,22 +104,6 @@ public:
return false;
}
- /// isIdentityCopy - Return true if the instruction is a copy (or
- /// extract_subreg, insert_subreg, subreg_to_reg) where the source and
- /// destination registers are the same.
- bool isIdentityCopy(const MachineInstr &MI) const {
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
- SrcReg == DstReg)
- return true;
-
- if ((MI.getOpcode() == TargetOpcode::INSERT_SUBREG ||
- MI.getOpcode() == TargetOpcode::SUBREG_TO_REG) &&
- MI.getOperand(0).getReg() == MI.getOperand(2).getReg())
- return true;
- return false;
- }
-
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
@@ -591,18 +566,6 @@ public:
const MachineBasicBlock *MBB,
const MachineFunction &MF) const = 0;
- /// GetInstSize - Returns the size of the specified Instruction.
- ///
- virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const {
- assert(0 && "Target didn't implement TargetInstrInfo::GetInstSize!");
- return 0;
- }
-
- /// GetFunctionSizeInBytes - Returns the size of the specified
- /// MachineFunction.
- ///
- virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const = 0;
-
/// Measure the specified inline asm to determine an approximation of its
/// length.
virtual unsigned getInlineAsmLength(const char *Str,
@@ -613,6 +576,21 @@ public:
/// register allocation.
virtual ScheduleHazardRecognizer*
CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
+
+ /// AnalyzeCompare - For a comparison instruction, return the source register
+ /// in SrcReg and the value it compares against in CmpValue. Return true if
+ /// the comparison instruction can be analyzed.
+ virtual bool AnalyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, int &CmpValue) const {
+ return false;
+ }
+
+ /// ConvertToSetZeroFlag - Convert the instruction to set the zero flag so
+ /// that we can remove a "comparison with zero".
+ virtual bool ConvertToSetZeroFlag(MachineInstr *Instr,
+ MachineInstr *CmpInstr) const {
+ return false;
+ }
};
/// TargetInstrInfoImpl - This is the default implementation of
@@ -646,7 +624,6 @@ public:
virtual bool isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
- virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const;
virtual ScheduleHazardRecognizer *
CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
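
TargetInstrInfo loses isMoveInstr()/isIdentityCopy() and the instruction/function size queries, and gains two hooks that let a peephole pass eliminate redundant comparisons against zero. A hypothetical backend override of the analysis hook; FooInstrInfo, Foo::CMPri and its operand layout are invented for illustration:

    bool FooInstrInfo::AnalyzeCompare(const MachineInstr *MI,
                                      unsigned &SrcReg, int &CmpValue) const {
      switch (MI->getOpcode()) {
      default:
        return false;
      case Foo::CMPri:                  // compare a register with an immediate
        SrcReg   = MI->getOperand(0).getReg();
        CmpValue = MI->getOperand(1).getImm();
        return true;
      }
    }
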
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 2b6e4fa..29de994 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -168,6 +168,32 @@ public:
return RC;
}
+ /// getRepRegClassFor - Return the 'representative' register class for the
+ /// specified value type. The 'representative' register class is the largest
+ /// legal super-reg register class for the register class of the value type.
+ /// For example, on i386 the rep register class for i8, i16, and i32 is GR32;
+ /// while the rep register class is GR64 on x86_64.
+ virtual const TargetRegisterClass *getRepRegClassFor(EVT VT) const {
+ assert(VT.isSimple() && "getRepRegClassFor called on illegal type!");
+ const TargetRegisterClass *RC = RepRegClassForVT[VT.getSimpleVT().SimpleTy];
+ return RC;
+ }
+
+ /// getRepRegClassCostFor - Return the cost of the 'representative' register
+ /// class for the specified value type.
+ virtual uint8_t getRepRegClassCostFor(EVT VT) const {
+ assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!");
+ return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy];
+ }
+
+ /// getRegPressureLimit - Return the register pressure "high water mark" for
+ /// the specific register class. The scheduler is in high register pressure
+ /// mode (for the specific register class) if it goes over the limit.
+ virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const {
+ return 0;
+ }
+
/// isTypeLegal - Return true if the target has native support for the
/// specified value type. This means that it has a register that directly
/// holds it without promotions or expansions.
@@ -188,24 +214,53 @@ public:
/// ValueTypeActions - For each value type, keep a LegalizeAction enum
/// that indicates how instruction selection should deal with the type.
uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
+
+ LegalizeAction getExtendedTypeAction(EVT VT) const {
+ // Handle non-vector integers.
+ if (!VT.isVector()) {
+ assert(VT.isInteger() && "Unsupported extended type!");
+ unsigned BitSize = VT.getSizeInBits();
+ // First promote to a power-of-two size, then expand if necessary.
+ if (BitSize < 8 || !isPowerOf2_32(BitSize))
+ return Promote;
+ return Expand;
+ }
+
+ // If this is a type smaller than a legal vector type, promote to that
+ // type, e.g. <2 x float> -> <4 x float>.
+ if (VT.getVectorElementType().isSimple() &&
+ VT.getVectorNumElements() != 1) {
+ MVT EltType = VT.getVectorElementType().getSimpleVT();
+ unsigned NumElts = VT.getVectorNumElements();
+ while (1) {
+ // Round up to the nearest power of 2.
+ NumElts = (unsigned)NextPowerOf2(NumElts);
+
+ MVT LargerVector = MVT::getVectorVT(EltType, NumElts);
+ if (LargerVector == MVT()) break;
+
+ // If this larger type is legal, promote to it.
+ if (getTypeAction(LargerVector) == Legal) return Promote;
+ }
+ }
+
+ return VT.isPow2VectorType() ? Expand : Promote;
+ }
public:
ValueTypeActionImpl() {
std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
}
- LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const {
- if (VT.isExtended()) {
- if (VT.isVector()) {
- return VT.isPow2VectorType() ? Expand : Promote;
- }
- if (VT.isInteger())
- // First promote to a power-of-two size, then expand if necessary.
- return VT == VT.getRoundIntegerType(Context) ? Expand : Promote;
- assert(0 && "Unsupported extended type!");
- return Legal;
- }
- unsigned I = VT.getSimpleVT().SimpleTy;
- return (LegalizeAction)ValueTypeActions[I];
+
+ LegalizeAction getTypeAction(EVT VT) const {
+ if (!VT.isExtended())
+ return getTypeAction(VT.getSimpleVT());
+ return getExtendedTypeAction(VT);
}
+
+ LegalizeAction getTypeAction(MVT VT) const {
+ return (LegalizeAction)ValueTypeActions[VT.SimpleTy];
+ }
+
void setTypeAction(EVT VT, LegalizeAction Action) {
unsigned I = VT.getSimpleVT().SimpleTy;
ValueTypeActions[I] = Action;
@@ -220,10 +275,13 @@ public:
/// it is already legal (return 'Legal') or we need to promote it to a larger
/// type (return 'Promote'), or we need to expand it into multiple registers
/// of smaller integer type (return 'Expand'). 'Custom' is not an option.
- LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const {
- return ValueTypeActions.getTypeAction(Context, VT);
+ LegalizeAction getTypeAction(EVT VT) const {
+ return ValueTypeActions.getTypeAction(VT);
}
-
+ LegalizeAction getTypeAction(MVT VT) const {
+ return ValueTypeActions.getTypeAction(VT);
+ }
+
/// getTypeToTransformTo - For types supported by the target, this is an
/// identity function. For types that must be promoted to larger types, this
/// returns the larger type to promote to. For integer types that are larger
@@ -235,7 +293,7 @@ public:
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
- assert(getTypeAction(Context, NVT) != Promote &&
+ assert(getTypeAction(NVT) != Promote &&
"Promote may not follow Expand or Promote");
return NVT;
}
@@ -250,17 +308,16 @@ public:
EltVT : EVT::getVectorVT(Context, EltVT, NumElts / 2);
}
// Promote to a power of two size, avoiding multi-step promotion.
- return getTypeAction(Context, NVT) == Promote ?
+ return getTypeAction(NVT) == Promote ?
getTypeToTransformTo(Context, NVT) : NVT;
} else if (VT.isInteger()) {
EVT NVT = VT.getRoundIntegerType(Context);
- if (NVT == VT)
- // Size is a power of two - expand to half the size.
+ if (NVT == VT) // Size is a power of two - expand to half the size.
return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
- else
- // Promote to a power of two size, avoiding multi-step promotion.
- return getTypeAction(Context, NVT) == Promote ?
- getTypeToTransformTo(Context, NVT) : NVT;
+
+ // Promote to a power of two size, avoiding multi-step promotion.
+ return getTypeAction(NVT) == Promote ?
+ getTypeToTransformTo(Context, NVT) : NVT;
}
assert(0 && "Unsupported extended type!");
return MVT(MVT::Other); // Not reached
@@ -273,7 +330,7 @@ public:
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
assert(!VT.isVector());
while (true) {
- switch (getTypeAction(Context, VT)) {
+ switch (getTypeAction(VT)) {
case Legal:
return VT;
case Expand:
@@ -766,6 +823,12 @@ public:
return false;
}
+ /// getMaximalGlobalOffset - Returns the maximal possible offset which can be
+ /// used for loads / stores from the global.
+ virtual unsigned getMaximalGlobalOffset() const {
+ return 0;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
@@ -981,6 +1044,11 @@ protected:
Synthesizable[VT.getSimpleVT().SimpleTy] = isSynthesizable;
}
+ /// findRepresentativeClass - Return the largest legal super-reg register class
+ /// of the register class for the specified type and its associated "cost".
+ virtual std::pair<const TargetRegisterClass*, uint8_t>
+ findRepresentativeClass(EVT VT) const;
+
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void computeRegisterProperties();
@@ -1562,6 +1630,19 @@ private:
unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
+ /// RepRegClassForVT - This indicates the "representative" register class to
+ /// use for each ValueType the target supports natively. This information is
+ /// used by the scheduler to track register pressure. By default, the
+ /// representative register class is the largest legal super-reg register
+ /// class of the register class of the specified type. e.g. On x86, i8, i16,
+ /// and i32's representative class would be GR32.
+ const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
+
+ /// RepRegClassCostForVT - This indicates the "cost" of the "representative"
+ /// register class for each ValueType. The cost is used by the scheduler to
+ /// approximate register pressure.
+ uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
+
/// Synthesizable indicates whether it is OK for the compiler to create new
/// operations using this type. All Legal types are Synthesizable except
/// MMX types on X86. Non-Legal types are not Synthesizable.
@@ -1672,6 +1753,15 @@ protected:
/// This field specifies whether the target can benefit from code placement
/// optimization.
bool benefitFromCodePlacementOpt;
+
+private:
+ /// isLegalRC - Return true if the value types that can be represented by the
+ /// specified register class are all legal.
+ bool isLegalRC(const TargetRegisterClass *RC) const;
+
+ /// hasLegalSuperRegRegClasses - Return true if the specified register class
+ /// has one or more super-reg register classes that are legal.
+ bool hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const;
};
/// GetReturnInfo - Given an LLVM IR type and return type attributes,
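
The TargetLowering changes introduce a "representative" register class (and cost) per value type plus a per-class pressure limit, all consumed by the register-pressure-aware scheduler, and getTypeAction() no longer needs an LLVMContext. A hypothetical target supplying the new pressure limit; FooTargetLowering and the reserved-register count are invented for illustration:

    unsigned
    FooTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                           MachineFunction &MF) const {
      // Hold a couple of registers back (frame pointer, scratch) and report
      // the rest as the scheduler's "high water mark" for this class.
      unsigned NumRegs = RC->getNumRegs();
      return NumRegs > 2 ? NumRegs - 2 : 0;
    }
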
diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h
index 227499b..42e99e0 100644
--- a/include/llvm/Target/TargetMachine.h
+++ b/include/llvm/Target/TargetMachine.h
@@ -75,7 +75,8 @@ namespace Sched {
None, // No preference
Latency, // Scheduling for shortest total latency.
RegPressure, // Scheduling for lowest register pressure.
- Hybrid // Scheduling for both latency and register pressure.
+ Hybrid, // Scheduling for both latency and register pressure.
+ ILP // Scheduling for ILP in low register pressure mode.
};
}
@@ -244,6 +245,18 @@ public:
bool = true) {
return true;
}
+
+ /// addPassesToEmitMC - Add passes to the specified pass manager to get
+ /// machine code emitted with the MCJIT. This method returns true if machine
+ /// code is not supported. It fills the MCContext Ctx pointer which can be
+ /// used to build custom MCStreamer.
+ ///
+ virtual bool addPassesToEmitMC(PassManagerBase &,
+ MCContext *&,
+ CodeGenOpt::Level,
+ bool = true) {
+ return true;
+ }
};
/// LLVMTargetMachine - This class describes a target machine that is
@@ -287,12 +300,27 @@ public:
JITCodeEmitter &MCE,
CodeGenOpt::Level,
bool DisableVerify = true);
+
+ /// addPassesToEmitMC - Add passes to the specified pass manager to get
+ /// machine code emitted with the MCJIT. This method returns true if machine
+ /// code is not supported. It fills the MCContext Ctx pointer which can be
+ /// used to build custom MCStreamer.
+ ///
+ virtual bool addPassesToEmitMC(PassManagerBase &PM,
+ MCContext *&Ctx,
+ CodeGenOpt::Level OptLevel,
+ bool DisableVerify = true);
/// Target-Independent Code Generator Pass Configuration Options.
-
- /// addInstSelector - This method should add any "last minute" LLVM->LLVM
- /// passes, then install an instruction selector pass, which converts from
- /// LLVM code to machine instructions.
+
+ /// addPreISelPasses - This method should add any "last minute" LLVM->LLVM
+ /// passes (which are run just before instruction selector).
+ virtual bool addPreISel(PassManagerBase &, CodeGenOpt::Level) {
+ return true;
+ }
+
+ /// addInstSelector - This method should install an instruction selector pass,
+ /// which converts from LLVM code to machine instructions.
virtual bool addInstSelector(PassManagerBase &, CodeGenOpt::Level) {
return true;
}
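
TargetMachine grows an ILP scheduling preference, a separate addPreISel() hook, and addPassesToEmitMC(), which sets up MC-based code emission and hands back the MCContext it created. A rough sketch of driving the new hook; the module (M), the target machine (TM) and the streamer wiring are assumed to exist elsewhere:

    MCContext *Ctx = 0;
    PassManager PM;
    if (TM->addPassesToEmitMC(PM, Ctx, CodeGenOpt::Default,
                              /*DisableVerify=*/true))
      report_fatal_error("target does not support MC emission");
    // Ctx now points at the MCContext created for MC emission (see the doc
    // comment above); run the pass pipeline to emit the code.
    PM.run(*M);
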
diff --git a/include/llvm/Target/TargetOpcodes.h b/include/llvm/Target/TargetOpcodes.h
index cb772ec..01fba66 100644
--- a/include/llvm/Target/TargetOpcodes.h
+++ b/include/llvm/Target/TargetOpcodes.h
@@ -25,7 +25,7 @@ namespace TargetOpcode {
enum {
PHI = 0,
INLINEASM = 1,
- DBG_LABEL = 2,
+ PROLOG_LABEL = 2,
EH_LABEL = 3,
GC_LABEL = 4,
diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h
index b369880..97ceffd 100644
--- a/include/llvm/Target/TargetOptions.h
+++ b/include/llvm/Target/TargetOptions.h
@@ -71,13 +71,18 @@ namespace llvm {
/// UnsafeFPMath implies LessPreciseFPMAD.
extern bool UnsafeFPMath;
- /// FiniteOnlyFPMath - This returns true when the -enable-finite-only-fp-math
- /// option is specified on the command line. If this returns false (default),
- /// the code generator is not allowed to assume that FP arithmetic arguments
- /// and results are never NaNs or +-Infs.
- extern bool FiniteOnlyFPMathOption;
- extern bool FiniteOnlyFPMath();
-
+ /// NoInfsFPMath - This flag is enabled when the
+ /// -enable-no-infs-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never +-Infs.
+ extern bool NoInfsFPMath;
+
+ /// NoNaNsFPMath - This flag is enabled when the
+ /// -enable-no-nans-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never NaNs.
+ extern bool NoNaNsFPMath;
+
/// HonorSignDependentRoundingFPMath - This returns true when the
/// -enable-sign-dependent-rounding-fp-math is specified. If this returns
/// false (the default), the code generator is allowed to assume that the
@@ -135,8 +140,8 @@ namespace llvm {
/// StackAlignment - Override default stack alignment for target.
extern unsigned StackAlignment;
- /// RealignStack - This flag indicates, whether stack should be automatically
- /// realigned, if needed.
+ /// RealignStack - This flag indicates whether the stack should be
+ /// automatically realigned, if needed.
extern bool RealignStack;
/// DisableJumpTables - This flag indicates jump tables should not be
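
The single FiniteOnlyFPMath() switch is split into two independent flags, NoInfsFPMath and NoNaNsFPMath, so code generators can relax the two assumptions separately. An illustrative translation of the old test (not taken from the diff):

    #include "llvm/Target/TargetOptions.h"
    using namespace llvm;

    static bool canAssumeFiniteFPMath() {
      // Previously: return FiniteOnlyFPMath();
      return NoNaNsFPMath && NoInfsFPMath;
    }
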
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index f6ac2b7..81dec3e 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -301,7 +301,7 @@ public:
/// considered to be a 'virtual' register, which is part of the SSA
/// namespace. This must be the same for all targets, which means that each
/// target is limited to this fixed number of registers.
- FirstVirtualRegister = 1024
+ FirstVirtualRegister = 16384
};
/// isPhysicalRegister - Return true if the specified register number is in
@@ -593,6 +593,13 @@ public:
return false;
}
+ /// requiresVirtualBaseRegisters - Returns true if the target wants the
+ /// LocalStackAllocation pass to be run and virtual base registers
+ /// used for more efficient stack access.
+ virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
+ return false;
+ }
+
/// hasFP - Return true if the specified function should have a dedicated
/// frame pointer register. For most targets this is true only if the function
/// has variable sized allocas or if frame pointer elimination is disabled.
@@ -603,18 +610,18 @@ public:
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
- virtual bool hasReservedCallFrame(MachineFunction &MF) const {
+ virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
return !hasFP(MF);
}
/// canSimplifyCallFramePseudos - When possible, it's best to simplify the
/// call frame pseudo ops before doing frame index elimination. This is
/// possible only when frame index references between the pseudos won't
- /// need adjusted for the call frame adjustments. Normally, that's true
+ /// need adjusting for the call frame adjustments. Normally, that's true
/// if the function has a reserved call frame or a frame pointer. Some
/// targets (Thumb2, for example) may have more complicated criteria,
/// however, and can override this behavior.
- virtual bool canSimplifyCallFramePseudos(MachineFunction &MF) const {
+ virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
return hasReservedCallFrame(MF) || hasFP(MF);
}
@@ -624,7 +631,7 @@ public:
/// reserved as its spill slot. This tells PEI not to create a new stack frame
/// object for the given register. It should be called only after
/// processFunctionBeforeCalleeSavedScan().
- virtual bool hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
+ virtual bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
int &FrameIdx) const {
return false;
}
@@ -636,6 +643,44 @@ public:
return false;
}
+ /// getFrameIndexInstrOffset - Get the offset from the referenced frame
+ /// index in the instruction, if there is one.
+ virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+ int Idx) const {
+ return 0;
+ }
+
+ /// needsFrameBaseReg - Returns true if the instruction's frame index
+ /// reference would be better served by a base register other than FP
+ /// or SP. Used by LocalStackFrameAllocation to determine which frame index
+ /// references it should create new base registers for.
+ virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
+ return false;
+ }
+
+ /// materializeFrameBaseRegister - Insert defining instruction(s) for
+ /// BaseReg to be a pointer to FrameIdx before insertion point I.
+ virtual void materializeFrameBaseRegister(MachineBasicBlock::iterator I,
+ unsigned BaseReg, int FrameIdx,
+ int64_t Offset) const {
+ assert(0 && "materializeFrameBaseRegister does not exist on this target");
+ }
+
+ /// resolveFrameIndex - Resolve a frame index operand of an instruction
+ /// to reference the indicated base register plus offset instead.
+ virtual void resolveFrameIndex(MachineBasicBlock::iterator I,
+ unsigned BaseReg, int64_t Offset) const {
+ assert(0 && "resolveFrameIndex does not exist on this target");
+ }
+
+ /// isFrameOffsetLegal - Determine whether a given offset immediate is
+ /// encodable to resolve a frame index.
+ virtual bool isFrameOffsetLegal(const MachineInstr *MI,
+ int64_t Offset) const {
+ assert(0 && "isFrameOffsetLegal does not exist on this target");
+ return false; // Must return a value in order to compile with VS 2005
+ }
+
/// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
/// frame setup/destroy instructions if they exist (-1 otherwise). Some
/// targets use pseudo instructions in order to abstract away the difference
@@ -671,7 +716,7 @@ public:
}
/// processFunctionBeforeFrameFinalized - This method is called immediately
- /// before the specified functions frame layout (MF.getFrameInfo()) is
+ /// before the specified function's frame layout (MF.getFrameInfo()) is
/// finalized. Once the frame is finalized, MO_FrameIndex operands are
/// replaced with direct constants. This method is optional.
///
@@ -698,14 +743,8 @@ public:
/// specified instruction, as long as it keeps the iterator pointing at the
/// finished product. SPAdj is the SP adjustment due to call frame setup
/// instruction.
- ///
- /// When -enable-frame-index-scavenging is enabled, the virtual register
- /// allocated for this frame index is returned and its value is stored in
- /// *Value.
- typedef std::pair<unsigned, int> FrameIndexValue;
- virtual unsigned eliminateFrameIndex(MachineBasicBlock::iterator MI,
- int SPAdj, FrameIndexValue *Value = NULL,
- RegScavenger *RS=NULL) const = 0;
+ virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, RegScavenger *RS=NULL) const = 0;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
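
TargetRegisterInfo raises FirstVirtualRegister to 16384, adds the hooks used by the new local-stack-slot allocation machinery (frame base registers), and simplifies eliminateFrameIndex() to return void without the FrameIndexValue out-parameter. A hypothetical backend adapted to the new signature; FooRegisterInfo and the operand handling are invented for illustration:

    void FooRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, RegScavenger *RS) const {
      MachineInstr &MI = *II;
      unsigned i = 0;
      while (!MI.getOperand(i).isFI())        // locate the frame-index operand
        ++i;
      int FrameIndex = MI.getOperand(i).getIndex();
      // ...rewrite the operand as base register + offset; RS can provide a
      // scratch register when the offset does not fit the encoding.
      (void)FrameIndex; (void)SPAdj;
    }
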
diff --git a/include/llvm/Target/TargetRegistry.h b/include/llvm/Target/TargetRegistry.h
index 1418bee..2817b0c 100644
--- a/include/llvm/Target/TargetRegistry.h
+++ b/include/llvm/Target/TargetRegistry.h
@@ -65,7 +65,8 @@ namespace llvm {
const std::string &TT);
typedef TargetAsmLexer *(*AsmLexerCtorTy)(const Target &T,
const MCAsmInfo &MAI);
- typedef TargetAsmParser *(*AsmParserCtorTy)(const Target &T,MCAsmParser &P);
+ typedef TargetAsmParser *(*AsmParserCtorTy)(const Target &T,MCAsmParser &P,
+ TargetMachine &TM);
typedef MCDisassembler *(*MCDisassemblerCtorTy)(const Target &T);
typedef MCInstPrinter *(*MCInstPrinterCtorTy)(const Target &T,
unsigned SyntaxVariant,
@@ -237,10 +238,11 @@ namespace llvm {
///
/// \arg Parser - The target independent parser implementation to use for
/// parsing and lexing.
- TargetAsmParser *createAsmParser(MCAsmParser &Parser) const {
+ TargetAsmParser *createAsmParser(MCAsmParser &Parser,
+ TargetMachine &TM) const {
if (!AsmParserCtorFn)
return 0;
- return AsmParserCtorFn(*this, Parser);
+ return AsmParserCtorFn(*this, Parser, TM);
}
/// createAsmPrinter - Create a target specific assembly printer pass. This
@@ -276,9 +278,9 @@ namespace llvm {
///
/// \arg TT - The target triple.
/// \arg Ctx - The target context.
- /// \arg TAB - The target assembler backend object.
+ /// \arg TAB - The target assembler backend object. Takes ownership.
/// \arg _OS - The stream object.
- /// \arg _Emitter - The target independent assembler object.
+ /// \arg _Emitter - The target independent assembler object. Takes ownership.
/// \arg RelaxAll - Relax all fixups?
MCStreamer *createObjectStreamer(const std::string &TT, MCContext &Ctx,
TargetAsmBackend &TAB,
@@ -667,8 +669,9 @@ namespace llvm {
}
private:
- static TargetAsmParser *Allocator(const Target &T, MCAsmParser &P) {
- return new AsmParserImpl(T, P);
+ static TargetAsmParser *Allocator(const Target &T, MCAsmParser &P,
+ TargetMachine &TM) {
+ return new AsmParserImpl(T, P, TM);
}
};
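
Target::createAsmParser() (and the registry's allocator) now needs a TargetMachine in addition to the MCAsmParser, so call sites gain one argument. A sketch assuming TheTarget, Parser and TM have already been created in an llvm-mc-style driver:

    OwningPtr<TargetAsmParser> TAP(TheTarget->createAsmParser(Parser, *TM));
    if (!TAP) {
      errs() << "error: this target does not support assembly parsing\n";
      return 1;
    }
    Parser.setTargetParser(*TAP);
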
diff --git a/include/llvm/Target/TargetSelect.h b/include/llvm/Target/TargetSelect.h
index 951e7fa..1891f87 100644
--- a/include/llvm/Target/TargetSelect.h
+++ b/include/llvm/Target/TargetSelect.h
@@ -16,7 +16,7 @@
#ifndef LLVM_TARGET_TARGETSELECT_H
#define LLVM_TARGET_TARGETSELECT_H
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
extern "C" {
// Declare all of the target-initialization functions that are available.
@@ -100,15 +100,22 @@ namespace llvm {
/// It is legal for a client to make multiple calls to this function.
inline bool InitializeNativeTarget() {
// If we have a native target, initialize it to ensure it is linked in.
-#ifdef LLVM_NATIVE_ARCH
-#define DoInit2(TARG) \
- LLVMInitialize ## TARG ## Info (); \
- LLVMInitialize ## TARG ()
-#define DoInit(T) DoInit2(T)
- DoInit(LLVM_NATIVE_ARCH);
+#ifdef LLVM_NATIVE_TARGET
+ LLVM_NATIVE_TARGETINFO();
+ LLVM_NATIVE_TARGET();
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ /// InitializeNativeTargetAsmPrinter - The main program should call
+ /// this function to initialize the native target asm printer.
+ inline bool InitializeNativeTargetAsmPrinter() {
+ // If we have a native target, initialize the corresponding asm printer.
+#ifdef LLVM_NATIVE_ASMPRINTER
+ LLVM_NATIVE_ASMPRINTER();
return false;
-#undef DoInit
-#undef DoInit2
#else
return true;
#endif
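
TargetSelect.h now keys off the LLVM_NATIVE_TARGET macro from llvm-config.h instead of expanding LLVM_NATIVE_ARCH itself, and adds InitializeNativeTargetAsmPrinter(). Typical JIT start-up code would call both; a minimal sketch with error handling mostly elided:

    #include "llvm/Target/TargetSelect.h"
    using namespace llvm;

    int main() {
      if (InitializeNativeTarget())           // true means no native target was built
        return 1;
      InitializeNativeTargetAsmPrinter();     // new helper added by this import
      // ...create the ExecutionEngine, etc.
      return 0;
    }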