author     dim <dim@FreeBSD.org>    2011-02-20 12:57:14 +0000
committer  dim <dim@FreeBSD.org>    2011-02-20 12:57:14 +0000
commit     cbb70ce070d220642b038ea101d9c0f9fbf860d6 (patch)
tree       d2b61ce94e654cb01a254d2195259db5f9cc3f3c /include/llvm/Target
parent     4ace901e87dac5bbbac78ed325e75462e48e386e (diff)
Vendor import of llvm trunk r126079:
http://llvm.org/svn/llvm-project/llvm/trunk@126079
Diffstat (limited to 'include/llvm/Target')
-rw-r--r--  include/llvm/Target/Mangler.h | 7
-rw-r--r--  include/llvm/Target/SubtargetFeature.h | 2
-rw-r--r--  include/llvm/Target/Target.td | 129
-rw-r--r--  include/llvm/Target/TargetAsmBackend.h | 67
-rw-r--r--  include/llvm/Target/TargetAsmInfo.h | 75
-rw-r--r--  include/llvm/Target/TargetAsmParser.h | 16
-rw-r--r--  include/llvm/Target/TargetCallingConv.h | 14
-rw-r--r--  include/llvm/Target/TargetData.h | 3
-rw-r--r--  include/llvm/Target/TargetELFWriterInfo.h | 3
-rw-r--r--  include/llvm/Target/TargetFrameInfo.h | 97
-rw-r--r--  include/llvm/Target/TargetFrameLowering.h | 196
-rw-r--r--  include/llvm/Target/TargetInstrDesc.h | 13
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h | 187
-rw-r--r--  include/llvm/Target/TargetInstrItineraries.h | 88
-rw-r--r--  include/llvm/Target/TargetJITInfo.h | 2
-rw-r--r--  include/llvm/Target/TargetLibraryInfo.h | 66
-rw-r--r--  include/llvm/Target/TargetLowering.h | 171
-rw-r--r--  include/llvm/Target/TargetLoweringObjectFile.h | 15
-rw-r--r--  include/llvm/Target/TargetMachine.h | 56
-rw-r--r--  include/llvm/Target/TargetRegisterInfo.h | 214
-rw-r--r--  include/llvm/Target/TargetRegistry.h | 66
-rw-r--r--  include/llvm/Target/TargetSchedule.td | 43
-rw-r--r--  include/llvm/Target/TargetSelectionDAG.td | 122
-rw-r--r--  include/llvm/Target/TargetSelectionDAGInfo.h | 10
24 files changed, 1161 insertions(+), 501 deletions(-)
diff --git a/include/llvm/Target/Mangler.h b/include/llvm/Target/Mangler.h
index a9f3576..c1c118b 100644
--- a/include/llvm/Target/Mangler.h
+++ b/include/llvm/Target/Mangler.h
@@ -15,7 +15,6 @@
#define LLVM_SUPPORT_MANGLER_H
#include "llvm/ADT/DenseMap.h"
-#include <string>
namespace llvm {
class StringRef;
@@ -69,12 +68,6 @@ public:
/// empty.
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const Twine &GVName,
ManglerPrefixTy PrefixTy = Mangler::Default);
-
- /// getNameWithPrefix - Return the name of the appropriate prefix
- /// and the specified global variable's name. If the global variable doesn't
- /// have a name, this fills in a unique name for the global.
- std::string getNameWithPrefix(const GlobalValue *GV,
- bool isImplicitlyPrivate = false);
};
} // End llvm namespace
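With the std::string-returning overload gone, callers are left with the SmallVector-based getNameWithPrefix() shown above. A minimal sketch of the calling pattern, assuming the Mangler instance and output stream are supplied by the surrounding codegen context and using a placeholder symbol name:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/Mangler.h"

// Sketch only: M and OS are assumed to come from the caller's AsmPrinter
// context; "foo" stands in for a real global's name.
static void emitMangledName(llvm::Mangler &M, llvm::raw_ostream &OS) {
  llvm::SmallString<64> Name;          // stack buffer replaces the old std::string
  M.getNameWithPrefix(Name, "foo");    // uses the default Mangler::Default prefix
  OS << Name.str();
}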
diff --git a/include/llvm/Target/SubtargetFeature.h b/include/llvm/Target/SubtargetFeature.h
index 4546871..6c21ae9 100644
--- a/include/llvm/Target/SubtargetFeature.h
+++ b/include/llvm/Target/SubtargetFeature.h
@@ -22,7 +22,7 @@
#include <vector>
#include <cstring>
#include "llvm/ADT/Triple.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class raw_ostream;
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index b141a77..0f7e6aa 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -1,10 +1,10 @@
//===- Target.td - Target Independent TableGen interface ---*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces which should be
@@ -47,7 +47,7 @@ class Register<string n> {
// modification of this register can potentially read or modify the aliased
// registers.
list<Register> Aliases = [];
-
+
// SubRegs - A list of registers that are parts of this register. Note these
// are "immediate" sub-registers and the registers within the list do not
// themselves overlap. e.g. For X86, EAX's SubRegs list contains only [AX],
@@ -84,7 +84,7 @@ class Register<string n> {
// need to specify sub-registers.
// List "subregs" specifies which registers are sub-registers to this one. This
// is used to populate the SubRegs and AliasSet fields of TargetRegisterDesc.
-// This allows the code generator to be careful not to put two values with
+// This allows the code generator to be careful not to put two values with
// overlapping live ranges into registers which alias.
class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
let SubRegs = subregs;
@@ -101,7 +101,7 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
// RegType - Specify the list ValueType of the registers in this register
// class. Note that all registers in a register class must have the same
- // ValueTypes. This is a list because some targets permit storing different
+ // ValueTypes. This is a list because some targets permit storing different
// types in same register, for example vector values with 128-bit total size,
// but different count/size of items, like SSE on x86.
//
@@ -127,13 +127,13 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
// allocation used by the register allocator.
//
list<Register> MemberList = regList;
-
+
// SubRegClasses - Specify the register class of subregisters as a list of
// dags: (RegClass SubRegIndex, SubRegindex, ...)
list<dag> SubRegClasses = [];
// MethodProtos/MethodBodies - These members can be used to insert arbitrary
- // code into a generated register class. The normal usage of this is to
+ // code into a generated register class. The normal usage of this is to
// overload virtual methods.
code MethodProtos = [{}];
code MethodBodies = [{}];
@@ -150,8 +150,8 @@ class DwarfRegNum<list<int> Numbers> {
// These values can be determined by locating the <target>.h file in the
// directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The
// order of these names correspond to the enumeration used by gcc. A value of
- // -1 indicates that the gcc number is undefined and -2 that register number is
- // invalid for this mode/flavour.
+ // -1 indicates that the gcc number is undefined and -2 that register number
+ // is invalid for this mode/flavour.
list<int> DwarfNumbers = Numbers;
}
@@ -199,6 +199,7 @@ class Instruction {
bit isBranch = 0; // Is this instruction a branch instruction?
bit isIndirectBranch = 0; // Is this instruction an indirect branch?
bit isCompare = 0; // Is this instruction a comparison instruction?
+ bit isMoveImm = 0; // Is this instruction a move immediate instruction?
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
@@ -243,14 +244,29 @@ class Instruction {
/// be encoded into the output machineinstr.
string DisableEncoding = "";
+ string PostEncoderMethod = "";
+ string DecoderMethod = "";
+
/// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
bits<64> TSFlags = 0;
+
+ ///@name Assembler Parser Support
+ ///@{
+
+ string AsmMatchConverter = "";
+
+ ///@}
}
/// Predicates - These are extra conditionals which are turned into instruction
/// selector matching code. Currently each predicate is just a string.
class Predicate<string cond> {
string CondString = cond;
+
+ /// AssemblerMatcherPredicate - If this feature can be used by the assembler
+ /// matcher, this is true. Targets should set this by inheriting their
+ /// feature from the AssemblerPredicate class in addition to Predicate.
+ bit AssemblerMatcherPredicate = 0;
}
/// NoHonorSignDependentRounding - This predicate is true if support for
@@ -262,9 +278,9 @@ class Requires<list<Predicate> preds> {
list<Predicate> Predicates = preds;
}
-/// ops definition - This is just a simple marker used to identify the operands
-/// list for an instruction. outs and ins are identical both syntatically and
-/// semantically, they are used to define def operands and use operands to
+/// ops definition - This is just a simple marker used to identify the operand
+/// list for an instruction. outs and ins are identical both syntactically and
+/// semantically; they are used to define def operands and use operands to
/// improve readability. This should be used like this:
/// (outs R32:$dst), (ins R32:$src1, R32:$src2) or something similar.
def ops;
@@ -326,18 +342,26 @@ class AsmOperandClass {
/// signature should be:
/// void addFooOperands(MCInst &Inst, unsigned N) const;
string RenderMethod = ?;
+
+ /// The name of the method on the target specific operand to call to custom
+ /// handle the operand parsing. This is useful when the operands do not relate
+ /// to immediates or registers and are very instruction specific (as flags to
+ /// set in a processor register, coprocessor number, ...).
+ string ParserMethod = ?;
}
def ImmAsmOperand : AsmOperandClass {
let Name = "Imm";
}
-
+
/// Operand Types - These provide the built-in operand types that may be used
/// by a target. Targets can optionally provide their own operand types as
/// needed, though this should not be needed for RISC targets.
class Operand<ValueType ty> {
ValueType Type = ty;
string PrintMethod = "printOperand";
+ string EncoderMethod = "";
+ string DecoderMethod = "";
string AsmOperandLowerMethod = ?;
dag MIOperandInfo = (ops);
@@ -409,6 +433,7 @@ def INLINEASM : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "";
+ let neverHasSideEffects = 1; // Note side effect is encoded in an operand.
}
def PROLOG_LABEL : Instruction {
let OutOperandList = (outs);
@@ -475,7 +500,7 @@ def DBG_VALUE : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "DBG_VALUE";
- let isAsCheapAsAMove = 1;
+ let neverHasSideEffects = 1;
}
def REG_SEQUENCE : Instruction {
let OutOperandList = (outs unknown:$dst);
@@ -506,9 +531,9 @@ class AsmParser {
// name.
string AsmParserClassName = "AsmParser";
- // AsmParserInstCleanup - If non-empty, this is the name of a custom function on the
- // AsmParser class to call on every matched instruction. This can be used to
- // perform target specific instruction post-processing.
+ // AsmParserInstCleanup - If non-empty, this is the name of a custom member
+ // function of the AsmParser class to call on every matched instruction.
+ // This can be used to perform target specific instruction post-processing.
string AsmParserInstCleanup = "";
// Variant - AsmParsers can be of multiple different variants. Variants are
@@ -529,6 +554,49 @@ class AsmParser {
}
def DefaultAsmParser : AsmParser;
+/// AssemblerPredicate - This is a Predicate that can be used when the assembler
+/// matches instructions and aliases.
+class AssemblerPredicate {
+ bit AssemblerMatcherPredicate = 1;
+}
+
+
+
+/// MnemonicAlias - This class allows targets to define assembler mnemonic
+/// aliases. This should be used when all forms of one mnemonic are accepted
+/// with a different mnemonic. For example, X86 allows:
+/// sal %al, 1 -> shl %al, 1
+/// sal %ax, %cl -> shl %ax, %cl
+/// sal %eax, %cl -> shl %eax, %cl
+/// etc. Though "sal" is accepted with many forms, all of them are directly
+/// translated to a shl, so it can be handled with (in the case of X86, it
+/// actually has one for each suffix as well):
+/// def : MnemonicAlias<"sal", "shl">;
+///
+/// Mnemonic aliases are mapped before any other translation in the match phase,
+/// and do allow Requires predicates, e.g.:
+///
+/// def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
+/// def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
+///
+class MnemonicAlias<string From, string To> {
+ string FromMnemonic = From;
+ string ToMnemonic = To;
+
+ // Predicates - Predicates that must be true for this remapping to happen.
+ list<Predicate> Predicates = [];
+}
+
+/// InstAlias - This defines an alternate assembly syntax that is allowed to
+/// match an instruction that has a different (more canonical) assembly
+/// representation.
+class InstAlias<string Asm, dag Result> {
+ string AsmString = Asm; // The .s format to match the instruction with.
+ dag ResultInst = Result; // The MCInst to generate.
+
+ // Predicates - Predicates that must be true for this to match.
+ list<Predicate> Predicates = [];
+}
//===----------------------------------------------------------------------===//
// AsmWriter - This class can be implemented by targets that need to customize
@@ -543,10 +611,6 @@ class AsmWriter {
// name.
string AsmWriterClassName = "AsmPrinter";
- // InstFormatName - AsmWriters can specify the name of the format string to
- // print instructions with.
- string InstFormatName = "AsmString";
-
// Variant - AsmWriters can be of multiple different variants. Variants are
// used to support targets that need to emit assembly code in ways that are
// mostly the same for different targets, but have minor differences in
@@ -554,17 +618,22 @@ class AsmWriter {
// will specify which alternative to use. For example "{x|y|z}" with Variant
// == 1, will expand to "y".
int Variant = 0;
-
-
+
+
// FirstOperandColumn/OperandSpacing - If the assembler syntax uses a columnar
// layout, the asmwriter can actually generate output in this columns (in
// verbose-asm mode). These two values indicate the width of the first column
// (the "opcode" area) and the width to reserve for subsequent operands. When
// verbose asm mode is enabled, operands will be indented to respect this.
int FirstOperandColumn = -1;
-
+
// OperandSpacing - Space between operand columns.
int OperandSpacing = -1;
+
+ // isMCAsmWriter - Is this assembly writer for an MC emitter? This controls
+ // generation of the printInstruction() method. For MC printers, it takes
+ // an MCInstr* operand, otherwise it takes a MachineInstr*.
+ bit isMCAsmWriter = 0;
}
def DefaultAsmWriter : AsmWriter;
@@ -592,15 +661,15 @@ class SubtargetFeature<string n, string a, string v, string d,
// appropriate target chip.
//
string Name = n;
-
+
// Attribute - Attribute to be set by feature.
//
string Attribute = a;
-
+
// Value - Value the attribute to be set to by feature.
//
string Value = v;
-
+
// Desc - Feature description. Used by command line (-mattr=) to display help
// information.
//
@@ -622,12 +691,12 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
// appropriate target chip.
//
string Name = n;
-
+
// ProcItin - The scheduling information for the target processor.
//
ProcessorItineraries ProcItin = pi;
-
- // Features - list of
+
+ // Features - list of
list<SubtargetFeature> Features = f;
}
diff --git a/include/llvm/Target/TargetAsmBackend.h b/include/llvm/Target/TargetAsmBackend.h
index 979595a..7527298 100644
--- a/include/llvm/Target/TargetAsmBackend.h
+++ b/include/llvm/Target/TargetAsmBackend.h
@@ -10,17 +10,18 @@
#ifndef LLVM_TARGET_TARGETASMBACKEND_H
#define LLVM_TARGET_TARGETASMBACKEND_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
-class MCDataFragment;
class MCFixup;
class MCInst;
class MCObjectWriter;
class MCSection;
template<typename T>
class SmallVectorImpl;
-class Target;
class raw_ostream;
/// TargetAsmBackend - Generic interface to target specific assembler backends.
@@ -28,37 +29,17 @@ class TargetAsmBackend {
TargetAsmBackend(const TargetAsmBackend &); // DO NOT IMPLEMENT
void operator=(const TargetAsmBackend &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
- TargetAsmBackend(const Target &);
+ TargetAsmBackend();
- /// TheTarget - The Target that this machine was created for.
- const Target &TheTarget;
-
- unsigned HasAbsolutizedSet : 1;
unsigned HasReliableSymbolDifference : 1;
- unsigned HasScatteredSymbols : 1;
public:
virtual ~TargetAsmBackend();
- const Target &getTarget() const { return TheTarget; }
-
/// createObjectWriter - Create a new MCObjectWriter instance for use by the
/// assembler backend to emit the final object file.
virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const = 0;
- /// hasAbsolutizedSet - Check whether this target "absolutizes"
- /// assignments. That is, given code like:
- /// a:
- /// ...
- /// b:
- /// tmp = a - b
- /// .long tmp
- /// will the value of 'tmp' be a relocatable expression, or the assembly time
- /// value of L0 - L1. This distinction is only relevant for platforms that
- /// support scattered symbols, since in the absence of scattered symbols (a -
- /// b) cannot change after assembly.
- bool hasAbsolutizedSet() const { return HasAbsolutizedSet; }
-
/// hasReliableSymbolDifference - Check whether this target implements
/// accurate relocations for differences between symbols. If not, differences
/// between symbols will always be relocatable expressions and any references
@@ -68,21 +49,11 @@ public:
/// This should always be true (since it results in fewer relocations with no
/// loss of functionality), but is currently supported as a way to maintain
/// exact object compatibility with Darwin 'as' (on non-x86_64). It should
- /// eventually should be eliminated. See also \see hasAbsolutizedSet.
+ /// eventually be eliminated.
bool hasReliableSymbolDifference() const {
return HasReliableSymbolDifference;
}
- /// hasScatteredSymbols - Check whether this target supports scattered
- /// symbols. If so, the assembler should assume that atoms can be scattered by
- /// the linker. In particular, this means that the offsets between symbols
- /// which are in distinct atoms is not known at link time, and the assembler
- /// must generate fixups and relocations appropriately.
- ///
- /// Note that the assembler currently does not reason about atoms, instead it
- /// assumes all temporary symbols reside in the "current atom".
- bool hasScatteredSymbols() const { return HasScatteredSymbols; }
-
/// doesSectionRequireSymbols - Check whether the given section requires that
/// all symbols (even temporaries) have symbol table entries.
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -97,16 +68,28 @@ public:
return true;
}
- /// isVirtualSection - Check whether the given section is "virtual", that is
- /// has no actual object file contents.
- virtual bool isVirtualSection(const MCSection &Section) const = 0;
+ /// @name Target Fixup Interfaces
+ /// @{
+
+ /// getNumFixupKinds - Get the number of target specific fixup kinds.
+ virtual unsigned getNumFixupKinds() const = 0;
+
+ /// getFixupKindInfo - Get information on a fixup kind.
+ virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;
+
+ /// @}
/// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
- virtual void ApplyFixup(const MCFixup &Fixup, MCDataFragment &Fragment,
+ virtual void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const = 0;
+ /// @}
+
+ /// @name Target Relaxation Interfaces
+ /// @{
+
/// MayNeedRelaxation - Check whether the given instruction may need
/// relaxation.
///
@@ -121,12 +104,18 @@ public:
/// \parm Res [output] - On return, the relaxed instruction.
virtual void RelaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
+ /// @}
+
/// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given
/// output. If the target cannot generate such a sequence, it should return an
/// error.
///
/// \return - True on success.
virtual bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const = 0;
+
+ /// HandleAssemblerFlag - Handle any target-specific assembler flags.
+ /// By default, do nothing.
+ virtual void HandleAssemblerFlag(MCAssemblerFlag Flag) {}
};
} // End llvm namespace
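The revised ApplyFixup() hands the backend a raw byte buffer (Data/DataSize) rather than an MCDataFragment. Below is a standalone sketch, not LLVM API, of the byte-patching a little-endian backend would typically perform inside that hook; the fixup width is an assumption for illustration.

#include <cstdint>

// Patch `Value` into `NumBytes` little-endian bytes at `Offset`, as a fixup
// implementation would do over the fragment's data. Purely illustrative.
static void applyFixupLE(char *Data, unsigned DataSize, unsigned Offset,
                         unsigned NumBytes, uint64_t Value) {
  if (Offset + NumBytes > DataSize)
    return;                                   // a real backend would assert here
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] = char((Value >> (8 * i)) & 0xff);
}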
diff --git a/include/llvm/Target/TargetAsmInfo.h b/include/llvm/Target/TargetAsmInfo.h
new file mode 100644
index 0000000..98aab14
--- /dev/null
+++ b/include/llvm/Target/TargetAsmInfo.h
@@ -0,0 +1,75 @@
+//===-- llvm/Target/TargetAsmInfo.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to provide the information necessary for producing assembly files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETASMINFO_H
+#define LLVM_TARGET_TARGETASMINFO_H
+
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+ class MCSection;
+ class MCContext;
+ class TargetMachine;
+ class TargetLoweringObjectFile;
+
+class TargetAsmInfo {
+ unsigned PointerSize;
+ bool IsLittleEndian;
+ TargetFrameLowering::StackDirection StackDir;
+ const TargetRegisterInfo *TRI;
+ std::vector<MachineMove> InitialFrameState;
+ const TargetLoweringObjectFile *TLOF;
+
+public:
+ explicit TargetAsmInfo(const TargetMachine &TM);
+
+ /// getPointerSize - Get the pointer size in bytes.
+ unsigned getPointerSize() const {
+ return PointerSize;
+ }
+
+ /// islittleendian - True if the target is little endian.
+ bool isLittleEndian() const {
+ return IsLittleEndian;
+ }
+
+ TargetFrameLowering::StackDirection getStackGrowthDirection() const {
+ return StackDir;
+ }
+
+ const MCSection *getDwarfLineSection() const {
+ return TLOF->getDwarfLineSection();
+ }
+
+ const MCSection *getEHFrameSection() const {
+ return TLOF->getEHFrameSection();
+ }
+
+ unsigned getDwarfRARegNum(bool isEH) const {
+ return TRI->getDwarfRegNum(TRI->getRARegister(), isEH);
+ }
+
+ const std::vector<MachineMove> &getInitialFrameState() const {
+ return InitialFrameState;
+ }
+
+ int getDwarfRegNum(unsigned RegNum, bool isEH) const {
+ return TRI->getDwarfRegNum(RegNum, isEH);
+ }
+};
+
+}
+#endif
diff --git a/include/llvm/Target/TargetAsmParser.h b/include/llvm/Target/TargetAsmParser.h
index 5830d1f..9ff50cb 100644
--- a/include/llvm/Target/TargetAsmParser.h
+++ b/include/llvm/Target/TargetAsmParser.h
@@ -13,7 +13,7 @@
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
namespace llvm {
-class MCInst;
+class MCStreamer;
class StringRef;
class Target;
class SMLoc;
@@ -42,6 +42,8 @@ public:
unsigned getAvailableFeatures() const { return AvailableFeatures; }
void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+ virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) = 0;
+
/// ParseInstruction - Parse one assembly instruction.
///
/// The parser is positioned following the instruction name. The target
@@ -70,16 +72,16 @@ public:
/// \param DirectiveID - the identifier token of the directive.
virtual bool ParseDirective(AsmToken DirectiveID) = 0;
- /// MatchInstruction - Recognize a series of operands of a parsed instruction
- /// as an actual MCInst. This returns false and fills in Inst on success and
- /// returns true on failure to match.
+ /// MatchAndEmitInstruction - Recognize a series of operands of a parsed
+ /// instruction as an actual MCInst and emit it to the specified MCStreamer.
+ /// This returns false on success and returns true on failure to match.
///
/// On failure, the target parser is responsible for emitting a diagnostic
/// explaining the match failure.
virtual bool
- MatchInstruction(SMLoc IDLoc,
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCInst &Inst) = 0;
+ MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out) = 0;
};
diff --git a/include/llvm/Target/TargetCallingConv.h b/include/llvm/Target/TargetCallingConv.h
index f368a2e..275957e 100644
--- a/include/llvm/Target/TargetCallingConv.h
+++ b/include/llvm/Target/TargetCallingConv.h
@@ -106,14 +106,13 @@ namespace ISD {
///
struct InputArg {
ArgFlagsTy Flags;
- EVT VT;
+ MVT VT;
bool Used;
InputArg() : VT(MVT::Other), Used(false) {}
InputArg(ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), VT(vt), Used(used) {
- assert(VT.isSimple() &&
- "InputArg value type must be Simple!");
+ : Flags(flags), Used(used) {
+ VT = vt.getSimpleVT();
}
};
@@ -123,16 +122,15 @@ namespace ISD {
///
struct OutputArg {
ArgFlagsTy Flags;
- EVT VT;
+ MVT VT;
/// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
bool IsFixed;
OutputArg() : IsFixed(false) {}
OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
- : Flags(flags), VT(vt), IsFixed(isfixed) {
- assert(VT.isSimple() &&
- "OutputArg value type must be Simple!");
+ : Flags(flags), IsFixed(isfixed) {
+ VT = vt.getSimpleVT();
}
};
}
diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h
index b89cbe0..25065d3 100644
--- a/include/llvm/Target/TargetData.h
+++ b/include/llvm/Target/TargetData.h
@@ -22,6 +22,7 @@
#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -143,7 +144,7 @@ public:
std::string getStringRepresentation() const;
/// isLegalInteger - This function returns true if the specified type is
- /// known tobe a native integer type supported by the CPU. For example,
+ /// known to be a native integer type supported by the CPU. For example,
/// i64 is not native on most 32-bit CPUs and i37 is not native on any known
/// one. This returns false if the integer width is not legal.
///
diff --git a/include/llvm/Target/TargetELFWriterInfo.h b/include/llvm/Target/TargetELFWriterInfo.h
index 7cb6931..b97f3e2 100644
--- a/include/llvm/Target/TargetELFWriterInfo.h
+++ b/include/llvm/Target/TargetELFWriterInfo.h
@@ -28,7 +28,6 @@ namespace llvm {
// EMachine - This field is the target specific value to emit as the
// e_machine member of the ELF header.
unsigned short EMachine;
- TargetMachine &TM;
bool is64Bit, isLittleEndian;
public:
@@ -62,7 +61,7 @@ namespace llvm {
ELFDATA2MSB = 2 // Big-endian object file
};
- explicit TargetELFWriterInfo(TargetMachine &tm);
+ explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
virtual ~TargetELFWriterInfo();
unsigned short getEMachine() const { return EMachine; }
diff --git a/include/llvm/Target/TargetFrameInfo.h b/include/llvm/Target/TargetFrameInfo.h
deleted file mode 100644
index 975d156..0000000
--- a/include/llvm/Target/TargetFrameInfo.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- llvm/Target/TargetFrameInfo.h ---------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Interface to describe the layout of a stack frame on the target machine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETFRAMEINFO_H
-#define LLVM_TARGET_TARGETFRAMEINFO_H
-
-#include <utility>
-
-namespace llvm {
-
-/// Information about stack frame layout on the target. It holds the direction
-/// of stack growth, the known stack alignment on entry to each function, and
-/// the offset to the locals area.
-///
-/// The offset to the local area is the offset from the stack pointer on
-/// function entry to the first location where function data (local variables,
-/// spill locations) can be stored.
-class TargetFrameInfo {
-public:
- enum StackDirection {
- StackGrowsUp, // Adding to the stack increases the stack address
- StackGrowsDown // Adding to the stack decreases the stack address
- };
-
- // Maps a callee saved register to a stack slot with a fixed offset.
- struct SpillSlot {
- unsigned Reg;
- int Offset; // Offset relative to stack pointer on function entry.
- };
-private:
- StackDirection StackDir;
- unsigned StackAlignment;
- unsigned TransientStackAlignment;
- int LocalAreaOffset;
-public:
- TargetFrameInfo(StackDirection D, unsigned StackAl, int LAO,
- unsigned TransAl = 1)
- : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
- LocalAreaOffset(LAO) {}
-
- virtual ~TargetFrameInfo();
-
- // These methods return information that describes the abstract stack layout
- // of the target machine.
-
- /// getStackGrowthDirection - Return the direction the stack grows
- ///
- StackDirection getStackGrowthDirection() const { return StackDir; }
-
- /// getStackAlignment - This method returns the number of bytes to which the
- /// stack pointer must be aligned on entry to a function. Typically, this
- /// is the largest alignment for any data object in the target.
- ///
- unsigned getStackAlignment() const { return StackAlignment; }
-
- /// getTransientStackAlignment - This method returns the number of bytes to
- /// which the stack pointer must be aligned at all times, even between
- /// calls.
- ///
- unsigned getTransientStackAlignment() const {
- return TransientStackAlignment;
- }
-
- /// getOffsetOfLocalArea - This method returns the offset of the local area
- /// from the stack pointer on entrance to a function.
- ///
- int getOffsetOfLocalArea() const { return LocalAreaOffset; }
-
- /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
- /// pairs, that contains an entry for each callee saved register that must be
- /// spilled to a particular stack location if it is spilled.
- ///
- /// Each entry in this array contains a <register,offset> pair, indicating the
- /// fixed offset from the incoming stack pointer that each register should be
- /// spilled at. If a register is not listed here, the code generator is
- /// allowed to spill it anywhere it chooses.
- ///
- virtual const SpillSlot *
- getCalleeSavedSpillSlots(unsigned &NumEntries) const {
- NumEntries = 0;
- return 0;
- }
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/include/llvm/Target/TargetFrameLowering.h b/include/llvm/Target/TargetFrameLowering.h
new file mode 100644
index 0000000..e104b16
--- /dev/null
+++ b/include/llvm/Target/TargetFrameLowering.h
@@ -0,0 +1,196 @@
+//===-- llvm/Target/TargetFrameLowering.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to describe the layout of a stack frame on the target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETFRAMELOWERING_H
+#define LLVM_TARGET_TARGETFRAMELOWERING_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+#include <utility>
+#include <vector>
+
+namespace llvm {
+ class CalleeSavedInfo;
+ class MachineFunction;
+ class MachineBasicBlock;
+ class MachineMove;
+ class RegScavenger;
+
+/// Information about stack frame layout on the target. It holds the direction
+/// of stack growth, the known stack alignment on entry to each function, and
+/// the offset to the locals area.
+///
+/// The offset to the local area is the offset from the stack pointer on
+/// function entry to the first location where function data (local variables,
+/// spill locations) can be stored.
+class TargetFrameLowering {
+public:
+ enum StackDirection {
+ StackGrowsUp, // Adding to the stack increases the stack address
+ StackGrowsDown // Adding to the stack decreases the stack address
+ };
+
+ // Maps a callee saved register to a stack slot with a fixed offset.
+ struct SpillSlot {
+ unsigned Reg;
+ int Offset; // Offset relative to stack pointer on function entry.
+ };
+private:
+ StackDirection StackDir;
+ unsigned StackAlignment;
+ unsigned TransientStackAlignment;
+ int LocalAreaOffset;
+public:
+ TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO,
+ unsigned TransAl = 1)
+ : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
+ LocalAreaOffset(LAO) {}
+
+ virtual ~TargetFrameLowering();
+
+ // These methods return information that describes the abstract stack layout
+ // of the target machine.
+
+ /// getStackGrowthDirection - Return the direction the stack grows
+ ///
+ StackDirection getStackGrowthDirection() const { return StackDir; }
+
+ /// getStackAlignment - This method returns the number of bytes to which the
+ /// stack pointer must be aligned on entry to a function. Typically, this
+ /// is the largest alignment for any data object in the target.
+ ///
+ unsigned getStackAlignment() const { return StackAlignment; }
+
+ /// getTransientStackAlignment - This method returns the number of bytes to
+ /// which the stack pointer must be aligned at all times, even between
+ /// calls.
+ ///
+ unsigned getTransientStackAlignment() const {
+ return TransientStackAlignment;
+ }
+
+ /// getOffsetOfLocalArea - This method returns the offset of the local area
+ /// from the stack pointer on entrance to a function.
+ ///
+ int getOffsetOfLocalArea() const { return LocalAreaOffset; }
+
+ /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
+ /// pairs, that contains an entry for each callee saved register that must be
+ /// spilled to a particular stack location if it is spilled.
+ ///
+ /// Each entry in this array contains a <register,offset> pair, indicating the
+ /// fixed offset from the incoming stack pointer that each register should be
+ /// spilled at. If a register is not listed here, the code generator is
+ /// allowed to spill it anywhere it chooses.
+ ///
+ virtual const SpillSlot *
+ getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+ NumEntries = 0;
+ return 0;
+ }
+
+ /// targetHandlesStackFrameRounding - Returns true if the target is
+ /// responsible for rounding up the stack frame (probably at emitPrologue
+ /// time).
+ virtual bool targetHandlesStackFrameRounding() const {
+ return false;
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ virtual void emitPrologue(MachineFunction &MF) const = 0;
+ virtual void emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const = 0;
+
+ /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
+ /// saved registers and returns true if it isn't possible / profitable to do
+ /// so by issuing a series of store instructions via
+ /// storeRegToStackSlot(). Returns false otherwise.
+ virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
+ /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
+ /// saved registers and returns true if it isn't possible / profitable to do
+ /// so by issuing a series of load instructions via loadRegToStackSlot().
+ /// Returns false otherwise.
+ virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
+ /// hasFP - Return true if the specified function should have a dedicated
+ /// frame pointer register. For most targets this is true only if the function
+ /// has variable sized allocas or if frame pointer elimination is disabled.
+ virtual bool hasFP(const MachineFunction &MF) const = 0;
+
+ /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
+ /// not required, we reserve argument space for call sites in the function
+ /// immediately on entry to the current function. This eliminates the need for
+ /// add/sub sp brackets around call sites. Returns true if the call frame is
+ /// included as part of the stack frame.
+ virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
+ return !hasFP(MF);
+ }
+
+ /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
+ /// call frame pseudo ops before doing frame index elimination. This is
+ /// possible only when frame index references between the pseudos won't
+ /// need adjusting for the call frame adjustments. Normally, that's true
+ /// if the function has a reserved call frame or a frame pointer. Some
+ /// targets (Thumb2, for example) may have more complicated criteria,
+ /// however, and can override this behavior.
+ virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+ return hasReservedCallFrame(MF) || hasFP(MF);
+ }
+
+ /// getInitialFrameState - Returns a list of machine moves that are assumed
+ /// on entry to all functions. Note that LabelID is ignored (assumed to be
+ /// the beginning of the function.)
+ virtual void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+
+ /// getFrameIndexOffset - Returns the displacement from the frame register to
+ /// the stack frame of the specified index.
+ virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+
+ /// getFrameIndexReference - This method should return the base register
+ /// and offset used to reference a frame index location. The offset is
+ /// returned directly, and the base register is returned via FrameReg.
+ virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const;
+
+ /// processFunctionBeforeCalleeSavedScan - This method is called immediately
+ /// before PrologEpilogInserter scans the physical registers used to determine
+ /// what callee saved registers should be spilled. This method is optional.
+ virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS = NULL) const {
+
+ }
+
+ /// processFunctionBeforeFrameFinalized - This method is called immediately
+ /// before the specified function's frame layout (MF.getFrameInfo()) is
+ /// finalized. Once the frame is finalized, MO_FrameIndex operands are
+ /// replaced with direct constants. This method is optional.
+ ///
+ virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
+ }
+};
+
+} // End llvm namespace
+
+#endif
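A minimal sketch of what a hypothetical target now derives from this class: the stack layout constants go to the base constructor, and only emitPrologue/emitEpilogue/hasFP are pure virtual. All names and values below are illustrative, not from any real target.

#include "llvm/Target/TargetFrameLowering.h"

// Hypothetical example target; behavior is a placeholder.
class DemoFrameLowering : public llvm::TargetFrameLowering {
public:
  DemoFrameLowering()
    : TargetFrameLowering(llvm::TargetFrameLowering::StackGrowsDown,
                          /*StackAl=*/8, /*LAO=*/0) {}

  virtual void emitPrologue(llvm::MachineFunction &MF) const {
    // push the frame pointer, adjust the stack pointer, emit frame moves
  }
  virtual void emitEpilogue(llvm::MachineFunction &MF,
                            llvm::MachineBasicBlock &MBB) const {
    // restore the stack pointer and callee-saved state
  }
  virtual bool hasFP(const llvm::MachineFunction &MF) const { return false; }
};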
diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h
index a127aed..8823d5a 100644
--- a/include/llvm/Target/TargetInstrDesc.h
+++ b/include/llvm/Target/TargetInstrDesc.h
@@ -15,7 +15,7 @@
#ifndef LLVM_TARGET_TARGETINSTRDESC_H
#define LLVM_TARGET_TARGETINSTRDESC_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -103,13 +103,14 @@ namespace TID {
Terminator,
Branch,
IndirectBranch,
- Predicable,
- NotDuplicable,
Compare,
+ MoveImm,
DelaySlot,
FoldableAsLoad,
MayLoad,
MayStore,
+ Predicable,
+ NotDuplicable,
UnmodeledSideEffects,
Commutable,
ConvertibleTo3Addr,
@@ -352,6 +353,12 @@ public:
return Flags & (1 << TID::Compare);
}
+ /// isMoveImmediate - Return true if this instruction is a move immediate
+ /// (including conditional moves) instruction.
+ bool isMoveImmediate() const {
+ return Flags & (1 << TID::MoveImm);
+ }
+
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has a unique labels attached
/// to it, duplicating it would cause multiple definition errors.
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 520c41b..fc7b51e 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -19,16 +19,17 @@
namespace llvm {
-class CalleeSavedInfo;
class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
+class MachineRegisterInfo;
class MDNode;
class MCInst;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
+class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -134,7 +135,7 @@ public:
int &FrameIndex) const {
return 0;
}
-
+
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
@@ -227,9 +228,12 @@ public:
/// produceSameValue - Return true if two machine instructions would produce
/// identical values. By default, this is only true when the two instructions
- /// are deemed identical except for defs.
+ /// are deemed identical except for defs. If this function is called when the
+ /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
+ /// aggressive checks.
virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const = 0;
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI = 0) const = 0;
/// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
@@ -267,7 +271,7 @@ public:
/// This is only invoked in cases where AnalyzeBranch returns success. It
/// returns the number of instructions that were removed.
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
- assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
+ assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
return 0;
}
@@ -285,7 +289,7 @@ public:
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const {
- assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
+ assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
return 0;
}
@@ -303,31 +307,45 @@ public:
return true;
}
- /// isProfitableToIfCvt - Return true if it's profitable to first "NumInstrs"
- /// of the specified basic block.
+ /// isProfitableToIfCvt - Return true if it's profitable to predicate
+ /// instructions with accumulated instruction latency of "NumCycles"
+ /// of the specified basic block, where the probability of the instructions
+ /// being executed is given by Probability, and Confidence is a measure
+ /// of our confidence that it will be properly predicted.
virtual
- bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
+ unsigned ExtraPredCycles,
+ float Probability, float Confidence) const {
return false;
}
-
+
/// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
/// checks for the case where two basic blocks from true and false path
/// of an if-then-else (diamond) are predicated on mutually exclusive
- /// predicates.
+ /// predicates, where the probability of the true path being taken is given
+ /// by Probability, and Confidence is a measure of our confidence that it
+ /// will be properly predicted.
virtual bool
- isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
- MachineBasicBlock &FMBB, unsigned NumFInstrs) const {
+ isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles, unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles, unsigned ExtraFCycles,
+ float Probability, float Confidence) const {
return false;
}
/// isProfitableToDupForIfCvt - Return true if it's profitable for
- /// if-converter to duplicate a specific number of instructions in the
- /// specified MBB to enable if-conversion.
+ /// if-converter to duplicate instructions of specified accumulated
+ /// instruction latencies in the specified MBB to enable if-conversion.
+ /// The probability of the instructions being executed is given by
+ /// Probability, and Confidence is a measure of our confidence that it
+ /// will be properly predicted.
virtual bool
- isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs) const {
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
+ float Probability, float Confidence) const {
return false;
}
-
+
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
@@ -360,29 +378,7 @@ public:
const TargetRegisterInfo *TRI) const {
assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
}
-
- /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
- /// saved registers and returns true if it isn't possible / profitable to do
- /// so by issuing a series of store instructions via
- /// storeRegToStackSlot(). Returns false otherwise.
- virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- return false;
- }
- /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
- /// saved registers and returns true if it isn't possible / profitable to do
- /// so by issuing a series of load instructions via loadRegToStackSlot().
- /// Returns false otherwise.
- virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- return false;
- }
-
/// emitFrameIndexDebugValue - Emit a target-dependent form of
/// DBG_VALUE encoding the address of a frame index. Addresses would
/// normally be lowered the same way as other addresses on the target,
@@ -493,7 +489,7 @@ public:
unsigned NumLoads) const {
return false;
}
-
+
/// ReverseBranchCondition - Reverses the branch condition of the specified
/// condition list, returning false on success and true if it cannot be
/// reversed.
@@ -501,19 +497,19 @@ public:
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return true;
}
-
+
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
- virtual void insertNoop(MachineBasicBlock &MBB,
+ virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
-
-
+
+
/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
virtual void getNoopForMachoTarget(MCInst &NopInst) const {
// Default to just using 'nop' string.
}
-
-
+
+
/// isPredicated - Returns true if the instruction is already predicated.
///
virtual bool isPredicated(const MachineInstr *MI) const {
@@ -571,26 +567,98 @@ public:
virtual unsigned getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const;
- /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer
- /// to use for this target when scheduling the machine instructions after
+ /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer to
+ /// use for this target when scheduling the machine instructions before
/// register allocation.
virtual ScheduleHazardRecognizer*
- CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
+ CreateTargetHazardRecognizer(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const = 0;
+
+ /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
+ /// recognizer to use for this target when scheduling the machine instructions
+ /// after register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG *DAG) const = 0;
/// AnalyzeCompare - For a comparison instruction, return the source register
/// in SrcReg and the value it compares against in CmpValue. Return true if
/// the comparison instruction can be analyzed.
virtual bool AnalyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, int &CmpValue) const {
+ unsigned &SrcReg, int &Mask, int &Value) const {
+ return false;
+ }
+
+ /// OptimizeCompareInstr - See if the comparison instruction can be converted
+ /// into something more efficient. E.g., on ARM most instructions can set the
+ /// flags register, obviating the need for a separate CMP.
+ virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr,
+ unsigned SrcReg, int Mask, int Value,
+ const MachineRegisterInfo *MRI) const {
return false;
}
- /// ConvertToSetZeroFlag - Convert the instruction to set the zero flag so
- /// that we can remove a "comparison with zero".
- virtual bool ConvertToSetZeroFlag(MachineInstr *Instr,
- MachineInstr *CmpInstr) const {
+ /// FoldImmediate - 'Reg' is known to be defined by a move immediate
+ /// instruction, try to fold the immediate into the use instruction.
+ virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const {
return false;
}
+
+ /// getNumMicroOps - Return the number of u-operations the given machine
+ /// instruction will be decoded to on the target cpu.
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const;
+
+ /// isZeroCost - Return true for pseudo instructions that don't consume any
+ /// machine resources in their current form. These are common cases that the
+ /// scheduler should consider free, rather than conservatively handling them
+ /// as instructions with no itinerary.
+ bool isZeroCost(unsigned Opcode) const {
+ return Opcode <= TargetOpcode::COPY;
+ }
+
+ /// getOperandLatency - Compute and return the use operand latency of a given
+ /// pair of def and use.
+ /// In most cases, the static scheduling itinerary was enough to determine the
+ /// operand latency. But it may not be possible for instructions with variable
+ /// number of defs / uses.
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const;
+
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const;
+
+ /// getInstrLatency - Compute the instruction latency of a given instruction.
+ /// If the instruction has higher cost when predicated, it's returned via
+ /// PredCost.
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost = 0) const;
+
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const;
+
+ /// hasHighOperandLatency - Compute operand latency between a def of 'Reg'
+ /// and a use in the current loop, return true if the target considered
+ /// it 'high'. This is used by optimization passes such as machine LICM to
+ /// determine whether it makes sense to hoist an instruction out even in
+ /// a high register pressure situation.
+ virtual
+ bool hasHighOperandLatency(const InstrItineraryData *ItinData,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ return false;
+ }
+
+ /// hasLowDefLatency - Compute operand latency of a def of 'Reg', return true
+ /// if the target considered it 'low'.
+ virtual
+ bool hasLowDefLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx) const;
};
/// TargetInstrInfoImpl - This is the default implementation of
@@ -620,13 +688,20 @@ public:
virtual MachineInstr *duplicate(MachineInstr *Orig,
MachineFunction &MF) const;
virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const;
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI) const;
virtual bool isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
+ bool usePreRAHazardRecognizer() const;
+
+ virtual ScheduleHazardRecognizer *
+ CreateTargetHazardRecognizer(const TargetMachine*, const ScheduleDAG*) const;
+
virtual ScheduleHazardRecognizer *
- CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG*) const;
};
} // End llvm namespace
diff --git a/include/llvm/Target/TargetInstrItineraries.h b/include/llvm/Target/TargetInstrItineraries.h
index 39648c2..a95b70f 100644
--- a/include/llvm/Target/TargetInstrItineraries.h
+++ b/include/llvm/Target/TargetInstrItineraries.h
@@ -95,6 +95,7 @@ struct InstrStage {
/// operands are read and written.
///
struct InstrItinerary {
+ unsigned NumMicroOps; ///< # of micro-ops, 0 means it's variable
unsigned FirstStage; ///< Index of first stage in itinerary
unsigned LastStage; ///< Index of last + 1 stage in itinerary
unsigned FirstOperandCycle; ///< Index of first operand rd/wr
@@ -110,38 +111,42 @@ class InstrItineraryData {
public:
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
- const InstrItinerary *Itineratries; ///< Array of itineraries selected
+ const unsigned *Forwardings; ///< Array of pipeline forwarding pathes
+ const InstrItinerary *Itineraries; ///< Array of itineraries selected
+ unsigned IssueWidth; ///< Max issue per cycle. 0=Unknown.
/// Ctors.
///
- InstrItineraryData() : Stages(0), OperandCycles(0), Itineratries(0) {}
+ InstrItineraryData() : Stages(0), OperandCycles(0), Forwardings(0),
+ Itineraries(0), IssueWidth(0) {}
+
InstrItineraryData(const InstrStage *S, const unsigned *OS,
- const InstrItinerary *I)
- : Stages(S), OperandCycles(OS), Itineratries(I) {}
-
+ const unsigned *F, const InstrItinerary *I)
+ : Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I) {}
+
/// isEmpty - Returns true if there are no itineraries.
///
- bool isEmpty() const { return Itineratries == 0; }
+ bool isEmpty() const { return Itineraries == 0; }
/// isEndMarker - Returns true if the index is for the end marker
/// itinerary.
///
bool isEndMarker(unsigned ItinClassIndx) const {
- return ((Itineratries[ItinClassIndx].FirstStage == ~0U) &&
- (Itineratries[ItinClassIndx].LastStage == ~0U));
+ return ((Itineraries[ItinClassIndx].FirstStage == ~0U) &&
+ (Itineraries[ItinClassIndx].LastStage == ~0U));
}
/// beginStage - Return the first stage of the itinerary.
- ///
+ ///
const InstrStage *beginStage(unsigned ItinClassIndx) const {
- unsigned StageIdx = Itineratries[ItinClassIndx].FirstStage;
+ unsigned StageIdx = Itineraries[ItinClassIndx].FirstStage;
return Stages + StageIdx;
}
/// endStage - Return the last+1 stage of the itinerary.
- ///
+ ///
const InstrStage *endStage(unsigned ItinClassIndx) const {
- unsigned StageIdx = Itineratries[ItinClassIndx].LastStage;
+ unsigned StageIdx = Itineraries[ItinClassIndx].LastStage;
return Stages + StageIdx;
}
@@ -173,13 +178,68 @@ public:
if (isEmpty())
return -1;
- unsigned FirstIdx = Itineratries[ItinClassIndx].FirstOperandCycle;
- unsigned LastIdx = Itineratries[ItinClassIndx].LastOperandCycle;
+ unsigned FirstIdx = Itineraries[ItinClassIndx].FirstOperandCycle;
+ unsigned LastIdx = Itineraries[ItinClassIndx].LastOperandCycle;
if ((FirstIdx + OperandIdx) >= LastIdx)
return -1;
return (int)OperandCycles[FirstIdx + OperandIdx];
}
+
+ /// hasPipelineForwarding - Return true if there is a pipeline forwarding
+ /// between instructions of itinerary classes DefClass and UseClasses so that
+ /// value produced by an instruction of itinerary class DefClass, operand
+ /// index DefIdx can be bypassed when it's read by an instruction of
+ /// itinerary class UseClass, operand index UseIdx.
+ bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx,
+ unsigned UseClass, unsigned UseIdx) const {
+ unsigned FirstDefIdx = Itineraries[DefClass].FirstOperandCycle;
+ unsigned LastDefIdx = Itineraries[DefClass].LastOperandCycle;
+ if ((FirstDefIdx + DefIdx) >= LastDefIdx)
+ return false;
+ if (Forwardings[FirstDefIdx + DefIdx] == 0)
+ return false;
+
+ unsigned FirstUseIdx = Itineraries[UseClass].FirstOperandCycle;
+ unsigned LastUseIdx = Itineraries[UseClass].LastOperandCycle;
+ if ((FirstUseIdx + UseIdx) >= LastUseIdx)
+ return false;
+
+ return Forwardings[FirstDefIdx + DefIdx] ==
+ Forwardings[FirstUseIdx + UseIdx];
+ }
+
+ /// getOperandLatency - Compute and return the use operand latency of a given
+ /// itinerary class and operand index if the value is produced by an
+ /// instruction of the specified itinerary class and def operand index.
+ int getOperandLatency(unsigned DefClass, unsigned DefIdx,
+ unsigned UseClass, unsigned UseIdx) const {
+ if (isEmpty())
+ return -1;
+
+ int DefCycle = getOperandCycle(DefClass, DefIdx);
+ if (DefCycle == -1)
+ return -1;
+
+ int UseCycle = getOperandCycle(UseClass, UseIdx);
+ if (UseCycle == -1)
+ return -1;
+
+ UseCycle = DefCycle - UseCycle + 1;
+ if (UseCycle > 0 &&
+ hasPipelineForwarding(DefClass, DefIdx, UseClass, UseIdx))
+ // FIXME: This assumes one cycle benefit for every pipeline forwarding.
+ --UseCycle;
+ return UseCycle;
+ }
+
+ /// isMicroCoded - Return true if the instructions in the given class decode
+ /// to more than one micro-ops.
+ bool isMicroCoded(unsigned ItinClassIndx) const {
+ if (isEmpty())
+ return false;
+ return Itineraries[ItinClassIndx].NumMicroOps != 1;
+ }
};
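As a rough sketch of how a scheduler might combine the queries above (the itinerary object and the class/operand indices are assumed to come from the instructions being scheduled, and the fallback latency of 1 is illustrative):

#include "llvm/Target/TargetInstrItineraries.h"

// Sketch: def->use latency between two itinerary classes. getOperandLatency
// already applies the one-cycle pipeline-forwarding bonus when both operands
// share a bypass.
static int computeUseLatency(const llvm::InstrItineraryData &Itins,
                             unsigned DefClass, unsigned DefIdx,
                             unsigned UseClass, unsigned UseIdx) {
  if (Itins.isEmpty())
    return 1;                                // no itinerary data: assume 1 cycle
  int Latency = Itins.getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
  return Latency < 0 ? 1 : Latency;          // -1 means no operand cycle info
}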
diff --git a/include/llvm/Target/TargetJITInfo.h b/include/llvm/Target/TargetJITInfo.h
index 7208a8d..b198eb6 100644
--- a/include/llvm/Target/TargetJITInfo.h
+++ b/include/llvm/Target/TargetJITInfo.h
@@ -19,7 +19,7 @@
#include <cassert>
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class Function;
diff --git a/include/llvm/Target/TargetLibraryInfo.h b/include/llvm/Target/TargetLibraryInfo.h
new file mode 100644
index 0000000..bdd214b
--- /dev/null
+++ b/include/llvm/Target/TargetLibraryInfo.h
@@ -0,0 +1,66 @@
+//===-- llvm/Target/TargetLibraryInfo.h - Library information ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETLIBRARYINFO_H
+#define LLVM_TARGET_TARGETLIBRARYINFO_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+ class Triple;
+
+ namespace LibFunc {
+ enum Func {
+ /// void *memset(void *b, int c, size_t len);
+ memset,
+
+      /// void *memcpy(void *s1, const void *s2, size_t n);
+ memcpy,
+
+ /// void memset_pattern16(void *b, const void *pattern16, size_t len);
+ memset_pattern16,
+
+ NumLibFuncs
+ };
+ }
+
+/// TargetLibraryInfo - This immutable pass captures information about what
+/// library functions are available for the current target, and allows a
+/// frontend to disable optimizations through -fno-builtin etc.
+class TargetLibraryInfo : public ImmutablePass {
+ unsigned char AvailableArray[(LibFunc::NumLibFuncs+7)/8];
+public:
+ static char ID;
+ TargetLibraryInfo();
+ TargetLibraryInfo(const Triple &T);
+
+ /// has - This function is used by optimizations that want to match on or form
+ /// a given library function.
+ bool has(LibFunc::Func F) const {
+ return (AvailableArray[F/8] & (1 << (F&7))) != 0;
+ }
+
+  /// setUnavailable - This can be used by whatever sets up TargetLibraryInfo to
+ /// ban use of specific library functions.
+ void setUnavailable(LibFunc::Func F) {
+ AvailableArray[F/8] &= ~(1 << (F&7));
+ }
+
+  /// setAvailable - Mark a library function as available for the target.
+  void setAvailable(LibFunc::Func F) {
+ AvailableArray[F/8] |= 1 << (F&7);
+ }
+
+ /// disableAllFunctions - This disables all builtins, which is used for
+ /// options like -fno-builtin.
+ void disableAllFunctions();
+};
+
+} // end namespace llvm
+
+#endif
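A minimal usage sketch, assuming the pass has been registered with the pass manager; the helper name below is hypothetical:

#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;

// Sketch: only form a call to memset_pattern16 when the target actually
// provides it (e.g. it has not been banned via disableAllFunctions for
// -fno-builtin).
static bool canFormMemsetPattern16(const TargetLibraryInfo *TLI) {
  return TLI && TLI->has(LibFunc::memset_pattern16);
}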
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 29de994..5141b7b 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -25,13 +25,9 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
@@ -41,10 +37,12 @@
namespace llvm {
class AllocaInst;
+ class APFloat;
class CallInst;
class Function;
class FastISel;
class FunctionLoweringInfo;
+ class ImmutableCallSite;
class MachineBasicBlock;
class MachineFunction;
class MachineFrameInfo;
@@ -55,6 +53,7 @@ namespace llvm {
class SDNode;
class SDValue;
class SelectionDAG;
+ template<typename T> class SmallVectorImpl;
class TargetData;
class TargetMachine;
class TargetRegisterClass;
@@ -126,6 +125,10 @@ public:
/// srl/add/sra.
bool isPow2DivCheap() const { return Pow2DivIsCheap; }
+  /// isJumpExpensive() - Return true if flow control is an expensive operation
+ /// that should be avoided.
+ bool isJumpExpensive() const { return JumpIsExpensive; }
+
/// getSetCCResultType - Return the ValueType of the result of SETCC
/// operations. Also used to obtain the target's preferred type for
/// the condition operand of SELECT and BRCOND nodes. In the case of
@@ -203,13 +206,6 @@ public:
return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
}
- /// isTypeSynthesizable - Return true if it's OK for the compiler to create
- /// new operations of this type. All Legal types are synthesizable except
- /// MMX vector types on X86. Non-Legal types are not synthesizable.
- bool isTypeSynthesizable(EVT VT) const {
- return isTypeLegal(VT) && Synthesizable[VT.getSimpleVT().SimpleTy];
- }
-
class ValueTypeActionImpl {
/// ValueTypeActions - For each value type, keep a LegalizeAction enum
/// that indicates how instruction selection should deal with the type.
@@ -441,7 +437,7 @@ public:
/// for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
- (unsigned)VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
}
@@ -459,8 +455,8 @@ public:
/// to be expanded to some other code sequence, or the target has a custom
/// expander for it.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
- assert((unsigned)ValVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
- (unsigned)MemVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
+ MemVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
[MemVT.getSimpleVT().SimpleTy];
@@ -480,8 +476,8 @@ public:
/// for it.
LegalizeAction
getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
- assert( IdxMode < ISD::LAST_INDEXED_MODE &&
- ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
+ assert(IdxMode < ISD::LAST_INDEXED_MODE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
@@ -501,8 +497,8 @@ public:
/// for it.
LegalizeAction
getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
- assert( IdxMode < ISD::LAST_INDEXED_MODE &&
- ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
+ assert(IdxMode < ISD::LAST_INDEXED_MODE &&
+ VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
@@ -646,21 +642,30 @@ public:
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memset. The value is set by the target at the
- /// performance threshold for such a replacement.
+ /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have the OptSize attribute.
/// @brief Get maximum # of store operations permitted for llvm.memset
- unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; }
+ unsigned getMaxStoresPerMemset(bool OptSize) const {
+ return OptSize ? maxStoresPerMemsetOptSize : maxStoresPerMemset;
+ }
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memcpy. The value is set by the target at the
- /// performance threshold for such a replacement.
+ /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have the OptSize attribute.
/// @brief Get maximum # of store operations permitted for llvm.memcpy
- unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; }
+ unsigned getMaxStoresPerMemcpy(bool OptSize) const {
+ return OptSize ? maxStoresPerMemcpyOptSize : maxStoresPerMemcpy;
+ }
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memmove. The value is set by the target at the
- /// performance threshold for such a replacement.
+ /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have the OptSize attribute.
/// @brief Get maximum # of store operations permitted for llvm.memmove
- unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }
+ unsigned getMaxStoresPerMemmove(bool OptSize) const {
+ return OptSize ? maxStoresPerMemmoveOptSize : maxStoresPerMemmove;
+ }
/// This function returns true if the target allows unaligned memory accesses.
/// of the specified type. This is used, for example, in situations where an
@@ -958,6 +963,13 @@ public:
return isTypeLegal(VT);
}
+  /// isDesirableToTransformToIntegerOp - Return true if it is profitable for
+  /// the dag combiner to transform a floating point op of the specified opcode
+  /// to an equivalent op of an integer type. e.g. f32 load -> i32 load can be
+  /// profitable on ARM.
+ virtual bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const {
+ return false;
+ }
+
/// IsDesirableToPromoteOp - This method query the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
@@ -1021,7 +1033,16 @@ protected:
/// SelectIsExpensive - Tells the code generator not to expand operations
/// into sequences that use the select operations if possible.
- void setSelectIsExpensive() { SelectIsExpensive = true; }
+ void setSelectIsExpensive(bool isExpensive = true) {
+ SelectIsExpensive = isExpensive;
+ }
+
+  /// JumpIsExpensive - Tells the code generator not to expand a sequence of
+  /// operations into separate sequences that increase the amount of
+  /// flow control.
+ void setJumpIsExpensive(bool isExpensive = true) {
+ JumpIsExpensive = isExpensive;
+ }
/// setIntDivIsCheap - Tells the code generator that integer divide is
/// expensive, and if possible, should be replaced by an alternate sequence
@@ -1036,12 +1057,10 @@ protected:
/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
- void addRegisterClass(EVT VT, TargetRegisterClass *RC,
- bool isSynthesizable = true) {
+ void addRegisterClass(EVT VT, TargetRegisterClass *RC) {
assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
- Synthesizable[VT.getSimpleVT().SimpleTy] = isSynthesizable;
}
/// findRepresentativeClass - Return the largest legal super-reg register class
@@ -1065,8 +1084,7 @@ protected:
/// not work with the specified type and indicate what to do about it.
void setLoadExtAction(unsigned ExtType, MVT VT,
LegalizeAction Action) {
- assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
- (unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
}
@@ -1075,8 +1093,7 @@ protected:
/// not work with the specified type and indicate what to do about it.
void setTruncStoreAction(MVT ValVT, MVT MemVT,
LegalizeAction Action) {
- assert((unsigned)ValVT.SimpleTy < MVT::LAST_VALUETYPE &&
- (unsigned)MemVT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
}
@@ -1087,10 +1104,8 @@ protected:
/// TargetLowering.cpp
void setIndexedLoadAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < ISD::LAST_INDEXED_MODE &&
- (unsigned)Action < 0xf &&
- "Table isn't big enough!");
+ assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
// Load action are kept in the upper half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
@@ -1102,10 +1117,8 @@ protected:
/// TargetLowering.cpp
void setIndexedStoreAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < ISD::LAST_INDEXED_MODE &&
- (unsigned)Action < 0xf &&
- "Table isn't big enough!");
+ assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
// Store action are kept in the lower half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
@@ -1115,7 +1128,7 @@ protected:
/// supported on the target and indicate what to do about it.
void setCondCodeAction(ISD::CondCode CC, MVT VT,
LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
+ assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
@@ -1261,6 +1274,13 @@ public:
return SDValue(); // this is here to silence compiler errors
}
+ /// isUsedByReturnOnly - Return true if result of the specified node is used
+ /// by a return node only. This is used to determine whether it is possible
+ /// to codegen a libcall as tail call at legalization time.
+ virtual bool isUsedByReturnOnly(SDNode *N) const {
+ return false;
+ }
+
/// LowerOperationWrapper - This callback is invoked by the type legalizer
/// to legalize nodes with an illegal operand type but legal result types.
/// It replaces the LowerOperation callback in the type Legalizer.
@@ -1328,6 +1348,22 @@ public:
C_Unknown // Unsupported constraint.
};
+ enum ConstraintWeight {
+ // Generic weights.
+ CW_Invalid = -1, // No match.
+ CW_Okay = 0, // Acceptable.
+ CW_Good = 1, // Good weight.
+ CW_Better = 2, // Better weight.
+ CW_Best = 3, // Best weight.
+
+ // Well-known weights.
+ CW_SpecificReg = CW_Okay, // Specific register operands.
+ CW_Register = CW_Good, // Register operands.
+ CW_Memory = CW_Better, // Memory operands.
+ CW_Constant = CW_Best, // Constant operand.
+ CW_Default = CW_Okay // Default or don't know type.
+ };
+
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1356,12 +1392,41 @@ public:
/// returns the output operand it matches.
unsigned getMatchedOperand() const;
+ /// Copy constructor for copying from an AsmOperandInfo.
+ AsmOperandInfo(const AsmOperandInfo &info)
+ : InlineAsm::ConstraintInfo(info),
+ ConstraintCode(info.ConstraintCode),
+ ConstraintType(info.ConstraintType),
+ CallOperandVal(info.CallOperandVal),
+ ConstraintVT(info.ConstraintVT) {
+ }
+
+ /// Copy constructor for copying from a ConstraintInfo.
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
: InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
CallOperandVal(0), ConstraintVT(MVT::Other) {
}
};
+
+ typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
+
+ /// ParseConstraints - Split up the constraint string from the inline
+ /// assembly value into the specific constraints and their prefixes,
+ /// and also tie in the associated operand values.
+ /// If this returns an empty vector, and if the constraint string itself
+ /// isn't empty, there was an error parsing.
+ virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
+
+ /// Examine constraint type and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ virtual ConstraintWeight getMultipleConstraintMatchWeight(
+ AsmOperandInfo &info, int maIndex) const;
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ virtual ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
@@ -1568,6 +1633,11 @@ private:
/// it.
bool Pow2DivIsCheap;
+ /// JumpIsExpensive - Tells the code generator that it shouldn't generate
+ /// extra flow control instructions and should attempt to combine flow
+ /// control instructions via predication.
+ bool JumpIsExpensive;
+
/// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
/// llvm.setjmp. Defaults to false.
bool UseUnderscoreSetJmp;
@@ -1643,11 +1713,6 @@ private:
/// approximate register pressure.
uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
- /// Synthesizable indicates whether it is OK for the compiler to create new
- /// operations using this type. All Legal types are Synthesizable except
- /// MMX types on X86. Non-Legal types are not Synthesizable.
- bool Synthesizable[MVT::LAST_VALUETYPE];
-
/// TransformToType - For any value types we are promoting or expanding, this
/// contains the value type that we are changing to. For Expanded types, this
/// contains one step of the expand (e.g. i64 -> i32), even if there are
@@ -1727,6 +1792,10 @@ protected:
/// @brief Specify maximum number of store instructions per memset call.
unsigned maxStoresPerMemset;
+  /// Maximum number of store operations that may be substituted for a call
+  /// to memset, used for functions with the OptSize attribute.
+ unsigned maxStoresPerMemsetOptSize;
+
/// When lowering \@llvm.memcpy this field specifies the maximum number of
/// store operations that may be substituted for a call to memcpy. Targets
/// must set this value based on the cost threshold for that target. Targets
@@ -1739,6 +1808,10 @@ protected:
/// @brief Specify maximum bytes of store instructions per memcpy call.
unsigned maxStoresPerMemcpy;
+ /// Maximum number of store operations that may be substituted for a call
+  /// to memcpy, used for functions with the OptSize attribute.
+ unsigned maxStoresPerMemcpyOptSize;
+
/// When lowering \@llvm.memmove this field specifies the maximum number of
/// store instructions that may be substituted for a call to memmove. Targets
/// must set this value based on the cost threshold for that target. Targets
@@ -1750,6 +1823,10 @@ protected:
/// @brief Specify maximum bytes of store instructions per memmove call.
unsigned maxStoresPerMemmove;
+ /// Maximum number of store instructions that may be substituted for a call
+  /// to memmove, used for functions with the OptSize attribute.
+ unsigned maxStoresPerMemmoveOptSize;
+
/// This field specifies whether the target can benefit from code placement
/// optimization.
bool benefitFromCodePlacementOpt;
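A hedged sketch of how a target's TargetLowering constructor might populate the new OptSize-aware limits (the target name and the concrete numbers are illustrative only, not taken from any in-tree target):

// Sketch: inside a hypothetical MyTargetLowering constructor.
// The *OptSize fields take effect when the function has the OptSize attribute.
maxStoresPerMemset         = 16;
maxStoresPerMemsetOptSize  = 8;
maxStoresPerMemcpy         = 8;
maxStoresPerMemcpyOptSize  = 4;
maxStoresPerMemmove        = 8;
maxStoresPerMemmoveOptSize = 4;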
diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h
index 819709f..34bf271 100644
--- a/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/include/llvm/Target/TargetLoweringObjectFile.h
@@ -69,10 +69,6 @@ protected:
/// the section the Language Specific Data Area information is emitted to.
const MCSection *LSDASection;
- /// EHFrameSection - If exception handling is supported by the target, this is
- /// the section the EH Frame is emitted to.
- const MCSection *EHFrameSection;
-
// Dwarf sections for debug info. If a target supports debug info, these must
// be set.
const MCSection *DwarfAbbrevSection;
@@ -92,6 +88,11 @@ protected:
// information for a TLS variable, it'll go here.
const MCSection *TLSExtraDataSection;
+ /// CommDirectiveSupportsAlignment - True if .comm supports alignment. This
+ /// is a hack for as long as we support 10.4 Tiger, whose assembler doesn't
+ /// support alignment on comm.
+ bool CommDirectiveSupportsAlignment;
+
/// SupportsWeakEmptyEHFrame - True if target object file supports a
/// weak_definition of constant 0 for an omitted EH frame.
bool SupportsWeakOmittedEHFrame;
@@ -128,13 +129,17 @@ public:
return SupportsWeakOmittedEHFrame;
}
+ bool getCommDirectiveSupportsAlignment() const {
+ return CommDirectiveSupportsAlignment;
+ }
+
const MCSection *getTextSection() const { return TextSection; }
const MCSection *getDataSection() const { return DataSection; }
const MCSection *getBSSSection() const { return BSSSection; }
const MCSection *getStaticCtorSection() const { return StaticCtorSection; }
const MCSection *getStaticDtorSection() const { return StaticDtorSection; }
const MCSection *getLSDASection() const { return LSDASection; }
- const MCSection *getEHFrameSection() const { return EHFrameSection; }
+ virtual const MCSection *getEHFrameSection() const = 0;
const MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; }
const MCSection *getDwarfInfoSection() const { return DwarfInfoSection; }
const MCSection *getDwarfLineSection() const { return DwarfLineSection; }
diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h
index 42e99e0..030bf5b 100644
--- a/include/llvm/Target/TargetMachine.h
+++ b/include/llvm/Target/TargetMachine.h
@@ -29,7 +29,7 @@ class TargetIntrinsicInfo;
class TargetJITInfo;
class TargetLowering;
class TargetSelectionDAGInfo;
-class TargetFrameInfo;
+class TargetFrameLowering;
class JITCodeEmitter;
class MCContext;
class TargetRegisterInfo;
@@ -98,12 +98,14 @@ protected: // Can only create subclasses.
/// TheTarget - The Target that this machine was created for.
const Target &TheTarget;
-
+
/// AsmInfo - Contains target specific asm information.
///
const MCAsmInfo *AsmInfo;
unsigned MCRelaxAll : 1;
+ unsigned MCNoExecStack : 1;
+ unsigned MCUseLoc : 1;
public:
virtual ~TargetMachine();
@@ -116,16 +118,16 @@ public:
// -- Stack frame information
// -- Selection DAG lowering information
//
- virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
- virtual const TargetFrameInfo *getFrameInfo() const { return 0; }
+ virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
+ virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
- virtual const TargetData *getTargetData() const { return 0; }
-
+ virtual const TargetData *getTargetData() const { return 0; }
+
/// getMCAsmInfo - Return target specific asm information.
///
const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
-
+
/// getSubtarget - This method returns a pointer to the specified type of
/// TargetSubtarget. In debug builds, it verifies that the object being
/// returned is of the correct type.
@@ -138,7 +140,7 @@ public:
/// details of graph coloring register allocation removed from it.
///
virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; }
-
+
/// getIntrinsicInfo - If intrinsic information is available, return it. If
/// not, return null.
///
@@ -148,17 +150,17 @@ public:
/// otherwise return null.
///
virtual TargetJITInfo *getJITInfo() { return 0; }
-
+
/// getInstrItineraryData - Returns instruction itinerary data for the target
/// or specific subtarget.
///
- virtual const InstrItineraryData getInstrItineraryData() const {
- return InstrItineraryData();
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return 0;
}
/// getELFWriterInfo - If this target supports an ELF writer, return
/// information for it, otherwise return null.
- ///
+ ///
virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; }
/// hasMCRelaxAll - Check whether all machine code instructions should be
@@ -169,6 +171,18 @@ public:
/// relaxed.
void setMCRelaxAll(bool Value) { MCRelaxAll = Value; }
+ /// hasMCNoExecStack - Check whether an executable stack is not needed.
+ bool hasMCNoExecStack() const { return MCNoExecStack; }
+
+  /// setMCNoExecStack - Set whether an executable stack is not needed.
+ void setMCNoExecStack(bool Value) { MCNoExecStack = Value; }
+
+ /// hasMCUseLoc - Check whether we should use dwarf's .loc directive.
+ bool hasMCUseLoc() const { return MCUseLoc; }
+
+  /// setMCUseLoc - Set whether we should use dwarf's .loc directive.
+ void setMCUseLoc(bool Value) { MCUseLoc = Value; }
+
/// getRelocationModel - Returns the code generation relocation model. The
/// choices are static, PIC, and dynamic-no-pic, and target default.
static Reloc::Model getRelocationModel();
@@ -267,7 +281,7 @@ class LLVMTargetMachine : public TargetMachine {
protected: // Can only create subclasses.
LLVMTargetMachine(const Target &T, const std::string &TargetTriple);
-
+
private:
/// addCommonCodeGenPasses - Add standard LLVM codegen passes used for
/// both emitting to assembly files or machine code output.
@@ -277,9 +291,11 @@ private:
virtual void setCodeModelForJIT();
virtual void setCodeModelForStatic();
-
+
public:
-
+
+ const std::string &getTargetTriple() const { return TargetTriple; }
+
/// addPassesToEmitFile - Add passes to the specified pass manager to get the
/// specified file emitted. Typically this will involve several steps of code
/// generation. If OptLevel is None, the code generator should emit code as
@@ -289,7 +305,7 @@ public:
CodeGenFileType FileType,
CodeGenOpt::Level,
bool DisableVerify = true);
-
+
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
/// get machine code emitted. This uses a JITCodeEmitter object to handle
/// actually outputting the machine code and resolving things like the address
@@ -310,7 +326,7 @@ public:
MCContext *&Ctx,
CodeGenOpt::Level OptLevel,
bool DisableVerify = true);
-
+
/// Target-Independent Code Generator Pass Configuration Options.
/// addPreISelPasses - This method should add any "last minute" LLVM->LLVM
@@ -347,15 +363,15 @@ public:
virtual bool addPreSched2(PassManagerBase &, CodeGenOpt::Level) {
return false;
}
-
+
/// addPreEmitPass - This pass may be implemented by targets that want to run
/// passes immediately before machine code is emitted. This should return
/// true if -print-machineinstrs should print out the code after the passes.
virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level) {
return false;
}
-
-
+
+
/// addCodeEmitter - This pass should be overridden by the target to add a
/// code emitter, if supported. If this is not supported, 'true' should be
/// returned.
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index 81dec3e..121091c 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -29,21 +29,21 @@ class MachineFunction;
class MachineMove;
class RegScavenger;
template<class T> class SmallVectorImpl;
+class raw_ostream;
/// TargetRegisterDesc - This record contains all of the information known about
-/// a particular register. The AliasSet field (if not null) contains a pointer
-/// to a Zero terminated array of registers that this register aliases. This is
-/// needed for architectures like X86 which have AL alias AX alias EAX.
-/// Registers that this does not apply to simply should set this to null.
-/// The SubRegs field is a zero terminated array of registers that are
-/// sub-registers of the specific register, e.g. AL, AH are sub-registers of AX.
-/// The SuperRegs field is a zero terminated array of registers that are
+/// a particular register. The Overlaps field contains a pointer to a zero
+/// terminated array of registers that this register aliases, starting with
+/// itself. This is needed for architectures like X86 which have AL alias AX
+/// alias EAX. The SubRegs field is a zero terminated array of registers that
+/// are sub-registers of the specific register, e.g. AL, AH are sub-registers of
+/// AX. The SuperRegs field is a zero terminated array of registers that are
/// super-registers of the specific register, e.g. RAX, EAX, are super-registers
/// of AX.
///
struct TargetRegisterDesc {
const char *Name; // Printable name for the reg (for debugging)
- const unsigned *AliasSet; // Register Alias Set, described above
+ const unsigned *Overlaps; // Overlapping registers, described above
const unsigned *SubRegs; // Sub-register set, described above
const unsigned *SuperRegs; // Super-register set, described above
};
@@ -123,7 +123,7 @@ public:
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(EVT vt) const {
- for(int i = 0; VTs[i].getSimpleVT().SimpleTy != MVT::Other; ++i)
+ for(int i = 0; VTs[i] != MVT::Other; ++i)
if (VTs[i] == vt)
return true;
return false;
@@ -137,7 +137,7 @@ public:
vt_iterator vt_end() const {
vt_iterator I = VTs;
- while (I->getSimpleVT().SimpleTy != MVT::Other) ++I;
+ while (*I != MVT::Other) ++I;
return I;
}
@@ -227,9 +227,12 @@ public:
/// cheaper to allocate caller saved registers.
///
/// These methods take a MachineFunction argument, which can be used to tune
- /// the allocatable registers based on the characteristics of the function.
- /// One simple example is that the frame pointer register can be used if
- /// frame-pointer-elimination is performed.
+ /// the allocatable registers based on the characteristics of the function,
+ /// subtarget, or other criteria.
+ ///
+ /// Register allocators should account for the fact that an allocation
+ /// order iterator may return a reserved register and always check
+ /// if the register is allocatable (getAllocatableSet()) before using it.
///
/// By default, these methods return all registers in the class.
///
@@ -292,30 +295,68 @@ protected:
virtual ~TargetRegisterInfo();
public:
- enum { // Define some target independent constants
- /// NoRegister - This physical register is not a real target register. It
- /// is useful as a sentinal.
- NoRegister = 0,
+ // Register numbers can represent physical registers, virtual registers, and
+ // sometimes stack slots. The unsigned values are divided into these ranges:
+ //
+ // 0 Not a register, can be used as a sentinel.
+ // [1;2^30) Physical registers assigned by TableGen.
+ // [2^30;2^31) Stack slots. (Rarely used.)
+ // [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
+ //
+ // Further sentinels can be allocated from the small negative integers.
+ // DenseMapInfo<unsigned> uses -1u and -2u.
- /// FirstVirtualRegister - This is the first register number that is
- /// considered to be a 'virtual' register, which is part of the SSA
- /// namespace. This must be the same for all targets, which means that each
- /// target is limited to this fixed number of registers.
- FirstVirtualRegister = 16384
- };
+  /// isStackSlot - Sometimes it is useful to be able to store a non-negative
+ /// frame index in a variable that normally holds a register. isStackSlot()
+ /// returns true if Reg is in the range used for stack slots.
+ ///
+ /// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
+  /// slots, so if a variable may contain a stack slot, always check
+ /// isStackSlot() first.
+ ///
+ static bool isStackSlot(unsigned Reg) {
+ return int(Reg) >= (1 << 30);
+ }
+
+ /// stackSlot2Index - Compute the frame index from a register value
+ /// representing a stack slot.
+ static int stackSlot2Index(unsigned Reg) {
+ assert(isStackSlot(Reg) && "Not a stack slot");
+ return int(Reg - (1u << 30));
+ }
+
+ /// index2StackSlot - Convert a non-negative frame index to a stack slot
+ /// register value.
+ static unsigned index2StackSlot(int FI) {
+ assert(FI >= 0 && "Cannot hold a negative frame index.");
+ return FI + (1u << 30);
+ }
/// isPhysicalRegister - Return true if the specified register number is in
/// the physical register namespace.
static bool isPhysicalRegister(unsigned Reg) {
- assert(Reg && "this is not a register!");
- return Reg < FirstVirtualRegister;
+ assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+ return int(Reg) > 0;
}
/// isVirtualRegister - Return true if the specified register number is in
/// the virtual register namespace.
static bool isVirtualRegister(unsigned Reg) {
- assert(Reg && "this is not a register!");
- return Reg >= FirstVirtualRegister;
+ assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+ return int(Reg) < 0;
+ }
+
+ /// virtReg2Index - Convert a virtual register number to a 0-based index.
+ /// The first virtual register in a function will get the index 0.
+ static unsigned virtReg2Index(unsigned Reg) {
+ assert(isVirtualRegister(Reg) && "Not a virtual register");
+ return Reg - (1u << 31);
+ }
+
+ /// index2VirtReg - Convert a 0-based index to a virtual register number.
+ /// This is the inverse operation of VirtReg2IndexFunctor below.
+ static unsigned index2VirtReg(unsigned Index) {
+ return Index + (1u << 31);
}
/// getMinimalPhysRegClass - Returns the Register Class of a physical
@@ -348,7 +389,17 @@ public:
/// terminated.
///
const unsigned *getAliasSet(unsigned RegNo) const {
- return get(RegNo).AliasSet;
+ // The Overlaps set always begins with Reg itself.
+ return get(RegNo).Overlaps + 1;
+ }
+
+ /// getOverlaps - Return a list of registers that overlap Reg, including
+ /// itself. This is the same as the alias set except Reg is included in the
+ /// list.
+ /// These are exactly the registers in { x | regsOverlap(x, Reg) }.
+ ///
+ const unsigned *getOverlaps(unsigned RegNo) const {
+ return get(RegNo).Overlaps;
}
/// getSubRegisters - Return the list of registers that are sub-registers of
@@ -574,13 +625,6 @@ public:
// Do nothing.
}
- /// targetHandlesStackFrameRounding - Returns true if the target is
- /// responsible for rounding up the stack frame (probably at emitPrologue
- /// time).
- virtual bool targetHandlesStackFrameRounding() const {
- return false;
- }
-
/// requiresRegisterScavenging - returns true if the target requires (and can
/// make use of) the register scavenger.
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
@@ -600,31 +644,6 @@ public:
return false;
}
- /// hasFP - Return true if the specified function should have a dedicated
- /// frame pointer register. For most targets this is true only if the function
- /// has variable sized allocas or if frame pointer elimination is disabled.
- virtual bool hasFP(const MachineFunction &MF) const = 0;
-
- /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
- /// not required, we reserve argument space for call sites in the function
- /// immediately on entry to the current function. This eliminates the need for
- /// add/sub sp brackets around call sites. Returns true if the call frame is
- /// included as part of the stack frame.
- virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
- return !hasFP(MF);
- }
-
- /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
- /// call frame pseudo ops before doing frame index elimination. This is
- /// possible only when frame index references between the pseudos won't
- /// need adjusting for the call frame adjustments. Normally, that's true
- /// if the function has a reserved call frame or a frame pointer. Some
- /// targets (Thumb2, for example) may have more complicated criteria,
- /// however, and can override this behavior.
- virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
- return hasReservedCallFrame(MF) || hasFP(MF);
- }
-
/// hasReservedSpillSlot - Return true if target has reserved a spill slot in
/// the stack frame of the given function for the specified register. e.g. On
/// x86, if the frame register is required, the first fixed stack object is
@@ -644,7 +663,7 @@ public:
}
/// getFrameIndexInstrOffset - Get the offset from the referenced frame
- /// index in the instruction, if the is one.
+ /// index in the instruction, if there is one.
virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
int Idx) const {
return 0;
@@ -660,7 +679,7 @@ public:
/// materializeFrameBaseRegister - Insert defining instruction(s) for
/// BaseReg to be a pointer to FrameIdx before insertion point I.
- virtual void materializeFrameBaseRegister(MachineBasicBlock::iterator I,
+ virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
unsigned BaseReg, int FrameIdx,
int64_t Offset) const {
assert(0 && "materializeFrameBaseRegister does not exist on this target");
@@ -707,21 +726,6 @@ public:
assert(0 && "Call Frame Pseudo Instructions do not exist on this target!");
}
- /// processFunctionBeforeCalleeSavedScan - This method is called immediately
- /// before PrologEpilogInserter scans the physical registers used to determine
- /// what callee saved registers should be spilled. This method is optional.
- virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const {
-
- }
-
- /// processFunctionBeforeFrameFinalized - This method is called immediately
- /// before the specified function's frame layout (MF.getFrameInfo()) is
- /// finalized. Once the frame is finalized, MO_FrameIndex operands are
- /// replaced with direct constants. This method is optional.
- ///
- virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const {
- }
/// saveScavengerRegister - Spill the register so it can be used by the
/// register scavenger. Return true if the register was spilled, false
@@ -746,12 +750,6 @@ public:
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, RegScavenger *RS=NULL) const = 0;
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- virtual void emitPrologue(MachineFunction &MF) const = 0;
- virtual void emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const = 0;
-
//===--------------------------------------------------------------------===//
/// Debug information queries.
@@ -765,37 +763,16 @@ public:
/// for values allocated in the current stack frame.
virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
- /// getFrameIndexOffset - Returns the displacement from the frame register to
- /// the stack frame of the specified index.
- virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
-
- /// getFrameIndexReference - This method should return the base register
- /// and offset used to reference a frame index location. The offset is
- /// returned directly, and the base register is returned via FrameReg.
- virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const {
- // By default, assume all frame indices are referenced via whatever
- // getFrameRegister() says. The target can override this if it's doing
- // something different.
- FrameReg = getFrameRegister(MF);
- return getFrameIndexOffset(MF, FI);
- }
-
/// getRARegister - This method should return the register where the return
/// address can be found.
virtual unsigned getRARegister() const = 0;
-
- /// getInitialFrameState - Returns a list of machine moves that are assumed
- /// on entry to all functions. Note that LabelID is ignored (assumed to be
- /// the beginning of the function.)
- virtual void getInitialFrameState(std::vector<MachineMove> &Moves) const;
};
// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
unsigned operator()(unsigned Reg) const {
- return Reg - TargetRegisterInfo::FirstVirtualRegister;
+ return TargetRegisterInfo::virtReg2Index(Reg);
}
};
@@ -804,6 +781,33 @@ struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
const TargetRegisterClass *getCommonSubClass(const TargetRegisterClass *A,
const TargetRegisterClass *B);
+/// PrintReg - Helper class for printing registers on a raw_ostream.
+/// Prints virtual and physical registers with or without a TRI instance.
+///
+/// The format is:
+/// %noreg - NoRegister
+/// %vreg5 - a virtual register.
+/// %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
+/// %EAX - a physical register
+/// %physreg17 - a physical register when no TRI instance given.
+///
+/// Usage: OS << PrintReg(Reg, TRI) << '\n';
+///
+class PrintReg {
+ const TargetRegisterInfo *TRI;
+ unsigned Reg;
+ unsigned SubIdx;
+public:
+ PrintReg(unsigned reg, const TargetRegisterInfo *tri = 0, unsigned subidx = 0)
+ : TRI(tri), Reg(reg), SubIdx(subidx) {}
+ void print(raw_ostream&) const;
+};
+
+static inline raw_ostream &operator<<(raw_ostream &OS, const PrintReg &PR) {
+ PR.print(OS);
+ return OS;
+}
+
} // End llvm namespace
#endif
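For completeness, a brief usage sketch of the PrintReg helper added above (Reg and TRI are assumed to come from the surrounding pass; dbgs() is LLVM's debug output stream):

#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetRegisterInfo.h"

// Sketch: print a register with and without a TargetRegisterInfo instance.
static void dumpReg(unsigned Reg, const llvm::TargetRegisterInfo *TRI) {
  llvm::dbgs() << llvm::PrintReg(Reg, TRI) << '\n';  // e.g. "%vreg5" or "%EAX"
  llvm::dbgs() << llvm::PrintReg(Reg) << '\n';       // e.g. "%vreg5" or "%physreg17"
}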
diff --git a/include/llvm/Target/TargetRegistry.h b/include/llvm/Target/TargetRegistry.h
index 2817b0c..f851ad0 100644
--- a/include/llvm/Target/TargetRegistry.h
+++ b/include/llvm/Target/TargetRegistry.h
@@ -39,6 +39,15 @@ namespace llvm {
class TargetAsmParser;
class TargetMachine;
class raw_ostream;
+ class formatted_raw_ostream;
+
+ MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm,
+ bool useLoc,
+ MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE,
+ TargetAsmBackend *TAB,
+ bool ShowInst);
/// Target - Wrapper for Target specific information.
///
@@ -80,7 +89,16 @@ namespace llvm {
TargetAsmBackend &TAB,
raw_ostream &_OS,
MCCodeEmitter *_Emitter,
- bool RelaxAll);
+ bool RelaxAll,
+ bool NoExecStack);
+ typedef MCStreamer *(*AsmStreamerCtorTy)(MCContext &Ctx,
+ formatted_raw_ostream &OS,
+ bool isVerboseAsm,
+ bool useLoc,
+ MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE,
+ TargetAsmBackend *TAB,
+ bool ShowInst);
private:
/// Next - The next registered target in the linked list, maintained by the
@@ -138,7 +156,13 @@ namespace llvm {
/// ObjectStreamer, if registered.
ObjectStreamerCtorTy ObjectStreamerCtorFn;
+ /// AsmStreamerCtorFn - Construction function for this target's
+ /// AsmStreamer, if registered (default = llvm::createAsmStreamer).
+ AsmStreamerCtorTy AsmStreamerCtorFn;
+
public:
+ Target() : AsmStreamerCtorFn(llvm::createAsmStreamer) {}
+
/// @name Target Information
/// @{
@@ -185,6 +209,9 @@ namespace llvm {
/// hasObjectStreamer - Check if this target supports streaming to files.
bool hasObjectStreamer() const { return ObjectStreamerCtorFn != 0; }
+    /// hasAsmStreamer - Check if this target supports streaming to assembly files.
+ bool hasAsmStreamer() const { return AsmStreamerCtorFn != 0; }
+
/// @}
/// @name Feature Constructors
/// @{
@@ -282,14 +309,31 @@ namespace llvm {
/// \arg _OS - The stream object.
/// \arg _Emitter - The target independent assembler object.Takes ownership.
/// \arg RelaxAll - Relax all fixups?
+    /// \arg NoExecStack - Mark the file as not needing an executable stack.
MCStreamer *createObjectStreamer(const std::string &TT, MCContext &Ctx,
TargetAsmBackend &TAB,
raw_ostream &_OS,
MCCodeEmitter *_Emitter,
- bool RelaxAll) const {
+ bool RelaxAll,
+ bool NoExecStack) const {
if (!ObjectStreamerCtorFn)
return 0;
- return ObjectStreamerCtorFn(*this, TT, Ctx, TAB, _OS, _Emitter, RelaxAll);
+ return ObjectStreamerCtorFn(*this, TT, Ctx, TAB, _OS, _Emitter, RelaxAll,
+ NoExecStack);
+ }
+
+ /// createAsmStreamer - Create a target specific MCStreamer.
+ MCStreamer *createAsmStreamer(MCContext &Ctx,
+ formatted_raw_ostream &OS,
+ bool isVerboseAsm,
+ bool useLoc,
+ MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE,
+ TargetAsmBackend *TAB,
+ bool ShowInst) const {
+      // AsmStreamerCtorFn defaults to llvm::createAsmStreamer.
+ return AsmStreamerCtorFn(Ctx, OS, isVerboseAsm, useLoc,
+ InstPrint, CE, TAB, ShowInst);
}
/// @}
@@ -513,7 +557,7 @@ namespace llvm {
T.CodeEmitterCtorFn = Fn;
}
- /// RegisterObjectStreamer - Register an MCStreamer implementation
+    /// RegisterObjectStreamer - Register an object code MCStreamer implementation
/// for the given target.
///
/// Clients are responsible for ensuring that registration doesn't occur
@@ -527,6 +571,20 @@ namespace llvm {
T.ObjectStreamerCtorFn = Fn;
}
+ /// RegisterAsmStreamer - Register an assembly MCStreamer implementation
+ /// for the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCStreamer for the target.
+ static void RegisterAsmStreamer(Target &T, Target::AsmStreamerCtorTy Fn) {
+ if (T.AsmStreamerCtorFn == createAsmStreamer)
+ T.AsmStreamerCtorFn = Fn;
+ }
+
/// @}
};
diff --git a/include/llvm/Target/TargetSchedule.td b/include/llvm/Target/TargetSchedule.td
index 96c8367..97ea82a 100644
--- a/include/llvm/Target/TargetSchedule.td
+++ b/include/llvm/Target/TargetSchedule.td
@@ -22,6 +22,13 @@
//
class FuncUnit;
+//===----------------------------------------------------------------------===//
+// Pipeline bypass / forwarding - These values specify the symbolic names of
+// pipeline bypasses which can be used to forward the results of instructions
+// to their uses.
+class Bypass;
+def NoBypass : Bypass;
+
class ReservationKind<bits<1> val> {
int Value = val;
}
@@ -66,30 +73,58 @@ class InstrStage<int cycles, list<FuncUnit> units,
// across all chip sets. Thus a new chip set can be added without modifying
// instruction information.
//
-class InstrItinClass;
+// NumMicroOps represents the number of micro-operations that each instruction
+// in the class is decoded into. If the number is zero, it means the
+// instruction can decode into a variable number of micro-ops and the count
+// must be determined dynamically.
+//
+class InstrItinClass<int ops = 1> {
+ int NumMicroOps = ops;
+}
def NoItinerary : InstrItinClass;
//===----------------------------------------------------------------------===//
// Instruction itinerary data - These values provide a runtime map of an
// instruction itinerary class (name) to its itinerary data.
//
+// OperandCycles are optional "cycle counts". They specify the cycle, after
+// instruction issue, at which the values corresponding to specific operand
+// indices are defined or read. Bypasses are optional "pipeline forwarding
+// paths": if a def by an instruction is available on a specific bypass and the
+// use can read from the same bypass, the operand use latency is reduced by one.
+//
+// InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
+// InstrStage<1, [A9_AGU]>],
+// [3, 1], [A9_LdBypass]>,
+// InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
+// [1, 1], [NoBypass, A9_LdBypass]>,
+//
+// In this example, an instruction of class IIC_iLoad_i reads its input on
+// cycle 1 (after issue) and the result of the load is available on cycle 3.
+// The result is available via forwarding path A9_LdBypass. If it is used by
+// the first source operand of instructions of class IIC_iMVNr, the operand
+// latency is reduced by 1.
class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
- list<int> operandcycles = []> {
+ list<int> operandcycles = [],
+ list<Bypass> bypasses = []> {
InstrItinClass TheClass = Class;
list<InstrStage> Stages = stages;
list<int> OperandCycles = operandcycles;
+ list<Bypass> Bypasses = bypasses;
}
//===----------------------------------------------------------------------===//
// Processor itineraries - These values represent the set of all itinerary
// classes for a given chip set.
//
-class ProcessorItineraries<list<FuncUnit> fu, list<InstrItinData> iid> {
+class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
+ list<InstrItinData> iid> {
list<FuncUnit> FU = fu;
+ list<Bypass> BP = bp;
list<InstrItinData> IID = iid;
}
// NoItineraries - A marker that can be used by processors without schedule
// info.
-def NoItineraries : ProcessorItineraries<[], []>;
+def NoItineraries : ProcessorItineraries<[], [], []>;
diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td
index 58ccfba..c9be40d 100644
--- a/include/llvm/Target/TargetSelectionDAG.td
+++ b/include/llvm/Target/TargetSelectionDAG.td
@@ -1,10 +1,10 @@
//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces used by SelectionDAG
@@ -61,6 +61,13 @@ class SDTCisEltOfVec<int ThisOp, int OtherOp>
int OtherOpNum = OtherOp;
}
+/// SDTCisSubVecOfVec - This indicates that ThisOp is a vector type
+/// with length less than that of OtherOp, which is a vector type.
+class SDTCisSubVecOfVec<int ThisOp, int OtherOp>
+ : SDTypeConstraint<ThisOp> {
+ int OtherOpNum = OtherOp;
+}
+
//===----------------------------------------------------------------------===//
// Selection DAG Type Profile definitions.
//
@@ -123,10 +130,10 @@ def SDTFPRoundOp : SDTypeProfile<1, 1, [ // fround
def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend
SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
-def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp
+def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp
SDTCisFP<0>, SDTCisInt<1>
]>;
-def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int
+def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int
SDTCisInt<0>, SDTCisFP<1>
]>;
def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg
@@ -138,7 +145,7 @@ def SDTSetCC : SDTypeProfile<1, 3, [ // setcc
SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
]>;
-def SDTSelect : SDTypeProfile<1, 3, [ // select
+def SDTSelect : SDTypeProfile<1, 3, [ // select
SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
]>;
@@ -162,11 +169,11 @@ def SDTBrind : SDTypeProfile<0, 1, [ // brind
def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap
def SDTLoad : SDTypeProfile<1, 1, [ // load
- SDTCisPtrTy<1>
+ SDTCisPtrTy<1>
]>;
def SDTStore : SDTypeProfile<0, 2, [ // store
- SDTCisPtrTy<1>
+ SDTCisPtrTy<1>
]>;
def SDTIStore : SDTypeProfile<1, 3, [ // indexed store
@@ -183,18 +190,25 @@ def SDTVecInsert : SDTypeProfile<1, 3, [ // vector insert
SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
]>;
-def STDPrefetch : SDTypeProfile<0, 3, [ // prefetch
+def SDTSubVecExtract : SDTypeProfile<1, 2, [// subvector extract
+ SDTCisSubVecOfVec<0,1>, SDTCisInt<2>
+]>;
+def SDTSubVecInsert : SDTypeProfile<1, 3, [ // subvector insert
+ SDTCisSubVecOfVec<2, 1>, SDTCisSameAs<0,1>, SDTCisInt<3>
+]>;
+
+def SDTPrefetch : SDTypeProfile<0, 3, [ // prefetch
SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>
]>;
-def STDMemBarrier : SDTypeProfile<0, 5, [ // memory barier
+def SDTMemBarrier : SDTypeProfile<0, 5, [ // memory barrier
SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisSameAs<0,4>,
SDTCisInt<0>
]>;
-def STDAtomic3 : SDTypeProfile<1, 3, [
+def SDTAtomic3 : SDTypeProfile<1, 3, [
SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;
-def STDAtomic2 : SDTypeProfile<1, 2, [
+def SDTAtomic2 : SDTypeProfile<1, 2, [
SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;
@@ -216,20 +230,27 @@ class SDNodeProperty;
def SDNPCommutative : SDNodeProperty; // X op Y == Y op X
def SDNPAssociative : SDNodeProperty; // (X op Y) op Z == X op (Y op Z)
def SDNPHasChain : SDNodeProperty; // R/W chain operand and result
-def SDNPOutFlag : SDNodeProperty; // Write a flag result
-def SDNPInFlag : SDNodeProperty; // Read a flag operand
-def SDNPOptInFlag : SDNodeProperty; // Optionally read a flag operand
+def SDNPOutGlue : SDNodeProperty; // Write a flag result
+def SDNPInGlue : SDNodeProperty; // Read a flag operand
+def SDNPOptInGlue : SDNodeProperty; // Optionally read a flag operand
def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand
def SDNPVariadic : SDNodeProperty; // Node has variable arguments.
+def SDNPWantRoot : SDNodeProperty; // ComplexPattern gets the root of match
+def SDNPWantParent : SDNodeProperty; // ComplexPattern gets the parent
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Operations
+class SDPatternOperator;
//===----------------------------------------------------------------------===//
// Selection DAG Node definitions.
//
class SDNode<string opcode, SDTypeProfile typeprof,
- list<SDNodeProperty> props = [], string sdclass = "SDNode"> {
+ list<SDNodeProperty> props = [], string sdclass = "SDNode">
+ : SDPatternOperator {
string Opcode = opcode;
string SDClass = sdclass;
list<SDNodeProperty> Properties = props;
@@ -305,14 +326,14 @@ def or : SDNode<"ISD::OR" , SDTIntBinOp,
def xor : SDNode<"ISD::XOR" , SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
def addc : SDNode<"ISD::ADDC" , SDTIntBinOp,
- [SDNPCommutative, SDNPOutFlag]>;
+ [SDNPCommutative, SDNPOutGlue]>;
def adde : SDNode<"ISD::ADDE" , SDTIntBinOp,
- [SDNPCommutative, SDNPOutFlag, SDNPInFlag]>;
+ [SDNPCommutative, SDNPOutGlue, SDNPInGlue]>;
def subc : SDNode<"ISD::SUBC" , SDTIntBinOp,
- [SDNPOutFlag]>;
+ [SDNPOutGlue]>;
def sube : SDNode<"ISD::SUBE" , SDTIntBinOp,
- [SDNPOutFlag, SDNPInFlag]>;
-
+ [SDNPOutGlue, SDNPInGlue]>;
+
def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>;
def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>;
@@ -322,11 +343,11 @@ def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>;
-def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>;
+def bitconvert : SDNode<"ISD::BITCAST" , SDTUnaryOp>;
def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;
-
+
def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>;
def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>;
def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>;
@@ -367,35 +388,36 @@ def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>;
def trap : SDNode<"ISD::TRAP" , SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
-def prefetch : SDNode<"ISD::PREFETCH" , STDPrefetch,
- [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
+def prefetch : SDNode<"ISD::PREFETCH" , SDTPrefetch,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+ SDNPMemOperand]>;
-def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier,
+def membarrier : SDNode<"ISD::MEMBARRIER" , SDTMemBarrier,
[SDNPHasChain, SDNPSideEffect]>;
-def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
+def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , SDTAtomic3,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
+def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
+def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
+def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
+def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
+def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
+def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
+def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
+def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
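
The atomic nodes fixed up above (STDAtomic2 was a typo for SDTAtomic2) are typically selected through target pseudo-instructions that a custom inserter later expands into a load-locked/store-conditional loop. A rough sketch only, assuming a hypothetical Pseudo class and GPR register class:

let usesCustomInserter = 1 in
def ATOMIC_LOAD_ADD_I32 : Pseudo<(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr),
      [(set GPR:$dst, (atomic_load_add GPR:$ptr, GPR:$incr))]>;
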
@@ -415,16 +437,26 @@ def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
-
+
+// This operator does not do subvector type checking. The ARM
+// backend, at least, needs it.
+def vector_extract_subvec : SDNode<"ISD::EXTRACT_SUBVECTOR",
+ SDTypeProfile<1, 2, [SDTCisInt<2>, SDTCisVec<1>, SDTCisVec<0>]>,
+ []>;
+
+// This operator does subvector type checking.
+def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
+def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
+
// Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
// these internally. Don't reference these directly.
-def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
+def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
[SDNPHasChain]>;
-def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
+def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
[SDNPHasChain]>;
-def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
+def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;
// Do not use cvt directly. Use cvt forms below
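
The distinction between the two subvector operators introduced above matters when a backend models wide registers as concatenations of narrower ones and wants to extract a half without the usual subvector type checking. A hedged sketch of how the unchecked form might be used, assuming QPR/DPR-style register classes and a dsub_0 subregister index loosely modeled on the ARM NEON backend mentioned in the comment:

def : Pat<(v2i32 (vector_extract_subvec (v4i32 QPR:$src), (i32 0))),
          (EXTRACT_SUBREG QPR:$src, dsub_0)>;
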
@@ -469,10 +501,10 @@ def NOOP_SDNodeXForm : SDNodeXForm<imm, [{}]>;
//
/// PatFrag - Represents a pattern fragment. This can match something on the
-/// DAG, frame a single node to multiply nested other fragments.
+/// DAG, from a single node to multiple nested other fragments.
///
class PatFrag<dag ops, dag frag, code pred = [{}],
- SDNodeXForm xform = NOOP_SDNodeXForm> {
+ SDNodeXForm xform = NOOP_SDNodeXForm> : SDPatternOperator {
dag Operands = ops;
dag Fragment = frag;
code Predicate = pred;
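
As a concrete illustration of the PatFrag class documented in this hunk, the extending-load fragments defined later in this same file follow roughly this shape: a fragment wraps a node and a predicate that narrows when it matches. This is a sketch in that style, not part of the imported patch:

def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
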
@@ -822,7 +854,7 @@ def cvtfu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
//===----------------------------------------------------------------------===//
// Selection DAG Pattern Support.
//
-// Patterns are what are actually matched against the target-flavored
+// Patterns are what are actually matched against by the target-flavored
// instruction selection DAG. Instructions defined by the target implicitly
// define patterns in most cases, but patterns can also be explicitly added when
// an operation is defined by a sequence of instructions (e.g. loading a large
@@ -834,7 +866,7 @@ class Pattern<dag patternToMatch, list<dag> resultInstrs> {
dag PatternToMatch = patternToMatch;
list<dag> ResultInstrs = resultInstrs;
list<Predicate> Predicates = []; // See class Instruction in Target.td.
- int AddedComplexity = 0; // See class Instruction in Target.td.
+ int AddedComplexity = 0; // See class Instruction in Target.td.
}
// Pat - A simple (but common) form of a pattern, which produces a simple result
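
The Pat form referenced in the trailing comment is the usual way a target attaches an explicit pattern to an existing instruction. A minimal sketch, assuming a hypothetical GPR register class and an ADDri register-plus-immediate instruction:

def : Pat<(add GPR:$src, imm:$val),
          (ADDri GPR:$src, imm:$val)>;
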
diff --git a/include/llvm/Target/TargetSelectionDAGInfo.h b/include/llvm/Target/TargetSelectionDAGInfo.h
index 2be1834..c9ca722 100644
--- a/include/llvm/Target/TargetSelectionDAGInfo.h
+++ b/include/llvm/Target/TargetSelectionDAGInfo.h
@@ -59,8 +59,8 @@ public:
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) const {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
@@ -75,8 +75,8 @@ public:
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) const {
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
@@ -91,7 +91,7 @@ public:
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
- const Value *DstSV, uint64_t DstOff) const {
+ MachinePointerInfo DstPtrInfo) const {
return SDValue();
}
};