author: rdivacky <rdivacky@FreeBSD.org>, 2009-10-14 17:57:32 +0000
committer: rdivacky <rdivacky@FreeBSD.org>, 2009-10-14 17:57:32 +0000
commit: cd749a9c07f1de2fb8affde90537efa4bc3e7c54
tree: b21f6de4e08b89bb7931806bab798fc2a5e3a686 /include/llvm/Target
parent: 72621d11de5b873f1695f391eb95f0b336c3d2d4
download: FreeBSD-src-cd749a9c07f1de2fb8affde90537efa4bc3e7c54.zip, FreeBSD-src-cd749a9c07f1de2fb8affde90537efa4bc3e7c54.tar.gz
Update llvm to r84119.
Diffstat (limited to 'include/llvm/Target')
20 files changed, 1927 insertions, 483 deletions
diff --git a/include/llvm/Target/SubtargetFeature.h b/include/llvm/Target/SubtargetFeature.h index 5cfdc02..58333e2 100644 --- a/include/llvm/Target/SubtargetFeature.h +++ b/include/llvm/Target/SubtargetFeature.h @@ -20,12 +20,12 @@ #include <string> #include <vector> -#include <iosfwd> #include <cstring> #include "llvm/Support/DataTypes.h" namespace llvm { - + class raw_ostream; + //===----------------------------------------------------------------------===// /// /// SubtargetFeatureKV - Used to provide key value pairs for feature and @@ -102,8 +102,7 @@ public: void *getInfo(const SubtargetInfoKV *Table, size_t TableSize); /// Print feature string. - void print(std::ostream &OS) const; - void print(std::ostream *OS) const { if (OS) print(*OS); } + void print(raw_ostream &OS) const; // Dump feature info. void dump() const; diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index ebd826a..4d65b19 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -203,6 +203,8 @@ class Instruction { bit hasCtrlDep = 0; // Does this instruction r/w ctrl-flow chains? bit isNotDuplicable = 0; // Is it unsafe to duplicate this instruction? bit isAsCheapAsAMove = 0; // As cheap (or cheaper) than a move instruction. + bit hasExtraSrcRegAllocReq = 0; // Sources have special regalloc requirement? + bit hasExtraDefRegAllocReq = 0; // Defs have special regalloc requirement? // Side effect flags - When set, the flags have these meanings: // @@ -221,6 +223,11 @@ class Instruction { bit mayHaveSideEffects = 0; bit neverHasSideEffects = 0; + // Is this instruction a "real" instruction (with a distinct machine + // encoding), or is it a pseudo instruction used for codegen modeling + // purposes. + bit isCodeGenOnly = 0; + InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling. string Constraints = ""; // OperandConstraint, e.g. $src = $dst. @@ -258,16 +265,63 @@ def ins; /// of operands. def variable_ops; + +/// PointerLikeRegClass - Values that are designed to have pointer width are +/// derived from this. TableGen treats the register class as having a symbolic +/// type that it doesn't know, and resolves the actual regclass to use by using +/// the TargetRegisterInfo::getPointerRegClass() hook at codegen time. +class PointerLikeRegClass<int Kind> { + int RegClassKind = Kind; +} + + /// ptr_rc definition - Mark this operand as being a pointer value whose /// register class is resolved dynamically via a callback to TargetInstrInfo. /// FIXME: We should probably change this to a class which contain a list of /// flags. But currently we have but one flag. -def ptr_rc; +def ptr_rc : PointerLikeRegClass<0>; /// unknown definition - Mark this operand as being of unknown type, causing /// it to be resolved by inference in the context it is used. def unknown; +/// AsmOperandClass - Representation for the kinds of operands which the target +/// specific parser can create and the assembly matcher may need to distinguish. +/// +/// Operand classes are used to define the order in which instructions are +/// matched, to ensure that the instruction which gets matched for any +/// particular list of operands is deterministic. +/// +/// The target specific parser must be able to classify a parsed operand into a +/// unique class which does not partially overlap with any other classes. It can +/// match a subset of some other class, in which case the super class field +/// should be defined. 
+class AsmOperandClass { + /// The name to use for this class, which should be usable as an enum value. + string Name = ?; + + /// The super class of this operand. + AsmOperandClass SuperClass = ?; + + /// The name of the method on the target specific operand to call to test + /// whether the operand is an instance of this class. If not set, this will + /// default to "isFoo", where Foo is the AsmOperandClass name. The method + /// signature should be: + /// bool isFoo() const; + string PredicateMethod = ?; + + /// The name of the method on the target specific operand to call to add the + /// target specific operand to an MCInst. If not set, this will default to + /// "addFooOperands", where Foo is the AsmOperandClass name. The method + /// signature should be: + /// void addFooOperands(MCInst &Inst, unsigned N) const; + string RenderMethod = ?; +} + +def ImmAsmOperand : AsmOperandClass { + let Name = "Imm"; +} + /// Operand Types - These provide the built-in operand types that may be used /// by a target. Targets can optionally provide their own operand types as /// needed, though this should not be needed for RISC targets. @@ -276,6 +330,16 @@ class Operand<ValueType ty> { string PrintMethod = "printOperand"; string AsmOperandLowerMethod = ?; dag MIOperandInfo = (ops); + + // ParserMatchClass - The "match class" that operands of this type fit + // in. Match classes are used to define the order in which instructions are + // match, to ensure that which instructions gets matched is deterministic. + // + // The target specific parser must be able to classify an parsed operand + // into a unique class, which does not partially overlap with any other + // classes. It can match a subset of some other class, in which case + // ParserMatchSuperClass should be set to the name of that class. + AsmOperandClass ParserMatchClass = ImmAsmOperand; } def i1imm : Operand<i1>; @@ -302,8 +366,8 @@ class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal> } /// OptionalDefOperand - This is used to define a optional definition operand -/// for an instruction. DefaultOps is the register the operand represents if none -/// is supplied, e.g. zero_reg. +/// for an instruction. DefaultOps is the register the operand represents if +/// none is supplied, e.g. zero_reg. class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops> : Operand<ty> { let MIOperandInfo = OpTypes; @@ -329,7 +393,8 @@ class InstrInfo { bit isLittleEndianEncoding = 0; } -// Standard Instructions. +// Standard Pseudo Instructions. +let isCodeGenOnly = 1 in { def PHI : Instruction { let OutOperandList = (ops); let InOperandList = (ops variable_ops); @@ -363,12 +428,12 @@ def GC_LABEL : Instruction { let Namespace = "TargetInstrInfo"; let hasCtrlDep = 1; } -def DECLARE : Instruction { +def KILL : Instruction { let OutOperandList = (ops); let InOperandList = (ops variable_ops); let AsmString = ""; let Namespace = "TargetInstrInfo"; - let hasCtrlDep = 1; + let neverHasSideEffects = 1; } def EXTRACT_SUBREG : Instruction { let OutOperandList = (ops unknown:$dst); @@ -409,6 +474,39 @@ def COPY_TO_REGCLASS : Instruction { let neverHasSideEffects = 1; let isAsCheapAsAMove = 1; } +} + +//===----------------------------------------------------------------------===// +// AsmParser - This class can be implemented by targets that wish to implement +// .s file parsing. +// +// Subtargets can have multiple different assembly parsers (e.g. AT&T vs Intel +// syntax on X86 for example). 
+// +class AsmParser { + // AsmParserClassName - This specifies the suffix to use for the asmparser + // class. Generated AsmParser classes are always prefixed with the target + // name. + string AsmParserClassName = "AsmParser"; + + // Variant - AsmParsers can be of multiple different variants. Variants are + // used to support targets that need to parser multiple formats for the + // assembly language. + int Variant = 0; + + // CommentDelimiter - If given, the delimiter string used to recognize + // comments which are hard coded in the .td assembler strings for individual + // instructions. + string CommentDelimiter = ""; + + // RegisterPrefix - If given, the token prefix which indicates a register + // token. This is used by the matcher to automatically recognize hard coded + // register tokens as constrained registers, instead of tokens, for the + // purposes of matching. + string RegisterPrefix = ""; +} +def DefaultAsmParser : AsmParser; + //===----------------------------------------------------------------------===// // AsmWriter - This class can be implemented by targets that need to customize @@ -434,6 +532,17 @@ class AsmWriter { // will specify which alternative to use. For example "{x|y|z}" with Variant // == 1, will expand to "y". int Variant = 0; + + + // FirstOperandColumn/OperandSpacing - If the assembler syntax uses a columnar + // layout, the asmwriter can actually generate output in this columns (in + // verbose-asm mode). These two values indicate the width of the first column + // (the "opcode" area) and the width to reserve for subsequent operands. When + // verbose asm mode is enabled, operands will be indented to respect this. + int FirstOperandColumn = -1; + + // OperandSpacing - Space between operand columns. + int OperandSpacing = -1; } def DefaultAsmWriter : AsmWriter; @@ -445,6 +554,9 @@ class Target { // InstructionSet - Instruction set description for this target. InstrInfo InstructionSet; + // AssemblyParsers - The AsmParser instances available for this target. + list<AsmParser> AssemblyParsers = [DefaultAsmParser]; + // AssemblyWriters - The AsmWriter instances available for this target. list<AsmWriter> AssemblyWriters = [DefaultAsmWriter]; } diff --git a/include/llvm/Target/TargetAsmParser.h b/include/llvm/Target/TargetAsmParser.h new file mode 100644 index 0000000..ef1fc49 --- /dev/null +++ b/include/llvm/Target/TargetAsmParser.h @@ -0,0 +1,65 @@ +//===-- llvm/Target/TargetAsmParser.h - Target Assembly Parser --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETPARSER_H +#define LLVM_TARGET_TARGETPARSER_H + +#include "llvm/MC/MCAsmLexer.h" + +namespace llvm { +class MCAsmParser; +class MCInst; +class StringRef; +class Target; + +/// TargetAsmParser - Generic interface to target specific assembly parsers. +class TargetAsmParser { + TargetAsmParser(const TargetAsmParser &); // DO NOT IMPLEMENT + void operator=(const TargetAsmParser &); // DO NOT IMPLEMENT +protected: // Can only create subclasses. + TargetAsmParser(const Target &); + + /// TheTarget - The Target that this machine was created for. + const Target &TheTarget; + +public: + virtual ~TargetAsmParser(); + + const Target &getTarget() const { return TheTarget; } + + /// ParseInstruction - Parse one assembly instruction. 
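For illustration only, a minimal sketch of a target-specific parser built on the interface added here. The Foo target, the class, and its empty bodies are hypothetical; only the overridden signatures come from the new header.

```cpp
// Hypothetical target parser: the "Foo" target and the trivial bodies are
// invented, but the overridden signatures follow the header added here.
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Target/TargetAsmParser.h"

namespace llvm {

class FooAsmParser : public TargetAsmParser {
public:
  explicit FooAsmParser(const Target &T) : TargetAsmParser(T) {}

  // Called with the lexer positioned just after the instruction mnemonic;
  // a real implementation would parse operands and fill in Inst.
  virtual bool ParseInstruction(const StringRef &Name, MCInst &Inst) {
    return true;  // true means failure, per the interface contract
  }

  // Return true (doing nothing) for directives this target does not handle.
  virtual bool ParseDirective(AsmToken DirectiveID) {
    return true;  // not a Foo-specific directive
  }
};

} // end namespace llvm
```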
+ /// + /// The parser is positioned following the instruction name. The target + /// specific instruction parser should parse the entire instruction and + /// construct the appropriate MCInst, or emit an error. On success, the entire + /// line should be parsed up to and including the end-of-statement token. On + /// failure, the parser is not required to read to the end of the line. + // + /// \param AP - The current parser object. + /// \param Name - The instruction name. + /// \param Inst [out] - On success, the parsed instruction. + /// \return True on failure. + virtual bool ParseInstruction(const StringRef &Name, MCInst &Inst) = 0; + + /// ParseDirective - Parse a target specific assembler directive + /// + /// The parser is positioned following the directive name. The target + /// specific directive parser should parse the entire directive doing or + /// recording any target specific work, or return true and do nothing if the + /// directive is not target specific. If the directive is specific for + /// the target, the entire line is parsed up to and including the + /// end-of-statement token and false is returned. + /// + /// \param ID - the identifier token of the directive. + virtual bool ParseDirective(AsmToken DirectiveID) = 0; +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetCallingConv.td b/include/llvm/Target/TargetCallingConv.td index da3cbd2..ceaeb0b 100644 --- a/include/llvm/Target/TargetCallingConv.td +++ b/include/llvm/Target/TargetCallingConv.td @@ -58,6 +58,10 @@ class CCIfNest<CCAction A> : CCIf<"ArgFlags.isNest()", A> {} /// the specified action. class CCIfSplit<CCAction A> : CCIf<"ArgFlags.isSplit()", A> {} +/// CCIfSRet - If this argument is marked with the 'sret' attribute, apply +/// the specified action. +class CCIfSRet<CCAction A> : CCIf<"ArgFlags.isSRet()", A> {} + /// CCIfNotVarArg - If the current function is not vararg - apply the action class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {} @@ -105,6 +109,12 @@ class CCBitConvertToType<ValueType destTy> : CCAction { ValueType DestTy = destTy; } +/// CCPassIndirect - If applied, this stores the value to stack and passes the pointer +/// as normal argument. +class CCPassIndirect<ValueType destTy> : CCAction { + ValueType DestTy = destTy; +} + /// CCDelegateTo - This action invokes the specified sub-calling-convention. It /// is successful if the specified CC matches. class CCDelegateTo<CallingConv cc> : CCAction { diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h index 82abfc7..f8ea64b 100644 --- a/include/llvm/Target/TargetData.h +++ b/include/llvm/Target/TargetData.h @@ -22,6 +22,7 @@ #include "llvm/Pass.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/ADT/SmallVector.h" #include <string> @@ -33,6 +34,7 @@ class IntegerType; class StructType; class StructLayout; class GlobalVariable; +class LLVMContext; /// Enum used to categorize the alignment types stored by TargetAlignElem enum AlignTypeEnum { @@ -89,6 +91,9 @@ private: */ static const TargetAlignElem InvalidAlignmentElem; + // Opaque pointer for the StructType -> StructLayout map. + mutable void* LayoutMap; + //! Set/initialize target alignments void setAlignment(AlignTypeEnum align_type, unsigned char abi_align, unsigned char pref_align, uint32_t bit_width); @@ -111,9 +116,8 @@ public: /// @note This has to exist, because this is a pass, but it should never be /// used. 
TargetData() : ImmutablePass(&ID) { - assert(0 && "ERROR: Bad TargetData ctor used. " - "Tool did not specify a TargetData to use?"); - abort(); + llvm_report_error("Bad TargetData ctor used. " + "Tool did not specify a TargetData to use?"); } /// Constructs a TargetData from a specification string. See init(). @@ -131,7 +135,8 @@ public: PointerMemSize(TD.PointerMemSize), PointerABIAlign(TD.PointerABIAlign), PointerPrefAlign(TD.PointerPrefAlign), - Alignments(TD.Alignments) + Alignments(TD.Alignments), + LayoutMap(0) { } ~TargetData(); // Not virtual, do not subclass this class @@ -229,7 +234,7 @@ public: /// getIntPtrType - Return an unsigned integer type that is the same size or /// greater to the host pointer size. /// - const IntegerType *getIntPtrType() const; + const IntegerType *getIntPtrType(LLVMContext &C) const; /// getIndexedOffset - return the offset from the beginning of the type for /// the specified indices. This is used to implement getelementptr. diff --git a/include/llvm/Target/TargetELFWriterInfo.h b/include/llvm/Target/TargetELFWriterInfo.h index a5b30c4..7cb6931 100644 --- a/include/llvm/Target/TargetELFWriterInfo.h +++ b/include/llvm/Target/TargetELFWriterInfo.h @@ -97,9 +97,26 @@ namespace llvm { /// ELF relocation entry. virtual bool hasRelocationAddend() const = 0; - /// getAddendForRelTy - Gets the addend value for an ELF relocation entry - /// based on the target relocation type. If addend is not used returns 0. - virtual long int getAddendForRelTy(unsigned RelTy) const = 0; + /// getDefaultAddendForRelTy - Gets the default addend value for a + /// relocation entry based on the target ELF relocation type. + virtual long int getDefaultAddendForRelTy(unsigned RelTy, + long int Modifier = 0) const = 0; + + /// getRelTySize - Returns the size of relocatable field in bits + virtual unsigned getRelocationTySize(unsigned RelTy) const = 0; + + /// isPCRelativeRel - True if the relocation type is pc relative + virtual bool isPCRelativeRel(unsigned RelTy) const = 0; + + /// getJumpTableRelocationTy - Returns the machine relocation type used + /// to reference a jumptable. + virtual unsigned getAbsoluteLabelMachineRelTy() const = 0; + + /// computeRelocation - Some relocatable fields could be relocated + /// directly, avoiding the relocation symbol emission, compute the + /// final relocation value for this symbol. + virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset, + unsigned RelTy) const = 0; }; } // end llvm namespace diff --git a/include/llvm/Target/TargetFrameInfo.h b/include/llvm/Target/TargetFrameInfo.h index 3e26b9d..975d156 100644 --- a/include/llvm/Target/TargetFrameInfo.h +++ b/include/llvm/Target/TargetFrameInfo.h @@ -31,13 +31,22 @@ public: StackGrowsUp, // Adding to the stack increases the stack address StackGrowsDown // Adding to the stack decreases the stack address }; + + // Maps a callee saved register to a stack slot with a fixed offset. + struct SpillSlot { + unsigned Reg; + int Offset; // Offset relative to stack pointer on function entry. 
+ }; private: StackDirection StackDir; unsigned StackAlignment; + unsigned TransientStackAlignment; int LocalAreaOffset; public: - TargetFrameInfo(StackDirection D, unsigned StackAl, int LAO) - : StackDir(D), StackAlignment(StackAl), LocalAreaOffset(LAO) {} + TargetFrameInfo(StackDirection D, unsigned StackAl, int LAO, + unsigned TransAl = 1) + : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl), + LocalAreaOffset(LAO) {} virtual ~TargetFrameInfo(); @@ -48,12 +57,20 @@ public: /// StackDirection getStackGrowthDirection() const { return StackDir; } - /// getStackAlignment - This method returns the number of bytes that the stack - /// pointer must be aligned to. Typically, this is the largest alignment for - /// any data object in the target. + /// getStackAlignment - This method returns the number of bytes to which the + /// stack pointer must be aligned on entry to a function. Typically, this + /// is the largest alignment for any data object in the target. /// unsigned getStackAlignment() const { return StackAlignment; } + /// getTransientStackAlignment - This method returns the number of bytes to + /// which the stack pointer must be aligned at all times, even between + /// calls. + /// + unsigned getTransientStackAlignment() const { + return TransientStackAlignment; + } + /// getOffsetOfLocalArea - This method returns the offset of the local area /// from the stack pointer on entrance to a function. /// @@ -65,10 +82,10 @@ public: /// /// Each entry in this array contains a <register,offset> pair, indicating the /// fixed offset from the incoming stack pointer that each register should be - /// spilled at. If a register is not listed here, the code generator is + /// spilled at. If a register is not listed here, the code generator is /// allowed to spill it anywhere it chooses. /// - virtual const std::pair<unsigned, int> * + virtual const SpillSlot * getCalleeSavedSpillSlots(unsigned &NumEntries) const { NumEntries = 0; return 0; diff --git a/include/llvm/Target/TargetInstrDesc.h b/include/llvm/Target/TargetInstrDesc.h index 622a216..d828a23 100644 --- a/include/llvm/Target/TargetInstrDesc.h +++ b/include/llvm/Target/TargetInstrDesc.h @@ -18,7 +18,8 @@ namespace llvm { class TargetRegisterClass; - +class TargetRegisterInfo; + //===----------------------------------------------------------------------===// // Machine Operand Flags and Description //===----------------------------------------------------------------------===// @@ -45,14 +46,28 @@ namespace TOI { class TargetOperandInfo { public: /// RegClass - This specifies the register class enumeration of the operand - /// if the operand is a register. If not, this contains 0. + /// if the operand is a register. If isLookupPtrRegClass is set, then this is + /// an index that is passed to TargetRegisterInfo::getPointerRegClass(x) to + /// get a dynamic register class. + /// + /// NOTE: This member should be considered to be private, all access should go + /// through "getRegClass(TRI)" below. unsigned short RegClass; + + /// Flags - These are flags from the TOI::OperandFlags enum. unsigned short Flags; + /// Lower 16 bits are used to specify which constraints are set. The higher 16 /// bits are used to specify the value of constraints (4 bits each). - unsigned int Constraints; + unsigned Constraints; /// Currently no other information. + /// getRegClass - Get the register class for the operand, handling resolution + /// of "symbolic" pointer register classes etc. 
If this is not a register + /// operand, this returns null. + const TargetRegisterClass *getRegClass(const TargetRegisterInfo *TRI) const; + + /// isLookupPtrRegClass - Set if this operand is a pointer value and it /// requires a callback to look up its register class. bool isLookupPtrRegClass() const { return Flags&(1 <<TOI::LookupPtrRegClass);} @@ -96,7 +111,9 @@ namespace TID { ConvertibleTo3Addr, UsesCustomDAGSchedInserter, Rematerializable, - CheapAsAMove + CheapAsAMove, + ExtraSrcRegAllocReq, + ExtraDefRegAllocReq }; } @@ -428,6 +445,26 @@ public: bool isAsCheapAsAMove() const { return Flags & (1 << TID::CheapAsAMove); } + + /// hasExtraSrcRegAllocReq - Returns true if this instruction source operands + /// have special register allocation requirements that are not captured by the + /// operand register classes. e.g. ARM::STRD's two source registers must be an + /// even / odd pair, ARM::STM registers have to be in ascending order. + /// Post-register allocation passes should not attempt to change allocations + /// for sources of instructions with this flag. + bool hasExtraSrcRegAllocReq() const { + return Flags & (1 << TID::ExtraSrcRegAllocReq); + } + + /// hasExtraDefRegAllocReq - Returns true if this instruction def operands + /// have special register allocation requirements that are not captured by the + /// operand register classes. e.g. ARM::LDRD's two def registers must be an + /// even / odd pair, ARM::LDM registers have to be in ascending order. + /// Post-register allocation passes should not attempt to change allocations + /// for definitions of instructions with this flag. + bool hasExtraDefRegAllocReq() const { + return Flags & (1 << TID::ExtraDefRegAllocReq); + } }; } // end namespace llvm diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h index ecdd682..919bef1 100644 --- a/include/llvm/Target/TargetInstrInfo.h +++ b/include/llvm/Target/TargetInstrInfo.h @@ -19,6 +19,7 @@ namespace llvm { +class MCAsmInfo; class TargetRegisterClass; class TargetRegisterInfo; class LiveVariables; @@ -50,7 +51,10 @@ public: DBG_LABEL = 2, EH_LABEL = 3, GC_LABEL = 4, - DECLARE = 5, + + /// KILL - This instruction is a noop that is used only to adjust the liveness + /// of registers. This can be useful when dealing with sub-registers. + KILL = 5, /// EXTRACT_SUBREG - This instruction takes two operands: a register /// that has subregisters, and a subregister index. It returns the @@ -99,24 +103,35 @@ public: /// isTriviallyReMaterializable - Return true if the instruction is trivially /// rematerializable, meaning it has no side effects and requires no operands /// that aren't always available. - bool isTriviallyReMaterializable(const MachineInstr *MI) const { - return MI->getDesc().isRematerializable() && - isReallyTriviallyReMaterializable(MI); + bool isTriviallyReMaterializable(const MachineInstr *MI, + AliasAnalysis *AA = 0) const { + return MI->getOpcode() == IMPLICIT_DEF || + (MI->getDesc().isRematerializable() && + (isReallyTriviallyReMaterializable(MI, AA) || + isReallyTriviallyReMaterializableGeneric(MI, AA))); } protected: /// isReallyTriviallyReMaterializable - For instructions with opcodes for - /// which the M_REMATERIALIZABLE flag is set, this function tests whether the - /// instruction itself is actually trivially rematerializable, considering - /// its operands. This is used for targets that have instructions that are - /// only trivially rematerializable for specific uses. 
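For illustration, a hedged sketch of how a post-register-allocation pass might consult the new allocation-requirement flags and the AliasAnalysis-aware rematerialization query above. The helper functions are invented for this example.

```cpp
// Hypothetical helpers for a post-register-allocation pass; the function
// names are invented, the queries are the ones declared above.
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// True when nothing beyond the operand register classes constrains the
// allocation, so a post-RA pass may re-assign this instruction's registers.
static bool allocationIsUnconstrained(const MachineInstr *MI) {
  const TargetInstrDesc &TID = MI->getDesc();
  return !TID.hasExtraSrcRegAllocReq() && !TID.hasExtraDefRegAllocReq();
}

// True when MI can simply be re-emitted at another point instead of being
// spilled and reloaded; the optional AliasAnalysis feeds the new generic
// invariant-load test.
static bool canRematerialize(const MachineInstr *MI,
                             const TargetInstrInfo *TII,
                             AliasAnalysis *AA) {
  return TII->isTriviallyReMaterializable(MI, AA);
}
```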
This predicate must - /// return false if the instruction has any side effects other than - /// producing a value, or if it requres any address registers that are not - /// always available. - virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI) const { - return true; + /// which the M_REMATERIALIZABLE flag is set, this hook lets the target + /// specify whether the instruction is actually trivially rematerializable, + /// taking into consideration its operands. This predicate must return false + /// if the instruction has any side effects other than producing a value, or + /// if it requres any address registers that are not always available. + virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI, + AliasAnalysis *AA) const { + return false; } +private: + /// isReallyTriviallyReMaterializableGeneric - For instructions with opcodes + /// for which the M_REMATERIALIZABLE flag is set and the target hook + /// isReallyTriviallyReMaterializable returns false, this function does + /// target-independent tests to determine if the instruction is really + /// trivially rematerializable. + bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI, + AliasAnalysis *AA) const; + public: /// Return true if the instruction is a register to register move and return /// the source and dest operands and their sub-register indices by reference. @@ -150,19 +165,9 @@ public: /// specific location targeting a new destination register. virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, - unsigned DestReg, + unsigned DestReg, unsigned SubIdx, const MachineInstr *Orig) const = 0; - /// isInvariantLoad - Return true if the specified instruction (which is - /// marked mayLoad) is loading from a location whose value is invariant across - /// the function. For example, loading a value from the constant pool or from - /// from the argument area of a function if it does not change. This should - /// only return true of *all* loads the instruction does are invariant (if it - /// does multiple loads). - virtual bool isInvariantLoad(const MachineInstr *MI) const { - return false; - } - /// convertToThreeAddress - This method must be implemented by targets that /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target /// may be able to convert a two-address instruction into one or more true @@ -194,13 +199,11 @@ public: virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI = false) const = 0; - /// CommuteChangesDestination - Return true if commuting the specified - /// instruction will also changes the destination operand. Also return the - /// current operand index of the would be new destination register by - /// reference. This can happen when the commutable instruction is also a - /// two-address instruction. - virtual bool CommuteChangesDestination(MachineInstr *MI, - unsigned &OpIdx) const = 0; + /// findCommutedOpIndices - If specified MI is commutable, return the two + /// operand indices that would swap value. Return true if the instruction + /// is not in a form which this routine understands. + virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1, + unsigned &SrcOpIdx2) const = 0; /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning /// true if it cannot be understood (e.g. it's a switch dispatch or isn't @@ -212,15 +215,15 @@ public: /// 2. If this block ends with only an unconditional branch, it sets TBB to be /// the destination block. /// 3. 
If this block ends with an conditional branch and it falls through to - /// an successor block, it sets TBB to be the branch destination block and + /// a successor block, it sets TBB to be the branch destination block and /// a list of operands that evaluate the condition. These /// operands can be passed to other TargetInstrInfo methods to create new /// branches. - /// 4. If this block ends with an conditional branch and an unconditional - /// block, it returns the 'true' destination in TBB, the 'false' - /// destination in FBB, and a list of operands that evaluate the condition. - /// These operands can be passed to other TargetInstrInfo methods to create - /// new branches. + /// 4. If this block ends with a conditional branch followed by an + /// unconditional branch, it returns the 'true' destination in TBB, the + /// 'false' destination in FBB, and a list of operands that evaluate the + /// condition. These operands can be passed to other TargetInstrInfo + /// methods to create new branches. /// /// Note that RemoveBranch and InsertBranch must be implemented to support /// cases where this method returns success. @@ -234,7 +237,7 @@ public: bool AllowModify = false) const { return true; } - + /// RemoveBranch - Remove the branching code at the end of the specific MBB. /// This is only invoked in cases where AnalyzeBranch returns success. It /// returns the number of instructions that were removed. @@ -242,13 +245,12 @@ public: assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!"); return 0; } - - /// InsertBranch - Insert a branch into the end of the specified - /// MachineBasicBlock. This operands to this method are the same as those - /// returned by AnalyzeBranch. This is invoked in cases where AnalyzeBranch - /// returns success and when an unconditional branch (TBB is non-null, FBB is - /// null, Cond is empty) needs to be inserted. It returns the number of - /// instructions inserted. + + /// InsertBranch - Insert branch code into the end of the specified + /// MachineBasicBlock. The operands to this method are the same as those + /// returned by AnalyzeBranch. This is only invoked in cases where + /// AnalyzeBranch returns success. It returns the number of instructions + /// inserted. /// /// It is also invoked by tail merging to add unconditional branches in /// cases where AnalyzeBranch doesn't apply because there was no original @@ -285,18 +287,6 @@ public: assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!"); } - /// storeRegToAddr - Store the specified register of the given register class - /// to the specified address. The store instruction is to be added to the - /// given machine basic block before the specified machine instruction. If - /// isKill is true, the register operand is the last use and must be marked - /// kill. - virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill, - SmallVectorImpl<MachineOperand> &Addr, - const TargetRegisterClass *RC, - SmallVectorImpl<MachineInstr*> &NewMIs) const { - assert(0 && "Target didn't implement TargetInstrInfo::storeRegToAddr!"); - } - /// loadRegFromStackSlot - Load the specified register of the given register /// class from the specified stack frame index. 
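As a usage sketch, not part of the patch: a caller of the AnalyzeBranch / RemoveBranch / InsertBranch contract described above. The function is hypothetical and assumes the four-argument InsertBranch of this period.

```cpp
// Hypothetical helper: redirect a block that ends in a single unconditional
// branch, using the contract spelled out above.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

static bool redirectUncondBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *NewDest,
                                 const TargetInstrInfo *TII) {
  MachineBasicBlock *TBB = 0, *FBB = 0;
  SmallVector<MachineOperand, 4> Cond;
  // AnalyzeBranch returns true when the terminators are not in one of the
  // four analyzable forms described above; bail out in that case.
  if (TII->AnalyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return false;
  // Only rewrite case 2: a lone unconditional branch (TBB set, no condition).
  if (!TBB || FBB || !Cond.empty())
    return false;
  TII->RemoveBranch(MBB);
  TII->InsertBranch(MBB, NewDest, 0, Cond);
  return true;
}
```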
The load instruction is to be /// added to the given machine basic block before the specified machine @@ -307,16 +297,6 @@ public: const TargetRegisterClass *RC) const { assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!"); } - - /// loadRegFromAddr - Load the specified register of the given register class - /// class from the specified address. The load instruction is to be added to - /// the given machine basic block before the specified machine instruction. - virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg, - SmallVectorImpl<MachineOperand> &Addr, - const TargetRegisterClass *RC, - SmallVectorImpl<MachineInstr*> &NewMIs) const { - assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromAddr!"); - } /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee /// saved registers and returns true if it isn't possible / profitable to do @@ -429,11 +409,8 @@ public: /// insertNoop - Insert a noop into the instruction stream at the specified /// point. virtual void insertNoop(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI) const { - assert(0 && "Target didn't implement insertNoop!"); - abort(); - } - + MachineBasicBlock::iterator MI) const; + /// isPredicated - Returns true if the instruction is already predicated. /// virtual bool isPredicated(const MachineInstr *MI) const { @@ -479,9 +456,15 @@ public: return 0; } - /// GetFunctionSizeInBytes - Returns the size of the specified MachineFunction. + /// GetFunctionSizeInBytes - Returns the size of the specified + /// MachineFunction. /// virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const = 0; + + /// Measure the specified inline asm to determine an approximation of its + /// length. + virtual unsigned getInlineAsmLength(const char *Str, + const MCAsmInfo &MAI) const; }; /// TargetInstrInfoImpl - This is the default implementation of @@ -495,23 +478,17 @@ protected: public: virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI = false) const; - virtual bool CommuteChangesDestination(MachineInstr *MI, - unsigned &OpIdx) const; + virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1, + unsigned &SrcOpIdx2) const; virtual bool PredicateInstruction(MachineInstr *MI, const SmallVectorImpl<MachineOperand> &Pred) const; virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, - unsigned DestReg, + unsigned DestReg, unsigned SubReg, const MachineInstr *Orig) const; virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const; }; -/// getInstrOperandRegClass - Return register class of the operand of an -/// instruction of the specified TargetInstrDesc. -const TargetRegisterClass* -getInstrOperandRegClass(const TargetRegisterInfo *TRI, - const TargetInstrDesc &II, unsigned Op); - } // End llvm namespace #endif diff --git a/include/llvm/Target/TargetInstrItineraries.h b/include/llvm/Target/TargetInstrItineraries.h index 18931ea..420fa94 100644 --- a/include/llvm/Target/TargetInstrItineraries.h +++ b/include/llvm/Target/TargetInstrItineraries.h @@ -7,90 +7,160 @@ // //===----------------------------------------------------------------------===// // -// This file describes the structures used for instruction itineraries and -// states. This is used by schedulers to determine instruction states and -// latencies. +// This file describes the structures used for instruction +// itineraries, stages, and operand reads/writes. 
This is used by +// schedulers to determine instruction stages and latencies. // //===----------------------------------------------------------------------===// #ifndef LLVM_TARGET_TARGETINSTRITINERARIES_H #define LLVM_TARGET_TARGETINSTRITINERARIES_H +#include <algorithm> + namespace llvm { //===----------------------------------------------------------------------===// -/// Instruction stage - These values represent a step in the execution of an -/// instruction. The latency represents the number of discrete time slots used -/// need to complete the stage. Units represent the choice of functional units -/// that can be used to complete the stage. Eg. IntUnit1, IntUnit2. +/// Instruction stage - These values represent a non-pipelined step in +/// the execution of an instruction. Cycles represents the number of +/// discrete time slots needed to complete the stage. Units represent +/// the choice of functional units that can be used to complete the +/// stage. Eg. IntUnit1, IntUnit2. NextCycles indicates how many +/// cycles should elapse from the start of this stage to the start of +/// the next stage in the itinerary. A value of -1 indicates that the +/// next stage should start immediately after the current one. +/// For example: +/// +/// { 1, x, -1 } +/// indicates that the stage occupies FU x for 1 cycle and that +/// the next stage starts immediately after this one. +/// +/// { 2, x|y, 1 } +/// indicates that the stage occupies either FU x or FU y for 2 +/// consecuative cycles and that the next stage starts one cycle +/// after this stage starts. That is, the stage requirements +/// overlap in time. +/// +/// { 1, x, 0 } +/// indicates that the stage occupies FU x for 1 cycle and that +/// the next stage starts in this same cycle. This can be used to +/// indicate that the instruction requires multiple stages at the +/// same time. /// struct InstrStage { - unsigned Cycles; ///< Length of stage in machine cycles - unsigned Units; ///< Choice of functional units + unsigned Cycles_; ///< Length of stage in machine cycles + unsigned Units_; ///< Choice of functional units + int NextCycles_; ///< Number of machine cycles to next stage + + /// getCycles - returns the number of cycles the stage is occupied + unsigned getCycles() const { + return Cycles_; + } + + /// getUnits - returns the choice of FUs + unsigned getUnits() const { + return Units_; + } + + /// getNextCycles - returns the number of cycles from the start of + /// this stage to the start of the next stage in the itinerary + unsigned getNextCycles() const { + return (NextCycles_ >= 0) ? (unsigned)NextCycles_ : Cycles_; + } }; //===----------------------------------------------------------------------===// -/// Instruction itinerary - An itinerary represents a sequential series of steps -/// required to complete an instruction. Itineraries are represented as -/// sequences of instruction stages. +/// Instruction itinerary - An itinerary represents the scheduling +/// information for an instruction. This includes a set of stages +/// occupies by the instruction, and the pipeline cycle in which +/// operands are read and written. 
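To make the new stage semantics concrete, a small hand-written itinerary table in the spirit of the { Cycles, Units, NextCycles } examples above. Everything in it (units, cycle counts, and operand cycles) is invented; real tables are emitted by TableGen.

```cpp
// Hand-written itinerary fragment for illustration only.
#include "llvm/Target/TargetInstrItineraries.h"

using namespace llvm;

enum { IntALU = 1 << 0, IntMul = 1 << 1 };   // hypothetical functional units

static const InstrStage Stages[] = {
  { 1, IntALU,          -1 },  // 1 cycle on IntALU; next stage follows it
  { 2, IntALU | IntMul,  1 },  // 2 cycles on either unit; a following stage
                               // would start 1 cycle in and overlap
};

static const unsigned OperandCycles[] = { 3, 1 };  // def ready at 3, use at 1

static const InstrItinerary Itineraries[] = {
  { 0, 2, 0, 2 },             // class 0: Stages[0..2), OperandCycles[0..2)
  { ~0U, ~0U, ~0U, ~0U },     // end marker, recognized by isEndMarker()
};

static const InstrItineraryData ItinData(Stages, OperandCycles, Itineraries);

// ItinData.getStageLatency(0) == 3: stage 0 completes at cycle 1 and, since
// its NextCycles is -1, stage 1 starts right after it and finishes at 1 + 2.
// ItinData.getOperandCycle(0, 0) == 3 and ItinData.getOperandCycle(0, 1) == 1.
```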
/// struct InstrItinerary { - unsigned First; ///< Index of first stage in itinerary - unsigned Last; ///< Index of last + 1 stage in itinerary + unsigned FirstStage; ///< Index of first stage in itinerary + unsigned LastStage; ///< Index of last + 1 stage in itinerary + unsigned FirstOperandCycle; ///< Index of first operand rd/wr + unsigned LastOperandCycle; ///< Index of last + 1 operand rd/wr }; - //===----------------------------------------------------------------------===// /// Instruction itinerary Data - Itinerary data supplied by a subtarget to be /// used by a target. /// struct InstrItineraryData { const InstrStage *Stages; ///< Array of stages selected + const unsigned *OperandCycles; ///< Array of operand cycles selected const InstrItinerary *Itineratries; ///< Array of itineraries selected /// Ctors. /// - InstrItineraryData() : Stages(0), Itineratries(0) {} - InstrItineraryData(const InstrStage *S, const InstrItinerary *I) - : Stages(S), Itineratries(I) {} + InstrItineraryData() : Stages(0), OperandCycles(0), Itineratries(0) {} + InstrItineraryData(const InstrStage *S, const unsigned *OS, + const InstrItinerary *I) + : Stages(S), OperandCycles(OS), Itineratries(I) {} /// isEmpty - Returns true if there are no itineraries. /// bool isEmpty() const { return Itineratries == 0; } - - /// begin - Return the first stage of the itinerary. + + /// isEndMarker - Returns true if the index is for the end marker + /// itinerary. + /// + bool isEndMarker(unsigned ItinClassIndx) const { + return ((Itineratries[ItinClassIndx].FirstStage == ~0U) && + (Itineratries[ItinClassIndx].LastStage == ~0U)); + } + + /// beginStage - Return the first stage of the itinerary. /// - const InstrStage *begin(unsigned ItinClassIndx) const { - unsigned StageIdx = Itineratries[ItinClassIndx].First; + const InstrStage *beginStage(unsigned ItinClassIndx) const { + unsigned StageIdx = Itineratries[ItinClassIndx].FirstStage; return Stages + StageIdx; } - /// end - Return the last+1 stage of the itinerary. + /// endStage - Return the last+1 stage of the itinerary. /// - const InstrStage *end(unsigned ItinClassIndx) const { - unsigned StageIdx = Itineratries[ItinClassIndx].Last; + const InstrStage *endStage(unsigned ItinClassIndx) const { + unsigned StageIdx = Itineratries[ItinClassIndx].LastStage; return Stages + StageIdx; } - /// getLatency - Return the scheduling latency of the given class. A - /// simple latency value for an instruction is an over-simplification - /// for some architectures, but it's a reasonable first approximation. + /// getStageLatency - Return the total stage latency of the given + /// class. The latency is the maximum completion time for any stage + /// in the itinerary. /// - unsigned getLatency(unsigned ItinClassIndx) const { - // If the target doesn't provide latency information, use a simple - // non-zero default value for all instructions. + unsigned getStageLatency(unsigned ItinClassIndx) const { + // If the target doesn't provide itinerary information, use a + // simple non-zero default value for all instructions. if (isEmpty()) return 1; - // Just sum the cycle count for each stage. - unsigned Latency = 0; - for (const InstrStage *IS = begin(ItinClassIndx), *E = end(ItinClassIndx); - IS != E; ++IS) - Latency += IS->Cycles; + // Calculate the maximum completion time for any stage. 
+ unsigned Latency = 0, StartCycle = 0; + for (const InstrStage *IS = beginStage(ItinClassIndx), + *E = endStage(ItinClassIndx); IS != E; ++IS) { + Latency = std::max(Latency, StartCycle + IS->getCycles()); + StartCycle += IS->getNextCycles(); + } + return Latency; } + + /// getOperandCycle - Return the cycle for the given class and + /// operand. Return -1 if no cycle is specified for the operand. + /// + int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const { + if (isEmpty()) + return -1; + + unsigned FirstIdx = Itineratries[ItinClassIndx].FirstOperandCycle; + unsigned LastIdx = Itineratries[ItinClassIndx].LastOperandCycle; + if ((FirstIdx + OperandIdx) >= LastIdx) + return -1; + + return (int)OperandCycles[FirstIdx + OperandIdx]; + } }; diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index d24ca67..4f567b0 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -22,6 +22,7 @@ #ifndef LLVM_TARGET_TARGETLOWERING_H #define LLVM_TARGET_TARGETLOWERING_H +#include "llvm/CallingConv.h" #include "llvm/InlineAsm.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/RuntimeLibcalls.h" @@ -54,6 +55,7 @@ namespace llvm { class TargetMachine; class TargetRegisterClass; class TargetSubtarget; + class TargetLoweringObjectFile; class Value; // FIXME: should this be here? @@ -77,6 +79,8 @@ namespace llvm { /// target-specific constructs to SelectionDAG operators. /// class TargetLowering { + TargetLowering(const TargetLowering&); // DO NOT IMPLEMENT + void operator=(const TargetLowering&); // DO NOT IMPLEMENT public: /// LegalizeAction - This enum indicates whether operations are valid for a /// target, and if not, what action should be used to make them valid. @@ -87,12 +91,6 @@ public: Custom // Use the LowerOperation hook to implement custom lowering. }; - enum OutOfRangeShiftAmount { - Undefined, // Oversized shift amounts are undefined (default). - Mask, // Shift amounts are auto masked (anded) to value size. - Extend // Oversized shift pulls in zeros or sign bits. - }; - enum BooleanContent { // How the target represents true/false values. UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. ZeroOrOneBooleanContent, // All bits zero except for bit 0. @@ -104,17 +102,18 @@ public: SchedulingForRegPressure // Scheduling for lowest register pressure. }; - explicit TargetLowering(TargetMachine &TM); + /// NOTE: The constructor takes ownership of TLOF. + explicit TargetLowering(TargetMachine &TM, TargetLoweringObjectFile *TLOF); virtual ~TargetLowering(); TargetMachine &getTargetMachine() const { return TM; } const TargetData *getTargetData() const { return TD; } + TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; } bool isBigEndian() const { return !IsLittleEndian; } bool isLittleEndian() const { return IsLittleEndian; } MVT getPointerTy() const { return PointerTy; } MVT getShiftAmountTy() const { return ShiftAmountTy; } - OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; } /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC /// codegen. @@ -137,7 +136,8 @@ public: /// the condition operand of SELECT and BRCOND nodes. In the case of /// BRCOND the argument passed is MVT::Other since there are no other /// operands to get a type hint from. 
- virtual MVT getSetCCResultType(MVT VT) const; + virtual + MVT::SimpleValueType getSetCCResultType(EVT VT) const; /// getBooleanContents - For targets without i1 registers, this gives the /// nature of the high-bits of boolean values held in types wider than i1. @@ -153,9 +153,9 @@ public: /// getRegClassFor - Return the register class that should be used for the /// specified value type. This may only be called on legal types. - TargetRegisterClass *getRegClassFor(MVT VT) const { - assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); - TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()]; + TargetRegisterClass *getRegClassFor(EVT VT) const { + assert(VT.isSimple() && "getRegClassFor called on illegal type!"); + TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy]; assert(RC && "This value type is not natively supported!"); return RC; } @@ -163,10 +163,10 @@ public: /// isTypeLegal - Return true if the target has native support for the /// specified value type. This means that it has a register that directly /// holds it without promotions or expansions. - bool isTypeLegal(MVT VT) const { + bool isTypeLegal(EVT VT) const { assert(!VT.isSimple() || - (unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); - return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0; + (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT)); + return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0; } class ValueTypeActionImpl { @@ -187,23 +187,23 @@ public: ValueTypeActions[3] = RHS.ValueTypeActions[3]; } - LegalizeAction getTypeAction(MVT VT) const { + LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const { if (VT.isExtended()) { if (VT.isVector()) { return VT.isPow2VectorType() ? Expand : Promote; } if (VT.isInteger()) // First promote to a power-of-two size, then expand if necessary. - return VT == VT.getRoundIntegerType() ? Expand : Promote; + return VT == VT.getRoundIntegerType(Context) ? Expand : Promote; assert(0 && "Unsupported extended type!"); return Legal; } - unsigned I = VT.getSimpleVT(); + unsigned I = VT.getSimpleVT().SimpleTy; assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3); } - void setTypeAction(MVT VT, LegalizeAction Action) { - unsigned I = VT.getSimpleVT(); + void setTypeAction(EVT VT, LegalizeAction Action) { + unsigned I = VT.getSimpleVT().SimpleTy; assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0])); ValueTypeActions[I>>4] |= Action << ((I*2) & 31); } @@ -217,8 +217,8 @@ public: /// it is already legal (return 'Legal') or we need to promote it to a larger /// type (return 'Promote'), or we need to expand it into multiple registers /// of smaller integer type (return 'Expand'). 'Custom' is not an option. - LegalizeAction getTypeAction(MVT VT) const { - return ValueTypeActions.getTypeAction(VT); + LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const { + return ValueTypeActions.getTypeAction(Context, VT); } /// getTypeToTransformTo - For types supported by the target, this is an @@ -227,33 +227,37 @@ public: /// than the largest integer register, this contains one step in the expansion /// to get to the smaller register. For illegal floating point types, this /// returns the integer type to transform to. 
- MVT getTypeToTransformTo(MVT VT) const { + EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const { if (VT.isSimple()) { - assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType)); - MVT NVT = TransformToType[VT.getSimpleVT()]; - assert(getTypeAction(NVT) != Promote && + assert((unsigned)VT.getSimpleVT().SimpleTy < + array_lengthof(TransformToType)); + EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy]; + assert(getTypeAction(Context, NVT) != Promote && "Promote may not follow Expand or Promote"); return NVT; } if (VT.isVector()) { - MVT NVT = VT.getPow2VectorType(); + EVT NVT = VT.getPow2VectorType(Context); if (NVT == VT) { // Vector length is a power of 2 - split to half the size. unsigned NumElts = VT.getVectorNumElements(); - MVT EltVT = VT.getVectorElementType(); - return (NumElts == 1) ? EltVT : MVT::getVectorVT(EltVT, NumElts / 2); + EVT EltVT = VT.getVectorElementType(); + return (NumElts == 1) ? + EltVT : EVT::getVectorVT(Context, EltVT, NumElts / 2); } // Promote to a power of two size, avoiding multi-step promotion. - return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT; + return getTypeAction(Context, NVT) == Promote ? + getTypeToTransformTo(Context, NVT) : NVT; } else if (VT.isInteger()) { - MVT NVT = VT.getRoundIntegerType(); + EVT NVT = VT.getRoundIntegerType(Context); if (NVT == VT) // Size is a power of two - expand to half the size. - return MVT::getIntegerVT(VT.getSizeInBits() / 2); + return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2); else // Promote to a power of two size, avoiding multi-step promotion. - return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT; + return getTypeAction(Context, NVT) == Promote ? + getTypeToTransformTo(Context, NVT) : NVT; } assert(0 && "Unsupported extended type!"); return MVT(MVT::Other); // Not reached @@ -263,14 +267,14 @@ public: /// identity function. For types that must be expanded (i.e. integer types /// that are larger than the largest integer register or illegal floating /// point types), this returns the largest legal type it will be expanded to. - MVT getTypeToExpandTo(MVT VT) const { + EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const { assert(!VT.isVector()); while (true) { - switch (getTypeAction(VT)) { + switch (getTypeAction(Context, VT)) { case Legal: return VT; case Expand: - VT = getTypeToTransformTo(VT); + VT = getTypeToTransformTo(Context, VT); break; default: assert(false && "Type is not legal nor is it to be expanded!"); @@ -281,18 +285,18 @@ public: } /// getVectorTypeBreakdown - Vector types are broken down into some number of - /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 - /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. - /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. + /// legal first class types. For example, EVT::v8f32 maps to 2 EVT::v4f32 + /// with Altivec or SSE1, or 8 promoted EVT::f64 values with the X86 FP stack. + /// Similarly, EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86. /// /// This method returns the number of registers needed, and the VT for each /// register. It also returns the VT and quantity of the intermediate values /// before they are promoted/expanded. 
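A brief usage sketch, not from the commit, of the context-aware getVectorTypeBreakdown declared above, using the v8f32 case mentioned in the comment. TLI and Ctx are assumed to be in scope in a real backend and are passed as parameters here.

```cpp
// Hypothetical query: how many registers back an illegal v8f32, via the
// EVT-based breakdown declared above.
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

static unsigned regsForV8F32(const TargetLowering &TLI, LLVMContext &Ctx) {
  EVT VT = MVT::v8f32;
  EVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  // On a target whose widest vector register is 128 bits, this reports two
  // v4f32 intermediates, matching the example in the comment above.
  return TLI.getVectorTypeBreakdown(Ctx, VT, IntermediateVT,
                                    NumIntermediates, RegisterVT);
}
```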
/// - unsigned getVectorTypeBreakdown(MVT VT, - MVT &IntermediateVT, + unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, + EVT &IntermediateVT, unsigned &NumIntermediates, - MVT &RegisterVT) const; + EVT &RegisterVT) const; /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If @@ -300,7 +304,7 @@ public: /// information into the IntrinsicInfo that was passed to the function. typedef struct IntrinsicInfo { unsigned opc; // target opcode - MVT memVT; // memory VT + EVT memVT; // memory VT const Value* ptrVal; // value representing memory location int offset; // offset off of ptrVal unsigned align; // alignment @@ -319,7 +323,7 @@ public: /// If there is no vector type that we want to widen to, returns MVT::Other /// When and were to widen is target dependent based on the cost of /// scalarizing vs using the wider vector type. - virtual MVT getWidenVectorType(MVT VT) const; + virtual EVT getWidenVectorType(EVT VT) const; typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator; legal_fpimm_iterator legal_fpimm_begin() const { @@ -334,7 +338,7 @@ public: /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values /// are assumed to be legal. virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, - MVT VT) const { + EVT VT) const { return true; } @@ -343,7 +347,7 @@ public: /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant /// pool entry. virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, - MVT VT) const { + EVT VT) const { return false; } @@ -351,12 +355,12 @@ public: /// it is legal, needs to be promoted to a larger size, needs to be /// expanded to some other code sequence, or the target has a custom expander /// for it. - LegalizeAction getOperationAction(unsigned Op, MVT VT) const { + LegalizeAction getOperationAction(unsigned Op, EVT VT) const { if (VT.isExtended()) return Expand; assert(Op < array_lengthof(OpActions[0]) && - (unsigned)VT.getSimpleVT() < sizeof(OpActions[0][0])*8 && + (unsigned)VT.getSimpleVT().SimpleTy < sizeof(OpActions[0][0])*8 && "Table isn't big enough!"); - unsigned I = (unsigned) VT.getSimpleVT(); + unsigned I = (unsigned) VT.getSimpleVT().SimpleTy; unsigned J = I & 31; I = I >> 5; return (LegalizeAction)((OpActions[I][Op] >> (J*2) ) & 3); @@ -365,7 +369,7 @@ public: /// isOperationLegalOrCustom - Return true if the specified operation is /// legal on this target or can be made legal with custom lowering. This /// is used to help guide high-level lowering decisions. - bool isOperationLegalOrCustom(unsigned Op, MVT VT) const { + bool isOperationLegalOrCustom(unsigned Op, EVT VT) const { return (VT == MVT::Other || isTypeLegal(VT)) && (getOperationAction(Op, VT) == Legal || getOperationAction(Op, VT) == Custom); @@ -373,7 +377,7 @@ public: /// isOperationLegal - Return true if the specified operation is legal on this /// target. - bool isOperationLegal(unsigned Op, MVT VT) const { + bool isOperationLegal(unsigned Op, EVT VT) const { return (VT == MVT::Other || isTypeLegal(VT)) && getOperationAction(Op, VT) == Legal; } @@ -382,16 +386,17 @@ public: /// either it is legal, needs to be promoted to a larger size, needs to be /// expanded to some other code sequence, or the target has a custom expander /// for it. 
- LegalizeAction getLoadExtAction(unsigned LType, MVT VT) const { + LegalizeAction getLoadExtAction(unsigned LType, EVT VT) const { assert(LType < array_lengthof(LoadExtActions) && - (unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 && + (unsigned)VT.getSimpleVT().SimpleTy < sizeof(LoadExtActions[0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((LoadExtActions[LType] >> (2*VT.getSimpleVT())) & 3); + return (LegalizeAction)((LoadExtActions[LType] >> + (2*VT.getSimpleVT().SimpleTy)) & 3); } /// isLoadExtLegal - Return true if the specified load with extension is legal /// on this target. - bool isLoadExtLegal(unsigned LType, MVT VT) const { + bool isLoadExtLegal(unsigned LType, EVT VT) const { return VT.isSimple() && (getLoadExtAction(LType, VT) == Legal || getLoadExtAction(LType, VT) == Custom); @@ -401,18 +406,20 @@ public: /// treated: either it is legal, needs to be promoted to a larger size, needs /// to be expanded to some other code sequence, or the target has a custom /// expander for it. - LegalizeAction getTruncStoreAction(MVT ValVT, - MVT MemVT) const { - assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && - (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && + LegalizeAction getTruncStoreAction(EVT ValVT, + EVT MemVT) const { + assert((unsigned)ValVT.getSimpleVT().SimpleTy < + array_lengthof(TruncStoreActions) && + (unsigned)MemVT.getSimpleVT().SimpleTy < + sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >> - (2*MemVT.getSimpleVT())) & 3); + return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT().SimpleTy] >> + (2*MemVT.getSimpleVT().SimpleTy)) & 3); } /// isTruncStoreLegal - Return true if the specified store with truncation is /// legal on this target. - bool isTruncStoreLegal(MVT ValVT, MVT MemVT) const { + bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const { return isTypeLegal(ValVT) && MemVT.isSimple() && (getTruncStoreAction(ValVT, MemVT) == Legal || getTruncStoreAction(ValVT, MemVT) == Custom); @@ -423,16 +430,17 @@ public: /// expanded to some other code sequence, or the target has a custom expander /// for it. LegalizeAction - getIndexedLoadAction(unsigned IdxMode, MVT VT) const { + getIndexedLoadAction(unsigned IdxMode, EVT VT) const { assert( IdxMode < array_lengthof(IndexedModeActions[0][0]) && - ((unsigned)VT.getSimpleVT()) < MVT::LAST_VALUETYPE && + ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE && "Table isn't big enough!"); - return (LegalizeAction)((IndexedModeActions[(unsigned)VT.getSimpleVT()][0][IdxMode])); + return (LegalizeAction)((IndexedModeActions[ + (unsigned)VT.getSimpleVT().SimpleTy][0][IdxMode])); } /// isIndexedLoadLegal - Return true if the specified indexed load is legal /// on this target. - bool isIndexedLoadLegal(unsigned IdxMode, MVT VT) const { + bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const { return VT.isSimple() && (getIndexedLoadAction(IdxMode, VT) == Legal || getIndexedLoadAction(IdxMode, VT) == Custom); @@ -443,16 +451,17 @@ public: /// expanded to some other code sequence, or the target has a custom expander /// for it. 
LegalizeAction - getIndexedStoreAction(unsigned IdxMode, MVT VT) const { + getIndexedStoreAction(unsigned IdxMode, EVT VT) const { assert(IdxMode < array_lengthof(IndexedModeActions[0][1]) && - (unsigned)VT.getSimpleVT() < MVT::LAST_VALUETYPE && + (unsigned)VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE && "Table isn't big enough!"); - return (LegalizeAction)((IndexedModeActions[(unsigned)VT.getSimpleVT()][1][IdxMode])); + return (LegalizeAction)((IndexedModeActions[ + (unsigned)VT.getSimpleVT().SimpleTy][1][IdxMode])); } /// isIndexedStoreLegal - Return true if the specified indexed load is legal /// on this target. - bool isIndexedStoreLegal(unsigned IdxMode, MVT VT) const { + bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const { return VT.isSimple() && (getIndexedStoreAction(IdxMode, VT) == Legal || getIndexedStoreAction(IdxMode, VT) == Custom); @@ -463,17 +472,19 @@ public: /// expanded to some other code sequence, or the target has a custom expander /// for it. LegalizeAction - getConvertAction(MVT FromVT, MVT ToVT) const { - assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && - (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + getConvertAction(EVT FromVT, EVT ToVT) const { + assert((unsigned)FromVT.getSimpleVT().SimpleTy < + array_lengthof(ConvertActions) && + (unsigned)ToVT.getSimpleVT().SimpleTy < + sizeof(ConvertActions[0])*4 && "Table isn't big enough!"); - return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >> - (2*ToVT.getSimpleVT())) & 3); + return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT().SimpleTy] >> + (2*ToVT.getSimpleVT().SimpleTy)) & 3); } /// isConvertLegal - Return true if the specified conversion is legal /// on this target. - bool isConvertLegal(MVT FromVT, MVT ToVT) const { + bool isConvertLegal(EVT FromVT, EVT ToVT) const { return isTypeLegal(FromVT) && isTypeLegal(ToVT) && (getConvertAction(FromVT, ToVT) == Legal || getConvertAction(FromVT, ToVT) == Custom); @@ -483,19 +494,19 @@ public: /// either it is legal, needs to be expanded to some other code sequence, /// or the target has a custom expander for it. LegalizeAction - getCondCodeAction(ISD::CondCode CC, MVT VT) const { + getCondCodeAction(ISD::CondCode CC, EVT VT) const { assert((unsigned)CC < array_lengthof(CondCodeActions) && - (unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 && + (unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 && "Table isn't big enough!"); LegalizeAction Action = (LegalizeAction) - ((CondCodeActions[CC] >> (2*VT.getSimpleVT())) & 3); + ((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3); assert(Action != Promote && "Can't promote condition code!"); return Action; } /// isCondCodeLegal - Return true if the specified condition code is legal /// on this target. - bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const { + bool isCondCodeLegal(ISD::CondCode CC, EVT VT) const { return getCondCodeAction(CC, VT) == Legal || getCondCodeAction(CC, VT) == Custom; } @@ -503,22 +514,22 @@ public: /// getTypeToPromoteTo - If the action for this operation is to promote, this /// method returns the ValueType to promote to. - MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { + EVT getTypeToPromoteTo(unsigned Op, EVT VT) const { assert(getOperationAction(Op, VT) == Promote && "This operation isn't promoted!"); // See if this has an explicit type specified. 
std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>::const_iterator PTTI = - PromoteToType.find(std::make_pair(Op, VT.getSimpleVT())); + PromoteToType.find(std::make_pair(Op, VT.getSimpleVT().SimpleTy)); if (PTTI != PromoteToType.end()) return PTTI->second; assert((VT.isInteger() || VT.isFloatingPoint()) && "Cannot autopromote this type, add it with AddPromotedToType."); - MVT NVT = VT; + EVT NVT = VT; do { - NVT = (MVT::SimpleValueType)(NVT.getSimpleVT()+1); + NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1); assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && "Didn't find type to promote to!"); } while (!isTypeLegal(NVT) || @@ -526,13 +537,13 @@ public: return NVT; } - /// getValueType - Return the MVT corresponding to this LLVM type. + /// getValueType - Return the EVT corresponding to this LLVM type. /// This is fixed by the LLVM operations except for the pointer size. If - /// AllowUnknown is true, this will return MVT::Other for types with no MVT + /// AllowUnknown is true, this will return MVT::Other for types with no EVT /// counterpart (e.g. structs), otherwise it will assert. - MVT getValueType(const Type *Ty, bool AllowUnknown = false) const { - MVT VT = MVT::getMVT(Ty, AllowUnknown); - return VT == MVT::iPTR ? PointerTy : VT; + EVT getValueType(const Type *Ty, bool AllowUnknown = false) const { + EVT VT = EVT::getEVT(Ty, AllowUnknown); + return VT == MVT:: iPTR ? PointerTy : VT; } /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate @@ -542,22 +553,31 @@ public: /// getRegisterType - Return the type of registers that this ValueType will /// eventually require. - MVT getRegisterType(MVT VT) const { + EVT getRegisterType(MVT VT) const { + assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT)); + return RegisterTypeForVT[VT.SimpleTy]; + } + + /// getRegisterType - Return the type of registers that this ValueType will + /// eventually require. + EVT getRegisterType(LLVMContext &Context, EVT VT) const { if (VT.isSimple()) { - assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT)); - return RegisterTypeForVT[VT.getSimpleVT()]; + assert((unsigned)VT.getSimpleVT().SimpleTy < + array_lengthof(RegisterTypeForVT)); + return RegisterTypeForVT[VT.getSimpleVT().SimpleTy]; } if (VT.isVector()) { - MVT VT1, RegisterVT; + EVT VT1, RegisterVT; unsigned NumIntermediates; - (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT); + (void)getVectorTypeBreakdown(Context, VT, VT1, + NumIntermediates, RegisterVT); return RegisterVT; } if (VT.isInteger()) { - return getRegisterType(getTypeToTransformTo(VT)); + return getRegisterType(Context, getTypeToTransformTo(Context, VT)); } assert(0 && "Unsupported extended type!"); - return MVT(MVT::Other); // Not reached + return EVT(MVT::Other); // Not reached } /// getNumRegisters - Return the number of registers that this ValueType will @@ -566,19 +586,20 @@ public: /// into pieces. For types like i140, which are first promoted then expanded, /// it is the number of registers needed to hold all the bits of the original /// type. For an i140 on a 32 bit machine this means 5 registers. 
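To make the getValueType / getRegisterType queries above (and getNumRegisters just below) concrete, here is what they would return on a hypothetical 32-bit target whose widest legal vector type is v4i32. The numbers are assumptions about that imaginary target, not statements about any in-tree backend; TLI is a TargetLowering reference, Context an LLVMContext, and the usual IR headers are assumed:

    // Pointers resolve to PointerTy (i32 on this hypothetical target).
    EVT PtrVT = TLI.getValueType(PointerType::getUnqual(Type::getInt32Ty(Context)));

    // v8i32 is not legal here; it is broken into 2 x v4i32, so the register
    // type is v4i32 and two registers are needed per value.
    EVT WideVT = EVT::getVectorVT(Context, MVT::i32, 8);
    EVT RegVT  = TLI.getRegisterType(Context, WideVT);   // v4i32
    unsigned N = TLI.getNumRegisters(Context, WideVT);   // 2

    // i140 is promoted/expanded; with 32-bit registers that is
    // (140 + 32 - 1) / 32 = 5 registers, matching the comment above.
    unsigned M = TLI.getNumRegisters(Context, EVT::getIntegerVT(Context, 140));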
- unsigned getNumRegisters(MVT VT) const { + unsigned getNumRegisters(LLVMContext &Context, EVT VT) const { if (VT.isSimple()) { - assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT)); - return NumRegistersForVT[VT.getSimpleVT()]; + assert((unsigned)VT.getSimpleVT().SimpleTy < + array_lengthof(NumRegistersForVT)); + return NumRegistersForVT[VT.getSimpleVT().SimpleTy]; } if (VT.isVector()) { - MVT VT1, VT2; + EVT VT1, VT2; unsigned NumIntermediates; - return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2); + return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2); } if (VT.isInteger()) { unsigned BitWidth = VT.getSizeInBits(); - unsigned RegWidth = getRegisterType(VT).getSizeInBits(); + unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); return (BitWidth + RegWidth - 1) / RegWidth; } assert(0 && "Unsupported extended type!"); @@ -588,7 +609,7 @@ public: /// ShouldShrinkFPConstant - If true, then instruction selection should /// seek to shrink the FP constant of the specified type to a smaller type /// in order to save space and / or reduce runtime. - virtual bool ShouldShrinkFPConstant(MVT VT) const { return true; } + virtual bool ShouldShrinkFPConstant(EVT VT) const { return true; } /// hasTargetDAGCombine - If true, the target has custom DAG combine /// transformations that it can perform for the specified node. @@ -616,13 +637,13 @@ public: unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; } /// This function returns true if the target allows unaligned memory accesses. - /// This is used, for example, in situations where an array copy/move/set is - /// converted to a sequence of store operations. It's use helps to ensure that - /// such replacements don't generate code that causes an alignment error - /// (trap) on the target machine. + /// of the specified type. This is used, for example, in situations where an + /// array copy/move/set is converted to a sequence of store operations. It's + /// use helps to ensure that such replacements don't generate code that causes + /// an alignment error (trap) on the target machine. /// @brief Determine if the target supports unaligned memory accesses. - bool allowsUnalignedMemoryAccesses() const { - return allowUnalignedMemoryAccesses; + virtual bool allowsUnalignedMemoryAccesses(EVT VT) const { + return false; } /// This function returns true if the target would benefit from code placement @@ -634,9 +655,9 @@ public: /// getOptimalMemOpType - Returns the target specific optimal type for load /// and store operations as a result of memset, memcpy, and memmove lowering. - /// It returns MVT::iAny if SelectionDAG should be responsible for + /// It returns EVT::iAny if SelectionDAG should be responsible for /// determining it. - virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align, + virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align, bool isSrcConst, bool isSrcStr, SelectionDAG &DAG) const { return MVT::iAny; @@ -804,14 +825,17 @@ public: struct DAGCombinerInfo { void *DC; // The DAG Combiner object. 
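One of the more visible interface changes in this hunk is that the old allowUnalignedMemoryAccesses flag becomes a per-type virtual. A minimal sketch of an override, assuming a hypothetical MyTargetLowering subclass that tolerates unaligned scalar integer accesses but nothing else:

    bool MyTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
      // Unaligned 32- and 64-bit integer loads/stores are fast enough to use
      // when expanding memcpy/memset; floats and vectors must stay aligned.
      return VT == MVT::i32 || VT == MVT::i64;
    }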
bool BeforeLegalize; + bool BeforeLegalizeOps; bool CalledByLegalizer; public: SelectionDAG &DAG; - DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc) - : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {} + DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc) + : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo), + CalledByLegalizer(cl), DAG(dag) {} bool isBeforeLegalize() const { return BeforeLegalize; } + bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; } bool isCalledByLegalizer() const { return CalledByLegalizer; } void AddToWorklist(SDNode *N); @@ -825,7 +849,7 @@ public: /// SimplifySetCC - Try to simplify a setcc built with the specified operands /// and cc. If it is unable to simplify it, return a null SDValue. - SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1, + SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, DebugLoc dl) const; @@ -878,12 +902,6 @@ protected: SchedPreferenceInfo = Pref; } - /// setShiftAmountFlavor - Describe how the target handles out of range shift - /// amounts. - void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) { - ShiftAmtHandling = OORSA; - } - /// setUseUnderscoreSetJmp - Indicate whether this target prefers to /// use _setjmp to implement llvm.setjmp or the non _ version. /// Defaults to false. @@ -936,10 +954,10 @@ protected: /// addRegisterClass - Add the specified register class as an available /// regclass for the specified value type. This indicates the selector can /// handle values of that class natively. - void addRegisterClass(MVT VT, TargetRegisterClass *RC) { - assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT)); + void addRegisterClass(EVT VT, TargetRegisterClass *RC) { + assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT)); AvailableRegClasses.push_back(std::make_pair(VT, RC)); - RegClassForVT[VT.getSimpleVT()] = RC; + RegClassForVT[VT.getSimpleVT().SimpleTy] = RC; } /// computeRegisterProperties - Once all of the register classes are added, @@ -950,9 +968,7 @@ protected: /// with the specified type and indicate what to do about it. void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) { - assert((unsigned)VT.getSimpleVT() < sizeof(OpActions[0][0])*8 && - Op < array_lengthof(OpActions[0]) && "Table isn't big enough!"); - unsigned I = (unsigned) VT.getSimpleVT(); + unsigned I = (unsigned)VT.SimpleTy; unsigned J = I & 31; I = I >> 5; OpActions[I][Op] &= ~(uint64_t(3UL) << (J*2)); @@ -963,24 +979,22 @@ protected: /// not work with the with specified type and indicate what to do about it. void setLoadExtAction(unsigned ExtType, MVT VT, LegalizeAction Action) { - assert((unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 && + assert((unsigned)VT.SimpleTy < sizeof(LoadExtActions[0])*4 && ExtType < array_lengthof(LoadExtActions) && "Table isn't big enough!"); - LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); - LoadExtActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2; + LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.SimpleTy*2); + LoadExtActions[ExtType] |= (uint64_t)Action << VT.SimpleTy*2; } /// setTruncStoreAction - Indicate that the specified truncating store does /// not work with the with specified type and indicate what to do about it. 
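The new BeforeLegalizeOps flag is what target-specific combines typically key on. A sketch of how a backend's PerformDAGCombine override (a hook declared elsewhere in TargetLowering) might use it; MyTargetLowering is hypothetical:

    SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
      // This particular combine only makes sense once operation legalization
      // has run, so bail out early before that point.
      if (DCI.isBeforeLegalizeOps())
        return SDValue();
      // ... match and rewrite target-specific patterns rooted at N here ...
      return SDValue();
    }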
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) { - assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && - (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && + assert((unsigned)ValVT.SimpleTy < array_lengthof(TruncStoreActions) && + (unsigned)MemVT.SimpleTy < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!"); - TruncStoreActions[ValVT.getSimpleVT()] &= ~(uint64_t(3UL) << - MemVT.getSimpleVT()*2); - TruncStoreActions[ValVT.getSimpleVT()] |= (uint64_t)Action << - MemVT.getSimpleVT()*2; + TruncStoreActions[ValVT.SimpleTy] &= ~(uint64_t(3UL) << MemVT.SimpleTy*2); + TruncStoreActions[ValVT.SimpleTy] |= (uint64_t)Action << MemVT.SimpleTy*2; } /// setIndexedLoadAction - Indicate that the specified indexed load does or @@ -989,10 +1003,10 @@ protected: /// TargetLowering.cpp void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { - assert((unsigned)VT.getSimpleVT() < MVT::LAST_VALUETYPE && + assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE && IdxMode < array_lengthof(IndexedModeActions[0][0]) && "Table isn't big enough!"); - IndexedModeActions[(unsigned)VT.getSimpleVT()][0][IdxMode] = (uint8_t)Action; + IndexedModeActions[(unsigned)VT.SimpleTy][0][IdxMode] = (uint8_t)Action; } /// setIndexedStoreAction - Indicate that the specified indexed store does or @@ -1001,33 +1015,32 @@ protected: /// TargetLowering.cpp void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { - assert((unsigned)VT.getSimpleVT() < MVT::LAST_VALUETYPE && + assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE && IdxMode < array_lengthof(IndexedModeActions[0][1] ) && "Table isn't big enough!"); - IndexedModeActions[(unsigned)VT.getSimpleVT()][1][IdxMode] = (uint8_t)Action; + IndexedModeActions[(unsigned)VT.SimpleTy][1][IdxMode] = (uint8_t)Action; } /// setConvertAction - Indicate that the specified conversion does or does /// not work with the with specified type and indicate what to do about it. void setConvertAction(MVT FromVT, MVT ToVT, LegalizeAction Action) { - assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && - (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && + assert((unsigned)FromVT.SimpleTy < array_lengthof(ConvertActions) && + (unsigned)ToVT.SimpleTy < sizeof(ConvertActions[0])*4 && "Table isn't big enough!"); - ConvertActions[FromVT.getSimpleVT()] &= ~(uint64_t(3UL) << - ToVT.getSimpleVT()*2); - ConvertActions[FromVT.getSimpleVT()] |= (uint64_t)Action << - ToVT.getSimpleVT()*2; + ConvertActions[FromVT.SimpleTy] &= ~(uint64_t(3UL) << ToVT.SimpleTy*2); + ConvertActions[FromVT.SimpleTy] |= (uint64_t)Action << ToVT.SimpleTy*2; } /// setCondCodeAction - Indicate that the specified condition code is or isn't /// supported on the target and indicate what to do about it. 
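Taken together, the set*Action entry points in this region (plus addRegisterClass and computeRegisterProperties a little earlier in the hunk) are how a backend describes its machine. A compressed sketch for a hypothetical in-order 32-bit target, shown as bare calls; in a real backend these sit in the TargetLowering subclass constructor, and the register-class name is a placeholder:

    addRegisterClass(MVT::i32, MyTarget::GPR32RegisterClass);

    setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);     // no i1 extending loads
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);       // no f64 -> f32 store

    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);  // post-increment addressing
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);

    setCondCodeAction(ISD::SETUGE, MVT::i32, Expand);      // only a subset of CCs in hardware

    setOperationAction(ISD::CTPOP, MVT::i8, Promote);      // do popcount in i32 instead
    AddPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);

    computeRegisterProperties();                           // derive the remaining tables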
- void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action) { - assert((unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 && + void setCondCodeAction(ISD::CondCode CC, MVT VT, + LegalizeAction Action) { + assert((unsigned)VT.SimpleTy < sizeof(CondCodeActions[0])*4 && (unsigned)CC < array_lengthof(CondCodeActions) && "Table isn't big enough!"); - CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); - CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.getSimpleVT()*2; + CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2); + CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2; } /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the @@ -1035,8 +1048,7 @@ protected: /// one that works. If that default is insufficient, this method can be used /// by the target to override the default. void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { - PromoteToType[std::make_pair(Opc, OrigVT.getSimpleVT())] = - DestVT.getSimpleVT(); + PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy; } /// addLegalFPImmediate - Indicate that this target can instruction select @@ -1090,21 +1102,33 @@ public: assert(0 && "Not Implemented"); return NULL; // this is here to silence compiler errors } + //===--------------------------------------------------------------------===// // Lowering methods - These methods must be implemented by targets so that // the SelectionDAGLowering code knows how to lower these. // - /// LowerArguments - This hook must be implemented to indicate how we should - /// lower the arguments for the specified function, into the specified DAG. - virtual void - LowerArguments(Function &F, SelectionDAG &DAG, - SmallVectorImpl<SDValue>& ArgValues, DebugLoc dl); + /// LowerFormalArguments - This hook must be implemented to lower the + /// incoming (formal) arguments, described by the Ins array, into the + /// specified DAG. The implementation should fill in the InVals array + /// with legal-type argument values, and return the resulting token + /// chain value. + /// + virtual SDValue + LowerFormalArguments(SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) { + assert(0 && "Not Implemented"); + return SDValue(); // this is here to silence compiler errors + } - /// LowerCallTo - This hook lowers an abstract call to a function into an + /// LowerCallTo - This function lowers an abstract call to a function into an /// actual call. This returns a pair of operands. The first element is the /// return value for the function (if RetTy is not VoidTy). The second - /// element is the outgoing token chain. + /// element is the outgoing token chain. It calls LowerCall to do the actual + /// lowering. 
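LowerFormalArguments is one of the new calling-convention hooks introduced here, replacing the old LowerArguments. A deliberately minimal override for a hypothetical target whose functions take no arguments at all; a real backend runs the CCState/CCAssignFn machinery over Ins and pushes one legal-typed value per incoming argument into InVals:

    SDValue
    MyTargetLowering::LowerFormalArguments(SDValue Chain,
                                           CallingConv::ID CallConv, bool isVarArg,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           DebugLoc dl, SelectionDAG &DAG,
                                           SmallVectorImpl<SDValue> &InVals) {
      assert(Ins.empty() && "Sketch only handles zero-argument functions");
      return Chain;  // nothing to materialize; just forward the chain token
    }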
struct ArgListEntry { SDValue Node; const Type* Ty; @@ -1120,11 +1144,48 @@ public: isSRet(false), isNest(false), isByVal(false), Alignment(0) { } }; typedef std::vector<ArgListEntry> ArgListTy; - virtual std::pair<SDValue, SDValue> + std::pair<SDValue, SDValue> LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, bool isVarArg, bool isInreg, unsigned NumFixedArgs, - unsigned CallingConv, bool isTailCall, SDValue Callee, - ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl); + CallingConv::ID CallConv, bool isTailCall, + bool isReturnValueUsed, SDValue Callee, ArgListTy &Args, + SelectionDAG &DAG, DebugLoc dl); + + /// LowerCall - This hook must be implemented to lower calls into the + /// the specified DAG. The outgoing arguments to the call are described + /// by the Outs array, and the values to be returned by the call are + /// described by the Ins array. The implementation should fill in the + /// InVals array with legal-type return values from the call, and return + /// the resulting token chain value. + /// + /// The isTailCall flag here is normative. If it is true, the + /// implementation must emit a tail call. The + /// IsEligibleForTailCallOptimization hook should be used to catch + /// cases that cannot be handled. + /// + virtual SDValue + LowerCall(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, bool isTailCall, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) { + assert(0 && "Not Implemented"); + return SDValue(); // this is here to silence compiler errors + } + + /// LowerReturn - This hook must be implemented to lower outgoing + /// return values, described by the Outs array, into the specified + /// DAG. The implementation should return the resulting token chain + /// value. + /// + virtual SDValue + LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + DebugLoc dl, SelectionDAG &DAG) { + assert(0 && "Not Implemented"); + return SDValue(); // this is here to silence compiler errors + } /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a /// memcpy. This can be used by targets to provide code sequences for cases @@ -1220,19 +1281,17 @@ public: /// IsEligibleForTailCallOptimization - Check whether the call is eligible for /// tail call optimization. Targets which want to do tail call optimization - /// should override this function. - virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call, - SDValue Ret, - SelectionDAG &DAG) const { + /// should override this function. + virtual bool + IsEligibleForTailCallOptimization(SDValue Callee, + CallingConv::ID CalleeCC, + bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + SelectionDAG& DAG) const { + // Conservative default: no calls are eligible. return false; } - /// CheckTailCallReturnConstraints - Check whether CALL node immediatly - /// preceeds the RET node and whether the return uses the result of the node - /// or is a void return. This function can be used by the target to determine - /// eligiblity of tail call optimization. - static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret); - /// GetPossiblePreceedingTailCall - Get preceeding TailCallNodeOpCode node if /// it exists. Skip a possible ISD::TokenFactor. 
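A matching sketch for the new LowerReturn hook, again assuming a hypothetical backend and handling only the void-return case. MyISD::RET_FLAG stands in for whatever return node the target defines; a real implementation first assigns each value in Outs to its return register:

    SDValue
    MyTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                  bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  DebugLoc dl, SelectionDAG &DAG) {
      assert(Outs.empty() && "Sketch only lowers functions that return void");
      return DAG.getNode(MyISD::RET_FLAG, dl, MVT::Other, Chain);
    }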
static SDValue GetPossiblePreceedingTailCall(SDValue Chain, @@ -1270,6 +1329,14 @@ public: // Inline Asm Support hooks // + /// ExpandInlineAsm - This hook allows the target to expand an inline asm + /// call to be explicit llvm code if it wants to. This is useful for + /// turning simple inline asms into LLVM intrinsics, which gives the + /// compiler more information about the behavior of the code. + virtual bool ExpandInlineAsm(CallInst *CI) const { + return false; + } + enum ConstraintType { C_Register, // Constraint represents specific register(s). C_RegisterClass, // Constraint represents any of register(s) in class. @@ -1296,7 +1363,7 @@ public: Value *CallOperandVal; /// ConstraintVT - The ValueType for the operand value. - MVT ConstraintVT; + EVT ConstraintVT; /// isMatchingInputConstraint - Return true of this is an input operand that /// is a matching constraint like "4". @@ -1333,7 +1400,7 @@ public: /// This should only be used for C_RegisterClass constraints. virtual std::vector<unsigned> getRegClassForInlineAsmConstraint(const std::string &Constraint, - MVT VT) const; + EVT VT) const; /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g. /// {edx}), return the register number and the register class for the @@ -1347,13 +1414,13 @@ public: /// this returns a register number of 0 and a null register class pointer.. virtual std::pair<unsigned, const TargetRegisterClass*> getRegForInlineAsmConstraint(const std::string &Constraint, - MVT VT) const; + EVT VT) const; /// LowerXConstraint - try to replace an X constraint, which matches anything, /// with another that has more specific requirements based on the type of the /// corresponding operand. This returns null if there is no replacement to /// make. - virtual const char *LowerXConstraint(MVT ConstraintVT) const; + virtual const char *LowerXConstraint(EVT ConstraintVT) const; /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true @@ -1373,8 +1440,12 @@ public: // instructions are special in various ways, which require special support to // insert. The specified MachineInstr is created but not inserted into any // basic blocks, and the scheduler passes ownership of it to this method. + // When new basic blocks are inserted and the edges from MBB to its successors + // are modified, the method should insert pairs of <OldSucc, NewSucc> into the + // DenseMap. virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, - MachineBasicBlock *MBB) const; + MachineBasicBlock *MBB, + DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const; //===--------------------------------------------------------------------===// // Addressing mode description hooks (used by LSR etc). @@ -1410,7 +1481,7 @@ public: return false; } - virtual bool isTruncateFree(MVT VT1, MVT VT2) const { + virtual bool isTruncateFree(EVT VT1, EVT VT2) const { return false; } @@ -1426,14 +1497,14 @@ public: return false; } - virtual bool isZExtFree(MVT VT1, MVT VT2) const { + virtual bool isZExtFree(EVT VT1, EVT VT2) const { return false; } /// isNarrowingProfitable - Return true if it's profitable to narrow /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow /// from i32 to i8 but not from i32 to i16. 
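The inline-asm constraint hooks in this hunk are the usual customization point for single-letter register constraints. A sketch of the common override shape, with MyTarget::GPRRegisterClass as a placeholder register class; anything unhandled falls back to the base implementation:

    std::pair<unsigned, const TargetRegisterClass*>
    MyTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                   EVT VT) const {
      if (Constraint.size() == 1 && Constraint[0] == 'r')
        return std::make_pair(0U, MyTarget::GPRRegisterClass);  // any GPR will do
      return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
    }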
- virtual bool isNarrowingProfitable(MVT VT1, MVT VT2) const { + virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const { return false; } @@ -1474,9 +1545,22 @@ public: return CmpLibcallCCs[Call]; } + /// setLibcallCallingConv - Set the CallingConv that should be used for the + /// specified libcall. + void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) { + LibcallCallingConvs[Call] = CC; + } + + /// getLibcallCallingConv - Get the CallingConv that should be used for the + /// specified libcall. + CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const { + return LibcallCallingConvs[Call]; + } + private: TargetMachine &TM; const TargetData *TD; + TargetLoweringObjectFile &TLOF; /// PointerTy - The type to use for pointers, usually i32 or i64. /// @@ -1517,8 +1601,6 @@ private: /// PointerTy is. MVT ShiftAmountTy; - OutOfRangeShiftAmount ShiftAmtHandling; - /// BooleanContents - Information about the contents of the high-bits in /// boolean values held in a type wider than i1. See getBooleanContents. BooleanContent BooleanContents; @@ -1565,14 +1647,14 @@ private: /// each ValueType the target supports natively. TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; - MVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; + EVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; /// TransformToType - For any value types we are promoting or expanding, this /// contains the value type that we are changing to. For Expanded types, this /// contains one step of the expand (e.g. i64 -> i32), even if there are /// multiple steps required (e.g. i64 -> i16). For types natively supported /// by the system, this holds the same type (e.g. i32 -> i32). - MVT TransformToType[MVT::LAST_VALUETYPE]; + EVT TransformToType[MVT::LAST_VALUETYPE]; /// OpActions - For each operation and each value type, keep a LegalizeAction /// that indicates how instruction selection should deal with the operation. @@ -1616,7 +1698,7 @@ private: std::vector<APFloat> LegalFPImmediates; - std::vector<std::pair<MVT, TargetRegisterClass*> > AvailableRegClasses; + std::vector<std::pair<EVT, TargetRegisterClass*> > AvailableRegClasses; /// TargetDAGCombineArray - Targets can specify ISD nodes that they would /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(), @@ -1641,6 +1723,10 @@ private: /// of each of the comparison libcall against zero. ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; + /// LibcallCallingConvs - Stores the CallingConv that should be used for each + /// libcall. + CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL]; + protected: /// When lowering \@llvm.memset this field specifies the maximum number of /// store operations that may be substituted for the call to memset. Targets @@ -1676,12 +1762,6 @@ protected: /// @brief Specify maximum bytes of store instructions per memmove call. unsigned maxStoresPerMemmove; - /// This field specifies whether the target machine permits unaligned memory - /// accesses. This is used, for example, to determine the size of store - /// operations when copying small arrays and other similar tasks. - /// @brief Indicate whether the target permits unaligned memory accesses. - bool allowUnalignedMemoryAccesses; - /// This field specifies whether the target can benefit from code placement /// optimization. 
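The new LibcallCallingConvs table lets a target pin the calling convention used for runtime-library calls, which matters on EABI-style platforms. A sketch of how a backend constructor might seed it; the blanket choice of CallingConv::C is only an example, and a target with a dedicated helper-function convention would pass that ID for the affected libcalls instead:

    for (unsigned LC = 0; LC != RTLIB::UNKNOWN_LIBCALL; ++LC)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LC), CallingConv::C);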
bool benefitFromCodePlacementOpt; diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h new file mode 100644 index 0000000..821e537 --- /dev/null +++ b/include/llvm/Target/TargetLoweringObjectFile.h @@ -0,0 +1,361 @@ +//===-- llvm/Target/TargetLoweringObjectFile.h - Object Info ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements classes used to handle lowerings specific to common +// object file formats. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H +#define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H + +#include "llvm/MC/SectionKind.h" + +namespace llvm { + class MachineModuleInfo; + class Mangler; + class MCAsmInfo; + class MCExpr; + class MCSection; + class MCSectionMachO; + class MCContext; + class GlobalValue; + class StringRef; + class TargetMachine; + +class TargetLoweringObjectFile { + MCContext *Ctx; + + TargetLoweringObjectFile(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT + void operator=(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT +protected: + + TargetLoweringObjectFile(); + + /// TextSection - Section directive for standard text. + /// + const MCSection *TextSection; + + /// DataSection - Section directive for standard data. + /// + const MCSection *DataSection; + + /// BSSSection - Section that is default initialized to zero. + const MCSection *BSSSection; + + /// ReadOnlySection - Section that is readonly and can contain arbitrary + /// initialized data. Targets are not required to have a readonly section. + /// If they don't, various bits of code will fall back to using the data + /// section for constants. + const MCSection *ReadOnlySection; + + /// StaticCtorSection - This section contains the static constructor pointer + /// list. + const MCSection *StaticCtorSection; + + /// StaticDtorSection - This section contains the static destructor pointer + /// list. + const MCSection *StaticDtorSection; + + /// LSDASection - If exception handling is supported by the target, this is + /// the section the Language Specific Data Area information is emitted to. + const MCSection *LSDASection; + + /// EHFrameSection - If exception handling is supported by the target, this is + /// the section the EH Frame is emitted to. + const MCSection *EHFrameSection; + + // Dwarf sections for debug info. If a target supports debug info, these must + // be set. + const MCSection *DwarfAbbrevSection; + const MCSection *DwarfInfoSection; + const MCSection *DwarfLineSection; + const MCSection *DwarfFrameSection; + const MCSection *DwarfPubNamesSection; + const MCSection *DwarfPubTypesSection; + const MCSection *DwarfDebugInlineSection; + const MCSection *DwarfStrSection; + const MCSection *DwarfLocSection; + const MCSection *DwarfARangesSection; + const MCSection *DwarfRangesSection; + const MCSection *DwarfMacroInfoSection; + +public: + + MCContext &getContext() const { return *Ctx; } + + + virtual ~TargetLoweringObjectFile(); + + /// Initialize - this method must be called before any actual lowering is + /// done. This specifies the current context for codegen, and gives the + /// lowering implementations a chance to set up their default sections. 
+ virtual void Initialize(MCContext &ctx, const TargetMachine &TM) { + Ctx = &ctx; + } + + + const MCSection *getTextSection() const { return TextSection; } + const MCSection *getDataSection() const { return DataSection; } + const MCSection *getBSSSection() const { return BSSSection; } + const MCSection *getStaticCtorSection() const { return StaticCtorSection; } + const MCSection *getStaticDtorSection() const { return StaticDtorSection; } + const MCSection *getLSDASection() const { return LSDASection; } + const MCSection *getEHFrameSection() const { return EHFrameSection; } + const MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; } + const MCSection *getDwarfInfoSection() const { return DwarfInfoSection; } + const MCSection *getDwarfLineSection() const { return DwarfLineSection; } + const MCSection *getDwarfFrameSection() const { return DwarfFrameSection; } + const MCSection *getDwarfPubNamesSection() const{return DwarfPubNamesSection;} + const MCSection *getDwarfPubTypesSection() const{return DwarfPubTypesSection;} + const MCSection *getDwarfDebugInlineSection() const { + return DwarfDebugInlineSection; + } + const MCSection *getDwarfStrSection() const { return DwarfStrSection; } + const MCSection *getDwarfLocSection() const { return DwarfLocSection; } + const MCSection *getDwarfARangesSection() const { return DwarfARangesSection;} + const MCSection *getDwarfRangesSection() const { return DwarfRangesSection; } + const MCSection *getDwarfMacroInfoSection() const { + return DwarfMacroInfoSection; + } + + /// shouldEmitUsedDirectiveFor - This hook allows targets to selectively + /// decide not to emit the UsedDirective for some symbols in llvm.used. + /// FIXME: REMOVE this (rdar://7071300) + virtual bool shouldEmitUsedDirectiveFor(const GlobalValue *GV, + Mangler *) const { + return GV != 0; + } + + /// getSectionForConstant - Given a constant with the SectionKind, return a + /// section that it should be placed in. + virtual const MCSection *getSectionForConstant(SectionKind Kind) const; + + /// getKindForGlobal - Classify the specified global variable into a set of + /// target independent categories embodied in SectionKind. + static SectionKind getKindForGlobal(const GlobalValue *GV, + const TargetMachine &TM); + + /// SectionForGlobal - This method computes the appropriate section to emit + /// the specified global variable or function definition. This should not + /// be passed external (or available externally) globals. + const MCSection *SectionForGlobal(const GlobalValue *GV, + SectionKind Kind, Mangler *Mang, + const TargetMachine &TM) const; + + /// SectionForGlobal - This method computes the appropriate section to emit + /// the specified global variable or function definition. This should not + /// be passed external (or available externally) globals. + const MCSection *SectionForGlobal(const GlobalValue *GV, + Mangler *Mang, + const TargetMachine &TM) const { + return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM); + } + + + + /// getExplicitSectionGlobal - Targets should implement this method to assign + /// a section to globals with an explicit section specfied. The + /// implementation of this method can assume that GV->hasSection() is true. + virtual const MCSection * + getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const = 0; + + /// getSpecialCasedSectionGlobals - Allow the target to completely override + /// section assignment of a global. 
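For orientation, SectionForGlobal above is what asm-printer-style clients end up calling for every emitted definition. A sketch of the caller side, assuming TLOF is an initialized TargetLoweringObjectFile, GV a defined GlobalValue, Mang a Mangler*, and TM the TargetMachine:

    // Classify the global first, then ask where it should be emitted.
    SectionKind Kind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
    const MCSection *Sec = TLOF.SectionForGlobal(GV, Kind, Mang, TM);

    // Anonymous constants go through getSectionForConstant instead.
    const MCSection *CSec = TLOF.getSectionForConstant(SectionKind::getReadOnly());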
+ virtual const MCSection * + getSpecialCasedSectionGlobals(const GlobalValue *GV, Mangler *Mang, + SectionKind Kind) const { + return 0; + } + + /// getSymbolForDwarfGlobalReference - Return an MCExpr to use for a + /// pc-relative reference to the specified global variable from exception + /// handling information. In addition to the symbol, this returns + /// by-reference: + /// + /// IsIndirect - True if the returned symbol is actually a stub that contains + /// the address of the symbol, false if the symbol is the global itself. + /// + /// IsPCRel - True if the symbol reference is already pc-relative, false if + /// the caller needs to subtract off the address of the reference from the + /// symbol. + /// + virtual const MCExpr * + getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang, + MachineModuleInfo *MMI, + bool &IsIndirect, bool &IsPCRel) const; + +protected: + virtual const MCSection * + SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const; +}; + + + + +class TargetLoweringObjectFileELF : public TargetLoweringObjectFile { + mutable void *UniquingMap; +protected: + /// TLSDataSection - Section directive for Thread Local data. + /// + const MCSection *TLSDataSection; // Defaults to ".tdata". + + /// TLSBSSSection - Section directive for Thread Local uninitialized data. + /// Null if this target doesn't support a BSS section. + /// + const MCSection *TLSBSSSection; // Defaults to ".tbss". + + const MCSection *DataRelSection; + const MCSection *DataRelLocalSection; + const MCSection *DataRelROSection; + const MCSection *DataRelROLocalSection; + + const MCSection *MergeableConst4Section; + const MCSection *MergeableConst8Section; + const MCSection *MergeableConst16Section; + +protected: + const MCSection *getELFSection(StringRef Section, unsigned Type, + unsigned Flags, SectionKind Kind, + bool IsExplicit = false) const; +public: + TargetLoweringObjectFileELF() : UniquingMap(0) {} + ~TargetLoweringObjectFileELF(); + + virtual void Initialize(MCContext &Ctx, const TargetMachine &TM); + + /// getSectionForConstant - Given a constant with the SectionKind, return a + /// section that it should be placed in. 
+ virtual const MCSection *getSectionForConstant(SectionKind Kind) const; + + + virtual const MCSection * + getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const; + + virtual const MCSection * + SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const; +}; + + + +class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile { + mutable void *UniquingMap; + + const MCSection *CStringSection; + const MCSection *UStringSection; + const MCSection *TextCoalSection; + const MCSection *ConstTextCoalSection; + const MCSection *ConstDataCoalSection; + const MCSection *ConstDataSection; + const MCSection *DataCoalSection; + const MCSection *FourByteConstantSection; + const MCSection *EightByteConstantSection; + const MCSection *SixteenByteConstantSection; + + const MCSection *LazySymbolPointerSection; + const MCSection *NonLazySymbolPointerSection; +public: + TargetLoweringObjectFileMachO() : UniquingMap(0) {} + ~TargetLoweringObjectFileMachO(); + + virtual void Initialize(MCContext &Ctx, const TargetMachine &TM); + + virtual const MCSection * + SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const; + + virtual const MCSection * + getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const; + + virtual const MCSection *getSectionForConstant(SectionKind Kind) const; + + /// shouldEmitUsedDirectiveFor - This hook allows targets to selectively + /// decide not to emit the UsedDirective for some symbols in llvm.used. + /// FIXME: REMOVE this (rdar://7071300) + virtual bool shouldEmitUsedDirectiveFor(const GlobalValue *GV, + Mangler *) const; + + /// getMachOSection - Return the MCSection for the specified mach-o section. + /// This requires the operands to be valid. + const MCSectionMachO *getMachOSection(const StringRef &Segment, + const StringRef &Section, + unsigned TypeAndAttributes, + SectionKind K) const { + return getMachOSection(Segment, Section, TypeAndAttributes, 0, K); + } + const MCSectionMachO *getMachOSection(const StringRef &Segment, + const StringRef &Section, + unsigned TypeAndAttributes, + unsigned Reserved2, + SectionKind K) const; + + /// getTextCoalSection - Return the "__TEXT,__textcoal_nt" section we put weak + /// text symbols into. + const MCSection *getTextCoalSection() const { + return TextCoalSection; + } + + /// getConstTextCoalSection - Return the "__TEXT,__const_coal" section + /// we put weak read-only symbols into. + const MCSection *getConstTextCoalSection() const { + return ConstTextCoalSection; + } + + /// getLazySymbolPointerSection - Return the section corresponding to + /// the .lazy_symbol_pointer directive. + const MCSection *getLazySymbolPointerSection() const { + return LazySymbolPointerSection; + } + + /// getNonLazySymbolPointerSection - Return the section corresponding to + /// the .non_lazy_symbol_pointer directive. + const MCSection *getNonLazySymbolPointerSection() const { + return NonLazySymbolPointerSection; + } + + /// getSymbolForDwarfGlobalReference - The mach-o version of this method + /// defaults to returning a stub reference. 
+ virtual const MCExpr * + getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang, + MachineModuleInfo *MMI, + bool &IsIndirect, bool &IsPCRel) const; +}; + + + +class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile { + mutable void *UniquingMap; +public: + TargetLoweringObjectFileCOFF() : UniquingMap(0) {} + ~TargetLoweringObjectFileCOFF(); + + virtual void Initialize(MCContext &Ctx, const TargetMachine &TM); + + virtual const MCSection * + getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const; + + virtual const MCSection * + SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, + Mangler *Mang, const TargetMachine &TM) const; + + /// getCOFFSection - Return the MCSection for the specified COFF section. + /// FIXME: Switch this to a semantic view eventually. + const MCSection *getCOFFSection(const char *Name, bool isDirective, + SectionKind K) const; +}; + +} // end namespace llvm + +#endif diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h index 33fc451..92b648c 100644 --- a/include/llvm/Target/TargetMachine.h +++ b/include/llvm/Target/TargetMachine.h @@ -16,10 +16,12 @@ #include "llvm/Target/TargetInstrItineraries.h" #include <cassert> +#include <string> namespace llvm { -class TargetAsmInfo; +class Target; +class MCAsmInfo; class TargetData; class TargetSubtarget; class TargetInstrInfo; @@ -29,14 +31,14 @@ class TargetLowering; class TargetFrameInfo; class MachineCodeEmitter; class JITCodeEmitter; +class ObjectCodeEmitter; class TargetRegisterInfo; -class Module; class PassManagerBase; class PassManager; class Pass; class TargetMachOWriterInfo; class TargetELFWriterInfo; -class raw_ostream; +class formatted_raw_ostream; // Relocation model types. namespace Reloc { @@ -79,15 +81,6 @@ namespace CodeGenOpt { } -// Possible float ABI settings. Used with FloatABIType in TargetOptions.h. -namespace FloatABI { - enum ABIType { - Default, // Target-specific (either soft of hard depending on triple, etc). - Soft, // Soft float. - Hard // Hard float. - }; -} - //===----------------------------------------------------------------------===// /// /// TargetMachine - Primary interface to the complete machine description for @@ -98,35 +91,23 @@ class TargetMachine { TargetMachine(const TargetMachine &); // DO NOT IMPLEMENT void operator=(const TargetMachine &); // DO NOT IMPLEMENT protected: // Can only create subclasses. - TargetMachine(); + TargetMachine(const Target &); /// getSubtargetImpl - virtual method implemented by subclasses that returns /// a reference to that target's TargetSubtarget-derived member variable. virtual const TargetSubtarget *getSubtargetImpl() const { return 0; } + + /// TheTarget - The Target that this machine was created for. + const Target &TheTarget; /// AsmInfo - Contains target specific asm information. /// - mutable const TargetAsmInfo *AsmInfo; + const MCAsmInfo *AsmInfo; - /// createTargetAsmInfo - Create a new instance of target specific asm - /// information. - virtual const TargetAsmInfo *createTargetAsmInfo() const { return 0; } - public: virtual ~TargetMachine(); - /// getModuleMatchQuality - This static method should be implemented by - /// targets to indicate how closely they match the specified module. This is - /// used by the LLC tool to determine which target to use when an explicit - /// -march option is not specified. If a target returns zero, it will never - /// be chosen without an explicit -march option. 
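The TargetMachine changes below reflect the move to the TargetRegistry: machines are now created for a specific Target object instead of being matched by "module quality". A sketch of the client-side lookup of this era, with error handling abbreviated and the registry header assumed to be llvm/Target/TargetRegistry.h as it was at the time:

    #include "llvm/Target/TargetRegistry.h"

    // Assumes the desired targets were registered first (e.g. via
    // InitializeAllTargetInfos()/InitializeAllTargets()).
    std::string Err;
    const llvm::Target *T =
        llvm::TargetRegistry::lookupTarget("x86_64-unknown-linux-gnu", Err);
    if (!T) { /* report Err and bail out */ }

    // Triple plus subtarget feature string; the resulting machine keeps a
    // reference to T, exposed through the getTarget() accessor added below.
    llvm::TargetMachine *TM =
        T->createTargetMachine("x86_64-unknown-linux-gnu", "");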
- static unsigned getModuleMatchQuality(const Module &) { return 0; } - - /// getJITMatchQuality - This static method should be implemented by targets - /// that provide JIT capabilities to indicate how suitable they are for - /// execution on the current host. If a value of 0 is returned, the target - /// will not be used unless an explicit -march option is used. - static unsigned getJITMatchQuality() { return 0; } + const Target &getTarget() const { return TheTarget; } // Interfaces to the major aspects of target machine information: // -- Instruction opcode and operand information @@ -139,12 +120,9 @@ public: virtual TargetLowering *getTargetLowering() const { return 0; } virtual const TargetData *getTargetData() const { return 0; } - /// getTargetAsmInfo - Return target specific asm information. + /// getMCAsmInfo - Return target specific asm information. /// - const TargetAsmInfo *getTargetAsmInfo() const { - if (!AsmInfo) AsmInfo = createTargetAsmInfo(); - return AsmInfo; - } + const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; } /// getSubtarget - This method returns a pointer to the specified type of /// TargetSubtarget. In debug builds, it verifies that the object being @@ -225,13 +203,12 @@ public: /// addPassesToEmitFile - Add passes to the specified pass manager to get the /// specified file emitted. Typically this will involve several steps of code - /// generation. If Fast is set to true, the code generator should emit code - /// as fast as possible, though the generated code may be less efficient. + /// generation. /// This method should return FileModel::Error if emission of this file type /// is not supported. /// virtual FileModel::Model addPassesToEmitFile(PassManagerBase &, - raw_ostream &, + formatted_raw_ostream &, CodeGenFileType, CodeGenOpt::Level) { return FileModel::None; @@ -257,6 +234,16 @@ public: return true; } + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &, + ObjectCodeEmitter *, + CodeGenOpt::Level) { + return true; + } + /// addPassesToEmitMachineCode - Add passes to the specified pass manager to /// get machine code emitted. This uses a MachineCodeEmitter object to handle /// actually outputting the machine code and resolving things like the address @@ -285,7 +272,7 @@ public: /// require having the entire module at once. This is not recommended, do not /// use this. virtual bool WantsWholeFile() const { return false; } - virtual bool addPassesToEmitWholeFile(PassManager &, raw_ostream &, + virtual bool addPassesToEmitWholeFile(PassManager &, formatted_raw_ostream &, CodeGenFileType, CodeGenOpt::Level) { return true; @@ -297,8 +284,8 @@ public: /// class LLVMTargetMachine : public TargetMachine { protected: // Can only create subclasses. - LLVMTargetMachine() { } - + LLVMTargetMachine(const Target &T, const std::string &TargetTriple); + /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for /// both emitting to assembly files or machine code output. /// @@ -318,7 +305,7 @@ public: /// target-specific passes in standard locations. /// virtual FileModel::Model addPassesToEmitFile(PassManagerBase &PM, - raw_ostream &Out, + formatted_raw_ostream &Out, CodeGenFileType FileType, CodeGenOpt::Level); @@ -335,7 +322,15 @@ public: /// used to finish up adding passes to emit the file, if necessary. 
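The file-emission interface now takes a formatted_raw_ostream and is split into a begin/finish pair. A rough sketch of how an llc-style driver of this vintage strings the calls together; TM, M (a Module), Out (a raw_ostream) and OptLevel are assumed to be in scope, headers are elided, and only the plain-assembly file model is handled:

    PassManager PM;
    formatted_raw_ostream FOS(Out);

    FileModel::Model FM =
        TM.addPassesToEmitFile(PM, FOS, TargetMachine::AssemblyFile, OptLevel);
    if (FM == FileModel::Error) {
      /* this target cannot emit the requested file type */
    } else if (FM == FileModel::AsmFile) {
      // No object writer needed for assembly; finish with a null emitter.
      TM.addPassesToEmitFileFinish(PM, (MachineCodeEmitter*)0, OptLevel);
    }

    PM.run(M);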
/// virtual bool addPassesToEmitFileFinish(PassManagerBase &PM, - JITCodeEmitter *MCE, + JITCodeEmitter *JCE, + CodeGenOpt::Level); + + /// addPassesToEmitFileFinish - If the passes to emit the specified file had + /// to be split up (e.g., to add an object writer pass), this method can be + /// used to finish up adding passes to emit the file, if necessary. + /// + virtual bool addPassesToEmitFileFinish(PassManagerBase &PM, + ObjectCodeEmitter *OCE, CodeGenOpt::Level); /// addPassesToEmitMachineCode - Add passes to the specified pass manager to @@ -367,20 +362,28 @@ public: return true; } - /// addPreRegAllocPasses - This method may be implemented by targets that want - /// to run passes immediately before register allocation. This should return + /// addPreRegAlloc - This method may be implemented by targets that want to + /// run passes immediately before register allocation. This should return /// true if -print-machineinstrs should print after these passes. virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level) { return false; } - /// addPostRegAllocPasses - This method may be implemented by targets that - /// want to run passes after register allocation but before prolog-epilog + /// addPostRegAlloc - This method may be implemented by targets that want + /// to run passes after register allocation but before prolog-epilog /// insertion. This should return true if -print-machineinstrs should print /// after these passes. virtual bool addPostRegAlloc(PassManagerBase &, CodeGenOpt::Level) { return false; } + + /// addPreSched2 - This method may be implemented by targets that want to + /// run passes after prolog-epilog insertion and before the second instruction + /// scheduling pass. This should return true if -print-machineinstrs should + /// print after these passes. + virtual bool addPreSched2(PassManagerBase &, CodeGenOpt::Level) { + return false; + } /// addPreEmitPass - This pass may be implemented by targets that want to run /// passes immediately before machine code is emitted. This should return @@ -390,51 +393,57 @@ public: } - /// addAssemblyEmitter - This pass should be overridden by the target to add - /// the asmprinter, if asm emission is supported. If this is not supported, - /// 'true' should be returned. - virtual bool addAssemblyEmitter(PassManagerBase &, CodeGenOpt::Level, - bool /* VerboseAsmDefault */, raw_ostream &) { - return true; - } - /// addCodeEmitter - This pass should be overridden by the target to add a /// code emitter, if supported. If this is not supported, 'true' should be - /// returned. If DumpAsm is true, the generated assembly is printed to cerr. + /// returned. virtual bool addCodeEmitter(PassManagerBase &, CodeGenOpt::Level, - bool /*DumpAsm*/, MachineCodeEmitter &) { + MachineCodeEmitter &) { return true; } /// addCodeEmitter - This pass should be overridden by the target to add a /// code emitter, if supported. If this is not supported, 'true' should be - /// returned. If DumpAsm is true, the generated assembly is printed to cerr. + /// returned. virtual bool addCodeEmitter(PassManagerBase &, CodeGenOpt::Level, - bool /*DumpAsm*/, JITCodeEmitter &) { + JITCodeEmitter &) { return true; } /// addSimpleCodeEmitter - This pass should be overridden by the target to add /// a code emitter (without setting flags), if supported. If this is not - /// supported, 'true' should be returned. If DumpAsm is true, the generated - /// assembly is printed to cerr. + /// supported, 'true' should be returned. 
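addPreSched2 is the new hook in this group, running after prolog/epilog insertion and before the post-RA scheduler. A sketch of a backend wiring a standard CodeGen pass in at that point; the if-converter is just an example of the kind of pass targets run here, and MyTargetMachine is hypothetical:

    bool MyTargetMachine::addPreSched2(PassManagerBase &PM,
                                       CodeGenOpt::Level OptLevel) {
      if (OptLevel != CodeGenOpt::None)
        PM.add(createIfConverterPass());  // predicate/if-convert after PEI
      return true;  // let -print-machineinstrs dump after this point
    }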
virtual bool addSimpleCodeEmitter(PassManagerBase &, CodeGenOpt::Level, - bool /*DumpAsm*/, MachineCodeEmitter &) { + MachineCodeEmitter &) { return true; } /// addSimpleCodeEmitter - This pass should be overridden by the target to add /// a code emitter (without setting flags), if supported. If this is not - /// supported, 'true' should be returned. If DumpAsm is true, the generated - /// assembly is printed to cerr. + /// supported, 'true' should be returned. virtual bool addSimpleCodeEmitter(PassManagerBase &, CodeGenOpt::Level, - bool /*DumpAsm*/, JITCodeEmitter &) { + JITCodeEmitter &) { + return true; + } + + /// addSimpleCodeEmitter - This pass should be overridden by the target to add + /// a code emitter (without setting flags), if supported. If this is not + /// supported, 'true' should be returned. + virtual bool addSimpleCodeEmitter(PassManagerBase &, CodeGenOpt::Level, + ObjectCodeEmitter &) { return true; } /// getEnableTailMergeDefault - the default setting for -enable-tail-merge /// on this target. User flag overrides. virtual bool getEnableTailMergeDefault() const { return true; } + + /// addAssemblyEmitter - Helper function which creates a target specific + /// assembly printer, if available. + /// + /// \return Returns 'false' on success. + bool addAssemblyEmitter(PassManagerBase &, CodeGenOpt::Level, + bool /* VerboseAsmDefault */, + formatted_raw_ostream &); }; } // End llvm namespace diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h index 377e03f..8d52dad 100644 --- a/include/llvm/Target/TargetOptions.h +++ b/include/llvm/Target/TargetOptions.h @@ -16,6 +16,15 @@ #define LLVM_TARGET_TARGETOPTIONS_H namespace llvm { + // Possible float ABI settings. Used with FloatABIType in TargetOptions.h. + namespace FloatABI { + enum ABIType { + Default, // Target-specific (either soft of hard depending on triple, etc). + Soft, // Soft float. + Hard // Hard float. + }; + } + /// PrintMachineCode - This flag is enabled when the -print-machineinstrs /// option is specified on the command line, and should enable debugging /// output from the code generator. @@ -85,10 +94,23 @@ namespace llvm { /// .bss section. This flag disables such behaviour (necessary, e.g. for /// crt*.o compiling). extern bool NoZerosInBSS; - - /// ExceptionHandling - This flag indicates that exception information should - /// be emitted. - extern bool ExceptionHandling; + + /// DwarfExceptionHandling - This flag indicates that Dwarf exception + /// information should be emitted. + extern bool DwarfExceptionHandling; + + /// SjLjExceptionHandling - This flag indicates that SJLJ exception + /// information should be emitted. + extern bool SjLjExceptionHandling; + + /// JITEmitDebugInfo - This flag indicates that the JIT should try to emit + /// debug information and notify a debugger about it. + extern bool JITEmitDebugInfo; + + /// JITEmitDebugInfoToDisk - This flag indicates that the JIT should write + /// the object files generated by the JITEmitDebugInfo flag to disk. This + /// flag is hidden and is only for debugging the debug info. + extern bool JITEmitDebugInfoToDisk; /// UnwindTablesMandatory - This flag indicates that unwind tables should /// be emitted for all functions. diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h index 91e8f80..e90fc6c 100644 --- a/include/llvm/Target/TargetRegisterInfo.h +++ b/include/llvm/Target/TargetRegisterInfo.h @@ -41,7 +41,6 @@ class RegScavenger; /// of AX. 
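The TargetOptions.h hunk below replaces the single ExceptionHandling flag and relocates the FloatABI enum, but these remain plain globals that a driver sets before constructing the code generator. A sketch of that driver-side setup; the particular values are illustrative choices, not defaults:

    // Typically done once, before the TargetMachine is created.
    llvm::FloatABIType           = llvm::FloatABI::Soft;  // force soft-float libcalls
    llvm::DwarfExceptionHandling = true;                  // emit DWARF EH tables
    llvm::SjLjExceptionHandling  = false;
    llvm::JITEmitDebugInfo       = false;
    llvm::UnwindTablesMandatory  = true;                  // unwind info for every function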
/// struct TargetRegisterDesc { - const char *AsmName; // Assembly language name for the register const char *Name; // Printable name for the reg (for debugging) const unsigned *AliasSet; // Register Alias Set, described above const unsigned *SubRegs; // Sub-register set, described above @@ -53,7 +52,7 @@ public: typedef const unsigned* iterator; typedef const unsigned* const_iterator; - typedef const MVT* vt_iterator; + typedef const EVT* vt_iterator; typedef const TargetRegisterClass* const * sc_iterator; private: unsigned ID; @@ -70,7 +69,7 @@ private: public: TargetRegisterClass(unsigned id, const char *name, - const MVT *vts, + const EVT *vts, const TargetRegisterClass * const *subcs, const TargetRegisterClass * const *supcs, const TargetRegisterClass * const *subregcs, @@ -84,7 +83,7 @@ public: RegSet.insert(*I); } virtual ~TargetRegisterClass() {} // Allow subclasses - + /// getID() - Return the register class ID number. /// unsigned getID() const { return ID; } @@ -117,13 +116,13 @@ public: /// hasType - return true if this TargetRegisterClass has the ValueType vt. /// - bool hasType(MVT vt) const { - for(int i = 0; VTs[i] != MVT::Other; ++i) + bool hasType(EVT vt) const { + for(int i = 0; VTs[i].getSimpleVT().SimpleTy != MVT::Other; ++i) if (VTs[i] == vt) return true; return false; } - + /// vt_begin / vt_end - Loop over all of the value types that can be /// represented by values in this register class. vt_iterator vt_begin() const { @@ -132,7 +131,7 @@ public: vt_iterator vt_end() const { vt_iterator I = VTs; - while (*I != MVT::Other) ++I; + while (I->getSimpleVT().SimpleTy != MVT::Other) ++I; return I; } @@ -173,7 +172,7 @@ public: /// hasSubClass - return true if the the specified TargetRegisterClass /// is a proper subset of this TargetRegisterClass. bool hasSubClass(const TargetRegisterClass *cs) const { - for (int i = 0; SubClasses[i] != NULL; ++i) + for (int i = 0; SubClasses[i] != NULL; ++i) if (SubClasses[i] == cs) return true; return false; @@ -184,17 +183,17 @@ public: sc_iterator subclasses_begin() const { return SubClasses; } - + sc_iterator subclasses_end() const { sc_iterator I = SubClasses; while (*I != NULL) ++I; return I; } - + /// hasSuperClass - return true if the specified TargetRegisterClass is a /// proper superset of this TargetRegisterClass. bool hasSuperClass(const TargetRegisterClass *cs) const { - for (int i = 0; SuperClasses[i] != NULL; ++i) + for (int i = 0; SuperClasses[i] != NULL; ++i) if (SuperClasses[i] == cs) return true; return false; @@ -205,7 +204,7 @@ public: sc_iterator superclasses_begin() const { return SuperClasses; } - + sc_iterator superclasses_end() const { sc_iterator I = SuperClasses; while (*I != NULL) ++I; @@ -217,7 +216,7 @@ public: bool isASubClass() const { return SuperClasses[0] != 0; } - + /// allocation_order_begin/end - These methods define a range of registers /// which specify the registers in this class that are valid to register /// allocate, and the preferred order to allocate them in. For example, @@ -318,15 +317,15 @@ public: } /// getPhysicalRegisterRegClass - Returns the Register Class of a physical - /// register of the given type. If type is MVT::Other, then just return any + /// register of the given type. If type is EVT::Other, then just return any /// register class the register belongs to. 
virtual const TargetRegisterClass * - getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const; + getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const; /// getAllocatableSet - Returns a bitset indexed by register number /// indicating if a register is allocatable or not. If a register class is /// specified, returns the subset for the class. - BitVector getAllocatableSet(MachineFunction &MF, + BitVector getAllocatableSet(const MachineFunction &MF, const TargetRegisterClass *RC = NULL) const; const TargetRegisterDesc &operator[](unsigned RegNo) const { @@ -368,12 +367,6 @@ public: return get(RegNo).SuperRegs; } - /// getAsmName - Return the symbolic target-specific name for the - /// specified physical register. - const char *getAsmName(unsigned RegNo) const { - return get(RegNo).AsmName; - } - /// getName - Return the human-readable symbolic target-specific name for the /// specified physical register. const char *getName(unsigned RegNo) const { @@ -386,9 +379,16 @@ public: return NumRegs; } - /// areAliases - Returns true if the two registers alias each other, false - /// otherwise - bool areAliases(unsigned regA, unsigned regB) const { + /// regsOverlap - Returns true if the two registers are equal or alias each + /// other. The registers may be virtual register. + bool regsOverlap(unsigned regA, unsigned regB) const { + if (regA == regB) + return true; + + if (isVirtualRegister(regA) || isVirtualRegister(regB)) + return false; + + // regA and regB are distinct physical registers. Do they alias? size_t index = (regA + regB * 37) & (AliasesHashSize-1); unsigned ProbeAmt = 0; while (AliasesHash[index*2] != 0 && @@ -403,17 +403,6 @@ public: return false; } - /// regsOverlap - Returns true if the two registers are equal or alias each - /// other. The registers may be virtual register. - bool regsOverlap(unsigned regA, unsigned regB) const { - if (regA == regB) - return true; - - if (isVirtualRegister(regA) || isVirtualRegister(regB)) - return false; - return areAliases(regA, regB); - } - /// isSubRegister - Returns true if regB is a sub-register of regA. /// bool isSubRegister(unsigned regA, unsigned regB) const { @@ -424,11 +413,11 @@ public: SubregHash[index*2+1] != 0) { if (SubregHash[index*2] == regA && SubregHash[index*2+1] == regB) return true; - + index = (index + ProbeAmt) & (SubregHashSize-1); ProbeAmt += 2; } - + return false; } @@ -442,11 +431,11 @@ public: SuperregHash[index*2+1] != 0) { if (SuperregHash[index*2] == regA && SuperregHash[index*2+1] == regB) return true; - + index = (index + ProbeAmt) & (SuperregHashSize-1); ProbeAmt += 2; } - + return false; } @@ -476,7 +465,7 @@ public: /// getMatchingSuperReg - Return a super-register of the specified register /// Reg so its sub-register of index SubIdx is Reg. - unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, + unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const TargetRegisterClass *RC) const { for (const unsigned *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs) if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR)) @@ -484,6 +473,15 @@ public: return 0; } + /// getMatchingSuperRegClass - Return a subclass of the specified register + /// class A so that each register in it has a sub-register of the + /// specified sub-register index which is in the specified register class B. 
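One consequence of the hunk above: areAliases() is gone, and regsOverlap() is now the single overlap query; it also returns true for identical registers and quietly handles virtual registers. A minimal migration sketch (the wrapper name is illustrative):

#include "llvm/Target/TargetRegisterInfo.h"

// Before: if (RegA == RegB || TRI.areAliases(RegA, RegB)) ...
// After:  the equality and alias checks collapse into one call.
static bool registersConflict(const llvm::TargetRegisterInfo &TRI,
                              unsigned RegA, unsigned RegB) {
  return TRI.regsOverlap(RegA, RegB);
}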
+ virtual const TargetRegisterClass * + getMatchingSuperRegClass(const TargetRegisterClass *A, + const TargetRegisterClass *B, unsigned Idx) const { + return 0; + } + //===--------------------------------------------------------------------===// // Register Class Information // @@ -496,7 +494,7 @@ public: unsigned getNumRegClasses() const { return (unsigned)(regclass_end()-regclass_begin()); } - + /// getRegClass - Returns the register class associated with the enumeration /// value. See class TargetOperandInfo. const TargetRegisterClass *getRegClass(unsigned i) const { @@ -505,8 +503,9 @@ public: } /// getPointerRegClass - Returns a TargetRegisterClass used for pointer - /// values. - virtual const TargetRegisterClass *getPointerRegClass() const { + /// values. If a target supports multiple different pointer register classes, + /// kind specifies which one is indicated. + virtual const TargetRegisterClass *getPointerRegClass(unsigned Kind=0) const { assert(0 && "Target didn't implement getPointerRegClass!"); return 0; // Must return a value in order to compile with VS 2005 } @@ -561,24 +560,41 @@ public: virtual bool requiresRegisterScavenging(const MachineFunction &MF) const { return false; } - + + /// requiresFrameIndexScavenging - returns true if the target requires post + /// PEI scavenging of registers for materializing frame index constants. + virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const { + return false; + } + /// hasFP - Return true if the specified function should have a dedicated /// frame pointer register. For most targets this is true only if the function /// has variable sized allocas or if frame pointer elimination is disabled. virtual bool hasFP(const MachineFunction &MF) const = 0; - // hasReservedCallFrame - Under normal circumstances, when a frame pointer is - // not required, we reserve argument space for call sites in the function - // immediately on entry to the current function. This eliminates the need for - // add/sub sp brackets around call sites. Returns true if the call frame is - // included as part of the stack frame. + /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is + /// not required, we reserve argument space for call sites in the function + /// immediately on entry to the current function. This eliminates the need for + /// add/sub sp brackets around call sites. Returns true if the call frame is + /// included as part of the stack frame. virtual bool hasReservedCallFrame(MachineFunction &MF) const { return !hasFP(MF); } - // needsStackRealignment - true if storage within the function requires the - // stack pointer to be aligned more than the normal calling convention calls - // for. + /// hasReservedSpillSlot - Return true if target has reserved a spill slot in + /// the stack frame of the given function for the specified register. e.g. On + /// x86, if the frame register is required, the first fixed stack object is + /// reserved as its spill slot. This tells PEI not to create a new stack frame + /// object for the given register. It should be called only after + /// processFunctionBeforeCalleeSavedScan(). + virtual bool hasReservedSpillSlot(MachineFunction &MF, unsigned Reg, + int &FrameIdx) const { + return false; + } + + /// needsStackRealignment - true if storage within the function requires the + /// stack pointer to be aligned more than the normal calling convention calls + /// for. 
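getPointerRegClass() now takes a Kind argument so a backend can expose more than one pointer-width register class. A hedged fragment of what an override might look like; "Foo", its register classes, and the kind numbering are all hypothetical, and the surrounding class declaration is omitted.

// Fragment only: assumes a FooRegisterInfo subclass declared elsewhere.
const TargetRegisterClass *
FooRegisterInfo::getPointerRegClass(unsigned Kind) const {
  if (Kind == 1)
    return Foo::AddrRegisterClass; // hypothetical dedicated address-register class
  return Foo::GPRRegisterClass;    // hypothetical default pointer class (Kind == 0)
}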
virtual bool needsStackRealignment(const MachineFunction &MF) const { return false; } @@ -625,6 +641,24 @@ public: virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF) const { } + /// saveScavengerRegister - Save the register so it can be used by the + /// register scavenger. Return true if the register was saved, false + /// otherwise. If this function does not save the register, the scavenger + /// will instead spill it to the emergency spill slot. + /// + virtual bool saveScavengerRegister(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + const TargetRegisterClass *RC, + unsigned Reg) const {return false;} + + /// restoreScavengerRegister - Restore a register saved by + /// saveScavengerRegister(). + /// + virtual void restoreScavengerRegister(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + const TargetRegisterClass *RC, + unsigned Reg) const {} + /// eliminateFrameIndex - This method must be overriden to eliminate abstract /// frame indices from instructions which may use them. The instruction /// referenced by the iterator contains an MO_FrameIndex operand which must be @@ -632,18 +666,23 @@ public: /// specified instruction, as long as it keeps the iterator pointing the the /// finished product. SPAdj is the SP adjustment due to call frame setup /// instruction. - virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI, - int SPAdj, RegScavenger *RS=NULL) const = 0; + /// + /// When -enable-frame-index-scavenging is enabled, the virtual register + /// allocated for this frame index is returned and its value is stored in + /// *Value. + virtual unsigned eliminateFrameIndex(MachineBasicBlock::iterator MI, + int SPAdj, int *Value = NULL, + RegScavenger *RS=NULL) const = 0; /// emitProlog/emitEpilog - These methods insert prolog and epilog code into /// the function. virtual void emitPrologue(MachineFunction &MF) const = 0; virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const = 0; - + //===--------------------------------------------------------------------===// /// Debug information queries. - + /// getDwarfRegNum - Map a target register to an equivalent dwarf register /// number. Returns -1 if there is no equivalent value. The second /// parameter allows targets to use different numberings for EH info and @@ -657,11 +696,11 @@ public: /// getFrameIndexOffset - Returns the displacement from the frame register to /// the stack frame of the specified index. virtual int getFrameIndexOffset(MachineFunction &MF, int FI) const; - + /// getRARegister - This method should return the register where the return /// address can be found. virtual unsigned getRARegister() const = 0; - + /// getInitialFrameState - Returns a list of machine moves that are assumed /// on entry to all functions. Note that LabelID is ignored (assumed to be /// the beginning of the function.) 
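The eliminateFrameIndex() change above is source-breaking for out-of-tree targets: the hook now returns unsigned and takes an extra int *Value. A hedged sketch of an adapted override; "Foo" is hypothetical and the target-specific rewrite is elided.

// Fragment only: assumes a FooRegisterInfo subclass declared elsewhere.
unsigned FooRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, int *Value,
                                              RegScavenger *RS) const {
  MachineInstr &MI = *II;
  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  // ... rewrite operand i into FrameReg + Offset exactly as before ...
  return 0; // no virtual register was created for this index, nothing in *Value
}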
@@ -670,7 +709,7 @@ public: // This is useful when building IndexedMaps keyed on virtual registers -struct VirtReg2IndexFunctor : std::unary_function<unsigned, unsigned> { +struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> { unsigned operator()(unsigned Reg) const { return Reg - TargetRegisterInfo::FirstVirtualRegister; } diff --git a/include/llvm/Target/TargetRegistry.h b/include/llvm/Target/TargetRegistry.h new file mode 100644 index 0000000..8042d23 --- /dev/null +++ b/include/llvm/Target/TargetRegistry.h @@ -0,0 +1,560 @@ +//===-- Target/TargetRegistry.h - Target Registration -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes the TargetRegistry interface, which tools can use to access +// the appropriate target specific classes (TargetMachine, AsmPrinter, etc.) +// which have been registered. +// +// Target specific class implementations should register themselves using the +// appropriate TargetRegistry interfaces. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETREGISTRY_H +#define LLVM_TARGET_TARGETREGISTRY_H + +#include "llvm/ADT/Triple.h" +#include <string> +#include <cassert> + +namespace llvm { + class AsmPrinter; + class MCAsmParser; + class MCCodeEmitter; + class Module; + class MCAsmInfo; + class MCDisassembler; + class MCInstPrinter; + class TargetAsmParser; + class TargetMachine; + class formatted_raw_ostream; + class raw_ostream; + + /// Target - Wrapper for Target specific information. + /// + /// For registration purposes, this is a POD type so that targets can be + /// registered without the use of static constructors. + /// + /// Targets should implement a single global instance of this class (which + /// will be zero initialized), and pass that instance to the TargetRegistry as + /// part of their initialization. + class Target { + public: + friend struct TargetRegistry; + + typedef unsigned (*TripleMatchQualityFnTy)(const std::string &TT); + + typedef const MCAsmInfo *(*AsmInfoCtorFnTy)(const Target &T, + const StringRef &TT); + typedef TargetMachine *(*TargetMachineCtorTy)(const Target &T, + const std::string &TT, + const std::string &Features); + typedef AsmPrinter *(*AsmPrinterCtorTy)(formatted_raw_ostream &OS, + TargetMachine &TM, + const MCAsmInfo *MAI, + bool VerboseAsm); + typedef TargetAsmParser *(*AsmParserCtorTy)(const Target &T, + MCAsmParser &P); + typedef const MCDisassembler *(*MCDisassemblerCtorTy)(const Target &T); + typedef MCInstPrinter *(*MCInstPrinterCtorTy)(const Target &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + raw_ostream &O); + typedef MCCodeEmitter *(*CodeEmitterCtorTy)(const Target &T, + TargetMachine &TM); + + private: + /// Next - The next registered target in the linked list, maintained by the + /// TargetRegistry. + Target *Next; + + /// TripleMatchQualityFn - The target function for rating the match quality + /// of a triple. + TripleMatchQualityFnTy TripleMatchQualityFn; + + /// Name - The target name. + const char *Name; + + /// ShortDesc - A short description of the target. + const char *ShortDesc; + + /// HasJIT - Whether this target supports the JIT. 
+ bool HasJIT; + + AsmInfoCtorFnTy AsmInfoCtorFn; + + /// TargetMachineCtorFn - Construction function for this target's + /// TargetMachine, if registered. + TargetMachineCtorTy TargetMachineCtorFn; + + /// AsmPrinterCtorFn - Construction function for this target's AsmPrinter, + /// if registered. + AsmPrinterCtorTy AsmPrinterCtorFn; + + /// AsmParserCtorFn - Construction function for this target's AsmParser, + /// if registered. + AsmParserCtorTy AsmParserCtorFn; + + /// MCDisassemblerCtorFn - Construction function for this target's + /// MCDisassembler, if registered. + MCDisassemblerCtorTy MCDisassemblerCtorFn; + + + /// MCInstPrinterCtorFn - Construction function for this target's + /// MCInstPrinter, if registered. + MCInstPrinterCtorTy MCInstPrinterCtorFn; + + /// CodeEmitterCtorFn - Construction function for this target's CodeEmitter, + /// if registered. + CodeEmitterCtorTy CodeEmitterCtorFn; + + public: + /// @name Target Information + /// @{ + + // getNext - Return the next registered target. + const Target *getNext() const { return Next; } + + /// getName - Get the target name. + const char *getName() const { return Name; } + + /// getShortDescription - Get a short description of the target. + const char *getShortDescription() const { return ShortDesc; } + + /// @} + /// @name Feature Predicates + /// @{ + + /// hasJIT - Check if this target supports just-in-time compilation. + bool hasJIT() const { return HasJIT; } + + /// hasTargetMachine - Check if this target supports code generation. + bool hasTargetMachine() const { return TargetMachineCtorFn != 0; } + + /// hasAsmPrinter - Check if this target supports .s printing. + bool hasAsmPrinter() const { return AsmPrinterCtorFn != 0; } + + /// hasAsmParser - Check if this target supports .s parsing. + bool hasAsmParser() const { return AsmParserCtorFn != 0; } + + /// hasMCDisassembler - Check if this target has a disassembler. + bool hasMCDisassembler() const { return MCDisassemblerCtorFn != 0; } + + /// hasMCInstPrinter - Check if this target has an instruction printer. + bool hasMCInstPrinter() const { return MCInstPrinterCtorFn != 0; } + + /// hasCodeEmitter - Check if this target supports instruction encoding. + bool hasCodeEmitter() const { return CodeEmitterCtorFn != 0; } + + /// @} + /// @name Feature Constructors + /// @{ + + /// createAsmInfo - Create a MCAsmInfo implementation for the specified + /// target triple. + /// + /// \arg Triple - This argument is used to determine the target machine + /// feature set; it should always be provided. Generally this should be + /// either the target triple from the module, or the target triple of the + /// host if that does not exist. + const MCAsmInfo *createAsmInfo(const StringRef &Triple) const { + if (!AsmInfoCtorFn) + return 0; + return AsmInfoCtorFn(*this, Triple); + } + + /// createTargetMachine - Create a target specific machine implementation + /// for the specified \arg Triple. + /// + /// \arg Triple - This argument is used to determine the target machine + /// feature set; it should always be provided. Generally this should be + /// either the target triple from the module, or the target triple of the + /// host if that does not exist. + TargetMachine *createTargetMachine(const std::string &Triple, + const std::string &Features) const { + if (!TargetMachineCtorFn) + return 0; + return TargetMachineCtorFn(*this, Triple, Features); + } + + /// createAsmPrinter - Create a target specific assembly printer pass. 
+ AsmPrinter *createAsmPrinter(formatted_raw_ostream &OS, TargetMachine &TM, + const MCAsmInfo *MAI, bool Verbose) const { + if (!AsmPrinterCtorFn) + return 0; + return AsmPrinterCtorFn(OS, TM, MAI, Verbose); + } + + /// createAsmParser - Create a target specific assembly parser. + /// + /// \arg Parser - The target independent parser implementation to use for + /// parsing and lexing. + TargetAsmParser *createAsmParser(MCAsmParser &Parser) const { + if (!AsmParserCtorFn) + return 0; + return AsmParserCtorFn(*this, Parser); + } + + const MCDisassembler *createMCDisassembler() const { + if (!MCDisassemblerCtorFn) + return 0; + return MCDisassemblerCtorFn(*this); + } + + MCInstPrinter *createMCInstPrinter(unsigned SyntaxVariant, + const MCAsmInfo &MAI, + raw_ostream &O) const { + if (!MCInstPrinterCtorFn) + return 0; + return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI, O); + } + + + /// createCodeEmitter - Create a target specific code emitter. + MCCodeEmitter *createCodeEmitter(TargetMachine &TM) const { + if (!CodeEmitterCtorFn) + return 0; + return CodeEmitterCtorFn(*this, TM); + } + + /// @} + }; + + /// TargetRegistry - Generic interface to target specific features. + struct TargetRegistry { + class iterator { + const Target *Current; + explicit iterator(Target *T) : Current(T) {} + friend struct TargetRegistry; + public: + iterator(const iterator &I) : Current(I.Current) {} + iterator() : Current(0) {} + + bool operator==(const iterator &x) const { + return Current == x.Current; + } + bool operator!=(const iterator &x) const { + return !operator==(x); + } + + // Iterator traversal: forward iteration only + iterator &operator++() { // Preincrement + assert(Current && "Cannot increment end iterator!"); + Current = Current->getNext(); + return *this; + } + iterator operator++(int) { // Postincrement + iterator tmp = *this; + ++*this; + return tmp; + } + + const Target &operator*() const { + assert(Current && "Cannot dereference end iterator!"); + return *Current; + } + + const Target *operator->() const { + return &operator*(); + } + }; + + /// @name Registry Access + /// @{ + + static iterator begin(); + + static iterator end() { return iterator(); } + + /// lookupTarget - Lookup a target based on a target triple. + /// + /// \param Triple - The triple to use for finding a target. + /// \param Error - On failure, an error string describing why no target was + /// found. + static const Target *lookupTarget(const std::string &Triple, + std::string &Error); + + /// getClosestTargetForJIT - Pick the best target that is compatible with + /// the current host. If no close target can be found, this returns null + /// and sets the Error string to a reason. + /// + /// Maintained for compatibility through 2.6. + static const Target *getClosestTargetForJIT(std::string &Error); + + /// @} + /// @name Target Registration + /// @{ + + /// RegisterTarget - Register the given target. Attempts to register a + /// target which has already been registered will be ignored. + /// + /// Clients are responsible for ensuring that registration doesn't occur + /// while another thread is attempting to access the registry. Typically + /// this is done by initializing all targets at program startup. + /// + /// @param T - The target being registered. + /// @param Name - The target name. This should be a static string. + /// @param ShortDesc - A short target description. This should be a static + /// string. + /// @param TQualityFn - The triple match quality computation function for + /// this target. 
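Putting the pieces of the new registry together, the intended flow is to look a target up by triple and then ask it for the concrete objects. A minimal sketch follows (error handling reduced to a null check; it assumes the targets have already been registered, e.g. through the TargetSelect.h routines later in this patch):

#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegistry.h"
#include <string>

static llvm::TargetMachine *createTM(const std::string &TripleStr) {
  std::string Error;
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(TripleStr, Error);
  if (!T)
    return 0;                         // Error explains why the lookup failed
  // An empty feature string selects the target's defaults.
  return T->createTargetMachine(TripleStr, "");
}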
+ /// @param HasJIT - Whether the target supports JIT code + /// generation. + static void RegisterTarget(Target &T, + const char *Name, + const char *ShortDesc, + Target::TripleMatchQualityFnTy TQualityFn, + bool HasJIT = false); + + /// RegisterAsmInfo - Register a MCAsmInfo implementation for the + /// given target. + /// + /// Clients are responsible for ensuring that registration doesn't occur + /// while another thread is attempting to access the registry. Typically + /// this is done by initializing all targets at program startup. + /// + /// @param T - The target being registered. + /// @param Fn - A function to construct a MCAsmInfo for the target. + static void RegisterAsmInfo(Target &T, Target::AsmInfoCtorFnTy Fn) { + // Ignore duplicate registration. + if (!T.AsmInfoCtorFn) + T.AsmInfoCtorFn = Fn; + } + + /// RegisterTargetMachine - Register a TargetMachine implementation for the + /// given target. + /// + /// Clients are responsible for ensuring that registration doesn't occur + /// while another thread is attempting to access the registry. Typically + /// this is done by initializing all targets at program startup. + /// + /// @param T - The target being registered. + /// @param Fn - A function to construct a TargetMachine for the target. + static void RegisterTargetMachine(Target &T, + Target::TargetMachineCtorTy Fn) { + // Ignore duplicate registration. + if (!T.TargetMachineCtorFn) + T.TargetMachineCtorFn = Fn; + } + + /// RegisterAsmPrinter - Register an AsmPrinter implementation for the given + /// target. + /// + /// Clients are responsible for ensuring that registration doesn't occur + /// while another thread is attempting to access the registry. Typically + /// this is done by initializing all targets at program startup. + /// + /// @param T - The target being registered. + /// @param Fn - A function to construct an AsmPrinter for the target. + static void RegisterAsmPrinter(Target &T, Target::AsmPrinterCtorTy Fn) { + // Ignore duplicate registration. + if (!T.AsmPrinterCtorFn) + T.AsmPrinterCtorFn = Fn; + } + + /// RegisterAsmParser - Register a TargetAsmParser implementation for the + /// given target. + /// + /// Clients are responsible for ensuring that registration doesn't occur + /// while another thread is attempting to access the registry. Typically + /// this is done by initializing all targets at program startup. + /// + /// @param T - The target being registered. + /// @param Fn - A function to construct an AsmPrinter for the target. + static void RegisterAsmParser(Target &T, Target::AsmParserCtorTy Fn) { + if (!T.AsmParserCtorFn) + T.AsmParserCtorFn = Fn; + } + + /// RegisterMCDisassembler - Register a MCDisassembler implementation for + /// the given target. + /// + /// Clients are responsible for ensuring that registration doesn't occur + /// while another thread is attempting to access the registry. Typically + /// this is done by initializing all targets at program startup. + /// + /// @param T - The target being registered. + /// @param Fn - A function to construct an MCDisassembler for the target. + static void RegisterMCDisassembler(Target &T, + Target::MCDisassemblerCtorTy Fn) { + if (!T.MCDisassemblerCtorFn) + T.MCDisassemblerCtorFn = Fn; + } + + static void RegisterMCInstPrinter(Target &T, + Target::MCInstPrinterCtorTy Fn) { + if (!T.MCInstPrinterCtorFn) + T.MCInstPrinterCtorFn = Fn; + } + + /// RegisterCodeEmitter - Register a MCCodeEmitter implementation for the + /// given target. 
+ /// + /// Clients are responsible for ensuring that registration doesn't occur + /// while another thread is attempting to access the registry. Typically + /// this is done by initializing all targets at program startup. + /// + /// @param T - The target being registered. + /// @param Fn - A function to construct an AsmPrinter for the target. + static void RegisterCodeEmitter(Target &T, Target::CodeEmitterCtorTy Fn) { + if (!T.CodeEmitterCtorFn) + T.CodeEmitterCtorFn = Fn; + } + + /// @} + }; + + + //===--------------------------------------------------------------------===// + + /// RegisterTarget - Helper template for registering a target, for use in the + /// target's initialization function. Usage: + /// + /// + /// Target TheFooTarget; // The global target instance. + /// + /// extern "C" void LLVMInitializeFooTargetInfo() { + /// RegisterTarget<Triple::foo> X(TheFooTarget, "foo", "Foo description"); + /// } + template<Triple::ArchType TargetArchType = Triple::InvalidArch, + bool HasJIT = false> + struct RegisterTarget { + RegisterTarget(Target &T, const char *Name, const char *Desc) { + TargetRegistry::RegisterTarget(T, Name, Desc, + &getTripleMatchQuality, + HasJIT); + } + + static unsigned getTripleMatchQuality(const std::string &TT) { + if (Triple(TT).getArch() == TargetArchType) + return 20; + return 0; + } + }; + + /// RegisterAsmInfo - Helper template for registering a target assembly info + /// implementation. This invokes the static "Create" method on the class to + /// actually do the construction. Usage: + /// + /// extern "C" void LLVMInitializeFooTarget() { + /// extern Target TheFooTarget; + /// RegisterAsmInfo<FooMCAsmInfo> X(TheFooTarget); + /// } + template<class MCAsmInfoImpl> + struct RegisterAsmInfo { + RegisterAsmInfo(Target &T) { + TargetRegistry::RegisterAsmInfo(T, &Allocator); + } + private: + static const MCAsmInfo *Allocator(const Target &T, const StringRef &TT) { + return new MCAsmInfoImpl(T, TT); + } + + }; + + /// RegisterAsmInfoFn - Helper template for registering a target assembly info + /// implementation. This invokes the specified function to do the + /// construction. Usage: + /// + /// extern "C" void LLVMInitializeFooTarget() { + /// extern Target TheFooTarget; + /// RegisterAsmInfoFn X(TheFooTarget, TheFunction); + /// } + struct RegisterAsmInfoFn { + RegisterAsmInfoFn(Target &T, Target::AsmInfoCtorFnTy Fn) { + TargetRegistry::RegisterAsmInfo(T, Fn); + } + }; + + + /// RegisterTargetMachine - Helper template for registering a target machine + /// implementation, for use in the target machine initialization + /// function. Usage: + /// + /// extern "C" void LLVMInitializeFooTarget() { + /// extern Target TheFooTarget; + /// RegisterTargetMachine<FooTargetMachine> X(TheFooTarget); + /// } + template<class TargetMachineImpl> + struct RegisterTargetMachine { + RegisterTargetMachine(Target &T) { + TargetRegistry::RegisterTargetMachine(T, &Allocator); + } + + private: + static TargetMachine *Allocator(const Target &T, const std::string &TT, + const std::string &FS) { + return new TargetMachineImpl(T, TT, FS); + } + }; + + /// RegisterAsmPrinter - Helper template for registering a target specific + /// assembly printer, for use in the target machine initialization + /// function. 
Usage: + /// + /// extern "C" void LLVMInitializeFooAsmPrinter() { + /// extern Target TheFooTarget; + /// RegisterAsmPrinter<FooAsmPrinter> X(TheFooTarget); + /// } + template<class AsmPrinterImpl> + struct RegisterAsmPrinter { + RegisterAsmPrinter(Target &T) { + TargetRegistry::RegisterAsmPrinter(T, &Allocator); + } + + private: + static AsmPrinter *Allocator(formatted_raw_ostream &OS, TargetMachine &TM, + const MCAsmInfo *MAI, bool Verbose) { + return new AsmPrinterImpl(OS, TM, MAI, Verbose); + } + }; + + /// RegisterAsmParser - Helper template for registering a target specific + /// assembly parser, for use in the target machine initialization + /// function. Usage: + /// + /// extern "C" void LLVMInitializeFooAsmParser() { + /// extern Target TheFooTarget; + /// RegisterAsmParser<FooAsmParser> X(TheFooTarget); + /// } + template<class AsmParserImpl> + struct RegisterAsmParser { + RegisterAsmParser(Target &T) { + TargetRegistry::RegisterAsmParser(T, &Allocator); + } + + private: + static TargetAsmParser *Allocator(const Target &T, MCAsmParser &P) { + return new AsmParserImpl(T, P); + } + }; + + /// RegisterCodeEmitter - Helper template for registering a target specific + /// machine code emitter, for use in the target initialization + /// function. Usage: + /// + /// extern "C" void LLVMInitializeFooCodeEmitter() { + /// extern Target TheFooTarget; + /// RegisterCodeEmitter<FooCodeEmitter> X(TheFooTarget); + /// } + template<class CodeEmitterImpl> + struct RegisterCodeEmitter { + RegisterCodeEmitter(Target &T) { + TargetRegistry::RegisterCodeEmitter(T, &Allocator); + } + + private: + static MCCodeEmitter *Allocator(const Target &T, TargetMachine &TM) { + return new CodeEmitterImpl(T, TM); + } + }; + +} + +#endif diff --git a/include/llvm/Target/TargetSchedule.td b/include/llvm/Target/TargetSchedule.td index 38461c5..dcc0992 100644 --- a/include/llvm/Target/TargetSchedule.td +++ b/include/llvm/Target/TargetSchedule.td @@ -23,14 +23,23 @@ class FuncUnit; //===----------------------------------------------------------------------===// -// Instruction stage - These values represent a step in the execution of an -// instruction. The latency represents the number of discrete time slots used -// need to complete the stage. Units represent the choice of functional units -// that can be used to complete the stage. Eg. IntUnit1, IntUnit2. +// Instruction stage - These values represent a non-pipelined step in +// the execution of an instruction. Cycles represents the number of +// discrete time slots needed to complete the stage. Units represent +// the choice of functional units that can be used to complete the +// stage. Eg. IntUnit1, IntUnit2. NextCycles indicates how many +// cycles should elapse from the start of this stage to the start of +// the next stage in the itinerary. 
For example: // -class InstrStage<int cycles, list<FuncUnit> units> { +// A stage is specified in one of two ways: +// +// InstrStage<1, [FU_x, FU_y]> - TimeInc defaults to Cycles +// InstrStage<1, [FU_x, FU_y], 0> - TimeInc explicit +// +class InstrStage<int cycles, list<FuncUnit> units, int timeinc = -1> { int Cycles = cycles; // length of stage in machine cycles list<FuncUnit> Units = units; // choice of functional units + int TimeInc = timeinc; // cycles till start of next stage } //===----------------------------------------------------------------------===// @@ -51,11 +60,13 @@ def NoItinerary : InstrItinClass; //===----------------------------------------------------------------------===// // Instruction itinerary data - These values provide a runtime map of an -// instruction itinerary class (name) to it's itinerary data. +// instruction itinerary class (name) to its itinerary data. // -class InstrItinData<InstrItinClass Class, list<InstrStage> stages> { +class InstrItinData<InstrItinClass Class, list<InstrStage> stages, + list<int> operandcycles = []> { InstrItinClass TheClass = Class; list<InstrStage> Stages = stages; + list<int> OperandCycles = operandcycles; } //===----------------------------------------------------------------------===// diff --git a/include/llvm/Target/TargetSelect.h b/include/llvm/Target/TargetSelect.h index 002d5fc..e79f651 100644 --- a/include/llvm/Target/TargetSelect.h +++ b/include/llvm/Target/TargetSelect.h @@ -1,4 +1,4 @@ -//===- TargetSelect.h - Target Selection & Registration -------------------===// +//===- TargetSelect.h - Target Selection & Registration ---------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -20,37 +20,76 @@ extern "C" { // Declare all of the target-initialization functions that are available. +#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetInfo(); +#include "llvm/Config/Targets.def" + #define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target(); #include "llvm/Config/Targets.def" - // Declare all of the available asm-printer initialization functions. + // Declare all of the available assembly printer initialization functions. #define LLVM_ASM_PRINTER(TargetName) void LLVMInitialize##TargetName##AsmPrinter(); #include "llvm/Config/AsmPrinters.def" + + // Declare all of the available assembly parser initialization functions. +#define LLVM_ASM_PARSER(TargetName) void LLVMInitialize##TargetName##AsmParser(); +#include "llvm/Config/AsmParsers.def" } namespace llvm { + /// InitializeAllTargetInfos - The main program should call this function if + /// it wants access to all available targets that LLVM is configured to + /// support, to make them available via the TargetRegistry. + /// + /// It is legal for a client to make multiple calls to this function. + inline void InitializeAllTargetInfos() { +#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetInfo(); +#include "llvm/Config/Targets.def" + } + /// InitializeAllTargets - The main program should call this function if it - /// wants to link in all available targets that LLVM is configured to support. + /// wants access to all available target machines that LLVM is configured to + /// support, to make them available via the TargetRegistry. + /// + /// It is legal for a client to make multiple calls to this function. inline void InitializeAllTargets() { + // FIXME: Remove this, clients should do it. 
+ InitializeAllTargetInfos(); + #define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##Target(); #include "llvm/Config/Targets.def" } /// InitializeAllAsmPrinters - The main program should call this function if - /// it wants all asm printers that LLVM is configured to support. This will - /// cause them to be linked into its executable. + /// it wants all asm printers that LLVM is configured to support, to make them + /// available via the TargetRegistry. + /// + /// It is legal for a client to make multiple calls to this function. inline void InitializeAllAsmPrinters() { #define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter(); #include "llvm/Config/AsmPrinters.def" } + /// InitializeAllAsmParsers - The main program should call this function if it + /// wants all asm parsers that LLVM is configured to support, to make them + /// available via the TargetRegistry. + /// + /// It is legal for a client to make multiple calls to this function. + inline void InitializeAllAsmParsers() { +#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser(); +#include "llvm/Config/AsmParsers.def" + } + /// InitializeNativeTarget - The main program should call this function to /// initialize the native target corresponding to the host. This is useful /// for JIT applications to ensure that the target gets linked in correctly. + /// + /// It is legal for a client to make multiple calls to this function. inline bool InitializeNativeTarget() { // If we have a native target, initialize it to ensure it is linked in. #ifdef LLVM_NATIVE_ARCH -#define DoInit2(TARG) LLVMInitialize ## TARG () +#define DoInit2(TARG) \ + LLVMInitialize ## TARG ## Info (); \ + LLVMInitialize ## TARG () #define DoInit(T) DoInit2(T) DoInit(LLVM_NATIVE_ARCH); return false; diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td index 364d4d0..700c64c 100644 --- a/include/llvm/Target/TargetSelectionDAG.td +++ b/include/llvm/Target/TargetSelectionDAG.td @@ -30,12 +30,15 @@ class SDTCisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> { class SDTCisPtrTy<int OpNum> : SDTypeConstraint<OpNum>; -// SDTCisInt - The specified operand is has integer type. +// SDTCisInt - The specified operand has integer type. class SDTCisInt<int OpNum> : SDTypeConstraint<OpNum>; -// SDTCisFP - The specified operand is has floating point type. +// SDTCisFP - The specified operand has floating-point type. class SDTCisFP<int OpNum> : SDTypeConstraint<OpNum>; +// SDTCisVec - The specified operand has a vector type. +class SDTCisVec<int OpNum> : SDTypeConstraint<OpNum>; + // SDTCisSameAs - The two specified operands have identical types. 
class SDTCisSameAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> { int OtherOperandNum = OtherOp; @@ -345,7 +348,6 @@ def vsetcc : SDNode<"ISD::VSETCC" , SDTSetCC>; def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>; def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>; def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>; -def ret : SDNode<"ISD::RET" , SDTNone, [SDNPHasChain]>; def trap : SDNode<"ISD::TRAP" , SDTNone, [SDNPHasChain, SDNPSideEffect]>; diff --git a/include/llvm/Target/TargetSubtarget.h b/include/llvm/Target/TargetSubtarget.h index eca45eb..ac094f6 100644 --- a/include/llvm/Target/TargetSubtarget.h +++ b/include/llvm/Target/TargetSubtarget.h @@ -16,6 +16,9 @@ namespace llvm { +class SDep; +class SUnit; + //===----------------------------------------------------------------------===// /// /// TargetSubtarget - Generic base class for all target subtargets. All @@ -35,6 +38,15 @@ public: /// indicating the number of scheduling cycles of backscheduling that /// should be attempted. virtual unsigned getSpecialAddressLatency() const { return 0; } + + // enablePostRAScheduler - Return true to enable + // post-register-allocation scheduling. + virtual bool enablePostRAScheduler() const { return false; } + + // adjustSchedDependency - Perform target specific adjustments to + // the latency of a schedule dependency. + virtual void adjustSchedDependency(SUnit *def, SUnit *use, + SDep& dep) const { } }; } // End llvm namespace |
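Finally, the two new TargetSubtarget hooks let a backend opt in to post-RA scheduling and adjust individual scheduling edges. An illustrative fragment follows; "Foo" is hypothetical, and the one-cycle adjustment is invented purely to show the mechanism (SDep's kind and latency accessors come from ScheduleDAG.h).

#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Target/TargetSubtarget.h"

class FooSubtarget : public llvm::TargetSubtarget {
public:
  // Ask for the post-register-allocation scheduler on this subtarget.
  virtual bool enablePostRAScheduler() const { return true; }

  // Example adjustment: pretend results forward one cycle early on data
  // edges, so the generic latency estimate can be trimmed by one.
  virtual void adjustSchedDependency(llvm::SUnit *Def, llvm::SUnit *Use,
                                     llvm::SDep &Dep) const {
    if (Dep.getKind() == llvm::SDep::Data && Dep.getLatency() > 0)
      Dep.setLatency(Dep.getLatency() - 1);
  }
};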